diff --git a/share/man/man9/bus_release_resource.9 b/share/man/man9/bus_release_resource.9 index 9abc9fca1e6f..5203295a7488 100644 --- a/share/man/man9/bus_release_resource.9 +++ b/share/man/man9/bus_release_resource.9 @@ -1,104 +1,88 @@ .\" -*- nroff -*- .\" .\" Copyright (c) 2000 Alexander Langer .\" .\" All rights reserved. .\" .\" This program is free software. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" -.Dd May 18, 2000 +.Dd March 13, 2024 .Dt BUS_RELEASE_RESOURCE 9 .Os .Sh NAME .Nm bus_release_resource .Nd release resources on a bus .Sh SYNOPSIS .In sys/param.h .In sys/bus.h .Pp .In machine/bus.h .In sys/rman.h .In machine/resource.h .Ft int -.Fn bus_release_resource "device_t dev" "int type" "int rid" "struct resource *r" +.Fn bus_release_resource "device_t dev" "struct resource *r" .Sh DESCRIPTION Free a resource allocated by .Xr bus_alloc_resource 9 . The resource must not be in use on release, i.e., call an appropriate function before (e.g.\& .Xr bus_teardown_intr 9 for IRQs). .Bl -item .It .Fa dev is the device that owns the resource. .It -.Fa type -is the type of resource that is released. -It must be of the same type you allocated it as before. -See -.Xr bus_alloc_resource 9 -for valid types. -.It -.Fa rid -is the resource ID of the resource. -The -.Fa rid -value must be the same as the one returned by -.Xr bus_alloc_resource 9 . -.It .Fa r is the pointer to .Va struct resource , i.e., the resource itself, returned by .Xr bus_alloc_resource 9 . .El .Sh RETURN VALUES .Er EINVAL is returned, if the device .Fa dev has no parent, .Dv 0 otherwise. The kernel will panic, if it cannot release the resource. .Sh EXAMPLES .Bd -literal /* deactivate IRQ */ bus_teardown_intr(dev, foosoftc->irqres, foosoftc->irqid); /* release IRQ resource */ - bus_release_resource(dev, SYS_RES_IRQ, foosoftc->irqid, - foosoftc->irqres); + bus_release_resource(dev, foosoftc->irqres); /* release I/O port resource */ - bus_release_resource(dev, SYS_RES_IOPORT, foosoftc->portid, - foosoftc->portres); + bus_release_resource(dev, foosoftc->portres); .Ed .Sh SEE ALSO .Xr bus_alloc_resource 9 , .Xr device 9 , .Xr driver 9 .Sh AUTHORS This manual page was written by .An Alexander Langer Aq Mt alex@big.endian.de . 
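To see the updated contract end to end, here is a minimal detach sketch written against the two-argument bus_release_resource() documented above. It is illustrative only and not part of the patch: the foo(4) driver, its softc layout, and foo_detach() are hypothetical, and the interrupt cookie is assumed to be the one returned by bus_setup_intr() at attach time.

/*
 * Hypothetical foo(4) detach path using the two-argument
 * bus_release_resource(); names are illustrative only.
 */
#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

struct foo_softc {
	struct resource	*irqres;	/* from bus_alloc_resource_any() */
	struct resource	*portres;
	void		*intrcookie;	/* from bus_setup_intr() */
};

static int
foo_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* Quiesce the interrupt before freeing its resource. */
	bus_teardown_intr(dev, sc->irqres, sc->intrcookie);

	/* The type and rid now come from the resource itself. */
	bus_release_resource(dev, sc->irqres);
	bus_release_resource(dev, sc->portres);

	return (0);
}

Because struct resource records its type and rid at allocation time (retrievable with rman_get_type() and rman_get_rid()), callers no longer need to repeat them at release time; the per-driver conversions below rely on exactly that.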
diff --git a/sys/arm/mv/mv_pci.c b/sys/arm/mv/mv_pci.c index a24a71cd4ecf..eb1af5a4e237 100644 --- a/sys/arm/mv/mv_pci.c +++ b/sys/arm/mv/mv_pci.c @@ -1,1403 +1,1399 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2015 Semihalf * All rights reserved. * * Developed by Semihalf. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Marvell integrated PCI/PCI-Express controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "pcib_if.h" #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif /* * Code and data related to fdt-based PCI configuration. * * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was * always Marvell-specific so that was deleted and the code now lives here. */ struct mv_pci_range { u_long base_pci; u_long base_parent; u_long len; }; #define FDT_RANGES_CELLS ((3 + 3 + 2) * 2) #define PCI_SPACE_LEN 0x00400000 static void mv_pci_range_dump(struct mv_pci_range *range) { #ifdef DEBUG printf("\n"); printf(" base_pci = 0x%08lx\n", range->base_pci); printf(" base_par = 0x%08lx\n", range->base_parent); printf(" len = 0x%08lx\n", range->len); #endif } static int mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { pcell_t ranges[FDT_RANGES_CELLS]; struct mv_pci_range *pci_space; pcell_t addr_cells, size_cells, par_addr_cells; pcell_t *rangesptr; pcell_t cell0, cell2; int tuple_size, tuples, i, rv, offset_cells, len; int portid, is_io_space; /* * Retrieve 'ranges' property. 
*/ if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (EINVAL); if (addr_cells != 3 || size_cells != 2) return (ERANGE); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 3) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; /* * Initialize the ranges so that we don't have to worry about * having them all defined in the FDT. In particular, it is * perfectly fine not to want I/O space on PCI buses. */ bzero(io_space, sizeof(*io_space)); bzero(mem_space, sizeof(*mem_space)); rangesptr = &ranges[0]; offset_cells = 0; for (i = 0; i < tuples; i++) { cell0 = fdt_data_get((void *)rangesptr, 1); rangesptr++; /* cell1 */ rangesptr++; cell2 = fdt_data_get((void *)rangesptr, 1); rangesptr++; portid = fdt_data_get((void *)(rangesptr+1), 1); if (cell0 & 0x02000000) { pci_space = mem_space; is_io_space = 0; } else if (cell0 & 0x01000000) { pci_space = io_space; is_io_space = 1; } else { rv = ERANGE; goto out; } if (par_addr_cells == 3) { /* * This is a PCI subnode 'ranges'. Skip cell0 and * cell1 of this entry and only use cell2. */ offset_cells = 2; rangesptr += offset_cells; } if ((par_addr_cells - offset_cells) > 2) { rv = ERANGE; goto out; } pci_space->base_parent = fdt_data_get((void *)rangesptr, par_addr_cells - offset_cells); rangesptr += par_addr_cells - offset_cells; if (size_cells > 2) { rv = ERANGE; goto out; } pci_space->len = fdt_data_get((void *)rangesptr, size_cells); rangesptr += size_cells; pci_space->base_pci = cell2; if (pci_space->len == 0) { pci_space->len = PCI_SPACE_LEN; pci_space->base_parent = fdt_immr_va + PCI_SPACE_LEN * ( 2 * portid + is_io_space); } } rv = 0; out: return (rv); } static int mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space, struct mv_pci_range *mem_space) { int err; debugf("Processing PCI node: %x\n", node); if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) { debugf("could not decode parent PCI node 'ranges'\n"); return (err); } debugf("Post fixup dump:\n"); mv_pci_range_dump(io_space); mv_pci_range_dump(mem_space); return (0); } int mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va, vm_offset_t mem_va) { struct mv_pci_range io_space, mem_space; int error; if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0) return (error); devmap->pd_va = (io_va ? io_va : io_space.base_parent); devmap->pd_pa = io_space.base_parent; devmap->pd_size = io_space.len; devmap++; devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent); devmap->pd_pa = mem_space.base_parent; devmap->pd_size = mem_space.len; return (0); } /* * Code and data related to the Marvell pcib driver. 
*/ #define PCI_CFG_ENA (1U << 31) #define PCI_CFG_BUS(bus) (((bus) & 0xff) << 16) #define PCI_CFG_DEV(dev) (((dev) & 0x1f) << 11) #define PCI_CFG_FUN(fun) (((fun) & 0x7) << 8) #define PCI_CFG_PCIE_REG(reg) ((reg) & 0xfc) #define PCI_REG_CFG_ADDR 0x0C78 #define PCI_REG_CFG_DATA 0x0C7C #define PCIE_REG_CFG_ADDR 0x18F8 #define PCIE_REG_CFG_DATA 0x18FC #define PCIE_REG_CONTROL 0x1A00 #define PCIE_CTRL_LINK1X 0x00000001 #define PCIE_REG_STATUS 0x1A04 #define PCIE_REG_IRQ_MASK 0x1910 #define PCIE_CONTROL_ROOT_CMPLX (1 << 1) #define PCIE_CONTROL_HOT_RESET (1 << 24) #define PCIE_LINK_TIMEOUT 1000000 #define PCIE_STATUS_LINK_DOWN 1 #define PCIE_STATUS_DEV_OFFS 16 /* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */ #define PCI_MIN_IO_ALLOC 4 #define PCI_MIN_MEM_ALLOC 16 #define BITS_PER_UINT32 (NBBY * sizeof(uint32_t)) struct mv_pcib_softc { device_t sc_dev; struct rman sc_mem_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[MV_PCI_MEM_SLICE_SIZE / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; int sc_win_target; int sc_mem_win_attr; struct rman sc_io_rman; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[MV_PCI_IO_SLICE_SIZE / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; int sc_io_win_attr; struct resource *sc_res; bus_space_handle_t sc_bsh; bus_space_tag_t sc_bst; int sc_rid; struct mtx sc_msi_mtx; uint32_t sc_msi_bitmap; int sc_busnr; /* Host bridge bus number */ int sc_devnr; /* Host bridge device number */ int sc_type; int sc_mode; /* Endpoint / Root Complex */ int sc_msi_supported; int sc_skip_enable_procedure; int sc_enable_find_root_slot; struct ofw_bus_iinfo sc_pci_iinfo; int ap_segment; /* PCI domain */ }; /* Local forward prototypes */ static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *); static void mv_pcib_hw_cfginit(void); static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, int); static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_init(struct mv_pcib_softc *, int, int); static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int); static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int); static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t); static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t); static int mv_pcib_mem_init(struct mv_pcib_softc *); /* Forward prototypes */ static int mv_pcib_probe(device_t); static int mv_pcib_attach(device_t); static struct rman *mv_pcib_get_rman(device_t, int, u_int); static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int mv_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); -static int mv_pcib_release_resource(device_t, device_t, int, int, - struct resource *); +static int mv_pcib_release_resource(device_t, device_t, struct resource *); static int mv_pcib_activate_resource(device_t, device_t, struct resource *); static int mv_pcib_deactivate_resource(device_t, device_t, struct resource *); static int mv_pcib_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int mv_pcib_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int mv_pcib_maxslots(device_t); static uint32_t 
mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int mv_pcib_route_interrupt(device_t, device_t, int); static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *); static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int mv_pcib_release_msi(device_t, device_t, int, int *); /* * Bus interface definitions. */ static device_method_t mv_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mv_pcib_probe), DEVMETHOD(device_attach, mv_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, mv_pcib_read_ivar), DEVMETHOD(bus_write_ivar, mv_pcib_write_ivar), DEVMETHOD(bus_get_rman, mv_pcib_get_rman), DEVMETHOD(bus_alloc_resource, mv_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, mv_pcib_adjust_resource), DEVMETHOD(bus_release_resource, mv_pcib_release_resource), DEVMETHOD(bus_activate_resource, mv_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, mv_pcib_deactivate_resource), DEVMETHOD(bus_map_resource, mv_pcib_map_resource), DEVMETHOD(bus_unmap_resource, mv_pcib_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, mv_pcib_maxslots), DEVMETHOD(pcib_read_config, mv_pcib_read_config), DEVMETHOD(pcib_write_config, mv_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mv_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD(pcib_alloc_msi, mv_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, mv_pcib_release_msi), DEVMETHOD(pcib_map_msi, mv_pcib_map_msi), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t mv_pcib_driver = { "pcib", mv_pcib_methods, sizeof(struct mv_pcib_softc), }; DRIVER_MODULE(mv_pcib, ofwbus, mv_pcib_driver, 0, 0); DRIVER_MODULE(mv_pcib, pcib_ctrl, mv_pcib_driver, 0, 0); static struct mtx pcicfg_mtx; static int mv_pcib_probe(device_t self) { phandle_t node; node = ofw_bus_get_node(self); if (!mv_fdt_is_type(node, "pci")) return (ENXIO); if (!(ofw_bus_is_compatible(self, "mrvl,pcie") || ofw_bus_is_compatible(self, "mrvl,pci") || ofw_bus_node_is_compatible( OF_parent(node), "marvell,armada-370-pcie"))) return (ENXIO); if (!ofw_bus_status_okay(self)) return (ENXIO); device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } static int mv_pcib_attach(device_t self) { struct mv_pcib_softc *sc; phandle_t node, parnode; uint32_t val, reg0; int err, bus, devfn, port_id; sc = device_get_softc(self); sc->sc_dev = self; node = ofw_bus_get_node(self); parnode = OF_parent(node); if (OF_getencprop(node, "marvell,pcie-port", &(port_id), sizeof(port_id)) <= 0) { /* If port ID does not exist in the FDT set value to 0 */ if (!OF_hasprop(node, "marvell,pcie-port")) port_id = 0; else return(ENXIO); } sc->ap_segment = port_id; if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) { sc->sc_type = MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id); sc->sc_skip_enable_procedure = 1; } else if (ofw_bus_node_is_compatible(parnode, "marvell,armada-370-pcie")) { sc->sc_type = 
MV_TYPE_PCIE; sc->sc_win_target = MV_WIN_PCIE_TARGET_ARMADA38X(port_id); sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR_ARMADA38X(port_id); sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR_ARMADA38X(port_id); sc->sc_enable_find_root_slot = 1; } else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) { sc->sc_type = MV_TYPE_PCI; sc->sc_win_target = MV_WIN_PCI_TARGET; sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR; sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR; } else return (ENXIO); /* * Retrieve our mem-mapped registers range. */ sc->sc_rid = 0; sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(self, "could not map memory\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_res); sc->sc_bsh = rman_get_bushandle(sc->sc_res); val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL); sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT : MV_MODE_ENDPOINT); /* * Get PCI interrupt info. */ if (sc->sc_mode == MV_MODE_ROOT) ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t)); /* * Configure decode windows for PCI(E) access. */ if (mv_pcib_decode_win(node, sc) != 0) return (ENXIO); mv_pcib_hw_cfginit(); /* * Enable PCIE device. */ mv_pcib_enable(sc, port_id); /* * Memory management. */ err = mv_pcib_mem_init(sc); if (err) return (err); /* * Preliminary bus enumeration to find first linked devices and set * appropriate bus number from which should start the actual enumeration */ for (bus = 0; bus < PCI_BUSMAX; bus++) { for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) { reg0 = mv_pcib_read_config(self, bus, devfn, devfn & 0x7, 0x0, 4); if (reg0 == (~0U)) continue; /* no device */ else { sc->sc_busnr = bus; /* update bus number */ break; } } } if (sc->sc_mode == MV_MODE_ROOT) { err = mv_pcib_init(sc, sc->sc_busnr, mv_pcib_maxslots(sc->sc_dev)); if (err) goto error; device_add_child(self, "pci", -1); } else { sc->sc_devnr = 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS); device_add_child(self, "pci_ep", -1); } mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); return (bus_generic_attach(self)); error: /* XXX SYS_RES_ should be released here */ rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static void mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit) { uint32_t val; int timeout; if (sc->sc_skip_enable_procedure) goto pcib_enable_root_mode; /* * Check if PCIE device is enabled. */ if ((sc->sc_skip_enable_procedure == 0) && (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit))) { write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) & ~(CPU_CONTROL_PCIE_DISABLE(unit))); timeout = PCIE_LINK_TIMEOUT; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) { DELAY(1000); timeout -= 1000; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS); } } pcib_enable_root_mode: if (sc->sc_mode == MV_MODE_ROOT) { /* * Enable PCI bridge. */ val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND); val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val); } } static int mv_pcib_mem_init(struct mv_pcib_softc *sc) { int err; /* * Memory management. 
*/ sc->sc_mem_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_mem_rman); if (err) return (err); sc->sc_io_rman.rm_type = RMAN_ARRAY; err = rman_init(&sc->sc_io_rman); if (err) { rman_fini(&sc->sc_mem_rman); return (err); } err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1); if (err) goto error; err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1); if (err) goto error; return (0); error: rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_io_rman); return (err); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } /* * The idea of this allocator is taken from ARM No-Cache memory * management code (sys/arm/arm/vm_machdep.c). */ static bus_addr_t pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask) { uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i += bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); /* * Need to init the BAR register with 0xffffffff before correct * value can be read. */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4); bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 
2 : 1; addr = pcib_alloc(sc, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4); if (width == 2) mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4, 0, 4); return (width); } static void mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func) { bus_addr_t io_base, mem_base; uint32_t io_limit, mem_limit; int secbus; io_base = sc->sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; /* Configure I/O decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); /* Configure memory decode registers */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); /* Disable memory prefetch decode */ mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SECBUS_1, 1); /* Configure buses behind the bridge */ mv_pcib_init(sc, secbus, PCI_SLOTMAX); } static int mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); error = mv_pcib_init_all_bars(sc, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_COMMAND, command, 1); /* Handle PCI-PCI bridges */ class = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_CLASS, 1); subclass = mv_pcib_read_config(sc->sc_dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; mv_pcib_init_bridge(sc, bus, slot, func); } } /* Enable all ABCD interrupts */ pcib_write_irq_mask(sc, (0xF << 24)); return (0); } static int mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 
0 : 6; bar = 0; /* Program the base address registers */ while (bar < maxbar) { i = mv_pcib_init_bar(sc, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(sc->sc_dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static struct rman * mv_pcib_get_rman(device_t dev, int type, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif default: return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags)); } if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; } if ((start < sc->sc_mem_base) || (start + count - 1 != end) || (end > sc->sc_mem_base + sc->sc_mem_size - 1)) return (NULL); return (bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int mv_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif default: return (bus_generic_adjust_resource(dev, child, r, start, end)); } } static int -mv_pcib_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +mv_pcib_release_resource(device_t dev, device_t child, struct resource *res) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_release_resource(dev, child, type, - rid, res)); + return (bus_generic_rman_release_resource(dev, child, res)); #ifdef PCI_RES_BUS case PCI_RES_BUS: - return (pci_domain_release_bus(sc->ap_segment, child, rid, res)); + return (pci_domain_release_bus(sc->ap_segment, child, res)); #endif default: - return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, - type, rid, res)); + return (bus_generic_release_resource(dev, child, res)); } } static int mv_pcib_activate_resource(device_t dev, device_t child, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(dev, child, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pci_domain_activate_bus(sc->ap_segment, child, r)); #endif default: return (bus_generic_activate_resource(dev, child, r)); } } static int mv_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { #ifdef PCI_RES_BUS struct mv_pcib_softc *sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(dev, child, r)); #ifdef PCI_RES_BUS case PCI_RES_BUS: return 
(pci_domain_deactivate_bus(sc->ap_segment, child, r)); #endif default: return (bus_generic_deactivate_resource(dev, child, r)); } } static int mv_pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = fdtbus_bs_tag; map->r_bushandle = start; map->r_size = length; return (0); } static int mv_pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (0); default: return (EINVAL); } } static int mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_busnr; return (0); case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); } return (ENOENT); } static int mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct mv_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busnr = value; return (0); } return (ENOENT); } static inline void pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask) { if (sc->sc_type != MV_TYPE_PCIE) return; bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask); } static void mv_pcib_hw_cfginit(void) { static int opened = 0; if (opened) return; mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; } static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t addr, data, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); data = ~0; switch (bytes) { case 1: data = bus_space_read_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3)); break; case 2: data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2))); break; case 4: data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh, cd)); break; } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { uint32_t addr, ca, cd; ca = (sc->sc_type != MV_TYPE_PCI) ? PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR; cd = (sc->sc_type != MV_TYPE_PCI) ? 
PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA; addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) | PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg); mtx_lock_spin(&pcicfg_mtx); bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_bst, sc->sc_bsh, cd + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_bst, sc->sc_bsh, cd + (reg & 2), htole16(data)); break; case 4: bus_space_write_4(sc->sc_bst, sc->sc_bsh, cd, htole32(data)); break; } mtx_unlock_spin(&pcicfg_mtx); } static int mv_pcib_maxslots(device_t dev) { struct mv_pcib_softc *sc = device_get_softc(dev); return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX); } static int mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func) { struct mv_pcib_softc *sc = device_get_softc(dev); uint32_t vendor, device; /* On platforms other than Armada38x, root link is always at slot 0 */ if (!sc->sc_enable_find_root_slot) return (slot == 0); vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR, PCIR_VENDOR_LENGTH); device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE, PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK; return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X); } static uint32_t mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return ~0 if link is inactive or trying to read from Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return (~0U); return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes)); } static void mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct mv_pcib_softc *sc = device_get_softc(dev); /* Return if link is inactive or trying to write to Root */ if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) & PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func)) return; mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes); } static int mv_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct mv_pcib_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[4]; int icells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (icells > 0) return (ofw_bus_map_intr(dev, iparent, icells, mintr)); /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc) { struct mv_pci_range io_space, mem_space; device_t dev; int error; dev = sc->sc_dev; if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) { device_printf(dev, "could not retrieve 'ranges' data\n"); return (error); } /* Configure CPU decoding windows */ error = decode_win_cpu_set(sc->sc_win_target, sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0); if (error < 0) { device_printf(dev, "could not set up CPU decode " "window for PCI IO\n"); return (ENXIO); } error = 
decode_win_cpu_set(sc->sc_win_target, sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len, mem_space.base_parent); if (error < 0) { device_printf(dev, "could not set up CPU decode " "windows for PCI MEM\n"); return (ENXIO); } sc->sc_io_base = io_space.base_parent; sc->sc_io_size = io_space.len; sc->sc_mem_base = mem_space.base_parent; sc->sc_mem_size = mem_space.len; return (0); } static int mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct mv_pcib_softc *sc; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); irq = irq - MSI_IRQ; /* validate parameters */ if (isclr(&sc->sc_msi_bitmap, irq)) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } mv_msi_data(irq, addr, data); debugf("%s: irq: %d addr: %jx data: %x\n", __func__, irq, *addr, *data); return (0); } static int mv_pcib_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct mv_pcib_softc *sc; u_int start = 0, i; sc = device_get_softc(dev); if (!sc->sc_msi_supported) return (ENOTSUP); if (powerof2(count) == 0 || count > MSI_IRQ_NUM) return (EINVAL); mtx_lock(&sc->sc_msi_mtx); for (start = 0; (start + count) < MSI_IRQ_NUM; start++) { for (i = start; i < start + count; i++) { if (isset(&sc->sc_msi_bitmap, i)) break; } if (i == start + count) break; } if ((start + count) == MSI_IRQ_NUM) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = start; i < start + count; i++) { setbit(&sc->sc_msi_bitmap, i); *irqs++ = MSI_IRQ + i; } debugf("%s: start: %x count: %x\n", __func__, start, count); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs) { struct mv_pcib_softc *sc; u_int i; sc = device_get_softc(dev); if(!sc->sc_msi_supported) return (ENOTSUP); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ); mtx_unlock(&sc->sc_msi_mtx); return (0); } diff --git a/sys/arm64/cavium/thunder_pcie_fdt.c b/sys/arm64/cavium/thunder_pcie_fdt.c index bf00688fb041..f173a28b637d 100644 --- a/sys/arm64/cavium/thunder_pcie_fdt.c +++ b/sys/arm64/cavium/thunder_pcie_fdt.c @@ -1,301 +1,301 @@ /* * Copyright (C) 2016 Cavium Inc. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_pcie_common.h" #include "pcib_if.h" #ifdef THUNDERX_PASS_1_1_ERRATA static struct resource * thunder_pcie_fdt_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int thunder_pcie_fdt_release_resource(device_t, device_t, - int, int, struct resource*); + struct resource*); #endif static int thunder_pcie_fdt_attach(device_t); static int thunder_pcie_fdt_probe(device_t); static int thunder_pcie_fdt_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static const struct ofw_bus_devinfo *thunder_pcie_ofw_get_devinfo(device_t, device_t); /* OFW bus interface */ struct thunder_pcie_ofw_devinfo { struct ofw_bus_devinfo di_dinfo; struct resource_list di_rl; }; static device_method_t thunder_pcie_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pcie_fdt_probe), DEVMETHOD(device_attach, thunder_pcie_fdt_attach), #ifdef THUNDERX_PASS_1_1_ERRATA DEVMETHOD(bus_alloc_resource, thunder_pcie_fdt_alloc_resource), DEVMETHOD(bus_release_resource, thunder_pcie_fdt_release_resource), #endif /* pcib interface */ DEVMETHOD(pcib_get_id, thunder_pcie_fdt_get_id), /* ofw interface */ DEVMETHOD(ofw_bus_get_devinfo, thunder_pcie_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(pcib, thunder_pcie_fdt_driver, thunder_pcie_fdt_methods, sizeof(struct generic_pcie_fdt_softc), generic_pcie_fdt_driver); DRIVER_MODULE(thunder_pcib, simplebus, thunder_pcie_fdt_driver, 0, 0); DRIVER_MODULE(thunder_pcib, ofwbus, thunder_pcie_fdt_driver, 0, 0); static const struct ofw_bus_devinfo * thunder_pcie_ofw_get_devinfo(device_t bus __unused, device_t child) { struct thunder_pcie_ofw_devinfo *di; di = device_get_ivars(child); return (&di->di_dinfo); } static void get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells) { *addr_cells = 2; /* Find address cells if present */ OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells)); *size_cells = 2; /* Find size cells if present */ OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells)); } static int thunder_pcie_ofw_bus_attach(device_t dev) { struct thunder_pcie_ofw_devinfo *di; device_t child; phandle_t parent, node; pcell_t addr_cells, size_cells; parent = ofw_bus_get_node(dev); if (parent > 0) { get_addr_size_cells(parent, &addr_cells, &size_cells); /* Iterate through all bus subordinates */ for (node = OF_child(parent); node > 0; node = OF_peer(node)) { /* Allocate and populate devinfo. */ di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) { free(di, M_DEVBUF); continue; } /* Initialize and populate resource list. 
*/ resource_list_init(&di->di_rl); ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells, &di->di_rl); ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL); /* Add newbus device for this FDT node */ child = device_add_child(dev, NULL, -1); if (child == NULL) { resource_list_free(&di->di_rl); ofw_bus_gen_destroy_devinfo(&di->di_dinfo); free(di, M_DEVBUF); continue; } device_set_ivars(child, di); } } return (0); } static int thunder_pcie_fdt_probe(device_t dev) { /* Check if we're running on Cavium ThunderX */ if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0)) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic") || ofw_bus_is_compatible(dev, "cavium,thunder-pcie") || ofw_bus_is_compatible(dev, "cavium,pci-host-thunder-ecam")) { device_set_desc(dev, "Cavium Integrated PCI/PCI-E Controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int thunder_pcie_fdt_attach(device_t dev) { struct generic_pcie_fdt_softc *sc; sc = device_get_softc(dev); thunder_pcie_identify_ecam(dev, &sc->base.ecam); sc->base.coherent = 1; /* Attach OFW bus */ if (thunder_pcie_ofw_bus_attach(dev) != 0) return (ENXIO); return (pci_host_generic_fdt_attach(dev)); } static int thunder_pcie_fdt_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { phandle_t node; int bsf; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); node = ofw_bus_get_node(pci); if (OF_hasprop(node, "msi-map")) return (generic_pcie_get_id(pci, child, type, id)); bsf = pci_get_rid(child); *id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf; return (0); } #ifdef THUNDERX_PASS_1_1_ERRATA struct resource * thunder_pcie_fdt_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct generic_pcie_fdt_softc *sc; struct thunder_pcie_ofw_devinfo *di; struct resource_list_entry *rle; int i; /* * For PCIe devices that do not have FDT nodes pass * the request to the core driver. 
*/ if ((int)ofw_bus_get_node(child) <= 0) return (thunder_pcie_alloc_resource(dev, child, type, rid, start, end, count, flags)); /* For other devices use OFW method */ sc = device_get_softc(dev); if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* Find defaults for this rid */ rle = resource_list_find(&di->di_rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (i = 0; i < MAX_RANGES_TUPLES; i++) { if (start >= sc->base.ranges[i].phys_base && end < (sc->base.ranges[i].pci_base + sc->base.ranges[i].size)) { start -= sc->base.ranges[i].phys_base; start += sc->base.ranges[i].pci_base; end -= sc->base.ranges[i].phys_base; end += sc->base.ranges[i].pci_base; break; } } if (i == MAX_RANGES_TUPLES) { device_printf(dev, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int -thunder_pcie_fdt_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *res) +thunder_pcie_fdt_release_resource(device_t dev, device_t child, + struct resource *res) { if ((int)ofw_bus_get_node(child) <= 0) - return (pci_host_generic_core_release_resource(dev, child, type, - rid, res)); + return (pci_host_generic_core_release_resource(dev, child, + res)); - return (bus_generic_release_resource(dev, child, type, rid, res)); + return (bus_generic_release_resource(dev, child, res)); } #endif diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c index f7c3c5ee1c0a..dd6ef14f1a6e 100644 --- a/sys/arm64/cavium/thunder_pcie_pem.c +++ b/sys/arm64/cavium/thunder_pcie_pem.c @@ -1,965 +1,961 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* PCIe external MAC root complex driver (PEM) for Cavium Thunder SOC */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define THUNDER_PEM_DEVICE_ID 0xa020 #define THUNDER_PEM_VENDOR_ID 0x177d /* ThunderX specific defines */ #define THUNDER_PEMn_REG_BASE(unit) (0x87e0c0000000UL | ((unit) << 24)) #define PCIERC_CFG002 0x08 #define PCIERC_CFG006 0x18 #define PCIERC_CFG032 0x80 #define PCIERC_CFG006_SEC_BUS(reg) (((reg) >> 8) & 0xFF) #define PEM_CFG_RD_REG_ALIGN(reg) ((reg) & ~0x3) #define PEM_CFG_RD_REG_DATA(val) (((val) >> 32) & 0xFFFFFFFF) #define PEM_CFG_RD 0x30 #define PEM_CFG_LINK_MASK 0x3 #define PEM_CFG_LINK_RDY 0x3 #define PEM_CFG_SLIX_TO_REG(slix) ((slix) << 4) #define SBNUM_OFFSET 0x8 #define SBNUM_MASK 0xFF #define PEM_ON_REG 0x420 #define PEM_CTL_STATUS 0x0 #define PEM_LINK_ENABLE (1 << 4) #define PEM_LINK_DLLA (1 << 29) #define PEM_LINK_LT (1 << 27) #define PEM_BUS_SHIFT (24) #define PEM_SLOT_SHIFT (19) #define PEM_FUNC_SHIFT (16) #define SLIX_S2M_REGX_ACC 0x874001000000UL #define SLIX_S2M_REGX_ACC_SIZE 0x1000 #define SLIX_S2M_REGX_ACC_SPACING 0x001000000000UL #define SLI_BASE 0x880000000000UL #define SLI_WINDOW_SPACING 0x004000000000UL #define SLI_PCI_OFFSET 0x001000000000UL #define SLI_NODE_SHIFT (44) #define SLI_NODE_MASK (3) #define SLI_GROUP_SHIFT (40) #define SLI_ID_SHIFT (24) #define SLI_ID_MASK (7) #define SLI_PEMS_PER_GROUP (3) #define SLI_GROUPS_PER_NODE (2) #define SLI_PEMS_PER_NODE (SLI_PEMS_PER_GROUP * SLI_GROUPS_PER_NODE) #define SLI_ACC_REG_CNT (256) /* * Each PEM device creates its own bus with * own address translation, so we can adjust bus addresses * as we want. 
To support 32-bit cards let's assume * PCI window assignment looks as following: * * 0x00000000 - 0x000FFFFF IO * 0x00100000 - 0xFFFFFFFF Memory */ #define PCI_IO_BASE 0x00000000UL #define PCI_IO_SIZE 0x00100000UL #define PCI_MEMORY_BASE PCI_IO_SIZE #define PCI_MEMORY_SIZE 0xFFF00000UL #define RID_PEM_SPACE 1 static int thunder_pem_activate_resource(device_t, device_t, struct resource *); static int thunder_pem_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static struct resource * thunder_pem_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int thunder_pem_alloc_msi(device_t, device_t, int, int, int *); static int thunder_pem_release_msi(device_t, device_t, int, int *); static int thunder_pem_alloc_msix(device_t, device_t, int *); static int thunder_pem_release_msix(device_t, device_t, int); static int thunder_pem_map_msi(device_t, device_t, int, uint64_t *, uint32_t *); static int thunder_pem_get_id(device_t, device_t, enum pci_id_type, uintptr_t *); static int thunder_pem_attach(device_t); static int thunder_pem_deactivate_resource(device_t, device_t, struct resource *); static int thunder_pem_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int thunder_pem_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static bus_dma_tag_t thunder_pem_get_dma_tag(device_t, device_t); static int thunder_pem_detach(device_t); static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *, int); static int thunder_pem_link_init(struct thunder_pem_softc *); static int thunder_pem_maxslots(device_t); static int thunder_pem_probe(device_t); static uint32_t thunder_pem_read_config(device_t, u_int, u_int, u_int, u_int, int); static int thunder_pem_read_ivar(device_t, device_t, int, uintptr_t *); static void thunder_pem_release_all(device_t); -static int thunder_pem_release_resource(device_t, device_t, int, int, - struct resource *); +static int thunder_pem_release_resource(device_t, device_t, struct resource *); static struct rman * thunder_pem_get_rman(device_t, int, u_int); static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *, int, int); static void thunder_pem_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int thunder_pem_write_ivar(device_t, device_t, int, uintptr_t); /* Global handlers for SLI interface */ static bus_space_handle_t sli0_s2m_regx_base = 0; static bus_space_handle_t sli1_s2m_regx_base = 0; static device_method_t thunder_pem_methods[] = { /* Device interface */ DEVMETHOD(device_probe, thunder_pem_probe), DEVMETHOD(device_attach, thunder_pem_attach), DEVMETHOD(device_detach, thunder_pem_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, thunder_pem_read_ivar), DEVMETHOD(bus_write_ivar, thunder_pem_write_ivar), DEVMETHOD(bus_get_rman, thunder_pem_get_rman), DEVMETHOD(bus_alloc_resource, thunder_pem_alloc_resource), DEVMETHOD(bus_release_resource, thunder_pem_release_resource), DEVMETHOD(bus_adjust_resource, thunder_pem_adjust_resource), DEVMETHOD(bus_activate_resource, thunder_pem_activate_resource), DEVMETHOD(bus_deactivate_resource, thunder_pem_deactivate_resource), DEVMETHOD(bus_map_resource, thunder_pem_map_resource), DEVMETHOD(bus_unmap_resource, thunder_pem_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, thunder_pem_get_dma_tag), /* pcib interface */ 
DEVMETHOD(pcib_maxslots, thunder_pem_maxslots), DEVMETHOD(pcib_read_config, thunder_pem_read_config), DEVMETHOD(pcib_write_config, thunder_pem_write_config), DEVMETHOD(pcib_alloc_msix, thunder_pem_alloc_msix), DEVMETHOD(pcib_release_msix, thunder_pem_release_msix), DEVMETHOD(pcib_alloc_msi, thunder_pem_alloc_msi), DEVMETHOD(pcib_release_msi, thunder_pem_release_msi), DEVMETHOD(pcib_map_msi, thunder_pem_map_msi), DEVMETHOD(pcib_get_id, thunder_pem_get_id), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, thunder_pem_driver, thunder_pem_methods, sizeof(struct thunder_pem_softc)); extern struct bus_space memmap_bus; DRIVER_MODULE(thunder_pem, pci, thunder_pem_driver, 0, 0); MODULE_DEPEND(thunder_pem, pci, 1, 1, 1); static int thunder_pem_maxslots(device_t dev) { #if 0 /* max slots per bus acc. to standard */ return (PCI_SLOTMAX); #else /* * ARM64TODO Workaround - otherwise an em(4) interface appears to be * present on every PCI function on the bus to which it is connected */ return (0); #endif } static int thunder_pem_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct thunder_pem_softc *sc; int secondary_bus = 0; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { secondary_bus = thunder_pem_config_reg_read(sc, PCIERC_CFG006); *result = PCIERC_CFG006_SEC_BUS(secondary_bus); return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->id; return (0); } return (ENOENT); } static int thunder_pem_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static int thunder_pem_activate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->id, child, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_activate_resource(dev, child, r)); default: return (bus_generic_activate_resource(dev, child, r)); } } static int thunder_pem_deactivate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->id, child, r)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_deactivate_resource(dev, child, r)); default: return (bus_generic_deactivate_resource(dev, child, r)); } } static int thunder_pem_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct thunder_pem_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(dev); start = range_addr_pci_to_phys(sc->ranges, start); error = bus_space_map(&memmap_bus, start, length, 0, &map->r_bushandle); if (error) return (error); map->r_bustag = &memmap_bus; map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int thunder_pem_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } static int thunder_pem_adjust_resource(device_t dev, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->id, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(dev, child, res, start, end)); default: return (bus_generic_adjust_resource(dev, child, res, start, end)); } } static bus_dma_tag_t thunder_pem_get_dma_tag(device_t dev, device_t child) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static int thunder_pem_alloc_msi(device_t pci, device_t child, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSI(device_get_parent(bus), child, count, maxcount, irqs)); } static int thunder_pem_release_msi(device_t pci, device_t child, int count, int *irqs) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSI(device_get_parent(bus), child, count, irqs)); } static int thunder_pem_alloc_msix(device_t pci, device_t child, int *irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_ALLOC_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_release_msix(device_t pci, device_t child, int irq) { device_t bus; bus = device_get_parent(pci); return (PCIB_RELEASE_MSIX(device_get_parent(bus), child, irq)); } static int thunder_pem_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, uint32_t *data) { device_t bus; bus = device_get_parent(pci); return (PCIB_MAP_MSI(device_get_parent(bus), child, irq, addr, data)); } static int thunder_pem_get_id(device_t pci, device_t child, enum pci_id_type type, uintptr_t *id) { int bsf; int pem; if (type != PCI_ID_MSI) return (pcib_get_id(pci, child, type, id)); bsf = pci_get_rid(child); /* PEM (PCIe MAC/root complex) number is equal to domain */ pem = pci_get_domain(child); /* * Set appropriate device ID (passed by the HW along with * the transaction to memory) for different root complex * numbers using hard-coded domain portion for each group. 
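/* PEM 0-2 -> domain 0x1, PEM 3-5 -> 0x3, PEM 6-8 -> 0x9, PEM 9-11 -> 0xB. */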
*/ if (pem < 3) *id = (0x1 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 6) *id = (0x3 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 9) *id = (0x9 << PCI_RID_DOMAIN_SHIFT) | bsf; else if (pem < 12) *id = (0xB << PCI_RID_DOMAIN_SHIFT) | bsf; else return (ENXIO); return (0); } static int thunder_pem_identify(device_t dev) { struct thunder_pem_softc *sc; rman_res_t start; sc = device_get_softc(dev); start = rman_get_start(sc->reg); /* Calculate PEM designations from its address */ sc->node = (start >> SLI_NODE_SHIFT) & SLI_NODE_MASK; sc->id = ((start >> SLI_ID_SHIFT) & SLI_ID_MASK) + (SLI_PEMS_PER_NODE * sc->node); sc->sli = sc->id % SLI_PEMS_PER_GROUP; sc->sli_group = (sc->id / SLI_PEMS_PER_GROUP) % SLI_GROUPS_PER_NODE; sc->sli_window_base = SLI_BASE | (((uint64_t)sc->node) << SLI_NODE_SHIFT) | ((uint64_t)sc->sli_group << SLI_GROUP_SHIFT); sc->sli_window_base += SLI_WINDOW_SPACING * sc->sli; return (0); } static void thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *sc, int sli_group, int slix) { uint64_t regval; bus_space_handle_t handle = 0; KASSERT(slix >= 0 && slix <= SLI_ACC_REG_CNT, ("Invalid SLI index")); if (sli_group == 0) handle = sli0_s2m_regx_base; else if (sli_group == 1) handle = sli1_s2m_regx_base; else device_printf(sc->dev, "SLI group is not correct\n"); if (handle) { /* Clear lower 32-bits of the SLIx register */ regval = bus_space_read_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix)); regval &= ~(0xFFFFFFFFUL); bus_space_write_8(sc->reg_bst, handle, PEM_CFG_SLIX_TO_REG(slix), regval); } } static int thunder_pem_link_init(struct thunder_pem_softc *sc) { uint64_t regval; /* check whether PEM is safe to access. */ regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_ON_REG); if ((regval & PEM_CFG_LINK_MASK) != PEM_CFG_LINK_RDY) { device_printf(sc->dev, "PEM%d is not ON\n", sc->id); return (ENXIO); } regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS); regval |= PEM_LINK_ENABLE; bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS, regval); /* Wait 1ms as per Cavium specification */ DELAY(1000); regval = thunder_pem_config_reg_read(sc, PCIERC_CFG032); if (((regval & PEM_LINK_DLLA) == 0) || ((regval & PEM_LINK_LT) != 0)) { device_printf(sc->dev, "PCIe RC: Port %d Link Timeout\n", sc->id); return (ENXIO); } return (0); } static int thunder_pem_init(struct thunder_pem_softc *sc) { int i, retval = 0; retval = thunder_pem_link_init(sc); if (retval) { device_printf(sc->dev, "%s failed\n", __func__); return retval; } /* To support 32-bit PCIe devices, set S2M_REGx_ACC[BA]=0x0 */ for (i = 0; i < SLI_ACC_REG_CNT; i++) { thunder_pem_slix_s2m_regx_acc_modify(sc, sc->sli_group, i); } return (retval); } static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *sc, int reg) { uint64_t data; /* Write to ADDR register */ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, PEM_CFG_RD_REG_ALIGN(reg)); bus_space_barrier(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, 8, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Read from DATA register */ data = PEM_CFG_RD_REG_DATA(bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD)); return (data); } static uint32_t thunder_pem_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint64_t offset; uint32_t data; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) 
| (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: data = bus_space_read_1(t, h, reg); break; case 2: data = le16toh(bus_space_read_2(t, h, reg)); break; case 4: data = le32toh(bus_space_read_4(t, h, reg)); break; default: data = ~0U; break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); return (data); } static void thunder_pem_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { uint64_t offset; struct thunder_pem_softc *sc; bus_space_tag_t t; bus_space_handle_t h; if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; sc = device_get_softc(dev); /* Calculate offset */ offset = (bus << PEM_BUS_SHIFT) | (slot << PEM_SLOT_SHIFT) | (func << PEM_FUNC_SHIFT); t = sc->reg_bst; h = sc->pem_sli_base; bus_space_map(sc->reg_bst, sc->sli_window_base + offset, PCIE_REGMAX, 0, &h); switch (bytes) { case 1: bus_space_write_1(t, h, reg, val); break; case 2: bus_space_write_2(t, h, reg, htole16(val)); break; case 4: bus_space_write_4(t, h, reg, htole32(val)); break; default: break; } bus_space_unmap(sc->reg_bst, h, PCIE_REGMAX); } static struct resource * thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct thunder_pem_softc *sc = device_get_softc(dev); struct resource *res; device_t parent_dev; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->id, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: /* Find parent device. On ThunderX we know an exact path. */ parent_dev = device_get_parent(device_get_parent(dev)); return (BUS_ALLOC_RESOURCE(parent_dev, dev, type, rid, start, end, count, flags)); } if (!RMAN_IS_DEFAULT_RANGE(start, end)) { /* * We might get PHYS addresses here inherited from EFI. * Convert to PCI if necessary. */ if (range_addr_is_phys(sc->ranges, start, count)) { start = range_addr_phys_to_pci(sc->ranges, start); end = start + count - 1; } } if (bootverbose) { device_printf(dev, "thunder_pem_alloc_resource: start=%#lx, end=%#lx, count=%#lx\n", start, end, count); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL && bootverbose) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016lx, end=%016lx, count=%016lx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int -thunder_pem_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +thunder_pem_release_resource(device_t dev, device_t child, struct resource *res) { device_t parent_dev; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct thunder_pem_softc *sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_release_bus(sc->id, child, rid, res)); + return (pci_domain_release_bus(sc->id, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_release_resource(dev, child, type, - rid, res)); + return (bus_generic_rman_release_resource(dev, child, res)); default: /* Find parent device. On ThunderX we know an exact path. 
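* The grandparent reached by the two device_get_parent() calls below * is the bus that owns resource types the PEM does not manage itself.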
*/ parent_dev = device_get_parent(device_get_parent(dev)); - return (BUS_RELEASE_RESOURCE(parent_dev, child, - type, rid, res)); + return (BUS_RELEASE_RESOURCE(parent_dev, child, res)); } } static struct rman * thunder_pem_get_rman(device_t bus, int type, u_int flags) { struct thunder_pem_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: return (&sc->mem_rman); default: break; } return (NULL); } static int thunder_pem_probe(device_t dev) { uint16_t pci_vendor_id; uint16_t pci_device_id; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); if ((pci_vendor_id == THUNDER_PEM_VENDOR_ID) && (pci_device_id == THUNDER_PEM_DEVICE_ID)) { device_set_desc_copy(dev, THUNDER_PEM_DESC); return (0); } return (ENXIO); } static int thunder_pem_attach(device_t dev) { struct resource_map_request req; struct resource_map map; devclass_t pci_class; device_t parent; struct thunder_pem_softc *sc; int error; int rid; int tuple; uint64_t base, size; struct rman *rman; sc = device_get_softc(dev); sc->dev = dev; /* Allocate memory for resource */ pci_class = devclass_find("pci"); parent = device_get_parent(dev); if (device_get_devclass(parent) == pci_class) rid = PCIR_BAR(0); else rid = RID_PEM_SPACE; sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED); if (sc->reg == NULL) { device_printf(dev, "Failed to allocate resource\n"); return (ENXIO); } resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->reg, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->reg, &map); sc->reg_bst = rman_get_bustag(sc->reg); sc->reg_bsh = rman_get_bushandle(sc->reg); /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* Map SLI, do it only once */ if (!sli0_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC, SLIX_S2M_REGX_ACC_SIZE, 0, &sli0_s2m_regx_base); } if (!sli1_s2m_regx_base) { bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC + SLIX_S2M_REGX_ACC_SPACING, SLIX_S2M_REGX_ACC_SIZE, 0, &sli1_s2m_regx_base); } if ((sli0_s2m_regx_base == 0) || (sli1_s2m_regx_base == 0)) { device_printf(dev, "bus_space_map failed to map slix_s2m_regx_base\n"); goto fail; } /* Identify PEM */ if (thunder_pem_identify(dev) != 0) goto fail; /* Initialize rman and allocate regions */ sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "PEM PCIe Memory"; error = rman_init(&sc->mem_rman); if (error != 0) { device_printf(dev, "memory rman_init() failed. error = %d\n", error); goto fail; } sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "PEM PCIe IO"; error = rman_init(&sc->io_rman); if (error != 0) { device_printf(dev, "IO rman_init() failed. error = %d\n", error); goto fail_mem; } /* * We ignore the values that may have been provided in FDT * and configure ranges according to the below formula * for all types of devices. This is because some DTBs provided * by EFI do not have proper ranges property or don't have them * at all. 
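* Instead, a fixed memory window (PCI_MEMORY_BASE/PCI_MEMORY_SIZE) and * a fixed I/O window (PCI_IO_BASE/PCI_IO_SIZE) are placed at constant * offsets from the SLI window base computed in thunder_pem_identify().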
*/ /* Fill memory window */ sc->ranges[0].pci_base = PCI_MEMORY_BASE; sc->ranges[0].size = PCI_MEMORY_SIZE; sc->ranges[0].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[0].pci_base; sc->ranges[0].flags = SYS_RES_MEMORY; /* Fill IO window */ sc->ranges[1].pci_base = PCI_IO_BASE; sc->ranges[1].size = PCI_IO_SIZE; sc->ranges[1].phys_base = sc->sli_window_base + SLI_PCI_OFFSET + sc->ranges[1].pci_base; sc->ranges[1].flags = SYS_RES_IOPORT; for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; if (size == 0) continue; /* empty range element */ rman = thunder_pem_get_rman(dev, sc->ranges[tuple].flags, 0); if (rman != NULL) error = rman_manage_region(rman, base, base + size - 1); else error = EINVAL; if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); rman_fini(&sc->mem_rman); return (error); } if (bootverbose) { device_printf(dev, "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Flags:0x%jx\n", sc->ranges[tuple].pci_base, sc->ranges[tuple].phys_base, sc->ranges[tuple].size, sc->ranges[tuple].flags); } } if (thunder_pem_init(sc)) { device_printf(dev, "Failure during PEM init\n"); goto fail_io; } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail_io: rman_fini(&sc->io_rman); fail_mem: rman_fini(&sc->mem_rman); fail: bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); return (ENXIO); } static void thunder_pem_release_all(device_t dev) { struct thunder_pem_softc *sc; sc = device_get_softc(dev); rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); if (sc->reg != NULL) bus_free_resource(dev, SYS_RES_MEMORY, sc->reg); } static int thunder_pem_detach(device_t dev) { thunder_pem_release_all(dev); return (0); } diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index df2017e69a86..ad1af9373fb7 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4650 +1,4649 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. */ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct 
acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. 
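* acpi_rman_io spans the 16-bit port range (0-0xffff); regions are fed * into both pools by acpi_sysres_alloc() as sysresource devices attach.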
*/ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. 
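* acpi_table_quirks() is only consulted when the value was not already * overridden through the debug.acpi.quirks tunable above.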
*/ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. */ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. 
*/ acpi_reset_interfaces(dev); /* Load ACPI namespace. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT. * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Set up our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state.
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable e.g. via userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states.
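* Sleep requests stay disabled until the acpi_sleep_timer callout armed * below fires and calls acpi_sleep_enable().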
*/ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; bus_topo_assert(); error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { bus_topo_assert(); acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { bus_topo_assert(); /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most). */ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. 
* * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for the ACPI locator may be redundant. However, this routine * is suitable both for busses whose only locator is ACPI and as a building * block for busses that have multiple locators to cope with. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long table * 98. This implements the relevant bits from that * table. Newer versions do not appear to require * anything new.
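* Each recognized PNP HID below is rendered as the corresponding UEFI * text node, e.g. PNP0501 becomes Serial(uid) and PNP0604 Floppy(uid).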
The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. */ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion. */ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. 
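* As implemented, a port or maddr hint decides the match on its own, * and the irq/drq hints are only consulted when neither of those was * given.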
*/ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. */ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return false; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return false; } /* * If either the I/O address or the memory address matched, then * assume this device matches and that any mismatch in other resources * will be resolved by silently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return false; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return false; } return matches; } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned.
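* acpi_get_domain() below maps -2 to a lookup in the parent bus and -1 * to ENOENT.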
*/ static int acpi_parse_pxm(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_parse_pxm(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int acpi_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_parse_pxm(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } static struct rman * acpi_get_rman(device_t bus, int type, u_int flags) { /* Only memory and IO resources are managed. */ switch (type) { case SYS_RES_IOPORT: return (&acpi_rman_io); case SYS_RES_MEMORY: return (&acpi_rman_mem); default: return (NULL); } } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. 
PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching PCI device_t enumerated via * PCI bus scanning; such device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved.
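* They are merely entered in the child's resource list so that the * resource_list_alloc() call below can find them.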
*/ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. */ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != rman_get_type(r) || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int -acpi_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +acpi_release_resource(device_t bus, device_t child, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. 
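* Anything else is handed back through the child's resource list via * bus_generic_rl_release_resource().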
*/ if (acpi_is_resource_managed(bus, r)) - return (bus_generic_rman_release_resource(bus, child, type, rid, r)); + return (bus_generic_rman_release_resource(bus, child, r)); - return (bus_generic_rl_release_resource(bus, child, type, rid, r)); + return (bus_generic_rl_release_resource(bus, child, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int acpi_activate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_activate_resource(bus, child, r)); return (bus_generic_activate_resource(bus, child, r)); } static int acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_deactivate_resource(bus, child, r)); return (bus_generic_deactivate_resource(bus, child, r)); } static int acpi_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, r, argsp, map)); /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_generic_map_resource(bus, child, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { if (acpi_is_resource_managed(bus, r)) { r = acpi_managed_resource(bus, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(bus, child, r, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. */ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. 
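* For example (illustrative id), the EISA-style string "PNP0303" (a PS/2 * keyboard controller) is packed into a 32-bit value by PNP_EISAID(), and * those packed values are what the lookups below return for matching.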
*/ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? 
BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if (hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, 
obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. 
*/ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. 
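* For example (hypothetical values), an MCFG allocation of { Address = * 0xE0000000, PciSegment = 0, StartBusNumber = 0, EndBusNumber = 255 } * would expose ECAM configuration space for buses 0-255 at physical * address 0xE0000000, 1 MB per bus.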
*/ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. 
PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, -1); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * The RTC device should be enabled for CMOS register space * unless the FADT indicates it is not present. * (This is checked in the RTC probe routine.) */ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources.
*/ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested it. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 for FreeBSD because the * firmware only returns a non-zero _STA for the OS string * "Windows 2015". Force them to always be treated as present. * Beta firmware versions were worse: they returned 0 unconditionally.
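* (For reference: _STA returns a bit field where bit 0 = present, bit 1 = * enabled, bit 2 = shown in UI, and bit 3 = functioning, so a fully * working device reports 0x0F; the ACPI_DEVICE_PRESENT() check below is * built on these bits.)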
*/ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle * returns ACPI_MATCHHID_HID if _HID match * ACPI_MATCHHID_CID if _CID match and not _HID match. * ACPI_MATCHHID_NOMATCH=0 if no match. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, ie. that of (parent) * or one if its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer. 
*/ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator.
* This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
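* A careful caller might therefore do (illustrative sketch only): * ACPI_BUFFER buf; if (ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, rev, func, * NULL, &buf))) { ACPI_OBJECT *obj = (ACPI_OBJECT *)buf.Pointer; ... * check obj->Type before using the result ... AcpiOsFree(buf.Pointer); }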
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
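* For example, a listener that is ready calls acpi_AckSleepState(clone, 0); * passing a non-zero errno such as EBUSY instead cancels the pending * suspend for all listeners.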
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Be sure to hold Giant across DEVICE_SUSPEND/RESUME */ bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a better two-pass approach with a 'veto' pass * followed by a "real thing" pass would be better, but the current * bus interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be * restored by the ACPI platform (BIOS, firmware) to its pre-sleep * state. Unfortunately some BIOSes fail to do that and that leads to * unexpected and serious consequences during wake up, like a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent misinterpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing of the power button status after wakeup is mandated * by the ACPI specification in section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* call acpi_wakeup_machdep() again with interrupts enabled */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process.
This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. 
*/ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. 
The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. 
We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
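 *
 * For illustration, a hedged userland sketch (fd is an assumed
 * descriptor open on /dev/acpi with write permission):
 *
 *	int state = ACPI_STATE_S3;
 *
 *	ioctl(fd, ACPIIO_REQSLPSTATE, &state);
 *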
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
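 *
 * For example (a hedged sketch of typical loader.conf settings; the
 * valid bit names are those listed in the tables below):
 *
 *	debug.acpi.layer="ACPI_NAMESPACE ACPI_PARSER"
 *	debug.acpi.level="ACPI_LV_INFO !ACPI_LV_INIT"
 *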
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c index fdf8e84d14e0..4c3d62a66d58 100644 --- a/sys/dev/acpica/acpi_pcib_acpi.c +++ b/sys/dev/acpica/acpi_pcib_acpi.c @@ -1,829 +1,828 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_ACPI") struct acpi_hpcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; bus_dma_tag_t ap_dma_tag; int ap_flags; uint32_t ap_osc_ctl; int ap_segment; /* PCI domain */ int ap_bus; /* bios-assigned bus number */ int ap_addr; /* device/func of PCI-Host bridge */ ACPI_BUFFER ap_prt; /* interrupt routing table */ #ifdef NEW_PCIB struct pcib_host_resources ap_host_res; #endif }; static int acpi_pcib_acpi_probe(device_t bus); static int acpi_pcib_acpi_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin); static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); #ifdef NEW_PCIB static int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); #ifdef PCI_RES_BUS static int acpi_pcib_acpi_release_resource(device_t dev, - device_t child, int type, int rid, - struct resource *r); + device_t child, struct resource *r); static int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, struct resource *r); static int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, struct resource *r); #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature); static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pcib_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_acpi_probe), DEVMETHOD(device_attach, acpi_pcib_acpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, 
acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_release_resource, acpi_pcib_acpi_release_resource), DEVMETHOD(bus_activate_resource, acpi_pcib_acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_pcib_acpi_deactivate_resource), #else DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt), DEVMETHOD(pcib_alloc_msi, acpi_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, acpi_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, acpi_pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep), DEVMETHOD(pcib_request_feature, acpi_pcib_request_feature), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, acpi_pcib_acpi_driver, acpi_pcib_acpi_methods, sizeof(struct acpi_hpcib_softc)); DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_acpi_probe(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; int root; if (acpi_disabled("pcib") || (h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ENXIO); root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0; AcpiOsFree(devinfo); if (!root || pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI Host-PCI bridge"); return (0); } #ifdef NEW_PCIB static ACPI_STATUS acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context) { struct acpi_hpcib_softc *sc; UINT64 length, min, max; u_int flags; int error, type; sc = context; switch (res->Type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_DEPENDENT: panic("host bridge has dependent resources"); case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: if (res->Data.Address.ProducerConsumer != ACPI_PRODUCER) break; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS16: min = res->Data.Address16.Address.Minimum; max = res->Data.Address16.Address.Maximum; length = res->Data.Address16.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS32: min = res->Data.Address32.Address.Minimum; max = res->Data.Address32.Address.Maximum; length = res->Data.Address32.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS64: min = res->Data.Address64.Address.Minimum; max = res->Data.Address64.Address.Maximum; length = res->Data.Address64.Address.AddressLength; break; default: KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64, ("should never happen")); min = res->Data.ExtAddress64.Address.Minimum; max = res->Data.ExtAddress64.Address.Maximum; length = res->Data.ExtAddress64.Address.AddressLength; break; } if
(length == 0) break; if (min + length - 1 != max && (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED || res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED)) break; flags = 0; switch (res->Data.Address.ResourceType) { case ACPI_MEMORY_RANGE: type = SYS_RES_MEMORY; if (res->Type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) { if (res->Data.Address.Info.Mem.Caching == ACPI_PREFETCHABLE_MEMORY) flags |= RF_PREFETCHABLE; } else { /* * XXX: Parse prefetch flag out of * TypeSpecific. */ } break; case ACPI_IO_RANGE: type = SYS_RES_IOPORT; break; #ifdef PCI_RES_BUS case ACPI_BUS_NUMBER_RANGE: type = PCI_RES_BUS; break; #endif default: return (AE_OK); } if (min + length - 1 != max) device_printf(sc->ap_dev, "Length mismatch for %d range: %jx vs %jx\n", type, (uintmax_t)(max - min + 1), (uintmax_t)length); #ifdef __i386__ if (min > ULONG_MAX) { device_printf(sc->ap_dev, "Ignoring %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); break; } if (max > ULONG_MAX) { device_printf(sc->ap_dev, "Truncating end of %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); max = ULONG_MAX; } #endif error = pcib_host_res_decodes(&sc->ap_host_res, type, min, max, flags); if (error) panic("Failed to manage %d range (%#jx-%#jx): %d", type, (uintmax_t)min, (uintmax_t)max, error); break; default: break; } return (AE_OK); } #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static bool get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp, rman_res_t *endp) { struct resource_list_entry *rle; rle = resource_list_find(&sc->ap_host_res.hr_rl, PCI_RES_BUS, 0); if (rle == NULL) return (false); *startp = rle->start; *endp = rle->end; return (true); } #endif static int acpi_pcib_osc(struct acpi_hpcib_softc *sc, uint32_t osc_ctl) { ACPI_STATUS status; uint32_t cap_set[3]; static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = { 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40, 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66 }; /* * Don't invoke _OSC if a control is already granted. * However, always invoke _OSC during attach when 0 is passed. */ if (osc_ctl != 0 && (sc->ap_osc_ctl & osc_ctl) == osc_ctl) return (0); /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */ cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF | PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI; /* Active State Power Management, Clock Power Management Capability */ if (pci_enable_aspm) cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM | PCIM_OSC_SUPPORT_CPMC; /* Control Field */ cap_set[PCI_OSC_CTL] = sc->ap_osc_ctl | osc_ctl; status = acpi_EvaluateOSC(sc->ap_handle, pci_host_bridge_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { sc->ap_osc_ctl |= osc_ctl; return (0); } device_printf(sc->ap_dev, "_OSC failed: %s\n", AcpiFormatException(status)); return (EIO); } /* * _OSC may return an error in the status word, but will * update the control mask always. _OSC should not revoke * previously-granted controls. 
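 *
 * (Hedged usage sketch: a caller such as acpi_pcib_request_feature()
 * below requests a single control bit, e.g.
 *
 *	error = acpi_pcib_osc(sc, PCIM_OSC_CTL_PCIE_HP);
 *
 * while the osc_ctl of 0 passed at attach time re-reports our support
 * fields without requesting any new controls.)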
*/ if ((cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) != sc->ap_osc_ctl) device_printf(sc->ap_dev, "_OSC revoked %#x\n", (cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) ^ sc->ap_osc_ctl); sc->ap_osc_ctl = cap_set[PCI_OSC_CTL]; if ((sc->ap_osc_ctl & osc_ctl) != osc_ctl) return (EIO); return (0); } static int acpi_pcib_acpi_attach(device_t dev) { struct acpi_hpcib_softc *sc; ACPI_STATUS status; static int bus0_seen = 0; u_int slot, func, busok; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *bus_res; rman_res_t end, start; int rid; #endif int error, domain; uint8_t busno; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. */ if (!acpi_DeviceIsPresent(dev)) return (ENXIO); acpi_pcib_osc(sc, 0); /* * Get our segment number by evaluating _SEG. * It's OK for this to not exist. */ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } /* If it's not found, assume 0. */ sc->ap_segment = 0; } /* * Get the address (device and function) of the associated * PCI-Host bridge device from _ADR. Assume we don't have one if * it doesn't exist. */ status = acpi_GetInteger(sc->ap_handle, "_ADR", &sc->ap_addr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) device_printf(dev, "could not evaluate _ADR - %s\n", AcpiFormatException(status)); sc->ap_addr = -1; } #ifdef NEW_PCIB /* * Determine which address ranges this bridge decodes and setup * resource managers for those ranges. */ if (pcib_host_res_init(sc->ap_dev, &sc->ap_host_res) != 0) panic("failed to init hostb resources"); if (!acpi_disabled("hostres")) { status = AcpiWalkResources(sc->ap_handle, "_CRS", acpi_pcib_producer_handler, sc); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) device_printf(sc->ap_dev, "failed to parse resources: %s\n", AcpiFormatException(status)); } #endif /* * Get our base bus number by evaluating _BBN. * If this doesn't work, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? * XXX It seems many BIOS's with multiple Host-PCI bridges do not set * _BBN correctly. They set _BBN to zero for all bridges. Thus, * if _BBN is zero and PCI bus 0 already exists, we try to read our * bus number from the configuration registers at address _ADR. * We only do this for domain/segment 0 in the hopes that this is * only needed for old single-domain machines. */ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return (ENXIO); } else { /* If it's not found, assume 0. */ sc->ap_bus = 0; } } /* * If this is segment 0, the bus is zero, and PCI bus 0 already * exists, read the bus number via PCI config space. */ busok = 1; if (sc->ap_segment == 0 && sc->ap_bus == 0 && bus0_seen) { busok = 0; if (sc->ap_addr != -1) { /* XXX: We assume bus 0. 
*/ slot = ACPI_ADR_PCI_SLOT(sc->ap_addr); func = ACPI_ADR_PCI_FUNC(sc->ap_addr); if (bootverbose) device_printf(dev, "reading config registers from 0:%d:%d\n", slot, func); if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0) device_printf(dev, "couldn't read bus number from cfg space\n"); else { sc->ap_bus = busno; busok = 1; } } } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * If nothing else worked, hope that ACPI at least lays out the * Host-PCI bridges in order and that as a result the next free * bus number is our bus number. */ if (busok == 0) { /* * If we have a region of bus numbers, use the first * number for our bus. */ if (get_decoded_bus_range(sc, &start, &end)) sc->ap_bus = start; else { rid = 0; bus_res = pci_domain_alloc_bus(sc->ap_segment, dev, &rid, 0, PCI_BUSMAX, 1, 0); if (bus_res == NULL) { device_printf(dev, "could not allocate bus number\n"); pcib_host_res_free(dev, &sc->ap_host_res); return (ENXIO); } sc->ap_bus = rman_get_start(bus_res); - pci_domain_release_bus(sc->ap_segment, dev, rid, bus_res); + pci_domain_release_bus(sc->ap_segment, dev, bus_res); } } else { /* * If there is a decoded bus range, assume the bus number is * the first value in the range. Warn if _BBN doesn't match. */ if (get_decoded_bus_range(sc, &start, &end)) { if (sc->ap_bus != start) { device_printf(dev, "WARNING: BIOS configured bus number (%d) is " "not within decoded bus number range " "(%ju - %ju).\n", sc->ap_bus, (uintmax_t)start, (uintmax_t)end); device_printf(dev, "Using range start (%ju) as bus number.\n", (uintmax_t)start); sc->ap_bus = start; } } } #else /* * If nothing else worked, hope that ACPI at least lays out the * host-PCI bridges in order and that as a result our unit number * is actually our bus number. There are several reasons this * might not be true. */ if (busok == 0) { sc->ap_bus = device_get_unit(dev); device_printf(dev, "trying bus number %d\n", sc->ap_bus); } #endif /* If this is bus 0 on segment 0, note that it has been seen already. */ if (sc->ap_segment == 0 && sc->ap_bus == 0) bus0_seen = 1; acpi_pcib_fetch_prt(dev, &sc->ap_prt); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag); if (error != 0) goto errout; error = bus_get_domain(dev, &domain); if (error == 0) error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain); /* Don't fail to attach if the domain can't be queried or set. */ error = 0; bus_generic_probe(dev); if (device_add_child(dev, "pci", -1) == NULL) { bus_dma_tag_destroy(sc->ap_dma_tag); sc->ap_dma_tag = NULL; error = ENXIO; goto errout; } return (bus_generic_attach(dev)); errout: device_printf(device_get_parent(dev), "couldn't attach pci bus\n"); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pcib_host_res_free(dev, &sc->ap_host_res); #endif return (error); } /* * Support for standard PCI bridge ivars. 
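 * For illustration (hedged; pcibus is an assumed child device): the
 * child pci(4) bus reads these through the generated accessors, so a
 * call such as
 *
 *	bus = pcib_get_bus(pcibus);
 *
 * ends up in acpi_pcib_read_ivar() below with PCIB_IVAR_BUS.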
*/ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->ap_segment; return (0); case PCIB_IVAR_BUS: *result = sc->ap_bus; return (0); case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)sc->ap_flags; return (0); } return (ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->ap_bus = value; return (0); case ACPI_IVAR_HANDLE: sc->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: sc->ap_flags = (int)value; return (0); } return (ENOENT); } static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); return (pci_cfgregread(sc->ap_segment, bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); pci_cfgregwrite(sc->ap_segment, bus, slot, func, reg, data, bytes); } static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_hpcib_softc *sc = device_get_softc(pcib); return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { struct acpi_hpcib_softc *sc; device_t bus, hostb; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); sc = device_get_softc(pcib); if (sc->ap_addr == -1) return (0); /* XXX: Assumes all bridges are on bus 0. */ hostb = pci_find_dbsf(sc->ap_segment, 0, ACPI_ADR_PCI_SLOT(sc->ap_addr), ACPI_ADR_PCI_FUNC(sc->ap_addr)); if (hostb != NULL) pci_ht_map_msi(hostb, *addr); return (0); } struct resource * acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef NEW_PCIB struct acpi_hpcib_softc *sc; struct resource *res; #endif #if defined(__i386__) || defined(__amd64__) start = hostb_alloc_start(type, start, end, count); #endif #ifdef NEW_PCIB sc = device_get_softc(dev); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); #endif res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end, count, flags); /* * XXX: If this is a request for a specific range, assume it is * correct and pass it up to the parent. What we probably want to * do long-term is explicitly trust any firmware-configured * resources during the initial bus scan on boot and then disable * this after that. 
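 *
 * (Hedged example: a wildcard request arrives with start == 0 and
 * end == ~0, so the start + count - 1 == end test below fails and the
 * request is not blindly passed up; a request for one exact
 * firmware-assigned window, say 0xfebf0000-0xfebf0fff with count
 * 0x1000, satisfies the test and falls through to the parent.)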
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); return (res); #else return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); #endif } #ifdef NEW_PCIB int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); #ifdef PCI_RES_BUS if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); #endif return (pcib_host_res_adjust(&sc->ap_host_res, child, r, start, end)); } #ifdef PCI_RES_BUS int -acpi_pcib_acpi_release_resource(device_t dev, device_t child, int type, int rid, +acpi_pcib_acpi_release_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); - if (type == PCI_RES_BUS) - return (pci_domain_release_bus(sc->ap_segment, child, rid, r)); - return (bus_generic_release_resource(dev, child, type, rid, r)); + if (rman_get_type(r) == PCI_RES_BUS) + return (pci_domain_release_bus(sc->ap_segment, child, r)); + return (bus_generic_release_resource(dev, child, r)); } int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_activate_bus(sc->ap_segment, child, r)); return (bus_generic_activate_resource(dev, child, r)); } int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->ap_segment, child, r)); return (bus_generic_deactivate_resource(dev, child, r)); } #endif #endif static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature) { uint32_t osc_ctl; struct acpi_hpcib_softc *sc; sc = device_get_softc(pcib); switch (feature) { case PCI_FEATURE_HP: osc_ctl = PCIM_OSC_CTL_PCIE_HP; break; case PCI_FEATURE_AER: osc_ctl = PCIM_OSC_CTL_PCIE_AER; break; default: return (EINVAL); } return (acpi_pcib_osc(sc, osc_ctl)); } static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child) { struct acpi_hpcib_softc *sc; sc = device_get_softc(bus); return (sc->ap_dma_tag); } diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c index a83189ca7bc8..df977889a9c0 100644 --- a/sys/dev/agp/agp_i810.c +++ b/sys/dev/agp/agp_i810.c @@ -1,2372 +1,2372 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Doug Rabson * Copyright (c) 2000 Ruslan Ermilov * Copyright (c) 2011 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Fixes for 830/845G support: David Dawes * 852GM/855GM/865G support added by David Dawes * * This is generic Intel GTT handling code, morphed from the AGP * bridge code. */ #include #if 0 #define KTR_AGP_I810 KTR_DEV #else #define KTR_AGP_I810 0 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DECLARE(M_AGP); struct agp_i810_match; static int agp_i810_check_active(device_t bridge_dev); static int agp_i830_check_active(device_t bridge_dev); static int agp_i915_check_active(device_t bridge_dev); static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match); static void agp_i810_dump_regs(device_t dev); static void agp_i830_dump_regs(device_t dev); static void agp_i855_dump_regs(device_t dev); static void agp_i915_dump_regs(device_t dev); static void agp_i965_dump_regs(device_t dev); static int agp_i810_get_stolen_size(device_t dev); static int agp_i830_get_stolen_size(device_t dev); static int agp_i915_get_stolen_size(device_t dev); static int agp_i810_get_gtt_mappable_entries(device_t dev); static int agp_i830_get_gtt_mappable_entries(device_t dev); static int agp_i915_get_gtt_mappable_entries(device_t dev); static int agp_i810_get_gtt_total_entries(device_t dev); static int agp_i965_get_gtt_total_entries(device_t dev); static int agp_gen5_get_gtt_total_entries(device_t dev); static int agp_i810_install_gatt(device_t dev); static int agp_i830_install_gatt(device_t dev); static int agp_i965_install_gatt(device_t dev); static int agp_g4x_install_gatt(device_t dev); static void agp_i810_deinstall_gatt(device_t dev); static void agp_i830_deinstall_gatt(device_t dev); static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags); static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte); static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte); static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index); static u_int32_t 
agp_i915_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index); static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index); static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index); static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index); static int agp_i810_set_aperture(device_t dev, u_int32_t aperture); static int agp_i830_set_aperture(device_t dev, u_int32_t aperture); static int agp_i915_set_aperture(device_t dev, u_int32_t aperture); static int agp_i810_chipset_flush_setup(device_t dev); static int agp_i915_chipset_flush_setup(device_t dev); static int agp_i965_chipset_flush_setup(device_t dev); static void agp_i810_chipset_flush_teardown(device_t dev); static void agp_i915_chipset_flush_teardown(device_t dev); static void agp_i965_chipset_flush_teardown(device_t dev); static void agp_i810_chipset_flush(device_t dev); static void agp_i830_chipset_flush(device_t dev); static void agp_i915_chipset_flush(device_t dev); enum { CHIP_I810, /* i810/i815 */ CHIP_I830, /* 830M/845G */ CHIP_I855, /* 852GM/855GM/865G */ CHIP_I915, /* 915G/915GM */ CHIP_I965, /* G965 */ CHIP_G33, /* G33/Q33/Q35 */ CHIP_IGD, /* Pineview */ CHIP_G4X, /* G45/Q45 */ }; /* The i810 through i855 have the registers at BAR 1, and the GATT gets * allocated by us. The i915 has registers in BAR 0 and the GATT is at the * start of the stolen memory, and should only be accessed by the OS through * BAR 3. The G965 has registers and GATT in the same BAR (0) -- first 512KB * is registers, second 512KB is GATT. */ static struct resource_spec agp_i810_res_spec[] = { { SYS_RES_MEMORY, AGP_I810_MMADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i915_res_spec[] = { { SYS_RES_MEMORY, AGP_I915_MMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_I915_GTTADR, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec agp_i965_res_spec[] = { { SYS_RES_MEMORY, AGP_I965_GTTMMADR, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_MEMORY, AGP_I965_APBASE, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; struct agp_i810_softc { struct agp_softc agp; u_int32_t initial_aperture; /* aperture size at startup */ struct agp_gatt *gatt; u_int32_t dcache_size; /* i810 only */ u_int32_t stolen; /* number of i830/845 gtt entries for stolen memory */ u_int stolen_size; /* BIOS-reserved graphics memory */ u_int gtt_total_entries; /* Total number of gtt ptes */ u_int gtt_mappable_entries; /* Number of gtt ptes mappable by CPU */ device_t bdev; /* bridge device */ void *argb_cursor; /* contigmalloc area for ARGB cursor */ struct resource *sc_res[2]; const struct agp_i810_match *match; int sc_flush_page_rid; struct resource *sc_flush_page_res; void *sc_flush_page_vaddr; int sc_bios_allocated_flush_page; }; static device_t intel_agp; struct agp_i810_driver { int chiptype; int gen; int busdma_addr_mask_sz; struct resource_spec *res_spec; int (*check_active)(device_t); void (*set_desc)(device_t, const struct agp_i810_match *); void (*dump_regs)(device_t); int (*get_stolen_size)(device_t); int (*get_gtt_total_entries)(device_t); int (*get_gtt_mappable_entries)(device_t); int (*install_gatt)(device_t); void (*deinstall_gatt)(device_t); void (*write_gtt)(device_t, u_int, uint32_t); void (*install_gtt_pte)(device_t, u_int, vm_offset_t, int); u_int32_t (*read_gtt_pte)(device_t, u_int); vm_paddr_t (*read_gtt_pte_paddr)(device_t , u_int); int (*set_aperture)(device_t, u_int32_t); int (*chipset_flush_setup)(device_t); void 
(*chipset_flush_teardown)(device_t); void (*chipset_flush)(device_t); }; static struct { struct intel_gtt base; } intel_private; static const struct agp_i810_driver agp_i810_i810_driver = { .chiptype = CHIP_I810, .gen = 1, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i810_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i810_chipset_flush, }; static const struct agp_i810_driver agp_i810_i815_driver = { .chiptype = CHIP_I810, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i810_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i810_dump_regs, .get_stolen_size = agp_i810_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i810_install_gatt, .deinstall_gatt = agp_i810_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i810_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i810_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i830_driver = { .chiptype = CHIP_I830, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i830_dump_regs, .get_stolen_size = agp_i830_get_stolen_size, .get_gtt_mappable_entries = agp_i830_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i855_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_82852_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i830_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, 
.chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i865_driver = { .chiptype = CHIP_I855, .gen = 2, .busdma_addr_mask_sz = 32, .res_spec = agp_i810_res_spec, .check_active = agp_i830_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i855_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i810_write_gtt, .install_gtt_pte = agp_i830_install_gtt_pte, .read_gtt_pte = agp_i810_read_gtt_pte, .read_gtt_pte_paddr = agp_i810_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i810_chipset_flush_setup, .chipset_flush_teardown = agp_i810_chipset_flush_teardown, .chipset_flush = agp_i830_chipset_flush, }; static const struct agp_i810_driver agp_i810_i915_driver = { .chiptype = CHIP_I915, .gen = 3, .busdma_addr_mask_sz = 32, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i810_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i915_chipset_flush_setup, .chipset_flush_teardown = agp_i915_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g33_driver = { .chiptype = CHIP_G33, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_igd_driver = { .chiptype = CHIP_IGD, .gen = 3, .busdma_addr_mask_sz = 36, .res_spec = agp_i915_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i915_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i830_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i915_write_gtt, .install_gtt_pte = agp_i915_install_gtt_pte, .read_gtt_pte = agp_i915_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver 
agp_i810_g965_driver = { .chiptype = CHIP_I965, .gen = 4, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_i965_get_gtt_total_entries, .install_gatt = agp_i965_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_i965_write_gtt, .install_gtt_pte = agp_i965_install_gtt_pte, .read_gtt_pte = agp_i965_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; static const struct agp_i810_driver agp_i810_g4x_driver = { .chiptype = CHIP_G4X, .gen = 5, .busdma_addr_mask_sz = 36, .res_spec = agp_i965_res_spec, .check_active = agp_i915_check_active, .set_desc = agp_i810_set_desc, .dump_regs = agp_i965_dump_regs, .get_stolen_size = agp_i915_get_stolen_size, .get_gtt_mappable_entries = agp_i915_get_gtt_mappable_entries, .get_gtt_total_entries = agp_gen5_get_gtt_total_entries, .install_gatt = agp_g4x_install_gatt, .deinstall_gatt = agp_i830_deinstall_gatt, .write_gtt = agp_g4x_write_gtt, .install_gtt_pte = agp_g4x_install_gtt_pte, .read_gtt_pte = agp_g4x_read_gtt_pte, .read_gtt_pte_paddr = agp_i915_read_gtt_pte_paddr, .set_aperture = agp_i915_set_aperture, .chipset_flush_setup = agp_i965_chipset_flush_setup, .chipset_flush_teardown = agp_i965_chipset_flush_teardown, .chipset_flush = agp_i915_chipset_flush, }; /* For adding new devices, devid is the id of the graphics controller * (pci:0:2:0, for example). The placeholder (usually at pci:0:2:1) for the * second head should never be added. The bridge_offset is the offset to * subtract from devid to get the id of the hostb that the device is on. 
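 *
 * (Hedged example: devid 0x71218086 in the first entry below is PCI
 * device ID 0x7121 paired with vendor ID 0x8086, i.e. the value
 * pci_get_devid() returns for the i810 GMCH SVGA controller.)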
*/ static const struct agp_i810_match { int devid; char *name; const struct agp_i810_driver *driver; } agp_i810_matches[] = { { .devid = 0x71218086, .name = "Intel 82810 (i810 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71238086, .name = "Intel 82810-DC100 (i810-DC100 GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x71258086, .name = "Intel 82810E (i810E GMCH) SVGA controller", .driver = &agp_i810_i810_driver }, { .devid = 0x11328086, .name = "Intel 82815 (i815 GMCH) SVGA controller", .driver = &agp_i810_i815_driver }, { .devid = 0x35778086, .name = "Intel 82830M (830M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x25628086, .name = "Intel 82845M (845M GMCH) SVGA controller", .driver = &agp_i810_i830_driver }, { .devid = 0x35828086, .name = "Intel 82852/855GM SVGA controller", .driver = &agp_i810_i855_driver }, { .devid = 0x25728086, .name = "Intel 82865G (865G GMCH) SVGA controller", .driver = &agp_i810_i865_driver }, { .devid = 0x25828086, .name = "Intel 82915G (915G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x258A8086, .name = "Intel E7221 SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x25928086, .name = "Intel 82915GM (915GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27728086, .name = "Intel 82945G (945G GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27A28086, .name = "Intel 82945GM (945GM GMCH) SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x27AE8086, .name = "Intel 945GME SVGA controller", .driver = &agp_i810_i915_driver }, { .devid = 0x29728086, .name = "Intel 946GZ SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29828086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29928086, .name = "Intel Q965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29A28086, .name = "Intel G965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x29B28086, .name = "Intel Q35 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29C28086, .name = "Intel G33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0x29D28086, .name = "Intel Q33 SVGA controller", .driver = &agp_i810_g33_driver }, { .devid = 0xA0018086, .name = "Intel Pineview SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0xA0118086, .name = "Intel Pineview (M) SVGA controller", .driver = &agp_i810_igd_driver }, { .devid = 0x2A028086, .name = "Intel GM965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A128086, .name = "Intel GME965 SVGA controller", .driver = &agp_i810_g965_driver }, { .devid = 0x2A428086, .name = "Intel GM45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E028086, .name = "Intel Eaglelake SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E128086, .name = "Intel Q45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E228086, .name = "Intel G45 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x2E328086, .name = "Intel G41 SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x00428086, .name = "Intel Ironlake (D) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0x00468086, .name = "Intel Ironlake (M) SVGA controller", .driver = &agp_i810_g4x_driver }, { .devid = 0, } }; static const struct agp_i810_match* agp_i810_match(device_t dev) { int i, devid; if (pci_get_class(dev) != PCIC_DISPLAY || 
(pci_get_subclass(dev) != PCIS_DISPLAY_VGA && pci_get_subclass(dev) != PCIS_DISPLAY_OTHER)) return (NULL); devid = pci_get_devid(dev); for (i = 0; agp_i810_matches[i].devid != 0; i++) { if (agp_i810_matches[i].devid == devid) break; } if (agp_i810_matches[i].devid == 0) return (NULL); else return (&agp_i810_matches[i]); } /* * Find bridge device. */ static device_t agp_i810_find_bridge(device_t dev) { return (pci_find_dbsf(0, 0, 0, 0)); } static void agp_i810_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "agp", -1) == NULL && agp_i810_match(parent)) device_add_child(parent, "agp", -1); } static int agp_i810_check_active(device_t bridge_dev) { u_int8_t smram; smram = pci_read_config(bridge_dev, AGP_I810_SMRAM, 1); if ((smram & AGP_I810_SMRAM_GMS) == AGP_I810_SMRAM_GMS_DISABLED) return (ENXIO); return (0); } static int agp_i830_check_active(device_t bridge_dev) { int gcc1; gcc1 = pci_read_config(bridge_dev, AGP_I830_GCC1, 1); if ((gcc1 & AGP_I830_GCC1_DEV2) == AGP_I830_GCC1_DEV2_DISABLED) return (ENXIO); return (0); } static int agp_i915_check_active(device_t bridge_dev) { int deven; deven = pci_read_config(bridge_dev, AGP_I915_DEVEN, 4); if ((deven & AGP_I915_DEVEN_D2F0) == AGP_I915_DEVEN_D2F0_DISABLED) return (ENXIO); return (0); } static void agp_82852_set_desc(device_t dev, const struct agp_i810_match *match) { switch (pci_read_config(dev, AGP_I85X_CAPID, 1)) { case AGP_I855_GME: device_set_desc(dev, "Intel 82855GME (855GME GMCH) SVGA controller"); break; case AGP_I855_GM: device_set_desc(dev, "Intel 82855GM (855GM GMCH) SVGA controller"); break; case AGP_I852_GME: device_set_desc(dev, "Intel 82852GME (852GME GMCH) SVGA controller"); break; case AGP_I852_GM: device_set_desc(dev, "Intel 82852GM (852GM GMCH) SVGA controller"); break; default: device_set_desc(dev, "Intel 8285xM (85xGM GMCH) SVGA controller"); break; } } static void agp_i810_set_desc(device_t dev, const struct agp_i810_match *match) { device_set_desc(dev, match->name); } static int agp_i810_probe(device_t dev) { device_t bdev; const struct agp_i810_match *match; int err; if (resource_disabled("agp", device_get_unit(dev))) return (ENXIO); match = agp_i810_match(dev); if (match == NULL) return (ENXIO); bdev = agp_i810_find_bridge(dev); if (bdev == NULL) { if (bootverbose) printf("I810: can't find bridge device\n"); return (ENXIO); } /* * Check whether the internal graphics device has been activated.
*/ err = match->driver->check_active(bdev); if (err != 0) { if (bootverbose) printf("i810: disabled, not probing\n"); return (err); } match->driver->set_desc(dev, match); return (BUS_PROBE_DEFAULT); } static void agp_i810_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I810_MISCC: 0x%04x\n", pci_read_config(sc->bdev, AGP_I810_MISCC, 2)); } static void agp_i830_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I830_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I830_GCC1, 1)); } static void agp_i855_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); } static void agp_i915_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I810_PGTBL_CTL: %08x\n", bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I915_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I915_MSAC, 1)); } static void agp_i965_dump_regs(device_t dev) { struct agp_i810_softc *sc = device_get_softc(dev); device_printf(dev, "AGP_I965_PGTBL_CTL2: %08x\n", bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2)); device_printf(dev, "AGP_I855_GCC1: 0x%02x\n", pci_read_config(sc->bdev, AGP_I855_GCC1, 1)); device_printf(dev, "AGP_I965_MSAC: 0x%02x\n", pci_read_config(sc->bdev, AGP_I965_MSAC, 1)); } static int agp_i810_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->stolen = 0; sc->stolen_size = 0; return (0); } static int agp_i830_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 1); switch (gcc1 & AGP_I830_GCC1_GMS) { case AGP_I830_GCC1_GMS_STOLEN_512: sc->stolen = (512 - 132) * 1024 / 4096; sc->stolen_size = 512 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_1024: sc->stolen = (1024 - 132) * 1024 / 4096; sc->stolen_size = 1024 * 1024; break; case AGP_I830_GCC1_GMS_STOLEN_8192: sc->stolen = (8192 - 132) * 1024 / 4096; sc->stolen_size = 8192 * 1024; break; default: sc->stolen = 0; device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } return (0); } static int agp_i915_get_stolen_size(device_t dev) { struct agp_i810_softc *sc; unsigned int gcc1, stolen, gtt_size; sc = device_get_softc(dev); /* * Stolen memory is set up at the beginning of the aperture by * the BIOS, consisting of the GATT followed by 4kb for the * BIOS display. 
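 *
 * A worked example with illustrative numbers: on an i915 with an 8MB
 * stolen region and a 256KB GTT, gtt_size becomes 260 once the 4KB
 * BIOS popup is added below, so the code computes
 *	sc->stolen_size = 8192 * 1024;
 *	sc->stolen = (8192 - 260) * 1024 / 4096;	(1983 usable pages)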
*/ switch (sc->match->driver->chiptype) { case CHIP_I855: gtt_size = 128; break; case CHIP_I915: gtt_size = 256; break; case CHIP_I965: switch (bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL) & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: gtt_size = 128; break; case AGP_I810_PGTBL_SIZE_256KB: gtt_size = 256; break; case AGP_I810_PGTBL_SIZE_512KB: gtt_size = 512; break; case AGP_I965_PGTBL_SIZE_1MB: gtt_size = 1024; break; case AGP_I965_PGTBL_SIZE_2MB: gtt_size = 2048; break; case AGP_I965_PGTBL_SIZE_1_5MB: gtt_size = 1024 + 512; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_G33: gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 2); switch (gcc1 & AGP_G33_MGGC_GGMS_MASK) { case AGP_G33_MGGC_GGMS_SIZE_1M: gtt_size = 1024; break; case AGP_G33_MGGC_GGMS_SIZE_2M: gtt_size = 2048; break; default: device_printf(dev, "Bad PGTBL size\n"); return (EINVAL); } break; case CHIP_IGD: case CHIP_G4X: gtt_size = 0; break; default: device_printf(dev, "Bad chiptype\n"); return (EINVAL); } /* GCC1 is called MGGC on i915+ */ gcc1 = pci_read_config(sc->bdev, AGP_I855_GCC1, 1); switch (gcc1 & AGP_I855_GCC1_GMS) { case AGP_I855_GCC1_GMS_STOLEN_1M: stolen = 1024; break; case AGP_I855_GCC1_GMS_STOLEN_4M: stolen = 4 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_8M: stolen = 8 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_16M: stolen = 16 * 1024; break; case AGP_I855_GCC1_GMS_STOLEN_32M: stolen = 32 * 1024; break; case AGP_I915_GCC1_GMS_STOLEN_48M: stolen = sc->match->driver->gen > 2 ? 48 * 1024 : 0; break; case AGP_I915_GCC1_GMS_STOLEN_64M: stolen = sc->match->driver->gen > 2 ? 64 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_128M: stolen = sc->match->driver->gen > 2 ? 128 * 1024 : 0; break; case AGP_G33_GCC1_GMS_STOLEN_256M: stolen = sc->match->driver->gen > 2 ? 
256 * 1024 : 0; break; case AGP_G4X_GCC1_GMS_STOLEN_96M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 96 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_160M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 160 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_224M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 224 * 1024; else stolen = 0; break; case AGP_G4X_GCC1_GMS_STOLEN_352M: if (sc->match->driver->chiptype == CHIP_I965 || sc->match->driver->chiptype == CHIP_G4X) stolen = 352 * 1024; else stolen = 0; break; default: device_printf(dev, "unknown memory configuration, disabling (GCC1 %x)\n", gcc1); return (EINVAL); } gtt_size += 4; sc->stolen_size = stolen * 1024; sc->stolen = (stolen - gtt_size) * 1024 / 4096; return (0); } static int agp_i810_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t miscc; sc = device_get_softc(dev); miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); if ((miscc & AGP_I810_MISCC_WINSIZE) == AGP_I810_MISCC_WINSIZE_32) ap = 32; else ap = 64; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i830_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; uint16_t gmch_ctl; sc = device_get_softc(dev); gmch_ctl = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); if ((gmch_ctl & AGP_I830_GCC1_GMASIZE) == AGP_I830_GCC1_GMASIZE_64) ap = 64; else ap = 128; sc->gtt_mappable_entries = (ap * 1024 * 1024) >> AGP_PAGE_SHIFT; return (0); } static int agp_i915_get_gtt_mappable_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t ap; sc = device_get_softc(dev); ap = AGP_GET_APERTURE(dev); sc->gtt_mappable_entries = ap >> AGP_PAGE_SHIFT; return (0); } static int agp_i810_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->gtt_total_entries = sc->gtt_mappable_entries; return (0); } static int agp_i965_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl; int error; sc = device_get_softc(dev); error = 0; pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); switch (pgetbl_ctl & AGP_I810_PGTBL_SIZE_MASK) { case AGP_I810_PGTBL_SIZE_128KB: sc->gtt_total_entries = 128 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_256KB: sc->gtt_total_entries = 256 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_512KB: sc->gtt_total_entries = 512 * 1024 / 4; break; /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ case AGP_I810_PGTBL_SIZE_1MB: sc->gtt_total_entries = 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_2MB: sc->gtt_total_entries = 2 * 1024 * 1024 / 4; break; case AGP_I810_PGTBL_SIZE_1_5MB: sc->gtt_total_entries = (1024 + 512) * 1024 / 4; break; default: device_printf(dev, "Unknown page table size\n"); error = ENXIO; } return (error); } static void agp_gen5_adjust_pgtbl_size(device_t dev, uint32_t sz) { struct agp_i810_softc *sc; uint32_t pgetbl_ctl, pgetbl_ctl2; sc = device_get_softc(dev); /* Disable per-process page table. */ pgetbl_ctl2 = bus_read_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2); pgetbl_ctl2 &= ~AGP_I810_PGTBL_ENABLED; bus_write_4(sc->sc_res[0], AGP_I965_PGTBL_CTL2, pgetbl_ctl2); /* Write the new ggtt size. 
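 *
 * The size field is rewritten in place here; for example, when GCC1
 * advertises AGP_G4x_GCC1_SIZE_VT_1_5M, agp_gen5_get_gtt_total_entries()
 * below passes AGP_I810_PGTBL_SIZE_1_5MB to this function before
 * re-reading the entry count via agp_i965_get_gtt_total_entries().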
*/ pgetbl_ctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgetbl_ctl &= ~AGP_I810_PGTBL_SIZE_MASK; pgetbl_ctl |= sz; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgetbl_ctl); } static int agp_gen5_get_gtt_total_entries(device_t dev) { struct agp_i810_softc *sc; uint16_t gcc1; sc = device_get_softc(dev); gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); switch (gcc1 & AGP_G4x_GCC1_SIZE_MASK) { case AGP_G4x_GCC1_SIZE_1M: case AGP_G4x_GCC1_SIZE_VT_1M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1MB); break; case AGP_G4x_GCC1_SIZE_VT_1_5M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_1_5MB); break; case AGP_G4x_GCC1_SIZE_2M: case AGP_G4x_GCC1_SIZE_VT_2M: agp_gen5_adjust_pgtbl_size(dev, AGP_I810_PGTBL_SIZE_2MB); break; default: device_printf(dev, "Unknown page table size\n"); return (ENXIO); } return (agp_i965_get_gtt_total_entries(dev)); } static int agp_i810_install_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); /* Some i810s have on-chip memory called dcache. */ if ((bus_read_1(sc->sc_res[0], AGP_I810_DRT) & AGP_I810_DRT_POPULATED) != 0) sc->dcache_size = 4 * 1024 * 1024; else sc->dcache_size = 0; /* According to the specs the gatt on the i810 must be 64k. */ sc->gatt->ag_virtual = kmem_alloc_contig(64 * 1024, M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING); if (sc->gatt->ag_virtual == NULL) { if (bootverbose) device_printf(dev, "contiguous allocation failed\n"); return (ENOMEM); } sc->gatt->ag_physical = vtophys((vm_offset_t)sc->gatt->ag_virtual); /* Install the GATT. */ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (0); } static void agp_i830_install_gatt_init(struct agp_i810_softc *sc) { uint32_t pgtblctl; /* * The i830 automatically initializes the 128k gatt on boot. * GATT address is already in there, make sure it's enabled. */ pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl |= 1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); sc->gatt->ag_physical = pgtblctl & ~1; } static int agp_i830_install_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); agp_i830_install_gatt_init(sc); return (0); } static int agp_gen4_install_gatt(device_t dev, const vm_size_t gtt_offset) { struct agp_i810_softc *sc; sc = device_get_softc(dev); pmap_change_attr((vm_offset_t)rman_get_virtual(sc->sc_res[0]) + gtt_offset, rman_get_size(sc->sc_res[0]) - gtt_offset, VM_MEMATTR_WRITE_COMBINING); agp_i830_install_gatt_init(sc); return (0); } static int agp_i965_install_gatt(device_t dev) { return (agp_gen4_install_gatt(dev, 512 * 1024)); } static int agp_g4x_install_gatt(device_t dev) { return (agp_gen4_install_gatt(dev, 2 * 1024 * 1024)); } static int agp_i810_attach(device_t dev) { struct agp_i810_softc *sc; int error; sc = device_get_softc(dev); sc->bdev = agp_i810_find_bridge(dev); if (sc->bdev == NULL) return (ENOENT); sc->match = agp_i810_match(dev); agp_set_aperture_resource(dev, sc->match->driver->gen <= 2 ? 
AGP_APBASE : AGP_I915_GMADR); error = agp_generic_attach(dev); if (error) return (error); if (ptoa((vm_paddr_t)Maxmem) > (1ULL << sc->match->driver->busdma_addr_mask_sz) - 1) { device_printf(dev, "agp_i810 does not support physical " "memory above %ju.\n", (uintmax_t)(1ULL << sc->match->driver->busdma_addr_mask_sz) - 1); return (ENOENT); } if (bus_alloc_resources(dev, sc->match->driver->res_spec, sc->sc_res)) { agp_generic_detach(dev); return (ENODEV); } sc->initial_aperture = AGP_GET_APERTURE(dev); sc->gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_WAITOK); sc->gatt->ag_entries = AGP_GET_APERTURE(dev) >> AGP_PAGE_SHIFT; if ((error = sc->match->driver->get_stolen_size(dev)) != 0 || (error = sc->match->driver->install_gatt(dev)) != 0 || (error = sc->match->driver->get_gtt_mappable_entries(dev)) != 0 || (error = sc->match->driver->get_gtt_total_entries(dev)) != 0 || (error = sc->match->driver->chipset_flush_setup(dev)) != 0) { bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); free(sc->gatt, M_AGP); agp_generic_detach(dev); return (error); } intel_agp = dev; device_printf(dev, "aperture size is %dM", sc->initial_aperture / 1024 / 1024); if (sc->stolen > 0) printf(", detected %dk stolen memory\n", sc->stolen * 4); else printf("\n"); if (bootverbose) { sc->match->driver->dump_regs(dev); device_printf(dev, "Mappable GTT entries: %d\n", sc->gtt_mappable_entries); device_printf(dev, "Total GTT entries: %d\n", sc->gtt_total_entries); } return (0); } static void agp_i810_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0); kmem_free(sc->gatt->ag_virtual, 64 * 1024); } static void agp_i830_deinstall_gatt(device_t dev) { struct agp_i810_softc *sc; unsigned int pgtblctl; sc = device_get_softc(dev); pgtblctl = bus_read_4(sc->sc_res[0], AGP_I810_PGTBL_CTL); pgtblctl &= ~1; bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, pgtblctl); } static int agp_i810_detach(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); agp_free_cdev(dev); /* Clear the GATT base. */ sc->match->driver->deinstall_gatt(dev); sc->match->driver->chipset_flush_teardown(dev); /* Put the aperture back the way it started. */ AGP_SET_APERTURE(dev, sc->initial_aperture); free(sc->gatt, M_AGP); bus_release_resources(dev, sc->match->driver->res_spec, sc->sc_res); agp_free_res(dev); return (0); } static int agp_i810_resume(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); AGP_SET_APERTURE(dev, sc->initial_aperture); /* Install the GATT. */ bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, sc->gatt->ag_physical | 1); return (bus_generic_resume(dev)); } /** * Sets the PCI resource size of the aperture on i830-class and below chipsets, * while returning failure on later chipsets when an actual change is * requested. * * This whole function is likely bogus, as the kernel would probably need to * reconfigure the placement of the AGP aperture if a larger size is requested, * which doesn't happen currently. */ static int agp_i810_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t miscc; sc = device_get_softc(dev); /* * Double check for sanity. 
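 * Callers reach this hook through AGP_SET_APERTURE(), which
 * agp_i810_method_set_aperture() below forwards to the per-chipset
 * set_aperture method; on the i810 only 32MB and 64MB apertures are
 * accepted, anything else fails with EINVAL.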
*/ if (aperture != 32 * 1024 * 1024 && aperture != 64 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } miscc = pci_read_config(sc->bdev, AGP_I810_MISCC, 2); miscc &= ~AGP_I810_MISCC_WINSIZE; if (aperture == 32 * 1024 * 1024) miscc |= AGP_I810_MISCC_WINSIZE_32; else miscc |= AGP_I810_MISCC_WINSIZE_64; pci_write_config(sc->bdev, AGP_I810_MISCC, miscc, 2); return (0); } static int agp_i830_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; u_int16_t gcc1; sc = device_get_softc(dev); if (aperture != 64 * 1024 * 1024 && aperture != 128 * 1024 * 1024) { device_printf(dev, "bad aperture size %d\n", aperture); return (EINVAL); } gcc1 = pci_read_config(sc->bdev, AGP_I830_GCC1, 2); gcc1 &= ~AGP_I830_GCC1_GMASIZE; if (aperture == 64 * 1024 * 1024) gcc1 |= AGP_I830_GCC1_GMASIZE_64; else gcc1 |= AGP_I830_GCC1_GMASIZE_128; pci_write_config(sc->bdev, AGP_I830_GCC1, gcc1, 2); return (0); } static int agp_i915_set_aperture(device_t dev, u_int32_t aperture) { return (agp_generic_set_aperture(dev, aperture)); } static int agp_i810_method_set_aperture(device_t dev, u_int32_t aperture) { struct agp_i810_softc *sc; sc = device_get_softc(dev); return (sc->match->driver->set_aperture(dev, aperture)); } /** * Writes a GTT entry mapping the page at the given offset from the * beginning of the aperture to the given physical address. Set up the * caching mode according to flags. * * For gen 1, 2 and 3, GTT start is located at AGP_I810_GTT offset * from corresponding BAR start. For gen 4, offset is 512KB + * AGP_I810_GTT; for gen 5 and 6 it is 2MB + AGP_I810_GTT. * * Also, the bits of the physical page address above 4GB need to be * placed into bits 40-32 of PTE. */ static void agp_i810_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_DCACHE_MEMORY) pte |= I810_PTE_LOCAL; else if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i810_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], AGP_I810_GTT + index * 4, pte); CTR2(KTR_AGP_I810, "810_pte %x %x", index, pte); } static void agp_i830_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; agp_i810_write_gtt(dev, index, pte); } static void agp_i915_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i915_write_gtt(dev, index, pte); } static void agp_i915_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[1], index * 4, pte); CTR2(KTR_AGP_I810, "915_pte %x %x", index, pte); } static void agp_i965_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_i965_write_gtt(dev, index, pte); } static void agp_i965_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev);
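	/*
	 * Illustrative note (addresses invented): for a page at physical
	 * 0x234567000, agp_i965_install_gtt_pte() above computes
	 * pte = 0x34567000 | I810_PTE_VALID | 0x20, i.e. the low 32
	 * address bits, the valid bit, and address bit 33 shifted down
	 * into the PTE field holding bits 35:32; the entry then lands
	 * here to be stored 512KB into the register BAR.
	 */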
bus_write_4(sc->sc_res[0], index * 4 + (512 * 1024), pte); CTR2(KTR_AGP_I810, "965_pte %x %x", index, pte); } static void agp_g4x_install_gtt_pte(device_t dev, u_int index, vm_offset_t physical, int flags) { uint32_t pte; pte = (u_int32_t)physical | I810_PTE_VALID; if (flags == AGP_USER_CACHED_MEMORY) pte |= I830_PTE_SYSTEM_CACHED; pte |= (physical & 0x0000000f00000000ull) >> 28; agp_g4x_write_gtt(dev, index, pte); } static void agp_g4x_write_gtt(device_t dev, u_int index, uint32_t pte) { struct agp_i810_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024), pte); CTR2(KTR_AGP_I810, "g4x_pte %x %x", index, pte); } static int agp_i810_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical) { struct agp_i810_softc *sc = device_get_softc(dev); u_int index; if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) { device_printf(dev, "failed: offset is 0x%08jx, " "shift is %d, entries is %d\n", (intmax_t)offset, AGP_PAGE_SHIFT, sc->gatt->ag_entries); return (EINVAL); } index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to bind into stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, physical, 0); return (0); } static int agp_i810_unbind_page(device_t dev, vm_offset_t offset) { struct agp_i810_softc *sc; u_int index; sc = device_get_softc(dev); if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT)) return (EINVAL); index = offset >> AGP_PAGE_SHIFT; if (sc->stolen != 0 && index < sc->stolen) { device_printf(dev, "trying to unbind from stolen memory\n"); return (EINVAL); } sc->match->driver->install_gtt_pte(dev, index, 0, 0); return (0); } static u_int32_t agp_i810_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], AGP_I810_GTT + index * 4); return (pte); } static u_int32_t agp_i915_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[1], index * 4); return (pte); } static u_int32_t agp_i965_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (512 * 1024)); return (pte); } static u_int32_t agp_g4x_read_gtt_pte(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; sc = device_get_softc(dev); pte = bus_read_4(sc->sc_res[0], index * 4 + (2 * 1024 * 1024)); return (pte); } static vm_paddr_t agp_i810_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = pte & ~PAGE_MASK; return (res); } static vm_paddr_t agp_i915_read_gtt_pte_paddr(device_t dev, u_int index) { struct agp_i810_softc *sc; u_int32_t pte; vm_paddr_t res; sc = device_get_softc(dev); pte = sc->match->driver->read_gtt_pte(dev, index); res = (pte & ~PAGE_MASK) | ((pte & 0xf0) << 28); return (res); } /* * Writing via memory mapped registers already flushes all TLBs. 
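 * That is also why agp_i810_flush_tlb() just below is an intentional
 * no-op rather than a missing implementation.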
*/ static void agp_i810_flush_tlb(device_t dev) { } static int agp_i810_enable(device_t dev, u_int32_t mode) { return (0); } static struct agp_memory * agp_i810_alloc_memory(device_t dev, int type, vm_size_t size) { struct agp_i810_softc *sc; struct agp_memory *mem; vm_page_t m; sc = device_get_softc(dev); if ((size & (AGP_PAGE_SIZE - 1)) != 0 || sc->agp.as_allocated + size > sc->agp.as_maxmem) return (0); if (type == 1) { /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (0); if (size != sc->dcache_size) return (0); } else if (type == 2) { /* * Type 2 is the contiguous physical memory type, that hands * back a physical address. This is used for cursors on i810. * Hand back as many single pages with physical as the user * wants, but only allow one larger allocation (ARGB cursor) * for simplicity. */ if (size != AGP_PAGE_SIZE) { if (sc->argb_cursor != NULL) return (0); /* Allocate memory for ARGB cursor, if we can. */ sc->argb_cursor = contigmalloc(size, M_AGP, 0, 0, ~0, PAGE_SIZE, 0); if (sc->argb_cursor == NULL) return (0); } } mem = malloc(sizeof *mem, M_AGP, M_WAITOK); mem->am_id = sc->agp.as_nextid++; mem->am_size = size; mem->am_type = type; if (type != 1 && (type != 2 || size == AGP_PAGE_SIZE)) mem->am_obj = vm_object_allocate(OBJT_SWAP, atop(round_page(size))); else mem->am_obj = 0; if (type == 2) { if (size == AGP_PAGE_SIZE) { /* * Allocate and wire down the page now so that we can * get its physical address. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO); VM_OBJECT_WUNLOCK(mem->am_obj); mem->am_physical = VM_PAGE_TO_PHYS(m); } else { /* Our allocation is already nicely wired down for us. * Just grab the physical address. */ mem->am_physical = vtophys(sc->argb_cursor); } } else mem->am_physical = 0; mem->am_offset = 0; mem->am_is_bound = 0; TAILQ_INSERT_TAIL(&sc->agp.as_memory, mem, am_link); sc->agp.as_allocated += size; return (mem); } static int agp_i810_free_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_page_t m; if (mem->am_is_bound) return (EBUSY); sc = device_get_softc(dev); if (mem->am_type == 2) { if (mem->am_size == AGP_PAGE_SIZE) { /* * Unwire the page which we wired in alloc_memory. */ VM_OBJECT_WLOCK(mem->am_obj); m = vm_page_lookup(mem->am_obj, 0); vm_page_unwire(m, PQ_INACTIVE); VM_OBJECT_WUNLOCK(mem->am_obj); } else { contigfree(sc->argb_cursor, mem->am_size, M_AGP); sc->argb_cursor = NULL; } } sc->agp.as_allocated -= mem->am_size; TAILQ_REMOVE(&sc->agp.as_memory, mem, am_link); if (mem->am_obj) vm_object_deallocate(mem->am_obj); free(mem, M_AGP); return (0); } static int agp_i810_bind_memory(device_t dev, struct agp_memory *mem, vm_offset_t offset) { struct agp_i810_softc *sc; vm_offset_t i; /* Do some sanity checks first. */ if ((offset & (AGP_PAGE_SIZE - 1)) != 0 || offset + mem->am_size > AGP_GET_APERTURE(dev)) { device_printf(dev, "binding memory at bad offset %#x\n", (int)offset); return (EINVAL); } sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } /* The memory's already wired down, just stick it in the GTT. 
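 * For example (sizes illustrative), a 16KB ARGB-cursor allocation bound
 * at offset 0 produces four install_gtt_pte() calls below, mapping
 * am_physical + 0, 4K, 8K and 12K into consecutive GTT slots.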
*/ for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (offset + i) >> AGP_PAGE_SHIFT, mem->am_physical + i, 0); } mem->am_offset = offset; mem->am_is_bound = 1; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_bind_memory(dev, mem, offset)); /* * Mapping local DRAM into GATT. */ if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) bus_write_4(sc->sc_res[0], AGP_I810_GTT + (i >> AGP_PAGE_SHIFT) * 4, i | 3); return (0); } static int agp_i810_unbind_memory(device_t dev, struct agp_memory *mem) { struct agp_i810_softc *sc; vm_offset_t i; sc = device_get_softc(dev); if (mem->am_type == 2 && mem->am_size != AGP_PAGE_SIZE) { mtx_lock(&sc->agp.as_lock); if (!mem->am_is_bound) { mtx_unlock(&sc->agp.as_lock); return (EINVAL); } for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, (mem->am_offset + i) >> AGP_PAGE_SHIFT, 0, 0); } mem->am_is_bound = 0; mtx_unlock(&sc->agp.as_lock); return (0); } if (mem->am_type != 1) return (agp_generic_unbind_memory(dev, mem)); if (sc->match->driver->chiptype != CHIP_I810) return (EINVAL); for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) { sc->match->driver->install_gtt_pte(dev, i >> AGP_PAGE_SHIFT, 0, 0); } return (0); } static device_method_t agp_i810_methods[] = { /* Device interface */ DEVMETHOD(device_identify, agp_i810_identify), DEVMETHOD(device_probe, agp_i810_probe), DEVMETHOD(device_attach, agp_i810_attach), DEVMETHOD(device_detach, agp_i810_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, agp_i810_resume), /* AGP interface */ DEVMETHOD(agp_get_aperture, agp_generic_get_aperture), DEVMETHOD(agp_set_aperture, agp_i810_method_set_aperture), DEVMETHOD(agp_bind_page, agp_i810_bind_page), DEVMETHOD(agp_unbind_page, agp_i810_unbind_page), DEVMETHOD(agp_flush_tlb, agp_i810_flush_tlb), DEVMETHOD(agp_enable, agp_i810_enable), DEVMETHOD(agp_alloc_memory, agp_i810_alloc_memory), DEVMETHOD(agp_free_memory, agp_i810_free_memory), DEVMETHOD(agp_bind_memory, agp_i810_bind_memory), DEVMETHOD(agp_unbind_memory, agp_i810_unbind_memory), DEVMETHOD(agp_chipset_flush, agp_intel_gtt_chipset_flush), { 0, 0 } }; static driver_t agp_i810_driver = { "agp", agp_i810_methods, sizeof(struct agp_i810_softc), }; DRIVER_MODULE(agp_i810, vgapci, agp_i810_driver, 0, 0); MODULE_DEPEND(agp_i810, agp, 1, 1, 1); MODULE_DEPEND(agp_i810, pci, 1, 1, 1); void agp_intel_gtt_clear_range(device_t dev, u_int first_entry, u_int num_entries) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(bogus_page), 0); sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } void agp_intel_gtt_insert_pages(device_t dev, u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { struct agp_i810_softc *sc; u_int i; sc = device_get_softc(dev); for (i = 0; i < num_entries; i++) { MPASS(pages[i]->valid == VM_PAGE_BITS_ALL); MPASS(pages[i]->ref_count > 0); sc->match->driver->install_gtt_pte(dev, first_entry + i, VM_PAGE_TO_PHYS(pages[i]), flags); } sc->match->driver->read_gtt_pte(dev, first_entry + num_entries - 1); } struct intel_gtt agp_intel_gtt_get(device_t dev) { struct agp_i810_softc *sc; struct intel_gtt res; sc = device_get_softc(dev); res.stolen_size = sc->stolen_size; res.gtt_total_entries = sc->gtt_total_entries; res.gtt_mappable_entries = 
sc->gtt_mappable_entries; res.do_idle_maps = 0; res.scratch_page_dma = VM_PAGE_TO_PHYS(bogus_page); if (sc->agp.as_aperture != NULL) res.gma_bus_addr = rman_get_start(sc->agp.as_aperture); else res.gma_bus_addr = 0; return (res); } static int agp_i810_chipset_flush_setup(device_t dev) { return (0); } static void agp_i810_chipset_flush_teardown(device_t dev) { /* Nothing to do. */ } static void agp_i810_chipset_flush(device_t dev) { /* Nothing to do. */ } static void agp_i830_chipset_flush(device_t dev) { struct agp_i810_softc *sc; uint32_t hic; int i; sc = device_get_softc(dev); pmap_invalidate_cache(); hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC); bus_write_4(sc->sc_res[0], AGP_I830_HIC, hic | (1U << 31)); for (i = 0; i < 20000 /* 1 sec */; i++) { hic = bus_read_4(sc->sc_res[0], AGP_I830_HIC); if ((hic & (1U << 31)) == 0) break; DELAY(50); } } static int agp_i915_chipset_flush_alloc_page(device_t dev, uint64_t start, uint64_t end) { struct agp_i810_softc *sc; device_t vga; sc = device_get_softc(dev); vga = device_get_parent(dev); sc->sc_flush_page_rid = 100; sc->sc_flush_page_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY, &sc->sc_flush_page_rid, start, end, PAGE_SIZE, RF_ACTIVE); if (sc->sc_flush_page_res == NULL) { device_printf(dev, "Failed to allocate flush page at 0x%jx\n", (uintmax_t)start); return (EINVAL); } sc->sc_flush_page_vaddr = rman_get_virtual(sc->sc_flush_page_res); if (bootverbose) { device_printf(dev, "Allocated flush page phys 0x%jx virt %p\n", (uintmax_t)rman_get_start(sc->sc_flush_page_res), sc->sc_flush_page_vaddr); } return (0); } static void agp_i915_chipset_flush_free_page(device_t dev) { struct agp_i810_softc *sc; device_t vga; sc = device_get_softc(dev); vga = device_get_parent(dev); if (sc->sc_flush_page_res == NULL) return; BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev, sc->sc_flush_page_res); - BUS_RELEASE_RESOURCE(device_get_parent(vga), dev, SYS_RES_MEMORY, - sc->sc_flush_page_rid, sc->sc_flush_page_res); + BUS_RELEASE_RESOURCE(device_get_parent(vga), dev, + sc->sc_flush_page_res); } static int agp_i915_chipset_flush_setup(device_t dev) { struct agp_i810_softc *sc; uint32_t temp; int error; sc = device_get_softc(dev); temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4); if ((temp & 1) != 0) { temp &= ~1; if (bootverbose) device_printf(dev, "Found already configured flush page at 0x%jx\n", (uintmax_t)temp); sc->sc_bios_allocated_flush_page = 1; /* * If the BIOS initialized the flush pointer (?) * register, expect that it also set up the resource * for the page.
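 * The low bit of AGP_I915_IFPADDR doubles as the enable flag, which is
 * why it is masked off above; e.g., a BIOS-programmed value of
 * 0x7ffe0001 (illustrative) denotes an enabled flush page at
 * 0x7ffe0000.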
*/ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, 0xffffffff); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp | 1, 4); } return (0); } static void agp_i915_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp = pci_read_config(sc->bdev, AGP_I915_IFPADDR, 4); temp &= ~1; pci_write_config(sc->bdev, AGP_I915_IFPADDR, temp, 4); } agp_i915_chipset_flush_free_page(dev); } static int agp_i965_chipset_flush_setup(device_t dev) { struct agp_i810_softc *sc; uint64_t temp; uint32_t temp_hi, temp_lo; int error; sc = device_get_softc(dev); temp_hi = pci_read_config(sc->bdev, AGP_I965_IFPADDR + 4, 4); temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); if ((temp_lo & 1) != 0) { temp = ((uint64_t)temp_hi << 32) | (temp_lo & ~1); if (bootverbose) device_printf(dev, "Found already configured flush page at 0x%jx\n", (uintmax_t)temp); sc->sc_bios_allocated_flush_page = 1; /* * If the BIOS initialized the flush pointer (?) * register, expect that it also set up the resource * for the page. */ error = agp_i915_chipset_flush_alloc_page(dev, temp, temp + PAGE_SIZE - 1); if (error != 0) return (error); } else { sc->sc_bios_allocated_flush_page = 0; error = agp_i915_chipset_flush_alloc_page(dev, 0, ~0); if (error != 0) return (error); temp = rman_get_start(sc->sc_flush_page_res); pci_write_config(sc->bdev, AGP_I965_IFPADDR + 4, (temp >> 32) & UINT32_MAX, 4); pci_write_config(sc->bdev, AGP_I965_IFPADDR, (temp & UINT32_MAX) | 1, 4); } return (0); } static void agp_i965_chipset_flush_teardown(device_t dev) { struct agp_i810_softc *sc; uint32_t temp_lo; sc = device_get_softc(dev); if (sc->sc_flush_page_res == NULL) return; if (!sc->sc_bios_allocated_flush_page) { temp_lo = pci_read_config(sc->bdev, AGP_I965_IFPADDR, 4); temp_lo &= ~1; pci_write_config(sc->bdev, AGP_I965_IFPADDR, temp_lo, 4); } agp_i915_chipset_flush_free_page(dev); } static void agp_i915_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); *(uint32_t *)sc->sc_flush_page_vaddr = 1; } int agp_intel_gtt_chipset_flush(device_t dev) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->match->driver->chipset_flush(dev); return (0); } void agp_intel_gtt_unmap_memory(device_t dev, struct sglist *sg_list) { } int agp_intel_gtt_map_memory(device_t dev, vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { #if 0 struct agp_i810_softc *sc; #endif struct sglist *sg; int i; #if 0 int error; bus_dma_tag_t dmat; #endif if (*sg_list != NULL) return (0); #if 0 sc = device_get_softc(dev); #endif sg = sglist_alloc(num_entries, M_WAITOK /* XXXKIB */); for (i = 0; i < num_entries; i++) { sg->sg_segs[i].ss_paddr = VM_PAGE_TO_PHYS(pages[i]); sg->sg_segs[i].ss_len = PAGE_SIZE; } #if 0 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1 /* alignment */, 0 /* boundary */, 1ULL << sc->match->busdma_addr_mask_sz /* lowaddr */, BUS_SPACE_MAXADDR /* highaddr */, NULL /* filtfunc */, NULL /* filtfuncarg */, BUS_SPACE_MAXADDR /* maxsize */, BUS_SPACE_UNRESTRICTED /* nsegments */, BUS_SPACE_MAXADDR /* maxsegsz */, 0 /* flags */, NULL /* lockfunc */, NULL /* lockfuncarg */, &dmat); if (error != 0) { sglist_free(sg); return (error); } /* XXXKIB */
#endif *sg_list = sg; return (0); } static void agp_intel_gtt_install_pte(device_t dev, u_int index, vm_paddr_t addr, u_int flags) { struct agp_i810_softc *sc; sc = device_get_softc(dev); sc->match->driver->install_gtt_pte(dev, index, addr, flags); } void agp_intel_gtt_insert_sg_entries(device_t dev, struct sglist *sg_list, u_int first_entry, u_int flags) { struct agp_i810_softc *sc; vm_paddr_t spaddr; size_t slen; u_int i, j; sc = device_get_softc(dev); for (i = j = 0; j < sg_list->sg_nseg; j++) { spaddr = sg_list->sg_segs[i].ss_paddr; slen = sg_list->sg_segs[i].ss_len; for (; slen > 0; i++) { sc->match->driver->install_gtt_pte(dev, first_entry + i, spaddr, flags); spaddr += AGP_PAGE_SIZE; slen -= AGP_PAGE_SIZE; } } sc->match->driver->read_gtt_pte(dev, first_entry + i - 1); } void intel_gtt_clear_range(u_int first_entry, u_int num_entries) { agp_intel_gtt_clear_range(intel_agp, first_entry, num_entries); } void intel_gtt_insert_pages(u_int first_entry, u_int num_entries, vm_page_t *pages, u_int flags) { agp_intel_gtt_insert_pages(intel_agp, first_entry, num_entries, pages, flags); } struct intel_gtt * intel_gtt_get(void) { intel_private.base = agp_intel_gtt_get(intel_agp); return (&intel_private.base); } int intel_gtt_chipset_flush(void) { return (agp_intel_gtt_chipset_flush(intel_agp)); } void intel_gtt_unmap_memory(struct sglist *sg_list) { agp_intel_gtt_unmap_memory(intel_agp, sg_list); } int intel_gtt_map_memory(vm_page_t *pages, u_int num_entries, struct sglist **sg_list) { return (agp_intel_gtt_map_memory(intel_agp, pages, num_entries, sg_list)); } void intel_gtt_insert_sg_entries(struct sglist *sg_list, u_int first_entry, u_int flags) { agp_intel_gtt_insert_sg_entries(intel_agp, sg_list, first_entry, flags); } void intel_gtt_install_pte(u_int index, vm_paddr_t addr, u_int flags) { agp_intel_gtt_install_pte(intel_agp, index, addr, flags); } device_t intel_gtt_get_bridge_device(void) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->bdev); } vm_paddr_t intel_gtt_read_pte_paddr(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte_paddr(intel_agp, entry)); } u_int32_t intel_gtt_read_pte(u_int entry) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->read_gtt_pte(intel_agp, entry)); } void intel_gtt_write(u_int entry, uint32_t val) { struct agp_i810_softc *sc; sc = device_get_softc(intel_agp); return (sc->match->driver->write_gtt(intel_agp, entry, val)); } diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c index 27860f9ee570..b1f9c85141bb 100644 --- a/sys/dev/ahci/ahci.c +++ b/sys/dev/ahci/ahci.c @@ -1,2910 +1,2909 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" #include #include #include #include #include /* local prototypes */ static void ahci_intr(void *data); static void ahci_intr_one(void *data); static void ahci_intr_one_edge(void *data); static int ahci_ch_init(device_t dev); static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); static void ahci_ch_intr(void *arg); static void ahci_ch_intr_direct(void *arg); static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_execute_transaction(struct ahci_slot *slot); static void ahci_timeout(void *arg); static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); static void ahci_dmainit(device_t dev); static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_dmafini(device_t dev); static void ahci_slotsalloc(device_t dev); static void ahci_slotsfree(device_t dev); static void ahci_reset(struct ahci_channel *ch); static void ahci_start(struct ahci_channel *ch, int fbs); static void ahci_stop(struct ahci_channel *ch); static void ahci_clo(struct ahci_channel *ch); static void ahci_start_fr(struct ahci_channel *ch); static void ahci_stop_fr(struct ahci_channel *ch); static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr); static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val); static int ahci_sata_connect(struct ahci_channel *ch); static int ahci_sata_phy_reset(struct ahci_channel *ch); static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); static void ahci_issue_recovery(struct ahci_channel *ch); static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); static void ahciaction(struct cam_sim *sim, union ccb *ccb); static void ahcipoll(struct cam_sim *sim); static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val) { return ch->disablephy ? 
ATA_SC_DET_DISABLE : val; } int ahci_ctlr_setup(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Clear interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); /* Configure CCC */ if (ctlr->ccc) { ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); ATA_OUTL(ctlr->r_mem, AHCI_CCCC, (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | (4 << AHCI_CCCC_CC_SHIFT) | AHCI_CCCC_EN); ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; if (bootverbose) { device_printf(dev, "CCC with %dms/4cmd enabled on vector %d\n", ctlr->ccc, ctlr->cccv); } } /* Enable AHCI interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); return (0); } int ahci_ctlr_reset(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); uint32_t v; int timeout; /* BIOS/OS Handoff */ if ((ATA_INL(ctlr->r_mem, AHCI_VS) >= 0x00010200) && (ATA_INL(ctlr->r_mem, AHCI_CAP2) & AHCI_CAP2_BOH) && ((v = ATA_INL(ctlr->r_mem, AHCI_BOHC)) & AHCI_BOHC_OOS) == 0) { /* Request OS ownership. */ ATA_OUTL(ctlr->r_mem, AHCI_BOHC, v | AHCI_BOHC_OOS); /* Wait up to 2s for BIOS ownership release. */ for (timeout = 0; timeout < 80; timeout++) { DELAY(25000); v = ATA_INL(ctlr->r_mem, AHCI_BOHC); if ((v & AHCI_BOHC_BOS) == 0) break; if ((v & AHCI_BOHC_BB) == 0) break; } } /* Enable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); /* Reset AHCI controller */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); for (timeout = 1000; timeout > 0; timeout--) { DELAY(1000); if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) break; } if (timeout == 0) { device_printf(dev, "AHCI controller reset failure\n"); return (ENXIO); } /* Reenable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { /* * Restore capability field. * This writes to a read-only register to restore its state. * On fully standard-compliant hardware this is not needed and * this operation shall not take place. See ahci_pci.c for * platforms using this quirk. */ ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); } return (0); } int ahci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i, speed, unit; uint32_t u, version; device_t child; ctlr->dev = dev; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF); /* Set up our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { ahci_free_mem(dev); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } /* Get the HW capabilities */ version = ATA_INL(ctlr->r_mem, AHCI_VS); ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); if (version >= 0x00010200) ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); if (ctlr->caps & AHCI_CAP_EMS) ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); if (ctlr->quirks & AHCI_Q_FORCE_PI) { /* * Enable ports. * The spec says that BIOS sets up bits corresponding to * available ports. On platforms where this information * is missing, the driver can define available ports on its own.
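 * For example, a CAP.NP field (AHCI_CAP_NPMASK) of 3 yields
 * nports = 4 below and a forced PI mask of 0x0f.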
*/ int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; int nmask = (1 << nports) - 1; ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", nports, nmask); } ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ if ((ctlr->quirks & AHCI_Q_ALTSIG) && (ctlr->caps & AHCI_CAP_SPM) == 0) ctlr->quirks |= AHCI_Q_NOBSYRES; if (ctlr->quirks & AHCI_Q_1CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->ichannels &= 0x01; } if (ctlr->quirks & AHCI_Q_2CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 1; ctlr->ichannels &= 0x03; } if (ctlr->quirks & AHCI_Q_4CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 3; ctlr->ichannels &= 0x0f; } ctlr->channels = MAX(flsl(ctlr->ichannels), (ctlr->caps & AHCI_CAP_NPMASK) + 1); if (ctlr->quirks & AHCI_Q_NOPMP) ctlr->caps &= ~AHCI_CAP_SPM; if (ctlr->quirks & AHCI_Q_NONCQ) ctlr->caps &= ~AHCI_CAP_SNCQ; if ((ctlr->caps & AHCI_CAP_CCCS) == 0) ctlr->ccc = 0; ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); /* Create controller-wide DMA tag. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, &ctlr->dma_tag)) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (ENXIO); } ahci_ctlr_setup(dev); /* Setup interrupts. */ if ((error = ahci_setup_interrupt(dev)) != 0) { bus_dma_tag_destroy(ctlr->dma_tag); ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } i = 0; for (u = ctlr->ichannels; u != 0; u >>= 1) i += (u & 1); ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); resource_int_value(device_get_name(dev), device_get_unit(dev), "direct", &ctlr->direct); /* Announce HW capabilities. */ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), ((version >> 4) & 0xf0) + (version & 0x0f), (ctlr->caps & AHCI_CAP_NPMASK) + 1, ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?"))), (ctlr->caps & AHCI_CAP_SPM) ? "supported" : "not supported", (ctlr->caps & AHCI_CAP_FBSS) ? " with FBS" : ""); if (ctlr->quirks != 0) { device_printf(dev, "quirks=0x%b\n", ctlr->quirks, AHCI_Q_BIT_STRING); } if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?")))); printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", (ctlr->caps & AHCI_CAP_NPMASK) + 1); } if (bootverbose && version >= 0x00010200) { device_printf(dev, "Caps2:%s%s%s%s%s%s\n", (ctlr->caps2 & AHCI_CAP2_DESO) ? 
" DESO":"", (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "ahcich", -1); if (child == NULL) { device_printf(dev, "failed to add channel device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)unit); if ((ctlr->ichannels & (1 << unit)) == 0) device_disable(child); } /* Attach any remapped NVME device */ for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { child = device_add_child(dev, "nvme", -1); if (child == NULL) { device_printf(dev, "failed to add remapped NVMe device"); continue; } device_set_ivars(child, (void *)(intptr_t)(unit | AHCI_REMAPPED_UNIT)); } int em = (ctlr->caps & AHCI_CAP_EMS) != 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "em", &em); if (em) { child = device_add_child(dev, "ahciem", -1); if (child == NULL) device_printf(dev, "failed to add enclosure device\n"); else device_set_ivars(child, (void *)(intptr_t)AHCI_EM_UNIT); } bus_generic_attach(dev); return (0); } int ahci_detach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ for (i = 0; i < ctlr->numirqs; i++) { if (ctlr->irqs[i].r_irq) { bus_teardown_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); } } bus_dma_tag_destroy(ctlr->dma_tag); /* Free memory. */ rman_fini(&ctlr->sc_iomem); ahci_free_mem(dev); mtx_destroy(&ctlr->ch_mtx); return (0); } void ahci_free_mem(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Release memory resources */ if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); if (ctlr->r_msix_table) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_tab_rid, ctlr->r_msix_table); if (ctlr->r_msix_pba) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_pba_rid, ctlr->r_msix_pba); ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; } int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Check for single MSI vector fallback. */ if (ctlr->numirqs > 1 && (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { device_printf(dev, "Falling back to one MSI\n"); ctlr->numirqs = 1; } /* Ensure we don't overrun irqs. */ if (ctlr->numirqs > AHCI_MAX_IRQS) { device_printf(dev, "Too many irqs %d > %d (clamping)\n", ctlr->numirqs, AHCI_MAX_IRQS); ctlr->numirqs = AHCI_MAX_IRQS; } /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 
1 : 0); if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; else if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; else if (ctlr->channels > ctlr->numirqs && i == ctlr->numirqs - 1) ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; else ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : ahci_intr_one), &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return (ENXIO); } if (ctlr->numirqs > 1) { bus_describe_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle, ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? "ch%d" : "%d", i); } } return (0); } /* * Common case interrupt handler. */ static void ahci_intr(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; u_int32_t is, ise = 0; void *arg; int unit; if (irq->mode == AHCI_IRQ_MODE_ALL) { unit = 0; if (ctlr->ccc) is = ctlr->ichannels; else is = ATA_INL(ctlr->r_mem, AHCI_IS); } else { /* AHCI_IRQ_MODE_AFTER */ unit = irq->r_irq_rid - 1; is = ATA_INL(ctlr->r_mem, AHCI_IS); is &= (0xffffffff << unit); } /* CCC interrupt is edge triggered. */ if (ctlr->ccc) ise = 1 << ctlr->cccv; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ise |= is; if (ise != 0) ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); for (; unit < ctlr->channels; unit++) { if ((is & (1 << unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { if ((arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* AHCI declares level triggered IS. */ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, is); ATA_RBL(ctlr->r_mem, AHCI_IS); } /* * Simplified interrupt handler for multivector MSI mode. */ static void ahci_intr_one(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); ATA_RBL(ctlr->r_mem, AHCI_IS); } static void ahci_intr_one_edge(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; /* Some controllers have edge triggered IS. 
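 * The status bit is therefore cleared before dispatching the channel
 * handler here (the level-triggered ahci_intr_one() above clears it
 * afterwards), so an edge arriving while the handler runs is not lost.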
*/ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); ATA_RBL(ctlr->r_mem, AHCI_IS); } struct resource * ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ahci_controller *ctlr = device_get_softc(dev); struct resource *res; rman_res_t st; int offset, size, unit; bool is_em, is_remapped; unit = (intptr_t)device_get_ivars(child); is_em = is_remapped = false; if (unit & AHCI_REMAPPED_UNIT) { unit &= AHCI_UNIT; unit -= ctlr->channels; is_remapped = true; } else if (unit & AHCI_EM_UNIT) { unit &= AHCI_UNIT; is_em = true; } res = NULL; switch (type) { case SYS_RES_MEMORY: if (is_remapped) { offset = ctlr->remap_offset + unit * ctlr->remap_size; size = ctlr->remap_size; } else if (!is_em) { offset = AHCI_OFFSET + (unit << 7); size = 128; } else if ((ctlr->caps & AHCI_CAP_EMS) == 0) { break; } else if (*rid == 0) { offset = AHCI_EM_CTL; size = 4; } else { offset = (ctlr->emloc & 0xffff0000) >> 14; size = (ctlr->emloc & 0x0000ffff) << 2; if (*rid != 1) { if (*rid == 2 && (ctlr->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) offset += size; else break; } } st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + size - 1, size, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 128, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irqs[0].r_irq; break; } return (res); } int -ahci_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +ahci_release_resource(device_t dev, device_t child, struct resource *r) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: - if (rid != ATA_IRQ_RID) + if (rman_get_rid(r) != ATA_IRQ_RID) return (ENOENT); return (0); } return (EINVAL); } int ahci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT; if (filter != NULL) { printf("ahci.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT; ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } int ahci_print_child(device_t dev, device_t child) { intptr_t ivars; int retval; retval = bus_print_child_header(dev, child); ivars = (intptr_t)device_get_ivars(child); if ((ivars & AHCI_EM_UNIT) == 0) retval += printf(" at channel %d", (int)ivars & AHCI_UNIT); retval += bus_print_child_footer(dev, child); return (retval); } int ahci_child_location(device_t dev, device_t child, struct sbuf *sb) { intptr_t ivars; ivars = (intptr_t)device_get_ivars(child); if ((ivars & AHCI_EM_UNIT) == 0) sbuf_printf(sb, "channel=%d", (int)ivars & AHCI_UNIT); return (0); } bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child) { struct ahci_controller *ctlr = 
device_get_softc(dev); return (ctlr->dma_tag); } void ahci_attached(device_t dev, struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(dev); mtx_lock(&ctlr->ch_mtx); ctlr->ch[ch->unit] = ch; mtx_unlock(&ctlr->ch_mtx); } void ahci_detached(device_t dev, struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(dev); mtx_lock(&ctlr->ch_mtx); mtx_lock(&ch->mtx); ctlr->ch[ch->unit] = NULL; mtx_unlock(&ch->mtx); mtx_unlock(&ctlr->ch_mtx); } struct ahci_channel * ahci_getch(device_t dev, int n) { struct ahci_controller *ctlr = device_get_softc(dev); struct ahci_channel *ch; KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n)); mtx_lock(&ctlr->ch_mtx); ch = ctlr->ch[n]; if (ch != NULL) mtx_lock(&ch->mtx); mtx_unlock(&ctlr->ch_mtx); return (ch); } void ahci_putch(struct ahci_channel *ch) { mtx_unlock(&ch->mtx); } static int ahci_ch_probe(device_t dev) { device_set_desc_copy(dev, "AHCI channel"); return (BUS_PROBE_DEFAULT); } static int ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS) { struct ahci_channel *ch; int error, value; ch = arg1; value = ch->disablephy; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL || (value != 0 && value != 1)) return (error); mtx_lock(&ch->mtx); ch->disablephy = value; if (value) { ahci_ch_deinit(ch->dev); } else { ahci_ch_init(ch->dev); ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED); } mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ahci_channel *ch = device_get_softc(dev); struct cam_devq *devq; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; int rid, error, i, sata_rev = 0; u_int32_t version; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->caps = ctlr->caps; ch->caps2 = ctlr->caps2; ch->start = ctlr->ch_start; ch->quirks = ctlr->quirks; ch->vendorid = ctlr->vendorid; ch->deviceid = ctlr->deviceid; ch->subvendorid = ctlr->subvendorid; ch->subdeviceid = ctlr->subdeviceid; ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); STAILQ_INIT(&ch->doneq); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); /* JMicron external ports (0) sometimes limited */ if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) sata_rev = 1; if (ch->quirks & AHCI_Q_SATA2) sata_rev = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = ch->numslots; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN; } rid = 0; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); version = ATA_INL(ctlr->r_mem, AHCI_VS); if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) ch->chcaps |= AHCI_P_CMD_FBSCP; if (ch->caps2 & AHCI_CAP2_SDS) ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s\n", (ch->chcaps & AHCI_P_CMD_HPCP) ? 
" HPCP":"", (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); } ahci_dmainit(dev); ahci_slotsalloc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, ch, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. */ devq = cam_simq_alloc(ch->numslots); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), (struct mtx *)&ch->mtx, (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, ahci_ch_pm, ch); } mtx_unlock(&ch->mtx); ahci_attached(device_get_parent(dev), ch); ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ch, 0, ahci_ch_disablephy_proc, "IU", "Disable PHY"); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int ahci_ch_detach(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); ahci_detached(device_get_parent(dev), ch); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. 
*/ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ahci_ch_deinit(dev); ahci_slotsfree(dev); ahci_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int ahci_ch_init(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); uint64_t work; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Setup work areas */ work = ch->dma.work_bus + AHCI_CL_OFFSET; ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); work = ch->dma.rfis_bus; ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); /* Activate the channel and power/spin up device */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); ahci_start_fr(ch); ahci_start(ch, 1); return (0); } static int ahci_ch_deinit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset command register. */ ahci_stop(ch); ahci_stop_fr(ch); ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); /* Allow everything, including partial and slumber modes. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); /* Request slumber mode transition and give some time to get there. */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); DELAY(100); /* Disable PHY. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } static int ahci_ch_suspend(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); ahci_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_resume(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); ahci_reset(ch); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } static device_method_t ahcich_methods[] = { DEVMETHOD(device_probe, ahci_ch_probe), DEVMETHOD(device_attach, ahci_ch_attach), DEVMETHOD(device_detach, ahci_ch_detach), DEVMETHOD(device_suspend, ahci_ch_suspend), DEVMETHOD(device_resume, ahci_ch_resume), DEVMETHOD_END }; static driver_t ahcich_driver = { "ahcich", ahcich_methods, sizeof(struct ahci_channel) }; DRIVER_MODULE(ahcich, ahci, ahcich_driver, NULL, NULL); struct ahci_dc_cb_args { bus_addr_t maddr; int error; }; static void ahci_dmainit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_dc_cb_args dcba; size_t rfsize; int error; /* Command area. 
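Each area below follows the usual busdma three-step, with a small callback
that only captures the loaded bus address. In outline (illustrative, not
verbatim):

	bus_dma_tag_create(bus_get_dma_tag(dev), align, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &tag);
	bus_dmamem_alloc(tag, &va, BUS_DMA_ZERO, &map);
	bus_dmamap_load(tag, map, va, size, ahci_dmasetupc_cb, &dcba,
	    BUS_DMA_NOWAIT);
	busaddr = dcba.maddr;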
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag); if (error != 0) goto error; error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, BUS_DMA_ZERO, &ch->dma.work_map); if (error != 0) goto error; error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); if (error != 0 || (error = dcba.error) != 0) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* FIS receive area. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) rfsize = 4096; else rfsize = 256; error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rfsize, 1, rfsize, 0, NULL, NULL, &ch->dma.rfis_tag); if (error != 0) goto error; error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, &ch->dma.rfis_map); if (error != 0) goto error; error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); if (error != 0 || (error = dcba.error) != 0) { bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); goto error; } ch->dma.rfis_bus = dcba.maddr; /* Data area. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag); if (error != 0) goto error; return; error: device_printf(dev, "WARNING - DMA initialization failed, error %d\n", error); ahci_dmafini(dev); } static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void ahci_dmafini(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.rfis_bus) { bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); ch->dma.rfis_bus = 0; ch->dma.rfis = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void ahci_slotsalloc(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; slot->ch = ch; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void ahci_slotsfree(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) { if 
(((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); union ccb *ccb; if (bootverbose) { if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) device_printf(ch->dev, "CONNECT requested\n"); else device_printf(ch->dev, "DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return (0); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return (0); } xpt_rescan(ccb); return (1); } return (0); } static void ahci_cpd_check_events(struct ahci_channel *ch) { u_int32_t status; union ccb *ccb; device_t dev; if (ch->pm_level == 0) return; status = ATA_INL(ch->r_mem, AHCI_P_CMD); if ((status & AHCI_P_CMD_CPD) == 0) return; if (bootverbose) { dev = ch->dev; if (status & AHCI_P_CMD_CPS) { device_printf(dev, "COLD CONNECT requested\n"); } else device_printf(dev, "COLD DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void ahci_notify_events(struct ahci_channel *ch, u_int32_t status) { struct cam_path *dpath; int i; if (ch->caps & AHCI_CAP_SSNTF) ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); if (bootverbose) device_printf(ch->dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void ahci_done(struct ahci_channel *ch, union ccb *ccb) { mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || ch->batch == 0) { xpt_done(ccb); return; } STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); } static void ahci_ch_intr(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t istatus; /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ahci_ch_intr_main(ch, istatus); mtx_unlock(&ch->mtx); } static void ahci_ch_intr_direct(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; struct ccb_hdr *ccb_h; uint32_t istatus; STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ch->batch = 1; ahci_ch_intr_main(ch, istatus); ch->batch = 0; /* * Prevent the possibility of issues caused by processing the queue * while unlocked below by moving the contents to a local queue. */ STAILQ_CONCAT(&tmp_doneq, &ch->doneq); mtx_unlock(&ch->mtx); while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); xpt_done_direct((union ccb *)ccb_h); } } static void ahci_ch_pm(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t work; if (ch->numrslots != 0) return; work = ATA_INL(ch->r_mem, AHCI_P_CMD); if (ch->pm_level == 4) work |= AHCI_P_CMD_PARTIAL; else work |= AHCI_P_CMD_SLUMBER; ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); } static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) { uint32_t cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; /* Clear interrupt statuses. */ ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. 
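The slot accounting below is plain bit arithmetic: PxSACT and PxCI carry a
set bit for every command the device still owns, so with rslots as the mask
of issued commands (illustrative):

	cstatus = PxSACT | PxCI		still outstanding
	ok  = ch->rslots & ~cstatus	issued, no longer pending: completed
	err = ch->rslots &  cstatus	still pending when the error fired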
*/ if (ch->numtslots != 0) cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); else cstatus = 0; if (ch->numrslots != ch->numtslots) cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); /* Read SNTF in one of possible ways. */ if ((istatus & AHCI_P_IX_SDB) && (ch->pm_present || ch->curr[0].atapi != 0)) { if (ch->caps & AHCI_CAP_SSNTF) sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); else if (ch->fbs_enabled) { u_int8_t *fis = ch->dma.rfis + 0x58; for (i = 0; i < 16; i++) { if (fis[1] & 0x80) { fis[1] &= 0x7f; sntf |= 1 << i; } fis += 256; } } else { u_int8_t *fis = ch->dma.rfis + 0x58; if (fis[1] & 0x80) sntf = (1 << (fis[1] & 0x0f)); } } /* Process PHY events */ if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { serr = ATA_INL(ch->r_mem, AHCI_P_SERR); if (serr) { ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); reset = ahci_phy_check_events(ch, serr); } } /* Process cold presence detection events */ if ((istatus & AHCI_P_IX_CPD) && !reset) ahci_cpd_check_events(ch); /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { if (ch->quirks & AHCI_Q_NOCCS) { /* * ASMedia chips sometimes report failed commands as * completed. Count all running commands as failed. */ cstatus |= ch->rslots; /* They also report wrong CCS, so try to guess one. */ ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1; } else { ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; } //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); port = -1; if (ch->fbs_enabled) { uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); if (fbs & AHCI_P_FBS_SDE) { port = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; } else { for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } } } err = ch->rslots & cstatus; } else { ccs = 0; err = 0; port = -1; } /* Complete all successful commands. */ ok = ch->rslots & ~cstatus; for (i = 0; i < ch->numslots; i++) { if ((ok >> i) & 1) ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); } /* On error, complete the rest of commands with error statuses. */ if (err) { if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: requests in loading state. */ if (((err >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (istatus & AHCI_P_IX_TFE) { if (port != -2) { /* Task File Error */ if (ch->numtslotspd[ ch->slot[i].ccb->ccb_h.target_id] == 0) { /* Untagged operation. */ if (i == ccs) et = AHCI_ERR_TFE; else et = AHCI_ERR_INNOCENT; } else { /* Tagged operation. */ et = AHCI_ERR_NCQ; } } else { et = AHCI_ERR_TFE; ch->fatalerr = 1; } } else if (istatus & AHCI_P_IX_IF) { if (ch->numtslots == 0 && i != ccs && port != -2) et = AHCI_ERR_INNOCENT; else et = AHCI_ERR_SATA; } else et = AHCI_ERR_INVALID; ahci_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. 
*/ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); } /* Process NOTIFY events */ if (sntf) ahci_notify_events(ch, sntf); } /* Must be called with channel locked. */ static int ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) { int t = ccb->ccb_h.target_id; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0xffffffff >> (32 - ch->curr[t].tags))) == 0) return (1); /* If we have FBS */ if (ch->fbs_enabled) { /* Tagged command while untagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) return (1); } else { /* Tagged command while untagged are active. */ if (ch->numrslots != 0 && ch->numtslots == 0) return (1); /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } } else { /* If we have FBS */ if (ch->fbs_enabled) { /* Untagged command while tagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) return (1); } else { /* Untagged command while tagged are active. */ if (ch->numrslots != 0 && ch->numtslots != 0) return (1); } } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) { struct ahci_slot *slot; int tag, tags; /* Choose empty slot. */ tags = ch->numslots; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; if (ch->lastslot + 1 < tags) tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); else tag = 0; if (tag == 0 || tag + ch->lastslot >= tags) tag = ffs(~ch->oslots) - 1; else tag += ch->lastslot; ch->lastslot = tag; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. */ ch->oslots |= (1 << tag); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << tag); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = AHCI_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, ahci_dmasetprd, slot, 0); } else { slot->dma.nsegs = 0; ahci_execute_transaction(slot); } } /* Locked by busdma engine. 
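The PRD byte count filled in below is zero-based per the AHCI spec (a
stored value of 0 means one byte), hence the ds_len - 1, and AHCI_PRD_MASK
trims it to the 22-bit DBC field of the entry.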
*/ static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_dma_prd *prd; int i; if (error) { device_printf(ch->dev, "DMA load error\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); } slot->dma.nsegs = nsegs; bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); ahci_execute_transaction(slot); } /* Must be called with channel locked. */ static void ahci_execute_transaction(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_cmd_list *clp; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int fis_size, i, softreset; uint8_t *fis = ch->dma.rfis + 0x40; uint8_t val; uint16_t cmd_flags; /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } /* Setup the command list entry */ clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); cmd_flags = (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | (ccb->ccb_h.func_code == XPT_SCSI_IO ? (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | (fis_size / sizeof(u_int32_t)) | (port << 12); clp->prd_length = htole16(slot->dma.nsegs); /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { if (ccb->ataio.cmd.control & ATA_A_RESET) { softreset = 1; /* Kick controller into sane state */ ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 0); cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; } else { softreset = 2; /* Prepare FIS receive area for check. */ for (i = 0; i < 20; i++) fis[i] = 0xff; } } else softreset = 0; clp->bytecount = 0; clp->cmd_flags = htole16(cmd_flags); clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); /* Set ACTIVE bit for NCQ commands. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); } /* If FBS is enabled, set PMP port. */ if (ch->fbs_enabled) { ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | (port << AHCI_P_FBS_DEV_SHIFT)); } /* Issue command to the controller. */ slot->state = AHCI_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); /* Device reset commands don't interrupt. Poll them.
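Completion is detected by watching the slot bit in PxCI drop, in 10us
steps, for up to ccb_h.timeout milliseconds; the loop below amounts to the
following outline, with task-file error and quirk checks folded into the
same loop:

	for (count = 0; count < timeout_ms * 100; count++) {
		DELAY(10);
		if ((ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)) == 0)
			break;
	}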
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { int count, timeout = ccb->ccb_h.timeout * 100; enum ahci_err_type et = AHCI_ERR_NONE; for (count = 0; count < timeout; count++) { DELAY(10); if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) break; if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && softreset != 1) { #if 0 device_printf(ch->dev, "Poll error on slot %d, TFD: %04x\n", slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); #endif et = AHCI_ERR_TFE; break; } /* Workaround for ATI SB600/SB700 chipsets. */ if (ccb->ccb_h.target_id == 15 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) && (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { et = AHCI_ERR_TIMEOUT; break; } } /* * Some Marvell controllers require additional time * after soft reset to work properly. Setup delay * to 50ms after soft reset. */ if (ch->quirks & AHCI_Q_MRVL_SR_DEL) DELAY(50000); /* * Marvell HBAs with non-RAID firmware do not wait for * readiness after soft reset, so we have to wait here. * Marvell RAIDs do not have this problem, but instead * sometimes forget to update FIS receive area, breaking * this wait. */ if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && softreset == 2 && et == AHCI_ERR_NONE) { for ( ; count < timeout; count++) { bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); val = fis[2]; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); if ((val & ATA_S_BUSY) == 0) break; DELAY(10); } } if (timeout && (count >= timeout)) { device_printf(ch->dev, "Poll timeout on slot %d port %d\n", slot->slot, port); device_printf(ch->dev, "is %08x cs %08x ss %08x " "rs %08x tfd %02x serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); et = AHCI_ERR_TIMEOUT; } /* Kick controller into sane state and enable FBS. */ if (softreset == 2) ch->eslots |= (1 << slot->slot); ahci_end_transaction(slot, et); return; } /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); return; } /* Must be called with channel locked. */ static void ahci_process_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void ahci_rearm_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < AHCI_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void ahci_timeout(void *arg) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; device_t dev = ch->dev; uint32_t sstatus; int ccs; int i; /* Check for stale timeout. */ if (slot->state < AHCI_SLOT_RUNNING) return; /* Check if slot was not being executed last time we checked. */ if (slot->state < AHCI_SLOT_EXECUTING) { /* Check if slot started executing. 
*/ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || ch->fbs_enabled || ch->wrongccs) slot->state = AHCI_SLOT_EXECUTING; else if ((ch->rslots & (1 << ccs)) == 0) { ch->wrongccs = 1; slot->state = AHCI_SLOT_EXECUTING; } callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); return; } device_printf(dev, "Timeout on slot %d port %d\n", slot->slot, slot->ccb->ccb_h.target_id & 0x0f); device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " "serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); /* Handle frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ ch->fatalerr = 1; /* Handle command with timeout. */ ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } } else { /* With FBS we wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) ahci_process_timeout(ch); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } } /* Must be called with channel locked. */ static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) { struct ahci_channel *ch = slot->ch; union ccb *ccb = slot->ccb; struct ahci_cmd_list *clp; int lastto; uint32_t sig; bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == AHCI_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { u_int8_t *fis = ch->dma.rfis + 0x40; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); if (ch->fbs_enabled) { fis += ccb->ccb_h.target_id * 256; res->status = fis[2]; res->error = fis[3]; } else { uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); res->status = tfd; res->error = tfd >> 8; } res->lba_low = fis[4]; res->lba_mid = fis[5]; res->lba_high = fis[6]; res->device = fis[7]; res->lba_low_exp = fis[8]; res->lba_mid_exp = fis[9]; res->lba_high_exp = fis[10]; res->sector_count = fis[12]; res->sector_count_exp = fis[13]; /* * Some weird controllers do not return signature in * FIS receive area. Read it from PxSIG register. 
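PxSIG snapshots the task-file values from the device's initial D2H FIS,
packed per the AHCI spec as bits 31:24 LBA high, 23:16 LBA mid, 15:8 LBA
low and 7:0 sector count, which is exactly how the bytes are unpacked
below.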
*/ if ((ch->quirks & AHCI_Q_ALTSIG) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { sig = ATA_INL(ch->r_mem, AHCI_P_SIG); res->lba_high = sig >> 24; res->lba_mid = sig >> 16; res->lba_low = sig >> 8; res->sector_count = sig; } } else bzero(res, sizeof(*res)); if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->ataio.resid = ccb->ataio.dxfer_len - le32toh(clp->bytecount); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->csio.resid = ccb->csio.dxfer_len - le32toh(clp->bytecount); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } if (et != AHCI_ERR_NONE) ch->eslots |= (1 << slot->slot); /* In case of error, freeze device for proper recovery. */ if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* Set proper result status. */ ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case AHCI_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case AHCI_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case AHCI_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case AHCI_ERR_TFE: case AHCI_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case AHCI_ERR_SATA: ch->fatalerr = 1; if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case AHCI_ERR_TIMEOUT: if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; ch->numrslotspd[ccb->ccb_h.target_id]--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots--; ch->numtslotspd[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != AHCI_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was first request of reset sequence and there is no error, * proceed to second request. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) && et == AHCI_ERR_NONE) { ccb->ataio.cmd.control &= ~ATA_A_RESET; ahci_begin_transaction(ch, ccb); return; } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { ahci_process_read_log(ch, ccb); /* If it was our REQUEST SENSE command - process it. 
*/ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { ahci_process_request_sense(ch, ccb); /* If it was NCQ or ATAPI command error, put result on hold. */ } else if (et == AHCI_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else ahci_done(ch, ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { ahci_reset(ch); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) { ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 1); } /* if there are commands on hold, we can do READ LOG. */ if (!ch->recoverycmd && ch->numhslots) ahci_issue_recovery(ch); } /* If all the rest of commands are in timeout - give them a chance. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != AHCI_ERR_TIMEOUT) ahci_rearm_timeout(ch); /* Unfreeze frozen command. */ if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; ahci_begin_transaction(ch, fccb); xpt_release_simq(ch->sim, TRUE); } /* Start PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3 && (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8); } } static void ahci_issue_recovery(struct ahci_channel *ch) { union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(ch->dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } ahci_reset(ch); return; } xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path, ch->hold[i]->ccb_h.pinfo.priority); if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(ch->dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery.
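The 512-byte log page 10h requested above identifies the failed NCQ
command: per ACS, byte 0 bits 4:0 carry the failed tag and bit 7 (NQ) is
set when the error was for a non-queued command, with the shadow register
image following at fixed offsets (status at byte 2, error at byte 3, and
so on), matching the unpacking in ahci_process_read_log() below. Sketch:

	tag = data[0] & 0x1f;
	queued = (data[0] & 0x80) == 0;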
*/ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); ahci_begin_transaction(ch, ccb); } static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) { uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(ch->dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_AHCI); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) { int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_start(struct ahci_channel *ch, int fbs) { u_int32_t cmd; /* Run the channel start callback, if any. */ if (ch->start) ch->start(ch); /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); /* Clear any interrupts pending on this channel */ ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); /* Configure FIS-based switching if supported. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) { ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; ATA_OUTL(ch->r_mem, AHCI_P_FBS, ch->fbs_enabled ? AHCI_P_FBS_EN : 0); } /* Start operations on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd &= ~AHCI_P_CMD_PMA; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | (ch->pm_present ? AHCI_P_CMD_PMA : 0)); } static void ahci_stop(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all activity on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); /* Wait for activity stop. 
*/ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); ch->eslots = 0; } static void ahci_clo(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Issue Command List Override if supported */ if (ch->caps & AHCI_CAP_SCLO) { cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd |= AHCI_P_CMD_CLO; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "executing CLO failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); } } static void ahci_stop_fr(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); /* Wait for FIS reception stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI FR engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); } static void ahci_start_fr(struct ahci_channel *ch) { u_int32_t cmd; /* Start FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); } static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0) { int timeout = 0; uint32_t val; while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & (ATA_S_BUSY | ATA_S_DRQ)) { if (timeout > t) { if (t != 0) { device_printf(ch->dev, "AHCI reset: device not ready after %dms " "(tfd = %08x)\n", MAX(t, 0) + t0, val); } return (EBUSY); } DELAY(1000); timeout++; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device ready after %dms\n", timeout + t0); return (0); } static void ahci_reset_to(void *arg) { struct ahci_channel *ch = arg; if (ch->resetting == 0) return; ch->resetting--; if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, (310 - ch->resetting) * 100) == 0) { ch->resetting = 0; ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { ahci_clo(ch); ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void ahci_reset(struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(ch->dev, "AHCI reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(ch); for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; /* XXX: Commands in loading state.
*/ ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->wrongccs = 0; ch->fatalerr = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset and reconnect PHY. */ if (!ahci_sata_phy_reset(ch)) { if (bootverbose) device_printf(ch->dev, "AHCI reset: device not found\n"); ch->devices = 0; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_PRC | AHCI_P_IX_PC)); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device found\n"); /* Wait for clearing busy status. */ if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { if (dumping) ahci_clo(ch); else ch->resetting = 310; } ch->devices = 1; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_TFE | AHCI_P_IX_HBF | AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); else { ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); } } static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) { u_int8_t *fis = &ctp->cfis[0]; bzero(fis, 20); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->acmd, ccb->csio.cdb_len); bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; fis[12] = ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] &= 0x07; fis[12] |= tag << 3; } fis[13] = ccb->ataio.cmd.sector_count_exp; if (ccb->ataio.ata_flags & ATA_FLAG_ICC) fis[14] = ccb->ataio.icc; fis[15] = ATA_A_4BIT; if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } } else { fis[15] = ccb->ataio.cmd.control; } return (20); } static int ahci_sata_connect(struct ahci_channel *ch) { u_int32_t status; int timeout, timeoutslot, found = 0; /* * Wait for "connect well", up to 100ms by default and * up to 500ms for devices with the SLOWDEV quirk. */ timeoutslot = ((ch->quirks & AHCI_Q_SLOWDEV) ?
5000 : 1000); for (timeout = 0; timeout < timeoutslot; timeout++) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= timeoutslot || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); return (1); } static int ahci_sata_phy_reset(struct ahci_channel *ch) { int sata_rev; uint32_t val, detval; if (ch->listening) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val |= AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 0; } sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; detval = ahci_ch_detval(ch, ATA_SC_DET_RESET); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, detval | val | ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); DELAY(1000); detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, detval | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); if (!ahci_sata_connect(ch)) { if (ch->caps & AHCI_CAP_SSS) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val &= ~AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 1; } else if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } return (1); } static int ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) { if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; ahci_done(ch, ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; ahci_done(ch, ccb); return (-1); } return (0); } static void ahciaction(struct cam_sim *sim, union ccb *ccb) { struct ahci_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ahci_channel *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ahci_check_ids(ch, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (ahci_check_collision(ch, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. 
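Freezing the queue stops CAM from submitting further requests; the single
stashed CCB is retried from ahci_end_transaction() once a conflicting
command completes, and the simq is released again at that point (see the
unfreeze path in ahci_end_transaction() above).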
*/ xpt_freeze_simq(ch->sim, 1); return; } ahci_begin_transaction(ch, ccb); return; case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(ch->numslots, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; uint32_t status; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->caps2 & AHCI_CAP2_APST) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; } if ((ch->caps & AHCI_CAP_SNCQ) && (ch->quirks & AHCI_Q_NOAA) == 0) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case 
XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ahci_reset(ch); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (ch->caps & AHCI_CAP_SNCQ) cpi->hba_inquiry |= PI_TAG_ABLE; if (ch->caps & AHCI_CAP_SPM) cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; if ((ch->quirks & AHCI_Q_NOAUX) == 0) cpi->hba_misc |= PIM_ATA_EXT; cpi->hba_eng_cnt = 0; if (ch->caps & AHCI_CAP_SPM) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = ctob(AHCI_SG_ENTRIES - 1); /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (ch->quirks & AHCI_Q_MAXIO_64K) cpi->maxio = min(cpi->maxio, 128 * 512); cpi->hba_vendor = ch->vendorid; cpi->hba_device = ch->deviceid; cpi->hba_subvendor = ch->subvendorid; cpi->hba_subdevice = ch->subdeviceid; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ahci_done(ch, ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); uint32_t istatus; /* Read interrupt statuses and process if any. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus != 0) ahci_ch_intr_main(ch, istatus); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; ahci_reset_to(ch); } } MODULE_VERSION(ahci, 1); MODULE_DEPEND(ahci, cam, 1, 1, 1); diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h index 04d0cccc31f9..8b51b1e0b3ae 100644 --- a/sys/dev/ahci/ahci.h +++ b/sys/dev/ahci/ahci.h @@ -1,676 +1,675 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998 - 2008 Søren Schmidt * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* ATA register defines */ #define ATA_DATA 0 /* (RW) data */ #define ATA_FEATURE 1 /* (W) feature */ #define ATA_F_DMA 0x01 /* enable DMA */ #define ATA_F_OVL 0x02 /* enable overlap */ #define ATA_COUNT 2 /* (W) sector count */ #define ATA_SECTOR 3 /* (RW) sector # */ #define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */ #define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */ #define ATA_DRIVE 6 /* (W) Sector/Drive/Head */ #define ATA_D_LBA 0x40 /* use LBA addressing */ #define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */ #define ATA_COMMAND 7 /* (W) command */ #define ATA_ERROR 8 /* (R) error */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */ #define ATA_IREASON 9 /* (R) interrupt reason */ #define ATA_I_CMD 0x01 /* cmd (1) | data (0) */ #define ATA_I_IN 0x02 /* read (1) | write (0) */ #define ATA_I_RELEASE 0x04 /* released bus (1) */ #define ATA_I_TAGMASK 0xf8 /* tag mask */ #define ATA_STATUS 10 /* (R) status */ #define ATA_ALTSTAT 11 /* (R) alternate status */ #define ATA_S_ERROR 0x01 /* error */ #define ATA_S_INDEX 0x02 /* index */ #define ATA_S_CORR 0x04 /* data corrected */ #define ATA_S_DRQ 0x08 /* data request */ #define ATA_S_DSC 0x10 /* drive seek completed */ #define ATA_S_SERVICE 0x10 /* drive needs service */ #define ATA_S_DWF 0x20 /* drive write fault */ #define ATA_S_DMA 0x20 /* DMA ready */ #define ATA_S_READY 0x40 /* drive ready */ #define ATA_S_BUSY 0x80 /* busy */ #define ATA_CONTROL 12 /* (W) control */ #define ATA_A_IDS 0x02 /* disable interrupts */ #define ATA_A_RESET 0x04 /* RESET controller */ #define ATA_A_4BIT 0x08 /* 4 head bits */ #define ATA_A_HOB 0x80 /* High Order Byte enable */ /* SATA register defines */ #define ATA_SSTATUS 13 #define ATA_SS_DET_MASK 0x0000000f #define ATA_SS_DET_NO_DEVICE 0x00000000 #define ATA_SS_DET_DEV_PRESENT 0x00000001 #define ATA_SS_DET_PHY_ONLINE 0x00000003 #define ATA_SS_DET_PHY_OFFLINE 0x00000004 #define ATA_SS_SPD_MASK 0x000000f0 #define ATA_SS_SPD_NO_SPEED 0x00000000 #define ATA_SS_SPD_GEN1 0x00000010 #define ATA_SS_SPD_GEN2 0x00000020 #define ATA_SS_SPD_GEN3 0x00000030 #define ATA_SS_IPM_MASK 0x00000f00 #define ATA_SS_IPM_NO_DEVICE 0x00000000 #define ATA_SS_IPM_ACTIVE 0x00000100 #define ATA_SS_IPM_PARTIAL 0x00000200 #define ATA_SS_IPM_SLUMBER 0x00000600 #define ATA_SS_IPM_DEVSLEEP 0x00000800 #define ATA_SERROR 14 #define ATA_SE_DATA_CORRECTED 0x00000001 #define ATA_SE_COMM_CORRECTED 0x00000002 #define ATA_SE_DATA_ERR 0x00000100 #define ATA_SE_COMM_ERR 0x00000200 #define ATA_SE_PROT_ERR 0x00000400 #define ATA_SE_HOST_ERR 0x00000800 #define ATA_SE_PHY_CHANGED 0x00010000 #define ATA_SE_PHY_IERROR 0x00020000 #define ATA_SE_COMM_WAKE 0x00040000 #define ATA_SE_DECODE_ERR 0x00080000 #define 
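/*
 * Illustrative sketch (stand-alone): decoding a few of the ATA_S_*
 * status bits defined above.  BSY gates everything else: while it is
 * set the remaining status bits are not meaningful, which is the same
 * rule the drivers' polling loops follow.  Only a subset of the bits
 * is decoded here.
 */
#include <stdint.h>
#include <stdio.h>

#define S_ERROR 0x01	/* mirrors ATA_S_ERROR */
#define S_DRQ   0x08	/* mirrors ATA_S_DRQ */
#define S_READY 0x40	/* mirrors ATA_S_READY */
#define S_BUSY  0x80	/* mirrors ATA_S_BUSY */

static void
decode_status(uint8_t st)
{
	if (st & S_BUSY) {
		printf("0x%02x: busy\n", st);
		return;
	}
	printf("0x%02x:%s%s%s\n", st,
	    (st & S_READY) ? " ready" : "",
	    (st & S_DRQ)   ? " data-request" : "",
	    (st & S_ERROR) ? " error" : "");
}

int
main(void)
{
	decode_status(0x80);	/* busy */
	decode_status(0x58);	/* ready + seek-complete + DRQ */
	decode_status(0x51);	/* ready + error */
	return (0);
}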
ATA_SE_PARITY_ERR 0x00100000 #define ATA_SE_CRC_ERR 0x00200000 #define ATA_SE_HANDSHAKE_ERR 0x00400000 #define ATA_SE_LINKSEQ_ERR 0x00800000 #define ATA_SE_TRANSPORT_ERR 0x01000000 #define ATA_SE_UNKNOWN_FIS 0x02000000 #define ATA_SE_EXCHANGED 0x04000000 #define ATA_SCONTROL 15 #define ATA_SC_DET_MASK 0x0000000f #define ATA_SC_DET_IDLE 0x00000000 #define ATA_SC_DET_RESET 0x00000001 #define ATA_SC_DET_DISABLE 0x00000004 #define ATA_SC_SPD_MASK 0x000000f0 #define ATA_SC_SPD_NO_SPEED 0x00000000 #define ATA_SC_SPD_SPEED_GEN1 0x00000010 #define ATA_SC_SPD_SPEED_GEN2 0x00000020 #define ATA_SC_SPD_SPEED_GEN3 0x00000030 #define ATA_SC_IPM_MASK 0x00000f00 #define ATA_SC_IPM_NONE 0x00000000 #define ATA_SC_IPM_DIS_PARTIAL 0x00000100 #define ATA_SC_IPM_DIS_SLUMBER 0x00000200 #define ATA_SC_IPM_DIS_DEVSLEEP 0x00000400 #define ATA_SACTIVE 16 #define AHCI_MAX_PORTS 32 #define AHCI_MAX_SLOTS 32 #define AHCI_MAX_IRQS 16 /* SATA AHCI v1.0 register defines */ #define AHCI_CAP 0x00 #define AHCI_CAP_NPMASK 0x0000001f #define AHCI_CAP_SXS 0x00000020 #define AHCI_CAP_EMS 0x00000040 #define AHCI_CAP_CCCS 0x00000080 #define AHCI_CAP_NCS 0x00001F00 #define AHCI_CAP_NCS_SHIFT 8 #define AHCI_CAP_PSC 0x00002000 #define AHCI_CAP_SSC 0x00004000 #define AHCI_CAP_PMD 0x00008000 #define AHCI_CAP_FBSS 0x00010000 #define AHCI_CAP_SPM 0x00020000 #define AHCI_CAP_SAM 0x00080000 #define AHCI_CAP_ISS 0x00F00000 #define AHCI_CAP_ISS_SHIFT 20 #define AHCI_CAP_SCLO 0x01000000 #define AHCI_CAP_SAL 0x02000000 #define AHCI_CAP_SALP 0x04000000 #define AHCI_CAP_SSS 0x08000000 #define AHCI_CAP_SMPS 0x10000000 #define AHCI_CAP_SSNTF 0x20000000 #define AHCI_CAP_SNCQ 0x40000000 #define AHCI_CAP_64BIT 0x80000000 #define AHCI_GHC 0x04 #define AHCI_GHC_AE 0x80000000 #define AHCI_GHC_MRSM 0x00000004 #define AHCI_GHC_IE 0x00000002 #define AHCI_GHC_HR 0x00000001 #define AHCI_IS 0x08 #define AHCI_PI 0x0c #define AHCI_VS 0x10 #define AHCI_CCCC 0x14 #define AHCI_CCCC_TV_MASK 0xffff0000 #define AHCI_CCCC_TV_SHIFT 16 #define AHCI_CCCC_CC_MASK 0x0000ff00 #define AHCI_CCCC_CC_SHIFT 8 #define AHCI_CCCC_INT_MASK 0x000000f8 #define AHCI_CCCC_INT_SHIFT 3 #define AHCI_CCCC_EN 0x00000001 #define AHCI_CCCP 0x18 #define AHCI_EM_LOC 0x1C #define AHCI_EM_CTL 0x20 #define AHCI_EM_MR 0x00000001 #define AHCI_EM_TM 0x00000100 #define AHCI_EM_RST 0x00000200 #define AHCI_EM_LED 0x00010000 #define AHCI_EM_SAFTE 0x00020000 #define AHCI_EM_SES2 0x00040000 #define AHCI_EM_SGPIO 0x00080000 #define AHCI_EM_SMB 0x01000000 #define AHCI_EM_XMT 0x02000000 #define AHCI_EM_ALHD 0x04000000 #define AHCI_EM_PM 0x08000000 #define AHCI_CAP2 0x24 #define AHCI_CAP2_BOH 0x00000001 #define AHCI_CAP2_NVMP 0x00000002 #define AHCI_CAP2_APST 0x00000004 #define AHCI_CAP2_SDS 0x00000008 #define AHCI_CAP2_SADM 0x00000010 #define AHCI_CAP2_DESO 0x00000020 #define AHCI_BOHC 0x28 #define AHCI_BOHC_BOS 0x00000001 #define AHCI_BOHC_OOS 0x00000002 #define AHCI_BOHC_SOOE 0x00000004 #define AHCI_BOHC_OOC 0x00000008 #define AHCI_BOHC_BB 0x00000010 #define AHCI_VSCAP 0xa4 #define AHCI_OFFSET 0x100 #define AHCI_STEP 0x80 #define AHCI_P_CLB 0x00 #define AHCI_P_CLBU 0x04 #define AHCI_P_FB 0x08 #define AHCI_P_FBU 0x0c #define AHCI_P_IS 0x10 #define AHCI_P_IE 0x14 #define AHCI_P_IX_DHR 0x00000001 #define AHCI_P_IX_PS 0x00000002 #define AHCI_P_IX_DS 0x00000004 #define AHCI_P_IX_SDB 0x00000008 #define AHCI_P_IX_UF 0x00000010 #define AHCI_P_IX_DP 0x00000020 #define AHCI_P_IX_PC 0x00000040 #define AHCI_P_IX_MP 0x00000080 #define AHCI_P_IX_PRC 0x00400000 #define AHCI_P_IX_IPM 0x00800000 #define AHCI_P_IX_OF 
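/*
 * Illustrative sketch (stand-alone): unpacking the AHCI host capability
 * (CAP) fields defined above.  NP (ports) and NCS (command slots) are
 * 0-based counts, so the driver adds one; ISS encodes the highest
 * supported interface speed generation.  The cap value below is an
 * assumed example.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_NPMASK    0x0000001fu	/* mirrors AHCI_CAP_NPMASK */
#define CAP_NCS       0x00001f00u	/* mirrors AHCI_CAP_NCS */
#define CAP_NCS_SHIFT 8
#define CAP_ISS       0x00f00000u	/* mirrors AHCI_CAP_ISS */
#define CAP_ISS_SHIFT 20

int
main(void)
{
	uint32_t cap = 0x6f30ff45u;	/* assumed example register value */

	printf("ports=%u slots=%u gen=%u\n",
	    (cap & CAP_NPMASK) + 1,
	    ((cap & CAP_NCS) >> CAP_NCS_SHIFT) + 1,
	    (cap & CAP_ISS) >> CAP_ISS_SHIFT);	/* ports=6 slots=32 gen=3 */
	return (0);
}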
0x01000000 #define AHCI_P_IX_INF 0x04000000 #define AHCI_P_IX_IF 0x08000000 #define AHCI_P_IX_HBD 0x10000000 #define AHCI_P_IX_HBF 0x20000000 #define AHCI_P_IX_TFE 0x40000000 #define AHCI_P_IX_CPD 0x80000000 #define AHCI_P_CMD 0x18 #define AHCI_P_CMD_ST 0x00000001 #define AHCI_P_CMD_SUD 0x00000002 #define AHCI_P_CMD_POD 0x00000004 #define AHCI_P_CMD_CLO 0x00000008 #define AHCI_P_CMD_FRE 0x00000010 #define AHCI_P_CMD_CCS_MASK 0x00001f00 #define AHCI_P_CMD_CCS_SHIFT 8 #define AHCI_P_CMD_ISS 0x00002000 #define AHCI_P_CMD_FR 0x00004000 #define AHCI_P_CMD_CR 0x00008000 #define AHCI_P_CMD_CPS 0x00010000 #define AHCI_P_CMD_PMA 0x00020000 #define AHCI_P_CMD_HPCP 0x00040000 #define AHCI_P_CMD_MPSP 0x00080000 #define AHCI_P_CMD_CPD 0x00100000 #define AHCI_P_CMD_ESP 0x00200000 #define AHCI_P_CMD_FBSCP 0x00400000 #define AHCI_P_CMD_APSTE 0x00800000 #define AHCI_P_CMD_ATAPI 0x01000000 #define AHCI_P_CMD_DLAE 0x02000000 #define AHCI_P_CMD_ALPE 0x04000000 #define AHCI_P_CMD_ASP 0x08000000 #define AHCI_P_CMD_ICC_MASK 0xf0000000 #define AHCI_P_CMD_NOOP 0x00000000 #define AHCI_P_CMD_ACTIVE 0x10000000 #define AHCI_P_CMD_PARTIAL 0x20000000 #define AHCI_P_CMD_SLUMBER 0x60000000 #define AHCI_P_CMD_DEVSLEEP 0x80000000 #define AHCI_P_TFD 0x20 #define AHCI_P_SIG 0x24 #define AHCI_P_SSTS 0x28 #define AHCI_P_SCTL 0x2c #define AHCI_P_SERR 0x30 #define AHCI_P_SACT 0x34 #define AHCI_P_CI 0x38 #define AHCI_P_SNTF 0x3C #define AHCI_P_FBS 0x40 #define AHCI_P_FBS_EN 0x00000001 #define AHCI_P_FBS_DEC 0x00000002 #define AHCI_P_FBS_SDE 0x00000004 #define AHCI_P_FBS_DEV 0x00000f00 #define AHCI_P_FBS_DEV_SHIFT 8 #define AHCI_P_FBS_ADO 0x0000f000 #define AHCI_P_FBS_ADO_SHIFT 12 #define AHCI_P_FBS_DWE 0x000f0000 #define AHCI_P_FBS_DWE_SHIFT 16 #define AHCI_P_DEVSLP 0x44 #define AHCI_P_DEVSLP_ADSE 0x00000001 #define AHCI_P_DEVSLP_DSP 0x00000002 #define AHCI_P_DEVSLP_DETO 0x000003fc #define AHCI_P_DEVSLP_DETO_SHIFT 2 #define AHCI_P_DEVSLP_MDAT 0x00007c00 #define AHCI_P_DEVSLP_MDAT_SHIFT 10 #define AHCI_P_DEVSLP_DITO 0x01ff8000 #define AHCI_P_DEVSLP_DITO_SHIFT 15 #define AHCI_P_DEVSLP_DM 0x0e000000 #define AHCI_P_DEVSLP_DM_SHIFT 25 /* Pessimistic prognosis on number of required S/G entries */ #define AHCI_SG_ENTRIES MIN(roundup(btoc(maxphys) + 1, 8), 65528) /* Command list. 32 commands. First, 1Kbyte aligned. */ #define AHCI_CL_OFFSET 0 #define AHCI_CL_SIZE 32 /* Command tables. Up to 32 commands, Each, 128byte aligned. */ #define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS) #define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16) /* Total main work area. */ #define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots) /* ivars value fields */ #define AHCI_REMAPPED_UNIT (1 << 31) /* NVMe remapped device. */ #define AHCI_EM_UNIT (1 << 30) /* Enclosure Mgmt device. */ #define AHCI_UNIT 0xff /* Channel number. 
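/*
 * Illustrative sketch (stand-alone): the per-channel DMA work-area
 * layout encoded by AHCI_CL_OFFSET / AHCI_CT_OFFSET / AHCI_CT_SIZE /
 * AHCI_WORK_SIZE above: 32 command-list headers of 32 bytes each,
 * followed by one command table per slot, each table carrying a
 * 128-byte CFIS/ACMD/reserved area plus 16 bytes per S/G entry.
 * SG_ENTRIES and numslots are assumed example values.
 */
#include <stdio.h>

#define CL_SIZE    32
#define MAX_SLOTS  32
#define SG_ENTRIES 264	/* assumed value of AHCI_SG_ENTRIES */

int
main(void)
{
	int numslots = 32;	/* would come from CAP.NCS + 1 */
	unsigned long ct_offset, ct_size, work_size;

	ct_offset = CL_SIZE * MAX_SLOTS;		/* AHCI_CT_OFFSET */
	ct_size = 128 + SG_ENTRIES * 16ul;		/* AHCI_CT_SIZE */
	work_size = ct_offset + ct_size * numslots;	/* AHCI_WORK_SIZE */
	printf("ct_offset=%lu ct_size=%lu work=%lu\n",
	    ct_offset, ct_size, work_size);
	return (0);
}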
*/ struct ahci_dma_prd { u_int64_t dba; u_int32_t reserved; u_int32_t dbc; /* 0 based */ #define AHCI_PRD_MASK 0x003fffff /* max 4MB */ #define AHCI_PRD_MAX (AHCI_PRD_MASK + 1) #define AHCI_PRD_IPC (1U << 31) } __packed; struct ahci_cmd_tab { u_int8_t cfis[64]; u_int8_t acmd[32]; u_int8_t reserved[32]; struct ahci_dma_prd prd_tab[]; } __packed; struct ahci_cmd_list { u_int16_t cmd_flags; #define AHCI_CMD_ATAPI 0x0020 #define AHCI_CMD_WRITE 0x0040 #define AHCI_CMD_PREFETCH 0x0080 #define AHCI_CMD_RESET 0x0100 #define AHCI_CMD_BIST 0x0200 #define AHCI_CMD_CLR_BUSY 0x0400 u_int16_t prd_length; /* PRD entries */ u_int32_t bytecount; u_int64_t cmd_table_phys; /* 128byte aligned */ } __packed; /* misc defines */ #define ATA_IRQ_RID 0 #define ATA_INTR_FLAGS (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY) struct ata_dmaslot { bus_dmamap_t data_map; /* data DMA map */ int nsegs; /* Number of segs loaded */ }; /* structure holding DMA related information */ struct ata_dma { bus_dma_tag_t work_tag; /* workspace DMA tag */ bus_dmamap_t work_map; /* workspace DMA map */ uint8_t *work; /* workspace */ bus_addr_t work_bus; /* bus address of work */ bus_dma_tag_t rfis_tag; /* RFIS list DMA tag */ bus_dmamap_t rfis_map; /* RFIS list DMA map */ uint8_t *rfis; /* FIS receive area */ bus_addr_t rfis_bus; /* bus address of rfis */ bus_dma_tag_t data_tag; /* data DMA tag */ }; enum ahci_slot_states { AHCI_SLOT_EMPTY, AHCI_SLOT_LOADING, AHCI_SLOT_RUNNING, AHCI_SLOT_EXECUTING }; struct ahci_slot { struct ahci_channel *ch; /* Channel */ u_int8_t slot; /* Number of this slot */ enum ahci_slot_states state; /* Slot state */ u_int ct_offset; /* cmd_tab offset */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ }; struct ahci_device { int revision; int mode; u_int bytecount; u_int atapi; u_int tags; u_int caps; }; struct ahci_led { device_t dev; /* Device handle */ struct cdev *led; uint8_t num; /* Number of this led */ uint8_t state; /* State of this led */ }; #define AHCI_NUM_LEDS 3 /* structure describing an ATA channel */ struct ahci_channel { device_t dev; /* Device handle */ int unit; /* Physical channel */ struct resource *r_mem; /* Memory of this channel */ struct resource *r_irq; /* Interrupt of this channel */ void *ih; /* Interrupt handle */ struct ata_dma dma; /* DMA data */ struct cam_sim *sim; struct cam_path *path; uint32_t caps; /* Controller capabilities */ uint32_t caps2; /* Controller capabilities */ uint32_t chcaps; /* Channel capabilities */ uint32_t chscaps; /* Channel sleep capabilities */ uint16_t vendorid; /* Vendor ID from the bus */ uint16_t deviceid; /* Device ID from the bus */ uint16_t subvendorid; /* Subvendor ID from the bus */ uint16_t subdeviceid; /* Subdevice ID from the bus */ int quirks; int numslots; /* Number of present slots */ int pm_level; /* power management level */ int devices; /* What is present */ int pm_present; /* PM presence reported */ int fbs_enabled; /* FIS-based switching enabled */ void (*start)(struct ahci_channel *); union ccb *hold[AHCI_MAX_SLOTS]; struct ahci_slot slot[AHCI_MAX_SLOTS]; uint32_t oslots; /* Occupied slots */ uint32_t rslots; /* Running slots */ uint32_t aslots; /* Slots with atomic commands */ uint32_t eslots; /* Slots in error */ uint32_t toslots; /* Slots in timeout */ int lastslot; /* Last used slot */ int taggedtarget; /* Last tagged target */ int numrslots; /* Number of running slots */ int numrslotspd[16];/* Number of running slots per dev */ int 
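/*
 * Illustrative sketch (stand-alone): how a PRD entry's dbc word encodes
 * a 0-based byte count (AHCI_PRD_MASK, 4MB maximum) with the
 * interrupt-on-completion flag in bit 31, matching struct ahci_dma_prd
 * above.  prd_pack() is a hypothetical helper, not a driver function.
 */
#include <stdint.h>
#include <stdio.h>

#define PRD_MASK 0x003fffffu	/* mirrors AHCI_PRD_MASK */
#define PRD_IPC  (1u << 31)	/* mirrors AHCI_PRD_IPC */

static uint32_t
prd_pack(uint32_t nbytes, int intr)
{
	/* Hardware stores length - 1; caller keeps nbytes <= 4MB. */
	return (((nbytes - 1) & PRD_MASK) | (intr ? PRD_IPC : 0));
}

int
main(void)
{
	uint32_t dbc = prd_pack(65536, 1);

	printf("dbc=0x%08x len=%u ipc=%d\n", dbc,
	    (dbc & PRD_MASK) + 1, (dbc & PRD_IPC) != 0);
	return (0);
}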
numtslots; /* Number of tagged slots */ int numtslotspd[16];/* Number of tagged slots per dev */ int numhslots; /* Number of held slots */ int recoverycmd; /* Our READ LOG active */ int fatalerr; /* Fatal error happened */ int resetting; /* Hard-reset in progress. */ int resetpolldiv; /* Hard-reset poll divider. */ int listening; /* SUD bit is cleared. */ int wrongccs; /* CCS field in CMD was wrong */ union ccb *frozen; /* Frozen command */ struct callout pm_timer; /* Power management events */ struct callout reset_timer; /* Hard-reset timeout */ struct ahci_device user[16]; /* User-specified settings */ struct ahci_device curr[16]; /* Current settings */ struct mtx_padalign mtx; /* state lock */ STAILQ_HEAD(, ccb_hdr) doneq; /* queue of completed CCBs */ int batch; /* doneq is in use */ int disablephy; /* keep PHY disabled */ }; struct ahci_enclosure { device_t dev; /* Device handle */ struct resource *r_memc; /* Control register */ struct resource *r_memt; /* Transmit buffer */ struct resource *r_memr; /* Receive buffer */ struct cam_sim *sim; struct cam_path *path; struct mtx mtx; /* state lock */ struct ahci_led leds[AHCI_MAX_PORTS * 3]; uint32_t capsem; /* Controller capabilities */ uint8_t status[AHCI_MAX_PORTS][4]; /* ArrayDev statuses */ int quirks; int channels; uint32_t ichannels; }; /* structure describing an AHCI controller */ struct ahci_controller { device_t dev; bus_dma_tag_t dma_tag; int r_rid; int r_msix_tab_rid; int r_msix_pba_rid; uint16_t vendorid; /* Vendor ID from the bus */ uint16_t deviceid; /* Device ID from the bus */ uint16_t subvendorid; /* Subvendor ID from the bus */ uint16_t subdeviceid; /* Subdevice ID from the bus */ struct resource *r_mem; struct resource *r_msix_table; struct resource *r_msix_pba; struct rman sc_iomem; struct ahci_controller_irq { struct ahci_controller *ctlr; struct resource *r_irq; void *handle; int r_irq_rid; int mode; #define AHCI_IRQ_MODE_ALL 0 #define AHCI_IRQ_MODE_AFTER 1 #define AHCI_IRQ_MODE_ONE 2 } irqs[AHCI_MAX_IRQS]; uint32_t caps; /* Controller capabilities */ uint32_t caps2; /* Controller capabilities */ uint32_t capsem; /* Controller capabilities */ uint32_t emloc; /* EM buffer location */ int quirks; int numirqs; int channels; uint32_t ichannels; int ccc; /* CCC timeout */ int cccv; /* CCC vector */ int direct; /* Direct command completion */ int msi; /* MSI interrupts */ int remapped_devices; /* Remapped NVMe devices */ uint32_t remap_offset; uint32_t remap_size; struct { void (*function)(void *); void *argument; } interrupt[AHCI_MAX_PORTS]; void (*ch_start)(struct ahci_channel *); int dma_coherent; /* DMA is cache-coherent */ struct mtx ch_mtx; /* Lock for attached channels */ struct ahci_channel *ch[AHCI_MAX_PORTS]; /* Attached channels */ }; enum ahci_err_type { AHCI_ERR_NONE, /* No error */ AHCI_ERR_INVALID, /* Error detected by us before submitting. */ AHCI_ERR_INNOCENT, /* Innocent victim. */ AHCI_ERR_TFE, /* Task File Error. */ AHCI_ERR_SATA, /* SATA error. */ AHCI_ERR_TIMEOUT, /* Command execution timeout. */ AHCI_ERR_NCQ, /* NCQ command error. CCB should be put on hold * until READ LOG is executed to reveal the error.
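/*
 * Illustrative sketch (stand-alone): the 32-bit slot bookkeeping that
 * the oslots/rslots/eslots masks in struct ahci_channel above enable,
 * one bit per command slot.  find_free_slot() is a hypothetical helper,
 * not a driver function.
 */
#include <stdint.h>
#include <stdio.h>

static int
find_free_slot(uint32_t oslots, int numslots)
{
	for (int i = 0; i < numslots; i++)
		if ((oslots & (1u << i)) == 0)
			return (i);
	return (-1);			/* all slots occupied */
}

int
main(void)
{
	uint32_t oslots = 0x0000000bu;	/* slots 0, 1, 3 busy */
	int slot = find_free_slot(oslots, 32);

	printf("free slot=%d\n", slot);	/* -> 2 */
	oslots |= 1u << slot;		/* mark it occupied */
	printf("oslots=0x%08x\n", oslots);
	return (0);
}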
*/ }; /* macros to hide busspace ugliness */ #define ATA_INB(res, offset) \ bus_read_1((res), (offset)) #define ATA_INW(res, offset) \ bus_read_2((res), (offset)) #define ATA_INL(res, offset) \ bus_read_4((res), (offset)) #define ATA_INSW(res, offset, addr, count) \ bus_read_multi_2((res), (offset), (addr), (count)) #define ATA_INSW_STRM(res, offset, addr, count) \ bus_read_multi_stream_2((res), (offset), (addr), (count)) #define ATA_INSL(res, offset, addr, count) \ bus_read_multi_4((res), (offset), (addr), (count)) #define ATA_INSL_STRM(res, offset, addr, count) \ bus_read_multi_stream_4((res), (offset), (addr), (count)) #define ATA_OUTB(res, offset, value) \ bus_write_1((res), (offset), (value)) #define ATA_OUTW(res, offset, value) \ bus_write_2((res), (offset), (value)) #define ATA_OUTL(res, offset, value) \ bus_write_4((res), (offset), (value)) #define ATA_OUTSW(res, offset, addr, count) \ bus_write_multi_2((res), (offset), (addr), (count)) #define ATA_OUTSW_STRM(res, offset, addr, count) \ bus_write_multi_stream_2((res), (offset), (addr), (count)) #define ATA_OUTSL(res, offset, addr, count) \ bus_write_multi_4((res), (offset), (addr), (count)) #define ATA_OUTSL_STRM(res, offset, addr, count) \ bus_write_multi_stream_4((res), (offset), (addr), (count)) /* * On some platforms, we must ensure proper interdevice write ordering. * The AHCI interrupt status register must be updated in HW before * registers in the interrupt controller. * Unfortunately, the only way we can do that is a readback. * * Currently, only ARM is known to have this issue. */ #if defined(__arm__) #define ATA_RBL(res, offset) \ bus_read_4((res), (offset)) #else #define ATA_RBL(res, offset) #endif #define AHCI_Q_NOFORCE 0x00000001 #define AHCI_Q_NOPMP 0x00000002 #define AHCI_Q_NONCQ 0x00000004 #define AHCI_Q_1CH 0x00000008 #define AHCI_Q_2CH 0x00000010 #define AHCI_Q_4CH 0x00000020 #define AHCI_Q_EDGEIS 0x00000040 #define AHCI_Q_SATA2 0x00000080 #define AHCI_Q_NOBSYRES 0x00000100 #define AHCI_Q_NOAA 0x00000200 #define AHCI_Q_NOCOUNT 0x00000400 #define AHCI_Q_ALTSIG 0x00000800 #define AHCI_Q_NOMSI 0x00001000 #define AHCI_Q_ATI_PMP_BUG 0x00002000 #define AHCI_Q_MAXIO_64K 0x00004000 #define AHCI_Q_SATA1_UNIT0 0x00008000 /* need better method for this */ #define AHCI_Q_ABAR0 0x00010000 #define AHCI_Q_1MSI 0x00020000 #define AHCI_Q_FORCE_PI 0x00040000 #define AHCI_Q_RESTORE_CAP 0x00080000 #define AHCI_Q_NOMSIX 0x00100000 #define AHCI_Q_MRVL_SR_DEL 0x00200000 #define AHCI_Q_NOCCS 0x00400000 #define AHCI_Q_NOAUX 0x00800000 #define AHCI_Q_IOMMU_BUSWIDE 0x01000000 #define AHCI_Q_SLOWDEV 0x02000000 #define AHCI_Q_BIT_STRING \ "\020" \ "\001NOFORCE" \ "\002NOPMP" \ "\003NONCQ" \ "\0041CH" \ "\0052CH" \ "\0064CH" \ "\007EDGEIS" \ "\010SATA2" \ "\011NOBSYRES" \ "\012NOAA" \ "\013NOCOUNT" \ "\014ALTSIG" \ "\015NOMSI" \ "\016ATI_PMP_BUG" \ "\017MAXIO_64K" \ "\020SATA1_UNIT0" \ "\021ABAR0" \ "\0221MSI" \ "\023FORCE_PI" \ "\024RESTORE_CAP" \ "\025NOMSIX" \ "\026MRVL_SR_DEL" \ "\027NOCCS" \ "\030NOAUX" \ "\031IOMMU_BUSWIDE" \ "\032SLOWDEV" int ahci_attach(device_t dev); int ahci_detach(device_t dev); int ahci_setup_interrupt(device_t dev); int ahci_print_child(device_t dev, device_t child); struct resource *ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int ahci_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r); +int ahci_release_resource(device_t dev, device_t child, struct resource *r); int ahci_setup_intr(device_t
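/*
 * Illustrative sketch (stand-alone): how a bit-string such as
 * AHCI_Q_BIT_STRING above is consumed.  The kernel's "%b" printf
 * conversion uses this format: the first character is the numeric base
 * (\020 = 16, hex), then each entry is a 1-based bit number followed by
 * the flag name.  print_bits() reimplements that decoding in hosted C
 * with a shortened example string.
 */
#include <stdio.h>

static void
print_bits(unsigned int v, const char *bf)
{
	int base = *bf++;
	int any = 0;

	printf(base == 8 ? "%#o" : "%#x", v);
	while (*bf != '\0') {
		int bit = *bf++;

		if (v & (1u << (bit - 1))) {
			putchar(any++ ? ',' : '<');
			while (*bf > ' ')	/* name runs to next ctrl char */
				putchar(*bf++);
		} else {
			while (*bf > ' ')
				bf++;
		}
	}
	if (any)
		putchar('>');
	putchar('\n');
}

int
main(void)
{
	/* NOFORCE | NOAA -> "0x201<NOFORCE,NOAA>" */
	print_bits(0x00000201, "\020\001NOFORCE\002NOPMP\012NOAA");
	return (0);
}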
dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep); int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int ahci_child_location(device_t dev, device_t child, struct sbuf *sb); bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child); int ahci_ctlr_reset(device_t dev); int ahci_ctlr_setup(device_t dev); void ahci_free_mem(device_t dev); /* Functions to allow AHCI EM to access other channels. */ void ahci_attached(device_t dev, struct ahci_channel *ch); void ahci_detached(device_t dev, struct ahci_channel *ch); struct ahci_channel * ahci_getch(device_t dev, int n); void ahci_putch(struct ahci_channel *ch); diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c index 3a2a0f640f62..9cc815150665 100644 --- a/sys/dev/ata/ata-pci.c +++ b/sys/dev/ata/ata-pci.c @@ -1,926 +1,922 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_ATAPCI, "ata_pci", "ATA driver PCI"); /* misc defines */ #define IOMASK 0xfffffffc /* * generic PCI ATA device probe */ int ata_pci_probe(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); char buffer[64]; /* is this a storage class device ? */ if (pci_get_class(dev) != PCIC_STORAGE) return (ENXIO); /* is this an IDE/ATA type device ? 
*/ if (pci_get_subclass(dev) != PCIS_STORAGE_IDE) return (ENXIO); sprintf(buffer, "%s ATA controller", ata_pcivendor2str(dev)); device_set_desc_copy(dev, buffer); ctlr->chipinit = ata_generic_chipinit; /* we are a low priority handler */ return (BUS_PROBE_GENERIC); } int ata_pci_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); device_t child; u_int32_t cmd; int unit; /* do chipset specific setups only needed once */ ctlr->legacy = ata_legacy(dev); if (ctlr->legacy || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK) ctlr->channels = 2; else ctlr->channels = 1; ctlr->ichannels = -1; ctlr->ch_attach = ata_pci_ch_attach; ctlr->ch_detach = ata_pci_ch_detach; ctlr->dev = dev; /* if needed try to enable busmastering */ pci_enable_busmaster(dev); cmd = pci_read_config(dev, PCIR_COMMAND, 2); /* if busmastering mode "stuck" use it */ if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) { ctlr->r_type1 = SYS_RES_IOPORT; ctlr->r_rid1 = ATA_BMADDR_RID; ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE); } if (ctlr->chipinit(dev)) { if (ctlr->r_res1) bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); return ENXIO; } /* attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { if ((ctlr->ichannels & (1 << unit)) == 0) continue; child = device_add_child(dev, "ata", ((unit == 0 || unit == 1) && ctlr->legacy) ? unit : devclass_find_free_unit(ata_devclass, 2)); if (child == NULL) device_printf(dev, "failed to add ata child device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } int ata_pci_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); /* detach & delete all children */ device_delete_children(dev); if (ctlr->r_irq) { bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq); if (ctlr->r_irq_rid != ATA_IRQ_RID) pci_release_msi(dev); } if (ctlr->chipdeinit != NULL) ctlr->chipdeinit(dev); if (ctlr->r_res2) { bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2); } if (ctlr->r_res1) { bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); } return 0; } int ata_pci_suspend(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int error = 0; bus_generic_suspend(dev); if (ctlr->suspend) error = ctlr->suspend(dev); return error; } int ata_pci_resume(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int error = 0; if (ctlr->resume) error = ctlr->resume(dev); bus_generic_resume(dev); return error; } int ata_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } int ata_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (BUS_WRITE_IVAR(device_get_parent(dev), dev, which, value)); } uint32_t ata_pci_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } void ata_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ata_pci_controller *controller = device_get_softc(dev); struct resource *res = NULL; if (device_get_devclass(child) == ata_devclass) { int unit = ((struct ata_channel 
*)device_get_softc(child))->unit; int myrid; if (type == SYS_RES_IOPORT) { switch (*rid) { case ATA_IOADDR_RID: if (controller->legacy) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY); count = ATA_IOSIZE; end = start + count - 1; } myrid = PCIR_BAR(0) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; case ATA_CTLADDR_RID: if (controller->legacy) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_CTLOFFSET; count = ATA_CTLIOSIZE; end = start + count - 1; } myrid = PCIR_BAR(1) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; } } if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) { if (controller->legacy) { int irq = (unit == 0 ? 14 : 15); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, irq, irq, 1, flags); } else res = controller->r_irq; } } else { if (type == SYS_RES_IRQ) { if (*rid != ATA_IRQ_RID) return (NULL); res = controller->r_irq; } else { res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags); } } return (res); } int -ata_pci_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +ata_pci_release_resource(device_t dev, device_t child, struct resource *r) { + int rid = rman_get_rid(r); + int type = rman_get_type(r); if (device_get_devclass(child) == ata_devclass) { struct ata_pci_controller *controller = device_get_softc(dev); - int unit = ((struct ata_channel *)device_get_softc(child))->unit; if (type == SYS_RES_IOPORT) { switch (rid) { case ATA_IOADDR_RID: - return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, - SYS_RES_IOPORT, - PCIR_BAR(0) + (unit << 3), r); case ATA_CTLADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, - SYS_RES_IOPORT, - PCIR_BAR(1) + (unit << 3), r); + r); default: return ENOENT; } } if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return ENOENT; if (controller->legacy) { return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, - SYS_RES_IRQ, rid, r); + r); } else return 0; } } else { if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return (ENOENT); return (0); } else { return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, - type, rid, r)); + r)); } } return (EINVAL); } int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ata_pci_controller *controller = device_get_softc(dev); if (controller->legacy) { return BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, filter, function, argument, cookiep); } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit; if (filter != NULL) { printf("ata-pci.c: we cannot use a filter here\n"); return (EINVAL); } if (device_get_devclass(child) == ata_devclass) unit = ((struct ata_channel *)device_get_softc(child))->unit; else unit = ATA_PCI_MAX_CH - 1; controller->interrupt[unit].function = function; controller->interrupt[unit].argument = argument; *cookiep = controller; return 0; } } int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ata_pci_controller *controller = device_get_softc(dev); if (controller->legacy) { return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie); } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit; if (device_get_devclass(child) == ata_devclass) unit = ((struct ata_channel 
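/*
 * Illustrative sketch (stand-alone): why the type and rid parameters
 * could be dropped from the release path above.  A struct resource
 * already records its type and rid, and the new code asks it via the
 * rman(9) accessors rman_get_type()/rman_get_rid().  The mock structure
 * and helpers below only mirror that idea; they are not the real rman
 * types.
 */
#include <stdio.h>

#define MOCK_SYS_RES_IRQ    1	/* invented stand-ins */
#define MOCK_SYS_RES_IOPORT 4

struct mock_resource {
	int r_type;	/* what rman_get_type() would return */
	int r_rid;	/* what rman_get_rid() would return */
};

static int mock_rman_get_type(struct mock_resource *r) { return (r->r_type); }
static int mock_rman_get_rid(struct mock_resource *r)  { return (r->r_rid); }

static int
release(struct mock_resource *r)
{
	/* No type/rid arguments needed: the resource knows them. */
	printf("release type=%d rid=%d\n",
	    mock_rman_get_type(r), mock_rman_get_rid(r));
	return (0);
}

int
main(void)
{
	struct mock_resource irq = { MOCK_SYS_RES_IRQ, 0 };

	return (release(&irq));
}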
*)device_get_softc(child))->unit; else unit = ATA_PCI_MAX_CH - 1; controller->interrupt[unit].function = NULL; controller->interrupt[unit].argument = NULL; return 0; } } int ata_generic_setmode(device_t dev, int target, int mode) { return (min(mode, ATA_UDMA2)); } int ata_generic_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev, ata_generic_intr)) return ENXIO; ctlr->setmode = ata_generic_setmode; return 0; } int ata_pci_ch_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct resource *io = NULL, *ctlio = NULL; int i, rid; rid = ATA_IOADDR_RID; if (!(io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE))) return ENXIO; rid = ATA_CTLADDR_RID; if (!(ctlio = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,RF_ACTIVE))){ bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io); return ENXIO; } ata_pci_dmainit(dev); for (i = ATA_DATA; i <= ATA_COMMAND; i ++) { ch->r_io[i].res = io; ch->r_io[i].offset = i; } ch->r_io[ATA_CONTROL].res = ctlio; ch->r_io[ATA_CONTROL].offset = ctlr->legacy ? 0 : 2; ch->r_io[ATA_IDX_ADDR].res = io; ata_default_registers(dev); if (ctlr->r_res1) { for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = (i - ATA_BMCMD_PORT) + (ch->unit*ATA_BMIOSIZE); } } ata_pci_hw(dev); return 0; } int ata_pci_ch_detach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_pci_dmafini(dev); bus_release_resource(dev, SYS_RES_IOPORT, ATA_CTLADDR_RID, ch->r_io[ATA_CONTROL].res); bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, ch->r_io[ATA_IDX_ADDR].res); return (0); } int ata_pci_status(device_t dev) { struct ata_pci_controller *controller = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if ((dumping || !controller->legacy) && ((ch->flags & ATA_ALWAYS_DMASTAT) || (ch->dma.flags & ATA_DMA_ACTIVE))) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & ATA_BMSTAT_INTERRUPT) == 0) return 0; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) { DELAY(100); if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) return 0; } return 1; } void ata_pci_hw(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_generic_hw(dev); ch->hw.status = ata_pci_status; } static int ata_pci_dmastart(struct ata_request *request) { struct ata_channel *ch = device_get_softc(request->parent); ATA_DEBUG_RQ(request, "dmastart"); ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, request->dma->sg_bus); ch->dma.flags |= ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, (ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_WRITE_READ) | ((request->flags & ATA_R_READ) ? 
ATA_BMCMD_WRITE_READ : 0)| ATA_BMCMD_START_STOP); return 0; } static int ata_pci_dmastop(struct ata_request *request) { struct ata_channel *ch = device_get_softc(request->parent); int error; ATA_DEBUG_RQ(request, "dmastop"); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma.flags &= ~ATA_DMA_ACTIVE; error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); return error; } static void ata_pci_dmareset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma.flags &= ~ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); if ((request = ch->running)) { device_printf(dev, "DMA reset calling unload\n"); ch->dma.unload(request); } } void ata_pci_dmainit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_dmainit(dev); ch->dma.start = ata_pci_dmastart; ch->dma.stop = ata_pci_dmastop; ch->dma.reset = ata_pci_dmareset; } void ata_pci_dmafini(device_t dev) { ata_dmafini(dev); } int ata_pci_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } int ata_pci_child_location(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t ata_pci_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t ata_pci_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pci_probe), DEVMETHOD(device_attach, ata_pci_attach), DEVMETHOD(device_detach, ata_pci_detach), DEVMETHOD(device_suspend, ata_pci_suspend), DEVMETHOD(device_resume, ata_pci_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus methods */ DEVMETHOD(bus_read_ivar, ata_pci_read_ivar), DEVMETHOD(bus_write_ivar, ata_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), DEVMETHOD(bus_release_resource, ata_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), DEVMETHOD(pci_read_config, ata_pci_read_config), DEVMETHOD(pci_write_config, ata_pci_write_config), DEVMETHOD(bus_print_child, ata_pci_print_child), DEVMETHOD(bus_child_location, ata_pci_child_location), DEVMETHOD(bus_get_dma_tag, ata_pci_get_dma_tag), DEVMETHOD_END }; static driver_t ata_pci_driver = { "atapci", ata_pci_methods, sizeof(struct ata_pci_controller), }; DRIVER_MODULE(atapci, pci, ata_pci_driver, NULL, NULL); MODULE_VERSION(atapci, 1); MODULE_DEPEND(atapci, ata, 1, 1, 1); static int ata_pcichannel_probe(device_t dev) { if ((intptr_t)device_get_ivars(dev) < 0) return (ENXIO); device_set_desc(dev, "ATA channel"); return ata_probe(dev); } static int ata_pcichannel_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (ch->attached) return (0); ch->attached = 1; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); if ((error 
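/*
 * Illustrative sketch (stand-alone): the busmaster start/stop protocol
 * that ata_pci_dmastart()/ata_pci_dmastop() above implement through
 * ATA_IDX_INB/ATA_IDX_OUTB.  The bit layout assumed here (start/stop in
 * bit 0, transfer direction in bit 3) follows the standard PCI IDE
 * busmaster command register; a plain byte stands in for the hardware
 * port.
 */
#include <stdint.h>
#include <stdio.h>

#define BMCMD_START_STOP 0x01
#define BMCMD_WRITE_READ 0x08	/* 1 = device-to-memory (read) */

static uint8_t bmcmd;		/* mock ATA_BMCMD_PORT */

static void
dma_start(int is_read)
{
	uint8_t v = bmcmd & (uint8_t)~BMCMD_WRITE_READ;

	if (is_read)
		v |= BMCMD_WRITE_READ;
	bmcmd = v | BMCMD_START_STOP;	/* set direction, then engine on */
}

static void
dma_stop(void)
{
	bmcmd &= (uint8_t)~BMCMD_START_STOP;	/* engine off */
}

int
main(void)
{
	dma_start(1);
	printf("running: bmcmd=0x%02x\n", bmcmd);	/* 0x09 */
	dma_stop();
	printf("stopped: bmcmd=0x%02x\n", bmcmd);	/* 0x08 */
	return (0);
}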
= ctlr->ch_attach(dev))) return error; return ata_attach(dev); } static int ata_pcichannel_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); ch->attached = 0; if ((error = ata_detach(dev))) return error; if (ctlr->ch_detach) return (ctlr->ch_detach(dev)); return (0); } static int ata_pcichannel_suspend(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); if ((error = ata_suspend(dev))) return (error); if (ctlr->ch_suspend != NULL && (error = ctlr->ch_suspend(dev))) return (error); return (0); } static int ata_pcichannel_resume(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); if (ctlr->ch_resume != NULL && (error = ctlr->ch_resume(dev))) return (error); return ata_resume(dev); } static void ata_pcichannel_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* if DMA engine present reset it */ if (ch->dma.reset) ch->dma.reset(dev); /* reset the controller HW */ if (ctlr->reset) ctlr->reset(dev); else ata_generic_reset(dev); } static int ata_pcichannel_setmode(device_t dev, int target, int mode) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); if (ctlr->setmode) return (ctlr->setmode(dev, target, mode)); else return (ata_generic_setmode(dev, target, mode)); } static int ata_pcichannel_getrev(device_t dev, int target) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if (ch->flags & ATA_SATA) { if (ctlr->getrev) return (ctlr->getrev(dev, target)); else return (0xff); } else return (0); } static device_method_t ata_pcichannel_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pcichannel_probe), DEVMETHOD(device_attach, ata_pcichannel_attach), DEVMETHOD(device_detach, ata_pcichannel_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, ata_pcichannel_suspend), DEVMETHOD(device_resume, ata_pcichannel_resume), /* ATA methods */ DEVMETHOD(ata_setmode, ata_pcichannel_setmode), DEVMETHOD(ata_getrev, ata_pcichannel_getrev), DEVMETHOD(ata_reset, ata_pcichannel_reset), DEVMETHOD_END }; driver_t ata_pcichannel_driver = { "ata", ata_pcichannel_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, NULL, NULL); /* * misc support functions */ int ata_legacy(device_t dev) { return (((pci_read_config(dev, PCIR_SUBCLASS, 1) == PCIS_STORAGE_IDE) && (pci_read_config(dev, PCIR_PROGIF, 1)&PCIP_STORAGE_IDE_MASTERDEV)&& ((pci_read_config(dev, PCIR_PROGIF, 1) & (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) != (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(1), 4) && !pci_read_config(dev, PCIR_BAR(2), 4) && !pci_read_config(dev, PCIR_BAR(3), 4) && !pci_read_config(dev, PCIR_BAR(5), 4))); } void ata_generic_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; for (unit = 0; unit < ATA_PCI_MAX_CH; unit++) { if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); } } int ata_setup_interrupt(device_t 
dev, void *intr_func) { struct ata_pci_controller *ctlr = device_get_softc(dev); int i, msi = 0; if (!ctlr->legacy) { if (resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &i) == 0 && i != 0) msi = 1; if (msi && pci_msi_count(dev) > 0 && pci_alloc_msi(dev, &msi) == 0) { ctlr->r_irq_rid = 0x1; } else { msi = 0; ctlr->r_irq_rid = ATA_IRQ_RID; } if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); if (msi) pci_release_msi(dev); return ENXIO; } if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, NULL, intr_func, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq); if (msi) pci_release_msi(dev); return ENXIO; } } return 0; } void ata_set_desc(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); char buffer[128]; sprintf(buffer, "%s %s %s controller", ata_pcivendor2str(dev), ctlr->chip->text, ata_mode2str(ctlr->chip->max_dma)); device_set_desc_copy(dev, buffer); } const struct ata_chip_id * ata_match_chip(device_t dev, const struct ata_chip_id *index) { uint32_t devid; uint8_t revid; devid = pci_get_devid(dev); revid = pci_get_revid(dev); while (index->chipid != 0) { if (devid == index->chipid && revid >= index->chiprev) return (index); index++; } return (NULL); } const struct ata_chip_id * ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot) { const struct ata_chip_id *idx; device_t *children; int nchildren, i; uint8_t s; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return (NULL); for (i = 0; i < nchildren; i++) { s = pci_get_slot(children[i]); if ((slot >= 0 && s == slot) || (slot < 0 && s <= -slot)) { idx = ata_match_chip(children[i], index); if (idx != NULL) { free(children, M_TEMP); return (idx); } } } free(children, M_TEMP); return (NULL); } const char * ata_pcivendor2str(device_t dev) { switch (pci_get_vendor(dev)) { case ATA_ACARD_ID: return "Acard"; case ATA_ACER_LABS_ID: return "AcerLabs"; case ATA_AMD_ID: return "AMD"; case ATA_ADAPTEC_ID: return "Adaptec"; case ATA_ATI_ID: return "ATI"; case ATA_CYRIX_ID: return "Cyrix"; case ATA_CYPRESS_ID: return "Cypress"; case ATA_HIGHPOINT_ID: return "HighPoint"; case ATA_INTEL_ID: return "Intel"; case ATA_ITE_ID: return "ITE"; case ATA_JMICRON_ID: return "JMicron"; case ATA_MARVELL_ID: return "Marvell"; case ATA_MARVELL2_ID: return "Marvell"; case ATA_NATIONAL_ID: return "National"; case ATA_NETCELL_ID: return "Netcell"; case ATA_NVIDIA_ID: return "nVidia"; case ATA_PROMISE_ID: return "Promise"; case ATA_SERVERWORKS_ID: return "ServerWorks"; case ATA_SILICON_IMAGE_ID: return "SiI"; case ATA_SIS_ID: return "SiS"; case ATA_VIA_ID: return "VIA"; case ATA_CENATEK_ID: return "Cenatek"; case ATA_MICRON_ID: return "Micron"; default: return "Generic"; } } int ata_mode2idx(int mode) { if ((mode & ATA_DMA_MASK) == ATA_UDMA0) return (mode & ATA_MODE_MASK) + 8; if ((mode & ATA_DMA_MASK) == ATA_WDMA0) return (mode & ATA_MODE_MASK) + 5; return (mode & ATA_MODE_MASK) - ATA_PIO0; } diff --git a/sys/dev/ata/ata-pci.h b/sys/dev/ata/ata-pci.h index 95e7dd113fec..cad9441a21ae 100644 --- a/sys/dev/ata/ata-pci.h +++ b/sys/dev/ata/ata-pci.h @@ -1,598 +1,598 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2003 - 2008 Søren Schmidt * All rights reserved. 
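/*
 * Illustrative sketch (stand-alone): the table walk ata_match_chip()
 * above performs.  A row matches when the PCI device id is equal and
 * the revision is at least the row's minimum; a zero chipid terminates
 * the table.  The device ids below come from the ata-pci.h tables that
 * follow, but the revision thresholds and text labels are invented for
 * the example.
 */
#include <stdint.h>
#include <stdio.h>

struct chip_id {
	uint32_t chipid;	/* devid; 0 terminates the table */
	uint8_t chiprev;	/* minimum matching revision */
	const char *text;
};

static const struct chip_id table[] = {
	{ 0x24db8086, 0x00, "Intel" },		/* ATA_I82801EB */
	{ 0x522910b9, 0xc5, "AcerLabs" },	/* ATA_ALI_5229 */
	{ 0, 0, NULL }
};

static const struct chip_id *
match_chip(uint32_t devid, uint8_t revid)
{
	for (const struct chip_id *idx = table; idx->chipid != 0; idx++)
		if (devid == idx->chipid && revid >= idx->chiprev)
			return (idx);
	return (NULL);
}

int
main(void)
{
	const struct chip_id *c = match_chip(0x522910b9, 0xc7);

	printf("%s\n", c != NULL ? c->text : "no match");
	return (0);
}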
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* structure holding chipset config info */ struct ata_chip_id { u_int32_t chipid; u_int8_t chiprev; int cfg1; int cfg2; u_int8_t max_dma; const char *text; }; #define ATA_PCI_MAX_CH 8 /* structure describing a PCI ATA controller */ struct ata_pci_controller { device_t dev; int r_type1; int r_rid1; struct resource *r_res1; int r_type2; int r_rid2; struct resource *r_res2; int r_irq_rid; struct resource *r_irq; void *handle; const struct ata_chip_id *chip; int legacy; int channels; int ichannels; int (*chipinit)(device_t); int (*chipdeinit)(device_t); int (*suspend)(device_t); int (*resume)(device_t); int (*ch_attach)(device_t); int (*ch_detach)(device_t); int (*ch_suspend)(device_t); int (*ch_resume)(device_t); void (*reset)(device_t); int (*setmode)(device_t, int, int); int (*getrev)(device_t, int); struct { void (*function)(void *); void *argument; } interrupt[ATA_PCI_MAX_CH]; void *chipset_data; }; /* defines for known chipset PCI id's */ #define ATA_ACARD_ID 0x1191 #define ATA_ATP850 0x00021191 #define ATA_ATP850A 0x00041191 #define ATA_ATP850R 0x00051191 #define ATA_ATP860A 0x00061191 #define ATA_ATP860R 0x00071191 #define ATA_ATP865A 0x00081191 #define ATA_ATP865R 0x00091191 #define ATA_ACER_LABS_ID 0x10b9 #define ATA_ALI_1533 0x153310b9 #define ATA_ALI_5228 0x522810b9 #define ATA_ALI_5229 0x522910b9 #define ATA_ALI_5281 0x528110b9 #define ATA_ALI_5287 0x528710b9 #define ATA_ALI_5288 0x528810b9 #define ATA_ALI_5289 0x528910b9 #define ATA_AMD_ID 0x1022 #define ATA_AMD755 0x74011022 #define ATA_AMD756 0x74091022 #define ATA_AMD766 0x74111022 #define ATA_AMD768 0x74411022 #define ATA_AMD8111 0x74691022 #define ATA_AMD5536 0x209a1022 #define ATA_AMD_HUDSON2_S1 0x78001022 #define ATA_AMD_HUDSON2_S2 0x78011022 #define ATA_AMD_HUDSON2_S3 0x78021022 #define ATA_AMD_HUDSON2_S4 0x78031022 #define ATA_AMD_HUDSON2_S5 0x78041022 #define ATA_AMD_HUDSON2 0x780c1022 #define ATA_ADAPTEC_ID 0x9005 #define ATA_ADAPTEC_1420 0x02419005 #define ATA_ADAPTEC_1430 0x02439005 #define ATA_ATI_ID 0x1002 #define ATA_ATI_IXP200 0x43491002 #define ATA_ATI_IXP300 0x43691002 #define ATA_ATI_IXP300_S1 0x436e1002 #define ATA_ATI_IXP400 0x43761002 #define ATA_ATI_IXP400_S1 0x43791002 #define ATA_ATI_IXP400_S2 0x437a1002 #define ATA_ATI_IXP600 0x438c1002 
#define ATA_ATI_IXP600_S1 0x43801002 #define ATA_ATI_IXP700 0x439c1002 #define ATA_ATI_IXP700_S1 0x43901002 #define ATA_ATI_IXP700_S2 0x43911002 #define ATA_ATI_IXP700_S3 0x43921002 #define ATA_ATI_IXP700_S4 0x43931002 #define ATA_ATI_IXP800_S1 0x43941002 #define ATA_ATI_IXP800_S2 0x43951002 #define ATA_CENATEK_ID 0x16ca #define ATA_CENATEK_ROCKET 0x000116ca #define ATA_CYRIX_ID 0x1078 #define ATA_CYRIX_5530 0x01021078 #define ATA_CYPRESS_ID 0x1080 #define ATA_CYPRESS_82C693 0xc6931080 #define ATA_DEC_21150 0x00221011 #define ATA_DEC_21150_1 0x00231011 #define ATA_HIGHPOINT_ID 0x1103 #define ATA_HPT366 0x00041103 #define ATA_HPT372 0x00051103 #define ATA_HPT302 0x00061103 #define ATA_HPT371 0x00071103 #define ATA_HPT374 0x00081103 #define ATA_INTEL_ID 0x8086 #define ATA_I960RM 0x09628086 #define ATA_I82371FB 0x12308086 #define ATA_I82371SB 0x70108086 #define ATA_I82371AB 0x71118086 #define ATA_I82443MX 0x71998086 #define ATA_I82451NX 0x84ca8086 #define ATA_I82372FB 0x76018086 #define ATA_I82801AB 0x24218086 #define ATA_I82801AA 0x24118086 #define ATA_I82801BA 0x244a8086 #define ATA_I82801BA_1 0x244b8086 #define ATA_I82801CA 0x248a8086 #define ATA_I82801CA_1 0x248b8086 #define ATA_I82801DB 0x24cb8086 #define ATA_I82801DB_1 0x24ca8086 #define ATA_I82801EB 0x24db8086 #define ATA_I82801EB_S1 0x24d18086 #define ATA_I82801EB_R1 0x24df8086 #define ATA_I6300ESB 0x25a28086 #define ATA_I6300ESB_S1 0x25a38086 #define ATA_I6300ESB_R1 0x25b08086 #define ATA_I63XXESB2 0x269e8086 #define ATA_I63XXESB2_S1 0x26808086 #define ATA_I82801FB 0x266f8086 #define ATA_I82801FB_S1 0x26518086 #define ATA_I82801FB_R1 0x26528086 #define ATA_I82801FBM 0x26538086 #define ATA_I82801GB 0x27df8086 #define ATA_I82801GB_S1 0x27c08086 #define ATA_I82801GBM_S1 0x27c48086 #define ATA_I82801HB_S1 0x28208086 #define ATA_I82801HB_S2 0x28258086 #define ATA_I82801HBM 0x28508086 #define ATA_I82801HBM_S1 0x28288086 #define ATA_I82801IB_S1 0x29208086 #define ATA_I82801IB_S3 0x29218086 #define ATA_I82801IB_R1 0x29258086 #define ATA_I82801IB_S2 0x29268086 #define ATA_I82801IBM_S1 0x29288086 #define ATA_I82801IBM_S2 0x292d8086 #define ATA_I82801JIB_S1 0x3a208086 #define ATA_I82801JIB_S2 0x3a268086 #define ATA_I82801JD_S1 0x3a008086 #define ATA_I82801JD_S2 0x3a068086 #define ATA_I82801JI_S1 0x3a208086 #define ATA_I82801JI_S2 0x3a268086 #define ATA_IBP_S1 0x3b208086 #define ATA_IBP_S2 0x3b218086 #define ATA_IBP_S3 0x3b268086 #define ATA_IBP_S4 0x3b288086 #define ATA_IBP_S5 0x3b2d8086 #define ATA_IBP_S6 0x3b2e8086 #define ATA_CPT_S1 0x1c008086 #define ATA_CPT_S2 0x1c018086 #define ATA_CPT_S3 0x1c088086 #define ATA_CPT_S4 0x1c098086 #define ATA_PBG_S1 0x1d008086 #define ATA_PBG_S2 0x1d088086 #define ATA_PPT_S1 0x1e008086 #define ATA_PPT_S2 0x1e018086 #define ATA_PPT_S3 0x1e088086 #define ATA_PPT_S4 0x1e098086 #define ATA_AVOTON_S1 0x1f208086 #define ATA_AVOTON_S2 0x1f218086 #define ATA_AVOTON_S3 0x1f308086 #define ATA_AVOTON_S4 0x1f318086 #define ATA_LPT_S1 0x8c008086 #define ATA_LPT_S2 0x8c018086 #define ATA_LPT_S3 0x8c088086 #define ATA_LPT_S4 0x8c098086 #define ATA_WCPT_S1 0x8c808086 #define ATA_WCPT_S2 0x8c818086 #define ATA_WCPT_S3 0x8c888086 #define ATA_WCPT_S4 0x8c898086 #define ATA_WELLS_S1 0x8d008086 #define ATA_WELLS_S2 0x8d088086 #define ATA_WELLS_S3 0x8d608086 #define ATA_WELLS_S4 0x8d688086 #define ATA_LPTLP_S1 0x9c008086 #define ATA_LPTLP_S2 0x9c018086 #define ATA_LPTLP_S3 0x9c088086 #define ATA_LPTLP_S4 0x9c098086 #define ATA_I31244 0x32008086 #define ATA_ISCH 0x811a8086 #define ATA_COLETOCRK_S1 0x23a18086 #define 
ATA_COLETOCRK_S2 0x23a68086 #define ATA_IBT_S1 0x0f208086 #define ATA_IBT_S2 0x0f218086 #define ATA_ITE_ID 0x1283 #define ATA_IT8211F 0x82111283 #define ATA_IT8212F 0x82121283 #define ATA_IT8213F 0x82131283 #define ATA_JMICRON_ID 0x197b #define ATA_JMB360 0x2360197b #define ATA_JMB361 0x2361197b #define ATA_JMB362 0x2362197b #define ATA_JMB363 0x2363197b #define ATA_JMB365 0x2365197b #define ATA_JMB366 0x2366197b #define ATA_JMB368 0x2368197b #define ATA_JMB368_2 0x0368197b #define ATA_MARVELL_ID 0x11ab #define ATA_M88SE6101 0x610111ab #define ATA_M88SE6102 0x610211ab #define ATA_M88SE6111 0x611111ab #define ATA_M88SE6121 0x612111ab #define ATA_M88SE6141 0x614111ab #define ATA_M88SE6145 0x614511ab #define ATA_MARVELL2_ID 0x1b4b #define ATA_MICRON_ID 0x1042 #define ATA_MICRON_RZ1000 0x10001042 #define ATA_MICRON_RZ1001 0x10011042 #define ATA_NATIONAL_ID 0x100b #define ATA_SC1100 0x0502100b #define ATA_NETCELL_ID 0x169c #define ATA_NETCELL_SR 0x0044169c #define ATA_NVIDIA_ID 0x10de #define ATA_NFORCE1 0x01bc10de #define ATA_NFORCE2 0x006510de #define ATA_NFORCE2_PRO 0x008510de #define ATA_NFORCE2_PRO_S1 0x008e10de #define ATA_NFORCE3 0x00d510de #define ATA_NFORCE3_PRO 0x00e510de #define ATA_NFORCE3_PRO_S1 0x00e310de #define ATA_NFORCE3_PRO_S2 0x00ee10de #define ATA_NFORCE_MCP04 0x003510de #define ATA_NFORCE_MCP04_S1 0x003610de #define ATA_NFORCE_MCP04_S2 0x003e10de #define ATA_NFORCE_CK804 0x005310de #define ATA_NFORCE_CK804_S1 0x005410de #define ATA_NFORCE_CK804_S2 0x005510de #define ATA_NFORCE_MCP51 0x026510de #define ATA_NFORCE_MCP51_S1 0x026610de #define ATA_NFORCE_MCP51_S2 0x026710de #define ATA_NFORCE_MCP55 0x036e10de #define ATA_NFORCE_MCP55_S1 0x037e10de #define ATA_NFORCE_MCP55_S2 0x037f10de #define ATA_NFORCE_MCP61 0x03ec10de #define ATA_NFORCE_MCP61_S1 0x03e710de #define ATA_NFORCE_MCP61_S2 0x03f610de #define ATA_NFORCE_MCP61_S3 0x03f710de #define ATA_NFORCE_MCP65 0x044810de #define ATA_NFORCE_MCP65_A0 0x044c10de #define ATA_NFORCE_MCP65_A1 0x044d10de #define ATA_NFORCE_MCP65_A2 0x044e10de #define ATA_NFORCE_MCP65_A3 0x044f10de #define ATA_NFORCE_MCP65_A4 0x045c10de #define ATA_NFORCE_MCP65_A5 0x045d10de #define ATA_NFORCE_MCP65_A6 0x045e10de #define ATA_NFORCE_MCP65_A7 0x045f10de #define ATA_NFORCE_MCP67 0x056010de #define ATA_NFORCE_MCP67_A0 0x055010de #define ATA_NFORCE_MCP67_A1 0x055110de #define ATA_NFORCE_MCP67_A2 0x055210de #define ATA_NFORCE_MCP67_A3 0x055310de #define ATA_NFORCE_MCP67_A4 0x055410de #define ATA_NFORCE_MCP67_A5 0x055510de #define ATA_NFORCE_MCP67_A6 0x055610de #define ATA_NFORCE_MCP67_A7 0x055710de #define ATA_NFORCE_MCP67_A8 0x055810de #define ATA_NFORCE_MCP67_A9 0x055910de #define ATA_NFORCE_MCP67_AA 0x055A10de #define ATA_NFORCE_MCP67_AB 0x055B10de #define ATA_NFORCE_MCP67_AC 0x058410de #define ATA_NFORCE_MCP73 0x056c10de #define ATA_NFORCE_MCP73_A0 0x07f010de #define ATA_NFORCE_MCP73_A1 0x07f110de #define ATA_NFORCE_MCP73_A2 0x07f210de #define ATA_NFORCE_MCP73_A3 0x07f310de #define ATA_NFORCE_MCP73_A4 0x07f410de #define ATA_NFORCE_MCP73_A5 0x07f510de #define ATA_NFORCE_MCP73_A6 0x07f610de #define ATA_NFORCE_MCP73_A7 0x07f710de #define ATA_NFORCE_MCP73_A8 0x07f810de #define ATA_NFORCE_MCP73_A9 0x07f910de #define ATA_NFORCE_MCP73_AA 0x07fa10de #define ATA_NFORCE_MCP73_AB 0x07fb10de #define ATA_NFORCE_MCP77 0x075910de #define ATA_NFORCE_MCP77_A0 0x0ad010de #define ATA_NFORCE_MCP77_A1 0x0ad110de #define ATA_NFORCE_MCP77_A2 0x0ad210de #define ATA_NFORCE_MCP77_A3 0x0ad310de #define ATA_NFORCE_MCP77_A4 0x0ad410de #define ATA_NFORCE_MCP77_A5 0x0ad510de #define 
ATA_NFORCE_MCP77_A6 0x0ad610de #define ATA_NFORCE_MCP77_A7 0x0ad710de #define ATA_NFORCE_MCP77_A8 0x0ad810de #define ATA_NFORCE_MCP77_A9 0x0ad910de #define ATA_NFORCE_MCP77_AA 0x0ada10de #define ATA_NFORCE_MCP77_AB 0x0adb10de #define ATA_NFORCE_MCP79_A0 0x0ab410de #define ATA_NFORCE_MCP79_A1 0x0ab510de #define ATA_NFORCE_MCP79_A2 0x0ab610de #define ATA_NFORCE_MCP79_A3 0x0ab710de #define ATA_NFORCE_MCP79_A4 0x0ab810de #define ATA_NFORCE_MCP79_A5 0x0ab910de #define ATA_NFORCE_MCP79_A6 0x0aba10de #define ATA_NFORCE_MCP79_A7 0x0abb10de #define ATA_NFORCE_MCP79_A8 0x0abc10de #define ATA_NFORCE_MCP79_A9 0x0abd10de #define ATA_NFORCE_MCP79_AA 0x0abe10de #define ATA_NFORCE_MCP79_AB 0x0abf10de #define ATA_NFORCE_MCP89_A0 0x0d8410de #define ATA_NFORCE_MCP89_A1 0x0d8510de #define ATA_NFORCE_MCP89_A2 0x0d8610de #define ATA_NFORCE_MCP89_A3 0x0d8710de #define ATA_NFORCE_MCP89_A4 0x0d8810de #define ATA_NFORCE_MCP89_A5 0x0d8910de #define ATA_NFORCE_MCP89_A6 0x0d8a10de #define ATA_NFORCE_MCP89_A7 0x0d8b10de #define ATA_NFORCE_MCP89_A8 0x0d8c10de #define ATA_NFORCE_MCP89_A9 0x0d8d10de #define ATA_NFORCE_MCP89_AA 0x0d8e10de #define ATA_NFORCE_MCP89_AB 0x0d8f10de #define ATA_PROMISE_ID 0x105a #define ATA_PDC20246 0x4d33105a #define ATA_PDC20262 0x4d38105a #define ATA_PDC20263 0x0d38105a #define ATA_PDC20265 0x0d30105a #define ATA_PDC20267 0x4d30105a #define ATA_PDC20268 0x4d68105a #define ATA_PDC20269 0x4d69105a #define ATA_PDC20270 0x6268105a #define ATA_PDC20271 0x6269105a #define ATA_PDC20275 0x1275105a #define ATA_PDC20276 0x5275105a #define ATA_PDC20277 0x7275105a #define ATA_PDC20318 0x3318105a #define ATA_PDC20319 0x3319105a #define ATA_PDC20371 0x3371105a #define ATA_PDC20375 0x3375105a #define ATA_PDC20376 0x3376105a #define ATA_PDC20377 0x3377105a #define ATA_PDC20378 0x3373105a #define ATA_PDC20379 0x3372105a #define ATA_PDC20571 0x3571105a #define ATA_PDC20575 0x3d75105a #define ATA_PDC20579 0x3574105a #define ATA_PDC20771 0x3570105a #define ATA_PDC40518 0x3d18105a #define ATA_PDC40519 0x3519105a #define ATA_PDC40718 0x3d17105a #define ATA_PDC40719 0x3515105a #define ATA_PDC40775 0x3d73105a #define ATA_PDC40779 0x3577105a #define ATA_PDC20617 0x6617105a #define ATA_PDC20618 0x6626105a #define ATA_PDC20619 0x6629105a #define ATA_PDC20620 0x6620105a #define ATA_PDC20621 0x6621105a #define ATA_PDC20622 0x6622105a #define ATA_PDC20624 0x6624105a #define ATA_PDC81518 0x8002105a #define ATA_SERVERWORKS_ID 0x1166 #define ATA_ROSB4_ISA 0x02001166 #define ATA_ROSB4 0x02111166 #define ATA_CSB5 0x02121166 #define ATA_CSB6 0x02131166 #define ATA_CSB6_1 0x02171166 #define ATA_HT1000 0x02141166 #define ATA_HT1000_S1 0x024b1166 #define ATA_HT1000_S2 0x024a1166 #define ATA_K2 0x02401166 #define ATA_FRODO4 0x02411166 #define ATA_FRODO8 0x02421166 #define ATA_SILICON_IMAGE_ID 0x1095 #define ATA_SII3114 0x31141095 #define ATA_SII3512 0x35121095 #define ATA_SII3112 0x31121095 #define ATA_SII3112_1 0x02401095 #define ATA_SII0680 0x06801095 #define ATA_CMD646 0x06461095 #define ATA_CMD648 0x06481095 #define ATA_CMD649 0x06491095 #define ATA_SIS_ID 0x1039 #define ATA_SISSOUTH 0x00081039 #define ATA_SIS5511 0x55111039 #define ATA_SIS5513 0x55131039 #define ATA_SIS5517 0x55171039 #define ATA_SIS5518 0x55181039 #define ATA_SIS5571 0x55711039 #define ATA_SIS5591 0x55911039 #define ATA_SIS5596 0x55961039 #define ATA_SIS5597 0x55971039 #define ATA_SIS5598 0x55981039 #define ATA_SIS5600 0x56001039 #define ATA_SIS530 0x05301039 #define ATA_SIS540 0x05401039 #define ATA_SIS550 0x05501039 #define ATA_SIS620 0x06201039 #define 
ATA_SIS630 0x06301039 #define ATA_SIS635 0x06351039 #define ATA_SIS633 0x06331039 #define ATA_SIS640 0x06401039 #define ATA_SIS645 0x06451039 #define ATA_SIS646 0x06461039 #define ATA_SIS648 0x06481039 #define ATA_SIS650 0x06501039 #define ATA_SIS651 0x06511039 #define ATA_SIS652 0x06521039 #define ATA_SIS655 0x06551039 #define ATA_SIS658 0x06581039 #define ATA_SIS661 0x06611039 #define ATA_SIS730 0x07301039 #define ATA_SIS733 0x07331039 #define ATA_SIS735 0x07351039 #define ATA_SIS740 0x07401039 #define ATA_SIS745 0x07451039 #define ATA_SIS746 0x07461039 #define ATA_SIS748 0x07481039 #define ATA_SIS750 0x07501039 #define ATA_SIS751 0x07511039 #define ATA_SIS752 0x07521039 #define ATA_SIS755 0x07551039 #define ATA_SIS961 0x09611039 #define ATA_SIS962 0x09621039 #define ATA_SIS963 0x09631039 #define ATA_SIS964 0x09641039 #define ATA_SIS965 0x09651039 #define ATA_SIS180 0x01801039 #define ATA_SIS181 0x01811039 #define ATA_SIS182 0x01821039 #define ATA_VIA_ID 0x1106 #define ATA_VIA82C571 0x05711106 #define ATA_VIA82C586 0x05861106 #define ATA_VIA82C596 0x05961106 #define ATA_VIA82C686 0x06861106 #define ATA_VIA8231 0x82311106 #define ATA_VIA8233 0x30741106 #define ATA_VIA8233A 0x31471106 #define ATA_VIA8233C 0x31091106 #define ATA_VIA8235 0x31771106 #define ATA_VIA8237 0x32271106 #define ATA_VIA8237A 0x05911106 #define ATA_VIA8237S 0x53371106 #define ATA_VIA8237_5372 0x53721106 #define ATA_VIA8237_7372 0x73721106 #define ATA_VIA8251 0x33491106 #define ATA_VIA8361 0x31121106 #define ATA_VIA8363 0x03051106 #define ATA_VIA8371 0x03911106 #define ATA_VIA8662 0x31021106 #define ATA_VIA6410 0x31641106 #define ATA_VIA6420 0x31491106 #define ATA_VIA6421 0x32491106 #define ATA_VIACX700IDE 0x05811106 #define ATA_VIACX700 0x83241106 #define ATA_VIASATAIDE 0x53241106 #define ATA_VIAVX800 0x83531106 #define ATA_VIASATAIDE2 0xc4091106 #define ATA_VIAVX855 0x84091106 #define ATA_VIASATAIDE3 0x90011106 #define ATA_VIAVX900 0x84101106 /* global prototypes ata-pci.c */ int ata_pci_probe(device_t dev); int ata_pci_attach(device_t dev); int ata_pci_detach(device_t dev); int ata_pci_suspend(device_t dev); int ata_pci_resume(device_t dev); int ata_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int ata_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); uint32_t ata_pci_read_config(device_t dev, device_t child, int reg, int width); void ata_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width); int ata_pci_print_child(device_t dev, device_t child); int ata_pci_child_location(device_t dev, device_t child, struct sbuf *sb); struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); +int ata_pci_release_resource(device_t dev, device_t child, struct resource *r); int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep); int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int ata_pci_ch_attach(device_t dev); int ata_pci_ch_detach(device_t dev); int ata_pci_status(device_t dev); void ata_pci_hw(device_t dev); void ata_pci_dmainit(device_t dev); void ata_pci_dmafini(device_t dev); const char *ata_pcivendor2str(device_t dev); int ata_legacy(device_t); void ata_generic_intr(void *data); int 
ata_generic_chipinit(device_t dev); int ata_generic_setmode(device_t dev, int target, int mode); int ata_setup_interrupt(device_t dev, void *intr_func); void ata_set_desc(device_t dev); const struct ata_chip_id *ata_match_chip(device_t dev, const struct ata_chip_id *index); const struct ata_chip_id *ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot); int ata_mode2idx(int mode); /* global prototypes from chipsets/ata-*.c */ int ata_sii_chipinit(device_t); /* externs */ MALLOC_DECLARE(M_ATAPCI); /* macro for easy definition of all driver module stuff */ #define ATA_DECLARE_DRIVER(dname) \ static device_method_t __CONCAT(dname,_methods)[] = { \ DEVMETHOD(device_probe, __CONCAT(dname,_probe)), \ DEVMETHOD(device_attach, ata_pci_attach), \ DEVMETHOD(device_detach, ata_pci_detach), \ DEVMETHOD(device_suspend, ata_pci_suspend), \ DEVMETHOD(device_resume, ata_pci_resume), \ DEVMETHOD(device_shutdown, bus_generic_shutdown), \ DEVMETHOD(bus_read_ivar, ata_pci_read_ivar), \ DEVMETHOD(bus_write_ivar, ata_pci_write_ivar), \ DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), \ DEVMETHOD(bus_release_resource, ata_pci_release_resource), \ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), \ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), \ DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), \ DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), \ DEVMETHOD(pci_read_config, ata_pci_read_config), \ DEVMETHOD(pci_write_config, ata_pci_write_config), \ DEVMETHOD(bus_print_child, ata_pci_print_child), \ DEVMETHOD(bus_child_location, ata_pci_child_location), \ DEVMETHOD_END \ }; \ static driver_t __CONCAT(dname,_driver) = { \ "atapci", \ __CONCAT(dname,_methods), \ sizeof(struct ata_pci_controller) \ }; \ DRIVER_MODULE(dname, pci, __CONCAT(dname,_driver), NULL, NULL); \ MODULE_VERSION(dname, 1); \ MODULE_DEPEND(dname, ata, 1, 1, 1); \ MODULE_DEPEND(dname, atapci, 1, 1, 1); diff --git a/sys/dev/atkbdc/atkbdc_isa.c b/sys/dev/atkbdc/atkbdc_isa.c index cb42c8bda40c..2f7b9eceda94 100644 --- a/sys/dev/atkbdc/atkbdc_isa.c +++ b/sys/dev/atkbdc/atkbdc_isa.c @@ -1,322 +1,321 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
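The ata_pci_release_resource() prototype change above is the pattern repeated throughout this commit: a struct resource records its own type and rid, so callers now pass only the owning device and the resource pointer. A minimal sketch of a chipset detach path under the new two-argument form (foo_detach and the softc field names are hypothetical, not part of this commit):

static int
foo_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* Quiesce the interrupt before freeing its resource. */
	bus_teardown_intr(dev, sc->r_irq, sc->ih);

	/* Type and rid are recovered from the resource itself. */
	bus_release_resource(dev, sc->r_irq);
	bus_release_resource(dev, sc->r_io);

	return (0);
}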
*/ #include #include "opt_kbd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include static int atkbdc_isa_probe(device_t dev); static int atkbdc_isa_attach(device_t dev); static device_t atkbdc_isa_add_child(device_t bus, u_int order, const char *name, int unit); static struct resource *atkbdc_isa_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int atkbdc_isa_release_resource(device_t dev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); static device_method_t atkbdc_isa_methods[] = { DEVMETHOD(device_probe, atkbdc_isa_probe), DEVMETHOD(device_attach, atkbdc_isa_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, atkbdc_isa_add_child), DEVMETHOD(bus_print_child, atkbdc_print_child), DEVMETHOD(bus_read_ivar, atkbdc_read_ivar), DEVMETHOD(bus_write_ivar, atkbdc_write_ivar), DEVMETHOD(bus_get_resource_list,atkbdc_get_resource_list), DEVMETHOD(bus_alloc_resource, atkbdc_isa_alloc_resource), DEVMETHOD(bus_release_resource, atkbdc_isa_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t atkbdc_isa_driver = { ATKBDC_DRIVER_NAME, atkbdc_isa_methods, sizeof(atkbdc_softc_t *), }; static struct isa_pnp_id atkbdc_ids[] = { { 0x0303d041, "Keyboard controller (i8042)" }, /* PNP0303 */ { 0x0b03d041, "Keyboard controller (i8042)" }, /* PNP030B */ { 0x2003d041, "Keyboard controller (i8042)" }, /* PNP0320 */ { 0 } }; static int atkbdc_isa_probe(device_t dev) { struct resource *port0; struct resource *port1; rman_res_t start; rman_res_t count; int error; int rid; #if defined(__i386__) || defined(__amd64__) bus_space_tag_t tag; bus_space_handle_t ioh1; volatile int i; register_t flags; #endif /* check PnP IDs */ if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO) return ENXIO; device_set_desc(dev, "Keyboard controller (i8042)"); /* * Adjust I/O port resources. * The AT keyboard controller uses two ports (a command/data port * 0x60 and a status port 0x64), which may be given to us in * one resource (0x60 through 0x64) or as two separate resources * (0x60 and 0x64). Some brain-damaged ACPI BIOS has reversed * command/data port and status port. Furthermore, /boot/device.hints * may contain just one port, 0x60. We shall adjust resource settings * so that these two ports are available as two separate resources * in correct order. 
*/ device_quiet(dev); rid = 0; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0) return ENXIO; if (start == IO_KBD + KBD_STATUS_PORT) { start = IO_KBD; count++; } if (count > 1) /* adjust the count and/or start port */ bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1); port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port0 == NULL) return ENXIO; rid = 1; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0) bus_set_resource(dev, SYS_RES_IOPORT, 1, start + KBD_STATUS_PORT, 1); port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); return ENXIO; } #if defined(__i386__) || defined(__amd64__) /* * Check if we really have AT keyboard controller. Poll status * register until we get "all clear" indication. If no such * indication comes, it probably means that there is no AT * keyboard controller present. Give up in such case. Check relies * on the fact that reading from non-existing in/out port returns * 0xff on i386. May or may not be true on other platforms. */ tag = rman_get_bustag(port0); ioh1 = rman_get_bushandle(port1); flags = intr_disable(); for (i = 0; i != 65535; i++) { if ((bus_space_read_1(tag, ioh1, 0) & 0x2) == 0) break; } intr_restore(flags); if (i == 65535) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); if (bootverbose) device_printf(dev, "AT keyboard controller not found\n"); return ENXIO; } #endif device_verbose(dev); error = atkbdc_probe_unit(device_get_unit(dev), port0, port1); bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); return error; } static int atkbdc_isa_attach(device_t dev) { atkbdc_softc_t *sc; int unit; int error; int rid; unit = device_get_unit(dev); sc = *(atkbdc_softc_t **)device_get_softc(dev); if (sc == NULL) { /* * We have to maintain two copies of the kbdc_softc struct, * as the low-level console needs to have access to the * keyboard controller before kbdc is probed and attached. * kbdc_soft[] contains the default entry for that purpose. * See atkbdc.c. XXX */ sc = atkbdc_get_softc(unit); if (sc == NULL) return ENOMEM; } rid = 0; sc->retry = 5000; sc->port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port0 == NULL) return ENXIO; rid = 1; sc->port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); return ENXIO; } /* * If the device is not created by the PnP BIOS or ACPI, then * the hint for the IRQ is on the child atkbd device, not the * keyboard controller, so this can fail. 
*/ rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); error = atkbdc_attach_unit(unit, sc, sc->port0, sc->port1); if (error) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->port1); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); return error; } *(atkbdc_softc_t **)device_get_softc(dev) = sc; bus_generic_probe(dev); bus_generic_attach(dev); return 0; } static device_t atkbdc_isa_add_child(device_t bus, u_int order, const char *name, int unit) { atkbdc_device_t *ivar; atkbdc_softc_t *sc; device_t child; int t; sc = *(atkbdc_softc_t **)device_get_softc(bus); ivar = malloc(sizeof(struct atkbdc_device), M_ATKBDDEV, M_NOWAIT | M_ZERO); if (!ivar) return NULL; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { free(ivar, M_ATKBDDEV); return child; } resource_list_init(&ivar->resources); ivar->rid = order; /* * If the device is not created by the PnP BIOS or ACPI, refer * to device hints for IRQ. We always populate the resource * list entry so we can use a standard bus_get_resource() * method. */ if (order == KBDC_RID_KBD) { if (sc->irq == NULL) { if (resource_int_value(name, unit, "irq", &t) != 0) t = -1; } else t = rman_get_start(sc->irq); if (t > 0) resource_list_add(&ivar->resources, SYS_RES_IRQ, ivar->rid, t, t, 1); } if (resource_disabled(name, unit)) device_disable(child); device_set_ivars(child, ivar); return child; } struct resource * atkbdc_isa_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { atkbdc_softc_t *sc; sc = *(atkbdc_softc_t **)device_get_softc(dev); if (type == SYS_RES_IRQ && *rid == KBDC_RID_KBD && sc->irq != NULL) return (sc->irq); return (bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int -atkbdc_isa_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +atkbdc_isa_release_resource(device_t dev, device_t child, struct resource *r) { atkbdc_softc_t *sc; sc = *(atkbdc_softc_t **)device_get_softc(dev); - if (type == SYS_RES_IRQ && rid == KBDC_RID_KBD && r == sc->irq) + if (r == sc->irq) return (0); - return (bus_generic_rl_release_resource(dev, child, type, rid, r)); + return (bus_generic_rl_release_resource(dev, child, r)); } DRIVER_MODULE(atkbdc, isa, atkbdc_isa_driver, 0, 0); DRIVER_MODULE(atkbdc, acpi, atkbdc_isa_driver, 0, 0); ISA_PNP_INFO(atkbdc_ids); diff --git a/sys/dev/bhnd/bhnd_subr.c b/sys/dev/bhnd/bhnd_subr.c index 44f8ae9cc0a2..0d38c1ca8a24 100644 --- a/sys/dev/bhnd/bhnd_subr.c +++ b/sys/dev/bhnd/bhnd_subr.c @@ -1,2336 +1,2336 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
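Seen from the child's side, the pointer comparison in atkbdc_isa_release_resource() above is what makes releasing the controller-owned IRQ safe: the keyboard child can release the resource it was handed without freeing it out from under the controller that allocated it, and with type and rid gone the pointer comparison alone identifies the shared resource. A hedged sketch of that caller's view (the child softc and its field names are illustrative only):

static int
kbd_child_detach(device_t dev)
{
	struct kbd_child_softc *sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);

	/*
	 * Routed to atkbdc_isa_release_resource(), which returns 0
	 * without freeing the controller's shared IRQ resource.
	 */
	bus_release_resource(dev, sc->irq_res);

	return (0);
}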
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include #include #include #include #include #include #include #include #include #include "nvram/bhnd_nvram.h" #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhnd_nvram_map.h" #include "bhndreg.h" #include "bhndvar.h" #include "bhnd_private.h" static void bhnd_service_registry_free_entry( struct bhnd_service_entry *entry); static int compare_ascending_probe_order(const void *lhs, const void *rhs); static int compare_descending_probe_order(const void *lhs, const void *rhs); /* BHND core device description table. */ static const struct bhnd_core_desc { uint16_t vendor; uint16_t device; bhnd_devclass_t class; const char *desc; } bhnd_core_descs[] = { #define BHND_CDESC(_mfg, _cid, _cls, _desc) \ { BHND_MFGID_ ## _mfg, BHND_COREID_ ## _cid, \ BHND_DEVCLASS_ ## _cls, _desc } BHND_CDESC(BCM, CC, CC, "ChipCommon I/O Controller"), BHND_CDESC(BCM, ILINE20, OTHER, "iLine20 HPNA"), BHND_CDESC(BCM, SRAM, RAM, "SRAM"), BHND_CDESC(BCM, SDRAM, RAM, "SDRAM"), BHND_CDESC(BCM, PCI, PCI, "PCI Bridge"), BHND_CDESC(BCM, MIPS, CPU, "BMIPS CPU"), BHND_CDESC(BCM, ENET, ENET_MAC, "Fast Ethernet MAC"), BHND_CDESC(BCM, V90_CODEC, SOFTMODEM, "V.90 SoftModem Codec"), BHND_CDESC(BCM, USB, USB_DUAL, "USB 1.1 Device/Host Controller"), BHND_CDESC(BCM, ADSL, OTHER, "ADSL Core"), BHND_CDESC(BCM, ILINE100, OTHER, "iLine100 HPNA"), BHND_CDESC(BCM, IPSEC, OTHER, "IPsec Accelerator"), BHND_CDESC(BCM, UTOPIA, OTHER, "UTOPIA ATM Core"), BHND_CDESC(BCM, PCMCIA, PCCARD, "PCMCIA Bridge"), BHND_CDESC(BCM, SOCRAM, RAM, "Internal Memory"), BHND_CDESC(BCM, MEMC, MEMC, "MEMC SDRAM Controller"), BHND_CDESC(BCM, OFDM, OTHER, "OFDM PHY"), BHND_CDESC(BCM, EXTIF, OTHER, "External Interface"), BHND_CDESC(BCM, D11, WLAN, "802.11 MAC/PHY/Radio"), BHND_CDESC(BCM, APHY, WLAN_PHY, "802.11a PHY"), BHND_CDESC(BCM, BPHY, WLAN_PHY, "802.11b PHY"), BHND_CDESC(BCM, GPHY, WLAN_PHY, "802.11g PHY"), BHND_CDESC(BCM, MIPS33, CPU, "BMIPS33 CPU"), BHND_CDESC(BCM, USB11H, USB_HOST, "USB 1.1 Host Controller"), BHND_CDESC(BCM, USB11D, USB_DEV, "USB 1.1 Device Controller"), BHND_CDESC(BCM, USB20H, USB_HOST, "USB 2.0 Host Controller"), BHND_CDESC(BCM, USB20D, USB_DEV, "USB 2.0 Device Controller"), BHND_CDESC(BCM, SDIOH, OTHER, "SDIO Host Controller"), BHND_CDESC(BCM, ROBO, OTHER, "RoboSwitch"), BHND_CDESC(BCM, ATA100, OTHER, "Parallel ATA Controller"), BHND_CDESC(BCM, SATAXOR, OTHER, "SATA DMA/XOR Controller"), BHND_CDESC(BCM, GIGETH, ENET_MAC, "Gigabit Ethernet MAC"), BHND_CDESC(BCM, PCIE, PCIE, "PCIe Bridge"), BHND_CDESC(BCM, NPHY, WLAN_PHY, "802.11n 2x2 PHY"), BHND_CDESC(BCM, SRAMC, MEMC, "SRAM Controller"), BHND_CDESC(BCM, MINIMAC, OTHER, "MINI MAC/PHY"), BHND_CDESC(BCM, ARM11, CPU, "ARM1176 CPU"), BHND_CDESC(BCM, ARM7S, CPU, "ARM7TDMI-S CPU"), BHND_CDESC(BCM, LPPHY, WLAN_PHY, 
"802.11a/b/g PHY"), BHND_CDESC(BCM, PMU, PMU, "PMU"), BHND_CDESC(BCM, SSNPHY, WLAN_PHY, "802.11n Single-Stream PHY"), BHND_CDESC(BCM, SDIOD, OTHER, "SDIO Device Core"), BHND_CDESC(BCM, ARMCM3, CPU, "ARM Cortex-M3 CPU"), BHND_CDESC(BCM, HTPHY, WLAN_PHY, "802.11n 4x4 PHY"), BHND_CDESC(MIPS,MIPS74K, CPU, "MIPS74k CPU"), BHND_CDESC(BCM, GMAC, ENET_MAC, "Gigabit MAC core"), BHND_CDESC(BCM, DMEMC, MEMC, "DDR1/DDR2 Memory Controller"), BHND_CDESC(BCM, PCIERC, OTHER, "PCIe Root Complex"), BHND_CDESC(BCM, OCP, SOC_BRIDGE, "OCP to OCP Bridge"), BHND_CDESC(BCM, SC, OTHER, "Shared Common Core"), BHND_CDESC(BCM, AHB, SOC_BRIDGE, "OCP to AHB Bridge"), BHND_CDESC(BCM, SPIH, OTHER, "SPI Host Controller"), BHND_CDESC(BCM, I2S, OTHER, "I2S Digital Audio Interface"), BHND_CDESC(BCM, DMEMS, MEMC, "SDR/DDR1 Memory Controller"), BHND_CDESC(BCM, UBUS_SHIM, OTHER, "BCM6362/UBUS WLAN SHIM"), BHND_CDESC(BCM, PCIE2, PCIE, "PCIe Bridge (Gen2)"), BHND_CDESC(ARM, APB_BRIDGE, SOC_BRIDGE, "BP135 AMBA3 AXI to APB Bridge"), BHND_CDESC(ARM, PL301, SOC_ROUTER, "PL301 AMBA3 Interconnect"), BHND_CDESC(ARM, EROM, EROM, "PL366 Device Enumeration ROM"), BHND_CDESC(ARM, OOB_ROUTER, OTHER, "PL367 OOB Interrupt Router"), BHND_CDESC(ARM, AXI_UNMAPPED, OTHER, "Unmapped Address Ranges"), BHND_CDESC(BCM, 4706_CC, CC, "ChipCommon I/O Controller"), BHND_CDESC(BCM, NS_PCIE2, PCIE, "PCIe Bridge (Gen2)"), BHND_CDESC(BCM, NS_DMA, OTHER, "DMA engine"), BHND_CDESC(BCM, NS_SDIO, OTHER, "SDIO 3.0 Host Controller"), BHND_CDESC(BCM, NS_USB20H, USB_HOST, "USB 2.0 Host Controller"), BHND_CDESC(BCM, NS_USB30H, USB_HOST, "USB 3.0 Host Controller"), BHND_CDESC(BCM, NS_A9JTAG, OTHER, "ARM Cortex A9 JTAG Interface"), BHND_CDESC(BCM, NS_DDR23_MEMC, MEMC, "Denali DDR2/DD3 Memory Controller"), BHND_CDESC(BCM, NS_ROM, NVRAM, "System ROM"), BHND_CDESC(BCM, NS_NAND, NVRAM, "NAND Flash Controller"), BHND_CDESC(BCM, NS_QSPI, NVRAM, "QSPI Flash Controller"), BHND_CDESC(BCM, NS_CC_B, CC_B, "ChipCommon B Auxiliary I/O Controller"), BHND_CDESC(BCM, 4706_SOCRAM, RAM, "Internal Memory"), BHND_CDESC(BCM, IHOST_ARMCA9, CPU, "ARM Cortex A9 CPU"), BHND_CDESC(BCM, 4706_GMAC_CMN, ENET, "Gigabit MAC (Common)"), BHND_CDESC(BCM, 4706_GMAC, ENET_MAC, "Gigabit MAC"), BHND_CDESC(BCM, AMEMC, MEMC, "Denali DDR1/DDR2 Memory Controller"), #undef BHND_CDESC /* Derived from inspection of the BCM4331 cores that provide PrimeCell * IDs. Due to lack of documentation, the surmised device name/purpose * provided here may be incorrect. */ { BHND_MFGID_ARM, BHND_PRIMEID_EROM, BHND_DEVCLASS_OTHER, "PL364 Device Enumeration ROM" }, { BHND_MFGID_ARM, BHND_PRIMEID_SWRAP, BHND_DEVCLASS_OTHER, "PL368 Device Management Interface" }, { BHND_MFGID_ARM, BHND_PRIMEID_MWRAP, BHND_DEVCLASS_OTHER, "PL369 Device Management Interface" }, { 0, 0, 0, NULL } }; static const struct bhnd_device_quirk bhnd_chipc_clkctl_quirks[]; static const struct bhnd_device_quirk bhnd_pcmcia_clkctl_quirks[]; /** * Device table entries for core-specific CLKCTL quirk lookup. 
*/ static const struct bhnd_device bhnd_clkctl_devices[] = { BHND_DEVICE(BCM, CC, NULL, bhnd_chipc_clkctl_quirks), BHND_DEVICE(BCM, PCMCIA, NULL, bhnd_pcmcia_clkctl_quirks), BHND_DEVICE_END, }; /** ChipCommon CLKCTL quirks */ static const struct bhnd_device_quirk bhnd_chipc_clkctl_quirks[] = { /* HTAVAIL/ALPAVAIL are bitswapped in chipc's CLKCTL */ BHND_CHIP_QUIRK(4328, HWREV_ANY, BHND_CLKCTL_QUIRK_CCS0), BHND_CHIP_QUIRK(5354, HWREV_ANY, BHND_CLKCTL_QUIRK_CCS0), BHND_DEVICE_QUIRK_END }; /** PCMCIA CLKCTL quirks */ static const struct bhnd_device_quirk bhnd_pcmcia_clkctl_quirks[] = { /* HTAVAIL/ALPAVAIL are bitswapped in pcmcia's CLKCTL */ BHND_CHIP_QUIRK(4328, HWREV_ANY, BHND_CLKCTL_QUIRK_CCS0), BHND_CHIP_QUIRK(5354, HWREV_ANY, BHND_CLKCTL_QUIRK_CCS0), BHND_DEVICE_QUIRK_END }; /** * Return the name for a given JEP106 manufacturer ID. * * @param vendor A JEP106 Manufacturer ID, including the non-standard ARM 4-bit * JEP106 continuation code. */ const char * bhnd_vendor_name(uint16_t vendor) { switch (vendor) { case BHND_MFGID_ARM: return "ARM"; case BHND_MFGID_BCM: return "Broadcom"; case BHND_MFGID_MIPS: return "MIPS"; default: return "unknown"; } } /** * Return the name of a port type. * * @param port_type The port type to look up. */ const char * bhnd_port_type_name(bhnd_port_type port_type) { switch (port_type) { case BHND_PORT_DEVICE: return ("device"); case BHND_PORT_BRIDGE: return ("bridge"); case BHND_PORT_AGENT: return ("agent"); default: return "unknown"; } } /** * Return the name of an NVRAM source. * * @param nvram_src The NVRAM source type to look up. */ const char * bhnd_nvram_src_name(bhnd_nvram_src nvram_src) { switch (nvram_src) { case BHND_NVRAM_SRC_FLASH: return ("flash"); case BHND_NVRAM_SRC_OTP: return ("OTP"); case BHND_NVRAM_SRC_SPROM: return ("SPROM"); case BHND_NVRAM_SRC_UNKNOWN: return ("none"); default: return ("unknown"); } } static const struct bhnd_core_desc * bhnd_find_core_desc(uint16_t vendor, uint16_t device) { for (u_int i = 0; bhnd_core_descs[i].desc != NULL; i++) { if (bhnd_core_descs[i].vendor != vendor) continue; if (bhnd_core_descs[i].device != device) continue; return (&bhnd_core_descs[i]); } return (NULL); } /** * Return a human-readable name for a BHND core. * * @param vendor The core designer's JEDEC-106 Manufacturer ID. * @param device The core identifier. */ const char * bhnd_find_core_name(uint16_t vendor, uint16_t device) { const struct bhnd_core_desc *desc; if ((desc = bhnd_find_core_desc(vendor, device)) == NULL) return ("unknown"); return desc->desc; } /** * Return the device class for a BHND core. * * @param vendor The core designer's JEDEC-106 Manufacturer ID. * @param device The core identifier. */ bhnd_devclass_t bhnd_find_core_class(uint16_t vendor, uint16_t device) { const struct bhnd_core_desc *desc; if ((desc = bhnd_find_core_desc(vendor, device)) == NULL) return (BHND_DEVCLASS_OTHER); return desc->class; } /** * Return a human-readable name for a BHND core. * * @param ci The core's info record. */ const char * bhnd_core_name(const struct bhnd_core_info *ci) { return bhnd_find_core_name(ci->vendor, ci->device); } /** * Return the device class for a BHND core. * * @param ci The core's info record. */ bhnd_devclass_t bhnd_core_class(const struct bhnd_core_info *ci) { return bhnd_find_core_class(ci->vendor, ci->device); } /** * Write a human readable name representation of the given * BHND_CHIPID_* constant to @p buffer. * * @param buffer Output buffer, or NULL to compute the required size. 
 * @param	size	Capacity of @p buffer, in bytes.
 * @param	chip_id	Chip ID to be formatted.
 *
 * @return The required number of bytes on success, or a negative integer on
 * failure. No more than @p size-1 characters will be written, with the
 * @p size'th byte set to '\0'.
 *
 * @sa BHND_CHIPID_MAX_NAMELEN
 */
int
bhnd_format_chip_id(char *buffer, size_t size, uint16_t chip_id)
{
	/* All hex formatted IDs are within the range of 0x4000-0x9C3F (40000-1) */
	if (chip_id >= 0x4000 && chip_id <= 0x9C3F)
		return (snprintf(buffer, size, "BCM%hX", chip_id));
	else
		return (snprintf(buffer, size, "BCM%hu", chip_id));
}

/**
 * Return a core info record populated from a bhnd-attached @p dev.
 *
 * @param dev A bhnd device.
 *
 * @return A core info record for @p dev.
 */
struct bhnd_core_info
bhnd_get_core_info(device_t dev)
{
	return (struct bhnd_core_info) {
		.vendor		= bhnd_get_vendor(dev),
		.device		= bhnd_get_device(dev),
		.hwrev		= bhnd_get_hwrev(dev),
		.core_idx	= bhnd_get_core_index(dev),
		.unit		= bhnd_get_core_unit(dev)
	};
}

/**
 * Find a @p class child device with @p unit on @p bus.
 *
 * @param bus The bhnd-compatible bus to be searched.
 * @param class The device class to match on.
 * @param unit The core unit number; specify -1 to return the first match
 * regardless of unit number.
 *
 * @retval device_t if a matching child device is found.
 * @retval NULL if no matching child device is found.
 */
device_t
bhnd_bus_find_child(device_t bus, bhnd_devclass_t class, int unit)
{
	struct bhnd_core_match md = {
		BHND_MATCH_CORE_CLASS(class),
		BHND_MATCH_CORE_UNIT(unit)
	};

	if (unit == -1)
		md.m.match.core_unit = 0;

	return bhnd_bus_match_child(bus, &md);
}

/**
 * Find the first child device on @p bus that matches @p desc.
 *
 * @param bus The bhnd-compatible bus to be searched.
 * @param desc A match descriptor.
 *
 * @retval device_t if a matching child device is found.
 * @retval NULL if no matching child device is found.
 */
device_t
bhnd_bus_match_child(device_t bus, const struct bhnd_core_match *desc)
{
	device_t	*devlistp;
	device_t	 match;
	int		 devcnt;
	int		 error;

	error = device_get_children(bus, &devlistp, &devcnt);
	if (error != 0)
		return (NULL);

	match = NULL;
	for (int i = 0; i < devcnt; i++) {
		struct bhnd_core_info ci = bhnd_get_core_info(devlistp[i]);

		if (bhnd_core_matches(&ci, desc)) {
			match = devlistp[i];
			goto done;
		}
	}

done:
	free(devlistp, M_TEMP);
	return match;
}

/**
 * Retrieve an ordered list of all device instances currently connected to
 * @p bus, returning a pointer to the array in @p devlistp and the count
 * in @p ndevs.
 *
 * The memory allocated for the table must be freed via
 * bhnd_bus_free_children().
 *
 * @param bus The bhnd-compatible bus to be queried.
 * @param[out] devlist The array of devices.
 * @param[out] devcount The number of devices in @p devlistp
 * @param order The order in which devices will be returned
 * in @p devlist.
 *
 * @retval 0 success
 * @retval non-zero if an error occurs, a regular unix error code will
 * be returned.
 */
int
bhnd_bus_get_children(device_t bus, device_t **devlist, int *devcount,
    bhnd_device_order order)
{
	int error;

	/* Fetch device array */
	if ((error = device_get_children(bus, devlist, devcount)))
		return (error);

	/* Perform requested sorting */
	if ((error = bhnd_sort_devices(*devlist, *devcount, order))) {
		bhnd_bus_free_children(*devlist);
		return (error);
	}

	return (0);
}

/**
 * Free any memory allocated in a previous call to bhnd_bus_get_children().
 *
 * @param devlist The device array returned by bhnd_bus_get_children().
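bhnd_bus_get_children() and bhnd_bus_free_children() form an allocate/iterate/free triple; the sorted array must always be returned through the matching helper rather than freed directly. A minimal usage sketch under that contract (foo_list_cores is a hypothetical consumer, not part of this file):

static int
foo_list_cores(device_t bus)
{
	device_t	*devs;
	int		 ndevs, error;

	error = bhnd_bus_get_children(bus, &devs, &ndevs,
	    BHND_DEVICE_ORDER_ATTACH);
	if (error)
		return (error);

	for (int i = 0; i < ndevs; i++)
		device_printf(devs[i], "core %d of %d\n", i + 1, ndevs);

	/* Release the array via the matching helper. */
	bhnd_bus_free_children(devs);
	return (0);
}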
*/ void bhnd_bus_free_children(device_t *devlist) { free(devlist, M_TEMP); } /** * Perform in-place sorting of an array of bhnd device instances. * * @param devlist An array of bhnd devices. * @param devcount The number of devices in @p devs. * @param order The sort order to be used. * * @retval 0 success * @retval EINVAL if the sort order is unknown. */ int bhnd_sort_devices(device_t *devlist, size_t devcount, bhnd_device_order order) { int (*compare)(const void *, const void *); switch (order) { case BHND_DEVICE_ORDER_ATTACH: compare = compare_ascending_probe_order; break; case BHND_DEVICE_ORDER_DETACH: compare = compare_descending_probe_order; break; default: printf("unknown sort order: %d\n", order); return (EINVAL); } qsort(devlist, devcount, sizeof(*devlist), compare); return (0); } /* * Ascending comparison of bhnd device's probe order. */ static int compare_ascending_probe_order(const void *lhs, const void *rhs) { device_t ldev, rdev; int lorder, rorder; ldev = (*(const device_t *) lhs); rdev = (*(const device_t *) rhs); lorder = BHND_BUS_GET_PROBE_ORDER(device_get_parent(ldev), ldev); rorder = BHND_BUS_GET_PROBE_ORDER(device_get_parent(rdev), rdev); if (lorder < rorder) { return (-1); } else if (lorder > rorder) { return (1); } else { return (0); } } /* * Descending comparison of bhnd device's probe order. */ static int compare_descending_probe_order(const void *lhs, const void *rhs) { return (compare_ascending_probe_order(rhs, lhs)); } /** * Call device_probe_and_attach() for each of the bhnd bus device's * children, in bhnd attach order. * * @param bus The bhnd-compatible bus for which all children should be probed * and attached. */ int bhnd_bus_probe_children(device_t bus) { device_t *devs; int ndevs; int error; /* Fetch children in attach order */ error = bhnd_bus_get_children(bus, &devs, &ndevs, BHND_DEVICE_ORDER_ATTACH); if (error) return (error); /* Probe and attach all children */ for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; device_probe_and_attach(child); } bhnd_bus_free_children(devs); return (0); } /** * Walk up the bhnd device hierarchy to locate the root device * to which the bhndb bridge is attached. * * This can be used from within bhnd host bridge drivers to locate the * actual upstream host device. * * @param dev A bhnd device. * @param bus_class The expected bus (e.g. "pci") to which the bridge root * should be attached. * * @retval device_t if a matching parent device is found. * @retval NULL if @p dev is not attached via a bhndb bus. * @retval NULL if no parent device is attached via @p bus_class. */ device_t bhnd_find_bridge_root(device_t dev, devclass_t bus_class) { devclass_t bhndb_class; device_t parent; KASSERT(device_get_devclass(device_get_parent(dev)) == devclass_find("bhnd"), ("%s not a bhnd device", device_get_nameunit(dev))); bhndb_class = devclass_find("bhndb"); /* Walk the device tree until we hit a bridge */ parent = dev; while ((parent = device_get_parent(parent)) != NULL) { if (device_get_devclass(parent) == bhndb_class) break; } /* No bridge? */ if (parent == NULL) return (NULL); /* Search for a parent attached to the expected bus class */ while ((parent = device_get_parent(parent)) != NULL) { device_t bus; bus = device_get_parent(parent); if (bus != NULL && device_get_devclass(bus) == bus_class) return (parent); } /* Not found */ return (NULL); } /** * Find the first core in @p cores that matches @p desc. * * @param cores The table to search. * @param num_cores The length of @p cores. * @param desc A match descriptor. 
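The match descriptors consumed by bhnd_match_core() and its callers are built from the BHND_MATCH_* initializer macros used throughout this file. A hedged sketch of constructing one and scanning a core table with it (the ChipCommon criteria and foo_find_chipc are illustrative):

/* Find the first Broadcom ChipCommon core in a core table. */
static const struct bhnd_core_info *
foo_find_chipc(const struct bhnd_core_info *cores, u_int num_cores)
{
	struct bhnd_core_match md = {
		BHND_MATCH_CORE_VENDOR(BHND_MFGID_BCM),
		BHND_MATCH_CORE_CLASS(BHND_DEVCLASS_CC)
	};

	return (bhnd_match_core(cores, num_cores, &md));
}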
* * @retval bhnd_core_info if a matching core is found. * @retval NULL if no matching core is found. */ const struct bhnd_core_info * bhnd_match_core(const struct bhnd_core_info *cores, u_int num_cores, const struct bhnd_core_match *desc) { for (u_int i = 0; i < num_cores; i++) { if (bhnd_core_matches(&cores[i], desc)) return &cores[i]; } return (NULL); } /** * Find the first core in @p cores with the given @p class. * * @param cores The table to search. * @param num_cores The length of @p cores. * @param class The device class to match on. * * @retval non-NULL if a matching core is found. * @retval NULL if no matching core is found. */ const struct bhnd_core_info * bhnd_find_core(const struct bhnd_core_info *cores, u_int num_cores, bhnd_devclass_t class) { struct bhnd_core_match md = { BHND_MATCH_CORE_CLASS(class) }; return bhnd_match_core(cores, num_cores, &md); } /** * Create an equality match descriptor for @p core. * * @param core The core info to be matched on. * * @return an equality match descriptor for @p core. */ struct bhnd_core_match bhnd_core_get_match_desc(const struct bhnd_core_info *core) { return ((struct bhnd_core_match) { BHND_MATCH_CORE_VENDOR(core->vendor), BHND_MATCH_CORE_ID(core->device), BHND_MATCH_CORE_REV(HWREV_EQ(core->hwrev)), BHND_MATCH_CORE_CLASS(bhnd_core_class(core)), BHND_MATCH_CORE_IDX(core->core_idx), BHND_MATCH_CORE_UNIT(core->unit) }); } /** * Return true if the @p lhs is equal to @p rhs. * * @param lhs The first bhnd core descriptor to compare. * @param rhs The second bhnd core descriptor to compare. * * @retval true if @p lhs is equal to @p rhs * @retval false if @p lhs is not equal to @p rhs */ bool bhnd_cores_equal(const struct bhnd_core_info *lhs, const struct bhnd_core_info *rhs) { struct bhnd_core_match md; /* Use an equality match descriptor to perform the comparison */ md = bhnd_core_get_match_desc(rhs); return (bhnd_core_matches(lhs, &md)); } /** * Return true if the @p core matches @p desc. * * @param core A bhnd core descriptor. * @param desc A match descriptor to compare against @p core. * * @retval true if @p core matches @p match. * @retval false if @p core does not match @p match. */ bool bhnd_core_matches(const struct bhnd_core_info *core, const struct bhnd_core_match *desc) { if (desc->m.match.core_vendor && desc->core_vendor != core->vendor) return (false); if (desc->m.match.core_id && desc->core_id != core->device) return (false); if (desc->m.match.core_unit && desc->core_unit != core->unit) return (false); if (desc->m.match.core_rev && !bhnd_hwrev_matches(core->hwrev, &desc->core_rev)) return (false); if (desc->m.match.core_idx && desc->core_idx != core->core_idx) return (false); if (desc->m.match.core_class && desc->core_class != bhnd_core_class(core)) return (false); return true; } /** * Return true if the @p chip matches @p desc. * * @param chip A bhnd chip identifier. * @param desc A match descriptor to compare against @p chip. * * @retval true if @p chip matches @p match. * @retval false if @p chip does not match @p match. */ bool bhnd_chip_matches(const struct bhnd_chipid *chip, const struct bhnd_chip_match *desc) { if (desc->m.match.chip_id && chip->chip_id != desc->chip_id) return (false); if (desc->m.match.chip_pkg && chip->chip_pkg != desc->chip_pkg) return (false); if (desc->m.match.chip_rev && !bhnd_hwrev_matches(chip->chip_rev, &desc->chip_rev)) return (false); if (desc->m.match.chip_type && chip->chip_type != desc->chip_type) return (false); return (true); } /** * Return true if the @p board matches @p desc. 
* * @param board The bhnd board info. * @param desc A match descriptor to compare against @p board. * * @retval true if @p chip matches @p match. * @retval false if @p chip does not match @p match. */ bool bhnd_board_matches(const struct bhnd_board_info *board, const struct bhnd_board_match *desc) { if (desc->m.match.board_srom_rev && !bhnd_hwrev_matches(board->board_srom_rev, &desc->board_srom_rev)) return (false); if (desc->m.match.board_vendor && board->board_vendor != desc->board_vendor) return (false); if (desc->m.match.board_type && board->board_type != desc->board_type) return (false); if (desc->m.match.board_devid && board->board_devid != desc->board_devid) return (false); if (desc->m.match.board_rev && !bhnd_hwrev_matches(board->board_rev, &desc->board_rev)) return (false); return (true); } /** * Return true if the @p hwrev matches @p desc. * * @param hwrev A bhnd hardware revision. * @param desc A match descriptor to compare against @p core. * * @retval true if @p hwrev matches @p match. * @retval false if @p hwrev does not match @p match. */ bool bhnd_hwrev_matches(uint16_t hwrev, const struct bhnd_hwrev_match *desc) { if (desc->start != BHND_HWREV_INVALID && desc->start > hwrev) return false; if (desc->end != BHND_HWREV_INVALID && desc->end < hwrev) return false; return true; } /** * Return true if the @p dev matches @p desc. * * @param dev A bhnd device. * @param desc A match descriptor to compare against @p dev. * * @retval true if @p dev matches @p match. * @retval false if @p dev does not match @p match. */ bool bhnd_device_matches(device_t dev, const struct bhnd_device_match *desc) { struct bhnd_core_info core; const struct bhnd_chipid *chip; struct bhnd_board_info board; device_t parent; int error; /* Construct individual match descriptors */ struct bhnd_core_match m_core = { _BHND_CORE_MATCH_COPY(desc) }; struct bhnd_chip_match m_chip = { _BHND_CHIP_MATCH_COPY(desc) }; struct bhnd_board_match m_board = { _BHND_BOARD_MATCH_COPY(desc) }; /* Fetch and match core info */ if (m_core.m.match_flags) { /* Only applicable to bhnd-attached cores */ parent = device_get_parent(dev); if (device_get_devclass(parent) != devclass_find("bhnd")) { device_printf(dev, "attempting to match core " "attributes against non-core device\n"); return (false); } core = bhnd_get_core_info(dev); if (!bhnd_core_matches(&core, &m_core)) return (false); } /* Fetch and match chip info */ if (m_chip.m.match_flags) { chip = bhnd_get_chipid(dev); if (!bhnd_chip_matches(chip, &m_chip)) return (false); } /* Fetch and match board info. * * This is not available until after NVRAM is up; earlier device * matches should not include board requirements */ if (m_board.m.match_flags) { if ((error = bhnd_read_board_info(dev, &board))) { device_printf(dev, "failed to read required board info " "during device matching: %d\n", error); return (false); } if (!bhnd_board_matches(&board, &m_board)) return (false); } /* All matched */ return (true); } /** * Search @p table for an entry matching @p dev. * * @param dev A bhnd device to match against @p table. * @param table The device table to search. * @param entry_size The @p table entry size, in bytes. * * @retval non-NULL the first matching device, if any. * @retval NULL if no matching device is found in @p table. 
*/ const struct bhnd_device * bhnd_device_lookup(device_t dev, const struct bhnd_device *table, size_t entry_size) { const struct bhnd_device *entry; device_t hostb, parent; bhnd_attach_type attach_type; uint32_t dflags; parent = device_get_parent(dev); hostb = bhnd_bus_find_hostb_device(parent); attach_type = bhnd_get_attach_type(dev); for (entry = table; !BHND_DEVICE_IS_END(entry); entry = (const struct bhnd_device *) ((const char *) entry + entry_size)) { /* match core info */ if (!bhnd_device_matches(dev, &entry->core)) continue; /* match device flags */ dflags = entry->device_flags; /* hostb implies BHND_ATTACH_ADAPTER requirement */ if (dflags & BHND_DF_HOSTB) dflags |= BHND_DF_ADAPTER; if (dflags & BHND_DF_ADAPTER) if (attach_type != BHND_ATTACH_ADAPTER) continue; if (dflags & BHND_DF_HOSTB) if (dev != hostb) continue; if (dflags & BHND_DF_SOC) if (attach_type != BHND_ATTACH_NATIVE) continue; /* device found */ return (entry); } /* not found */ return (NULL); } /** * Scan the device @p table for all quirk flags applicable to @p dev. * * @param dev A bhnd device to match against @p table. * @param table The device table to search. * @param entry_size The @p table entry size, in bytes. * * @return all matching quirk flags. */ uint32_t bhnd_device_quirks(device_t dev, const struct bhnd_device *table, size_t entry_size) { const struct bhnd_device *dent; const struct bhnd_device_quirk *qent, *qtable; uint32_t quirks; /* Locate the device entry */ if ((dent = bhnd_device_lookup(dev, table, entry_size)) == NULL) return (0); /* Quirks table is optional */ qtable = dent->quirks_table; if (qtable == NULL) return (0); /* Collect matching device quirk entries */ quirks = 0; for (qent = qtable; !BHND_DEVICE_QUIRK_IS_END(qent); qent++) { if (bhnd_device_matches(dev, &qent->desc)) quirks |= qent->quirks; } return (quirks); } /** * Allocate bhnd(4) resources defined in @p rs from a parent bus. * * @param dev The device requesting ownership of the resources. * @param rs A standard bus resource specification. This will be updated * with the allocated resource's RIDs. * @param res On success, the allocated bhnd resources. * * @retval 0 success * @retval non-zero if allocation of any non-RF_OPTIONAL resource fails, * all allocated resources will be released and a regular * unix error code will be returned. */ int bhnd_alloc_resources(device_t dev, struct resource_spec *rs, struct bhnd_resource **res) { /* Initialize output array */ for (u_int i = 0; rs[i].type != -1; i++) res[i] = NULL; for (u_int i = 0; rs[i].type != -1; i++) { res[i] = bhnd_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); /* Clean up all allocations on failure */ if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bhnd_release_resources(dev, rs, res); return (ENXIO); } } return (0); } /** * Release bhnd(4) resources defined in @p rs from a parent bus. * * @param dev The device that owns the resources. * @param rs A standard bus resource specification previously initialized * by @p bhnd_alloc_resources. * @param res The bhnd resources to be released. */ void bhnd_release_resources(device_t dev, const struct resource_spec *rs, struct bhnd_resource **res) { for (u_int i = 0; rs[i].type != -1; i++) { if (res[i] == NULL) continue; bhnd_release_resource(dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * Allocate and return a new per-core PMU clock control/status (clkctl) * instance for @p dev. * * @param dev The bhnd(4) core device mapped by @p r. 
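bhnd_alloc_resources() and bhnd_release_resources(), documented above, operate on a standard resource_spec table whose rid fields are updated during allocation, with RF_OPTIONAL entries permitted to fail. A usage sketch under those assumptions (the two-entry table, foo_softc, and the attach/detach pair are illustrative):

static struct resource_spec foo_rspec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_OPTIONAL },
	{ -1,			0,	0 }
};

struct foo_softc {
	struct bhnd_resource	*foo_res[nitems(foo_rspec) - 1];
};

static int
foo_bhnd_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* On failure, any partial allocations are already released. */
	return (bhnd_alloc_resources(dev, foo_rspec, sc->foo_res));
}

static int
foo_bhnd_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	bhnd_release_resources(dev, foo_rspec, sc->foo_res);
	return (0);
}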
 * @param	pmu_dev	The bhnd(4) PMU device, implementing the bhnd_pmu_if
 *			interface. The caller is responsible for ensuring that
 *			this reference remains valid for the lifetime of the
 *			returned clkctl instance.
 * @param	r	A resource mapping the core's clock control register
 *			(see BHND_CLK_CTL_ST). The caller is responsible for
 *			ensuring that this resource remains valid for the
 *			lifetime of the returned clkctl instance.
 * @param	offset	The offset to the clock control register within @p r.
 * @param	max_latency The PMU's maximum state transition latency in
 *			microseconds; this upper bound will be used to
 *			busy-wait on PMU state transitions.
 *
 * @retval non-NULL	success
 * @retval NULL		if allocation fails.
 */
struct bhnd_core_clkctl *
bhnd_alloc_core_clkctl(device_t dev, device_t pmu_dev, struct bhnd_resource *r,
    bus_size_t offset, u_int max_latency)
{
	struct bhnd_core_clkctl *clkctl;

	clkctl = malloc(sizeof(*clkctl), M_BHND, M_ZERO | M_NOWAIT);
	if (clkctl == NULL)
		return (NULL);

	clkctl->cc_dev = dev;
	clkctl->cc_pmu_dev = pmu_dev;
	clkctl->cc_res = r;
	clkctl->cc_res_offset = offset;
	clkctl->cc_max_latency = max_latency;
	clkctl->cc_quirks = bhnd_device_quirks(dev, bhnd_clkctl_devices,
	    sizeof(bhnd_clkctl_devices[0]));

	BHND_CLKCTL_LOCK_INIT(clkctl);

	return (clkctl);
}

/**
 * Free a clkctl instance previously allocated via bhnd_alloc_core_clkctl().
 *
 * @param	clkctl	The clkctl instance to be freed.
 */
void
bhnd_free_core_clkctl(struct bhnd_core_clkctl *clkctl)
{
	BHND_CLKCTL_LOCK_DESTROY(clkctl);

	free(clkctl, M_BHND);
}

/**
 * Wait for the per-core clock status to be equal to @p value after
 * applying @p mask, timing out after the maximum transition latency is
 * reached.
 *
 * @param	clkctl	Per-core clkctl state to be queried.
 * @param	value	Value to wait for.
 * @param	mask	Mask to apply prior to value comparison.
 *
 * @retval 0 success
 * @retval ETIMEDOUT if the PMU's maximum transition delay is reached before
 * the clock status matches @p value and @p mask.
 */
int
bhnd_core_clkctl_wait(struct bhnd_core_clkctl *clkctl, uint32_t value,
    uint32_t mask)
{
	uint32_t clkst;

	BHND_CLKCTL_LOCK_ASSERT(clkctl, MA_OWNED);

	/* Bitswapped HTAVAIL/ALPAVAIL work-around */
	if (clkctl->cc_quirks & BHND_CLKCTL_QUIRK_CCS0) {
		uint32_t fmask, fval;

		fmask = mask & ~(BHND_CCS_HTAVAIL | BHND_CCS_ALPAVAIL);
		fval = value & ~(BHND_CCS_HTAVAIL | BHND_CCS_ALPAVAIL);

		if (mask & BHND_CCS_HTAVAIL)
			fmask |= BHND_CCS0_HTAVAIL;
		if (value & BHND_CCS_HTAVAIL)
			fval |= BHND_CCS0_HTAVAIL;

		if (mask & BHND_CCS_ALPAVAIL)
			fmask |= BHND_CCS0_ALPAVAIL;
		if (value & BHND_CCS_ALPAVAIL)
			fval |= BHND_CCS0_ALPAVAIL;

		mask = fmask;
		value = fval;
	}

	for (u_int i = 0; i < clkctl->cc_max_latency; i += 10) {
		clkst = bhnd_bus_read_4(clkctl->cc_res, clkctl->cc_res_offset);
		if ((clkst & mask) == (value & mask))
			return (0);

		DELAY(10);
	}

	device_printf(clkctl->cc_dev, "clkst wait timeout (value=%#x, "
	    "mask=%#x)\n", value, mask);

	return (ETIMEDOUT);
}

/**
 * Read an NVRAM variable's NUL-terminated string value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	buf	A buffer large enough to hold @p len bytes. On
 *			success, the NUL-terminated string value will be
 *			written to this buffer. This argument may be NULL if
 *			the value is not desired.
 * @param	len	The maximum capacity of @p buf.
 * @param[out]	rlen	On success, will be set to the actual size of
 *			the requested value (including NUL termination). This
 *			argument may be NULL if the size is not desired.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval ENOMEM	If @p buf is non-NULL and a buffer of @p len is too
 *			small to hold the requested value.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			string representation.
 * @retval ERANGE	If value coercion would overflow @p type.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_str(device_t dev, const char *name, char *buf, size_t len,
    size_t *rlen)
{
	size_t	larg;
	int	error;

	larg = len;
	error = bhnd_nvram_getvar(dev, name, buf, &larg,
	    BHND_NVRAM_TYPE_STRING);
	if (rlen != NULL)
		*rlen = larg;

	return (error);
}

/**
 * Read an NVRAM variable's unsigned integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 * @param	width	The output integer type width (1, 2, or 4 bytes).
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			unsigned integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow) an
 *			unsigned representation of the given @p width.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_uint(device_t dev, const char *name, void *value, int width)
{
	bhnd_nvram_type	type;
	size_t		len;

	switch (width) {
	case 1:
		type = BHND_NVRAM_TYPE_UINT8;
		break;
	case 2:
		type = BHND_NVRAM_TYPE_UINT16;
		break;
	case 4:
		type = BHND_NVRAM_TYPE_UINT32;
		break;
	default:
		device_printf(dev, "unsupported NVRAM integer width: %d\n",
		    width);
		return (EINVAL);
	}

	len = width;
	return (bhnd_nvram_getvar(dev, name, value, &len, type));
}

/**
 * Read an NVRAM variable's unsigned 8-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			unsigned integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			uint8_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_uint8(device_t dev, const char *name, uint8_t *value)
{
	return (bhnd_nvram_getvar_uint(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's unsigned 16-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			unsigned integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			uint16_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_uint16(device_t dev, const char *name, uint16_t *value)
{
	return (bhnd_nvram_getvar_uint(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's unsigned 32-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			unsigned integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			uint32_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_uint32(device_t dev, const char *name, uint32_t *value)
{
	return (bhnd_nvram_getvar_uint(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's signed integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 * @param	width	The output integer type width (1, 2, or 4 bytes).
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow) a
 *			signed representation of the given @p width.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_int(device_t dev, const char *name, void *value, int width)
{
	bhnd_nvram_type	type;
	size_t		len;

	switch (width) {
	case 1:
		type = BHND_NVRAM_TYPE_INT8;
		break;
	case 2:
		type = BHND_NVRAM_TYPE_INT16;
		break;
	case 4:
		type = BHND_NVRAM_TYPE_INT32;
		break;
	default:
		device_printf(dev, "unsupported NVRAM integer width: %d\n",
		    width);
		return (EINVAL);
	}

	len = width;
	return (bhnd_nvram_getvar(dev, name, value, &len, type));
}

/**
 * Read an NVRAM variable's signed 8-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			int8_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_int8(device_t dev, const char *name, int8_t *value)
{
	return (bhnd_nvram_getvar_int(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's signed 16-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			int16_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_int16(device_t dev, const char *name, int16_t *value)
{
	return (bhnd_nvram_getvar_int(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's signed 32-bit integer value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	value	On success, the requested value will be written
 *			to this pointer.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			integer representation.
 * @retval ERANGE	If value coercion would overflow (or underflow)
 *			int32_t.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_int32(device_t dev, const char *name, int32_t *value)
{
	return (bhnd_nvram_getvar_int(dev, name, value, sizeof(*value)));
}

/**
 * Read an NVRAM variable's array value.
 *
 * @param	dev	A bhnd bus child device.
 * @param	name	The NVRAM variable name.
 * @param[out]	buf	A buffer large enough to hold @p size bytes.
 *			On success, the requested value will be written
 *			to this buffer.
 * @param[in,out] size	The required number of bytes to write to
 *			@p buf.
 * @param	type	The desired array element data representation.
 *
 * @retval 0		success
 * @retval ENOENT	The requested variable was not found.
 * @retval ENODEV	No valid NVRAM source could be found.
 * @retval ENXIO	If less than @p size bytes are available.
 * @retval ENOMEM	If a buffer of @p size is too small to hold the
 *			requested value.
 * @retval EFTYPE	If the variable data cannot be coerced to a valid
 *			instance of @p type.
 * @retval ERANGE	If value coercion would overflow (or underflow) a
 *			representation of @p type.
 * @retval non-zero	If reading @p name otherwise fails, a regular unix
 *			error code will be returned.
 */
int
bhnd_nvram_getvar_array(device_t dev, const char *name, void *buf, size_t size,
    bhnd_nvram_type type)
{
	size_t	nbytes;
	int	error;

	/* Attempt read */
	nbytes = size;
	if ((error = bhnd_nvram_getvar(dev, name, buf, &nbytes, type)))
		return (error);

	/* Verify that the expected number of bytes were fetched */
	if (nbytes < size)
		return (ENXIO);

	return (0);
}
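The fixed-width wrappers above all funnel into bhnd_nvram_getvar() with an appropriately sized type; a caller picks the wrapper matching the variable's expected representation and lets the NVRAM layer handle coercion. A short usage sketch (foo_read_nvram and the variable names "boardtype" and "macaddr" are illustrative):

static void
foo_read_nvram(device_t dev)
{
	char		macaddr[18];
	size_t		rlen;
	uint32_t	boardtype;
	int		error;

	/* Coerced to a 32-bit unsigned integer by the NVRAM layer. */
	error = bhnd_nvram_getvar_uint32(dev, "boardtype", &boardtype);
	if (error)
		device_printf(dev, "boardtype unavailable: %d\n", error);

	/* NUL-terminated string; rlen reports the full required size. */
	error = bhnd_nvram_getvar_str(dev, "macaddr", macaddr,
	    sizeof(macaddr), &rlen);
	if (error)
		device_printf(dev, "macaddr unavailable: %d\n", error);
}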
 * @retval non-zero if registering @p provider otherwise fails, a regular
 * unix error code will be returned.
 */
int
bhnd_service_registry_add(struct bhnd_service_registry *bsr, device_t provider,
    bhnd_service_t service, uint32_t flags)
{
        struct bhnd_service_entry *entry;

        if (service == BHND_SERVICE_ANY)
                return (EINVAL);

        mtx_lock(&bsr->lock);

        /* Is a service provider already registered? */
        STAILQ_FOREACH(entry, &bsr->entries, link) {
                if (entry->service == service) {
                        mtx_unlock(&bsr->lock);
                        return (EEXIST);
                }
        }

        /* Initialize and insert our new entry */
        entry = malloc(sizeof(*entry), M_BHND, M_NOWAIT);
        if (entry == NULL) {
                mtx_unlock(&bsr->lock);
                return (ENOMEM);
        }

        entry->provider = provider;
        entry->service = service;
        entry->flags = flags;
        refcount_init(&entry->refs, 0);

        STAILQ_INSERT_HEAD(&bsr->entries, entry, link);
        mtx_unlock(&bsr->lock);

        return (0);
}

/**
 * Free an unreferenced registry entry.
 *
 * @param entry The entry to be deallocated.
 */
static void
bhnd_service_registry_free_entry(struct bhnd_service_entry *entry)
{
        KASSERT(entry->refs == 0, ("provider has active references"));
        free(entry, M_BHND);
}

/**
 * Attempt to remove the @p service provider registration for @p provider.
 *
 * @param bsr The service registry to be modified.
 * @param provider The service provider to be deregistered.
 * @param service The service for which @p provider will be deregistered,
 * or BHND_SERVICE_ANY to remove all service
 * registrations for @p provider.
 *
 * @retval 0 success
 * @retval EBUSY if active references to @p provider exist; see
 * bhnd_service_registry_retain() and
 * bhnd_service_registry_release().
 */
int
bhnd_service_registry_remove(struct bhnd_service_registry *bsr,
    device_t provider, bhnd_service_t service)
{
        struct bhnd_service_entry *entry, *enext;

        mtx_lock(&bsr->lock);

#define BHND_PROV_MATCH(_e)     \
        ((_e)->provider == provider &&  \
         (service == BHND_SERVICE_ANY || (_e)->service == service))

        /* Validate matching provider entries before making any
         * modifications */
        STAILQ_FOREACH(entry, &bsr->entries, link) {
                /* Skip non-matching entries */
                if (!BHND_PROV_MATCH(entry))
                        continue;

                /* Entry is in use? */
                if (entry->refs > 0) {
                        mtx_unlock(&bsr->lock);
                        return (EBUSY);
                }
        }

        /* We can now safely remove matching entries */
        STAILQ_FOREACH_SAFE(entry, &bsr->entries, link, enext) {
                /* Skip non-matching entries */
                if (!BHND_PROV_MATCH(entry))
                        continue;

                /* Remove from list */
                STAILQ_REMOVE(&bsr->entries, entry, bhnd_service_entry,
                    link);

                /* Free provider entry */
                bhnd_service_registry_free_entry(entry);
        }
#undef BHND_PROV_MATCH

        mtx_unlock(&bsr->lock);
        return (0);
}

/**
 * Retain and return a reference to a registered @p service provider, if any.
 *
 * @param bsr The service registry to be queried.
 * @param service The service for which a provider should be returned.
 *
 * On success, the caller assumes ownership of the returned provider, and
 * is responsible for releasing this reference via
 * bhnd_service_registry_release().
 *
 * @retval device_t success
 * @retval NULL if no provider is registered for @p service.
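 *
 * A minimal usage sketch (hypothetical caller; BHND_SERVICE_PMU is chosen
 * purely for illustration, and error handling is elided):
 * @code
 * device_t pmu;
 *
 * // hypothetical lookup against an already-initialized registry
 * if ((pmu = bhnd_service_registry_retain(bsr, BHND_SERVICE_PMU)) == NULL)
 *	return (ENXIO);
 *
 * // ... use the provider ...
 *
 * bhnd_service_registry_release(bsr, pmu, BHND_SERVICE_PMU);
 * @endcode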
 */
device_t
bhnd_service_registry_retain(struct bhnd_service_registry *bsr,
    bhnd_service_t service)
{
        struct bhnd_service_entry *entry;

        mtx_lock(&bsr->lock);
        STAILQ_FOREACH(entry, &bsr->entries, link) {
                if (entry->service != service)
                        continue;

                /* With a live refcount, entry is guaranteed to remain alive
                 * after we release our lock */
                refcount_acquire(&entry->refs);
                mtx_unlock(&bsr->lock);
                return (entry->provider);
        }
        mtx_unlock(&bsr->lock);

        /* Not found */
        return (NULL);
}

/**
 * Release a reference to a service provider previously returned by
 * bhnd_service_registry_retain().
 *
 * If this is the last reference to an inherited service provider registration
 * (see BHND_SPF_INHERITED), the registration will also be removed, and
 * true will be returned.
 *
 * @param bsr The service registry from which @p provider
 * was returned.
 * @param provider The provider to be released.
 * @param service The service for which @p provider was previously
 * retained.
 *
 * @retval true The inherited service provider registration was removed;
 * the caller should release its own reference to the
 * provider.
 * @retval false The service provider was not inherited, or active
 * references to the provider remain.
 *
 * @see BHND_SPF_INHERITED
 */
bool
bhnd_service_registry_release(struct bhnd_service_registry *bsr,
    device_t provider, bhnd_service_t service)
{
        struct bhnd_service_entry *entry;

        /* Exclusive lock, as we need to prevent any new references to the
         * entry from being taken if it's to be removed */
        mtx_lock(&bsr->lock);
        STAILQ_FOREACH(entry, &bsr->entries, link) {
                bool removed;

                if (entry->provider != provider)
                        continue;

                if (entry->service != service)
                        continue;

                if (refcount_release(&entry->refs) &&
                    (entry->flags & BHND_SPF_INHERITED))
                {
                        /* If an inherited entry is no longer actively
                         * referenced, remove the local registration and inform
                         * the caller. */
                        STAILQ_REMOVE(&bsr->entries, entry,
                            bhnd_service_entry, link);
                        bhnd_service_registry_free_entry(entry);

                        removed = true;
                } else {
                        removed = false;
                }

                mtx_unlock(&bsr->lock);
                return (removed);
        }

        /* Caller owns a reference, but no such provider is registered? */
        panic("invalid service provider reference");
}

/**
 * Using the bhnd(4) bus-level core information and a custom core name,
 * populate @p dev's device description.
 *
 * @param dev A bhnd-bus attached device.
 * @param dev_name The core's name (e.g. "SDIO Device Core").
 */
void
bhnd_set_custom_core_desc(device_t dev, const char *dev_name)
{
        const char *vendor_name;
        char *desc;

        vendor_name = bhnd_get_vendor_name(dev);
        asprintf(&desc, M_BHND, "%s %s, rev %hhu", vendor_name, dev_name,
            bhnd_get_hwrev(dev));

        if (desc != NULL) {
                device_set_desc_copy(dev, desc);
                free(desc, M_BHND);
        } else {
                device_set_desc(dev, dev_name);
        }
}

/**
 * Using the bhnd(4) bus-level core information, populate @p dev's device
 * description.
 *
 * @param dev A bhnd-bus attached device.
 */
void
bhnd_set_default_core_desc(device_t dev)
{
        bhnd_set_custom_core_desc(dev, bhnd_get_device_name(dev));
}

/**
 * Using the bhnd @p chip_id, populate the bhnd(4) bus @p dev's device
 * description.
 *
 * @param dev A bhnd-bus attached device.
 * @param chip_id The chip identification.
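 *
 * A minimal usage sketch (assumes the chip ID has already been fetched,
 * e.g. via BHND_BUS_GET_CHIPID()):
 * @code
 * const struct bhnd_chipid *cid;
 *
 * cid = BHND_BUS_GET_CHIPID(device_get_parent(dev), dev);
 * bhnd_set_default_bus_desc(dev, cid);
 * @endcode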
 */
void
bhnd_set_default_bus_desc(device_t dev, const struct bhnd_chipid *chip_id)
{
        const char      *bus_name;
        char            *desc;
        char             chip_name[BHND_CHIPID_MAX_NAMELEN];

        /* Determine chip type's bus name */
        switch (chip_id->chip_type) {
        case BHND_CHIPTYPE_SIBA:
                bus_name = "SIBA bus";
                break;
        case BHND_CHIPTYPE_BCMA:
        case BHND_CHIPTYPE_BCMA_ALT:
                bus_name = "BCMA bus";
                break;
        case BHND_CHIPTYPE_UBUS:
                bus_name = "UBUS bus";
                break;
        default:
                bus_name = "Unknown Type";
                break;
        }

        /* Format chip name */
        bhnd_format_chip_id(chip_name, sizeof(chip_name),
            chip_id->chip_id);

        /* Format and set device description */
        asprintf(&desc, M_BHND, "%s %s", chip_name, bus_name);
        if (desc != NULL) {
                device_set_desc_copy(dev, desc);
                free(desc, M_BHND);
        } else {
                device_set_desc(dev, bus_name);
        }
}

/**
 * Helper function for implementing BHND_BUS_REGISTER_PROVIDER().
 *
 * This implementation delegates the request to the BHND_BUS_REGISTER_PROVIDER()
 * method on the parent of @p dev. If no parent exists, the implementation
 * will return an error.
 */
int
bhnd_bus_generic_register_provider(device_t dev, device_t child,
    device_t provider, bhnd_service_t service)
{
        device_t parent = device_get_parent(dev);

        if (parent != NULL) {
                return (BHND_BUS_REGISTER_PROVIDER(parent, child, provider,
                    service));
        }

        return (ENXIO);
}

/**
 * Helper function for implementing BHND_BUS_DEREGISTER_PROVIDER().
 *
 * This implementation delegates the request to the
 * BHND_BUS_DEREGISTER_PROVIDER() method on the parent of @p dev. If no parent
 * exists, the implementation will panic.
 */
int
bhnd_bus_generic_deregister_provider(device_t dev, device_t child,
    device_t provider, bhnd_service_t service)
{
        device_t parent = device_get_parent(dev);

        if (parent != NULL) {
                return (BHND_BUS_DEREGISTER_PROVIDER(parent, child, provider,
                    service));
        }

        panic("missing BHND_BUS_DEREGISTER_PROVIDER()");
}

/**
 * Helper function for implementing BHND_BUS_RETAIN_PROVIDER().
 *
 * This implementation delegates the request to the
 * BHND_BUS_RETAIN_PROVIDER() method on the parent of @p dev. If no parent
 * exists, the implementation will return NULL.
 */
device_t
bhnd_bus_generic_retain_provider(device_t dev, device_t child,
    bhnd_service_t service)
{
        device_t parent = device_get_parent(dev);

        if (parent != NULL) {
                return (BHND_BUS_RETAIN_PROVIDER(parent, child,
                    service));
        }

        return (NULL);
}

/**
 * Helper function for implementing BHND_BUS_RELEASE_PROVIDER().
 *
 * This implementation delegates the request to the
 * BHND_BUS_RELEASE_PROVIDER() method on the parent of @p dev. If no parent
 * exists, the implementation will panic.
 */
void
bhnd_bus_generic_release_provider(device_t dev, device_t child,
    device_t provider, bhnd_service_t service)
{
        device_t parent = device_get_parent(dev);

        if (parent != NULL) {
                return (BHND_BUS_RELEASE_PROVIDER(parent, child, provider,
                    service));
        }

        panic("missing BHND_BUS_RELEASE_PROVIDER()");
}

/**
 * Helper function for implementing BHND_BUS_REGISTER_PROVIDER().
 *
 * This implementation uses the bhnd_service_registry_add() function to
 * do most of the work. It calls BHND_BUS_GET_SERVICE_REGISTRY() to find
 * a suitable service registry to edit.
 */
int
bhnd_bus_generic_sr_register_provider(device_t dev, device_t child,
    device_t provider, bhnd_service_t service)
{
        struct bhnd_service_registry *bsr;

        bsr = BHND_BUS_GET_SERVICE_REGISTRY(dev, child);

        KASSERT(bsr != NULL, ("NULL service registry"));

        return (bhnd_service_registry_add(bsr, provider, service, 0));
}

/**
 * Helper function for implementing BHND_BUS_DEREGISTER_PROVIDER().
* * This implementation uses the bhnd_service_registry_remove() function to * do most of the work. It calls BHND_BUS_GET_SERVICE_REGISTRY() to find * a suitable service registry to edit. */ int bhnd_bus_generic_sr_deregister_provider(device_t dev, device_t child, device_t provider, bhnd_service_t service) { struct bhnd_service_registry *bsr; bsr = BHND_BUS_GET_SERVICE_REGISTRY(dev, child); KASSERT(bsr != NULL, ("NULL service registry")); return (bhnd_service_registry_remove(bsr, provider, service)); } /** * Helper function for implementing BHND_BUS_RETAIN_PROVIDER(). * * This implementation uses the bhnd_service_registry_retain() function to * do most of the work. It calls BHND_BUS_GET_SERVICE_REGISTRY() to find * a suitable service registry. * * If a local provider for the service is not available, and a parent device is * available, this implementation will attempt to fetch and locally register * a service provider reference from the parent of @p dev. */ device_t bhnd_bus_generic_sr_retain_provider(device_t dev, device_t child, bhnd_service_t service) { struct bhnd_service_registry *bsr; device_t parent, provider; int error; bsr = BHND_BUS_GET_SERVICE_REGISTRY(dev, child); KASSERT(bsr != NULL, ("NULL service registry")); /* * Attempt to fetch a service provider reference from either the local * service registry, or if not found, from our parent. * * If we fetch a provider from our parent, we register the provider * with the local service registry to prevent conflicting local * registrations from being added. */ while (1) { /* Check the local service registry first */ provider = bhnd_service_registry_retain(bsr, service); if (provider != NULL) return (provider); /* Otherwise, try to delegate to our parent (if any) */ if ((parent = device_get_parent(dev)) == NULL) return (NULL); provider = BHND_BUS_RETAIN_PROVIDER(parent, dev, service); if (provider == NULL) return (NULL); /* Register the inherited service registration with the local * registry */ error = bhnd_service_registry_add(bsr, provider, service, BHND_SPF_INHERITED); if (error) { BHND_BUS_RELEASE_PROVIDER(parent, dev, provider, service); if (error == EEXIST) { /* A valid service provider was registered * concurrently; retry fetching from the local * registry */ continue; } device_printf(dev, "failed to register service " "provider: %d\n", error); return (NULL); } } } /** * Helper function for implementing BHND_BUS_RELEASE_PROVIDER(). * * This implementation uses the bhnd_service_registry_release() function to * do most of the work. It calls BHND_BUS_GET_SERVICE_REGISTRY() to find * a suitable service registry. */ void bhnd_bus_generic_sr_release_provider(device_t dev, device_t child, device_t provider, bhnd_service_t service) { struct bhnd_service_registry *bsr; bsr = BHND_BUS_GET_SERVICE_REGISTRY(dev, child); KASSERT(bsr != NULL, ("NULL service registry")); /* Release the provider reference; if the refcount hits zero on an * inherited reference, true will be returned, and we need to drop * our own bus reference to the provider */ if (!bhnd_service_registry_release(bsr, provider, service)) return; /* Drop our reference to the borrowed provider */ BHND_BUS_RELEASE_PROVIDER(device_get_parent(dev), dev, provider, service); } /** * Helper function for implementing BHND_BUS_IS_HW_DISABLED(). * * If a parent device is available, this implementation delegates the * request to the BHND_BUS_IS_HW_DISABLED() method on the parent of @p dev. * * If no parent device is available (i.e. 
at the bus root), the hardware
 * is assumed to be usable and false is returned.
 */
bool
bhnd_bus_generic_is_hw_disabled(device_t dev, device_t child)
{
        if (device_get_parent(dev) != NULL)
                return (BHND_BUS_IS_HW_DISABLED(device_get_parent(dev),
                    child));

        return (false);
}

/**
 * Helper function for implementing BHND_BUS_GET_CHIPID().
 *
 * This implementation delegates the request to the BHND_BUS_GET_CHIPID()
 * method on the parent of @p dev. If no parent exists, the implementation
 * will panic.
 */
const struct bhnd_chipid *
bhnd_bus_generic_get_chipid(device_t dev, device_t child)
{
        if (device_get_parent(dev) != NULL)
                return (BHND_BUS_GET_CHIPID(device_get_parent(dev), child));

        panic("missing BHND_BUS_GET_CHIPID()");
}

/**
 * Helper function for implementing BHND_BUS_GET_DMA_TRANSLATION().
 *
 * If a parent device is available, this implementation delegates the
 * request to the BHND_BUS_GET_DMA_TRANSLATION() method on the parent of @p dev.
 *
 * If no parent device is available, this implementation will panic.
 */
int
bhnd_bus_generic_get_dma_translation(device_t dev, device_t child, u_int width,
    uint32_t flags, bus_dma_tag_t *dmat,
    struct bhnd_dma_translation *translation)
{
        if (device_get_parent(dev) != NULL) {
                return (BHND_BUS_GET_DMA_TRANSLATION(device_get_parent(dev),
                    child, width, flags, dmat, translation));
        }

        panic("missing BHND_BUS_GET_DMA_TRANSLATION()");
}

/* nvram board_info population macros for bhnd_bus_generic_read_board_info() */
#define BHND_GV(_dest, _name)                                           \
        bhnd_nvram_getvar_uint(child, BHND_NVAR_ ## _name, &_dest,      \
            sizeof(_dest))

#define REQ_BHND_GV(_dest, _name)               do {                    \
        if ((error = BHND_GV(_dest, _name))) {                          \
                device_printf(dev,                                      \
                    "error reading " __STRING(_name) ": %d\n", error);  \
                return (error);                                         \
        }                                                               \
} while(0)

#define OPT_BHND_GV(_dest, _name, _default)     do {                    \
        if ((error = BHND_GV(_dest, _name))) {                          \
                if (error != ENOENT) {                                  \
                        device_printf(dev,                              \
                            "error reading "                            \
                            __STRING(_name) ": %d\n", error);           \
                        return (error);                                 \
                }                                                       \
                _dest = _default;                                       \
        }                                                               \
} while(0)

/**
 * Helper function for implementing BHND_BUS_READ_BOARDINFO().
 *
 * This implementation populates @p info with information from NVRAM,
 * defaulting board_vendor and board_type fields to 0 if the
 * requested variables cannot be found.
 *
 * This behavior is correct for most SoCs, but must be overridden on
 * bridged (PCI, PCMCIA, etc) devices to produce a complete bhnd_board_info
 * result.
 */
int
bhnd_bus_generic_read_board_info(device_t dev, device_t child,
    struct bhnd_board_info *info)
{
        int     error;

        OPT_BHND_GV(info->board_vendor, BOARDVENDOR, 0);
        OPT_BHND_GV(info->board_type, BOARDTYPE, 0);     /* srom >= 2 */
        OPT_BHND_GV(info->board_devid, DEVID, 0);        /* srom >= 8 */
        REQ_BHND_GV(info->board_rev, BOARDREV);
        OPT_BHND_GV(info->board_srom_rev, SROMREV, 0);   /* missing in some
                                                            SoC NVRAM */
        REQ_BHND_GV(info->board_flags, BOARDFLAGS);
        OPT_BHND_GV(info->board_flags2, BOARDFLAGS2, 0); /* srom >= 4 */
        OPT_BHND_GV(info->board_flags3, BOARDFLAGS3, 0); /* srom >= 11 */

        return (0);
}

#undef BHND_GV
#undef REQ_BHND_GV
#undef OPT_BHND_GV

/**
 * Helper function for implementing BHND_BUS_GET_NVRAM_VAR().
 *
 * This implementation searches @p dev for a usable NVRAM child device.
 *
 * If no usable child device is found on @p dev, the request is delegated to
 * the BHND_BUS_GET_NVRAM_VAR() method on the parent of @p dev.
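 *
 * A sketch of how a bus driver might wire this helper into its method table
 * (the "foo" driver is hypothetical; the method name is assumed to follow
 * bhnd_bus_if.m):
 * @code
 * static device_method_t foo_methods[] = {
 *	// delegate NVRAM variable lookups to the generic implementation
 *	DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var),
 *	DEVMETHOD_END
 * };
 * @endcode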
 */
int
bhnd_bus_generic_get_nvram_var(device_t dev, device_t child, const char *name,
    void *buf, size_t *size, bhnd_nvram_type type)
{
        device_t        nvram;
        device_t        parent;

        bus_topo_assert();

        /* Look for a directly-attached NVRAM child */
        if ((nvram = device_find_child(dev, "bhnd_nvram", -1)) != NULL)
                return (BHND_NVRAM_GETVAR(nvram, name, buf, size, type));

        /* Try to delegate to parent */
        if ((parent = device_get_parent(dev)) == NULL)
                return (ENODEV);

        return (BHND_BUS_GET_NVRAM_VAR(parent, child, name, buf, size, type));
}

/**
 * Helper function for implementing BHND_BUS_ALLOC_RESOURCE().
 *
 * This implementation of BHND_BUS_ALLOC_RESOURCE() delegates allocation
 * of the underlying resource to BUS_ALLOC_RESOURCE(), and activation
 * to @p dev's BHND_BUS_ACTIVATE_RESOURCE().
 */
struct bhnd_resource *
bhnd_bus_generic_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
        struct bhnd_resource    *br;
        struct resource         *res;
        int                      error;

        br = NULL;
        res = NULL;

        /* Allocate the real bus resource (without activating it) */
        res = BUS_ALLOC_RESOURCE(dev, child, type, rid, start, end, count,
            (flags & ~RF_ACTIVE));
        if (res == NULL)
                return (NULL);

        /* Allocate our bhnd resource wrapper. */
        br = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT);
        if (br == NULL)
                goto failed;

        br->direct = false;
        br->res = res;

        /* Attempt activation */
        if (flags & RF_ACTIVE) {
                error = BHND_BUS_ACTIVATE_RESOURCE(dev, child, type, *rid, br);
                if (error)
                        goto failed;
        }

        return (br);

failed:
        if (res != NULL)
-               BUS_RELEASE_RESOURCE(dev, child, type, *rid, res);
+               BUS_RELEASE_RESOURCE(dev, child, res);

        free(br, M_BHND);
        return (NULL);
}

/**
 * Helper function for implementing BHND_BUS_RELEASE_RESOURCE().
 *
 * This implementation of BHND_BUS_RELEASE_RESOURCE() delegates release of
 * the backing resource to BUS_RELEASE_RESOURCE().
 */
int
bhnd_bus_generic_release_resource(device_t dev, device_t child, int type,
    int rid, struct bhnd_resource *r)
{
        int error;

-       if ((error = BUS_RELEASE_RESOURCE(dev, child, type, rid, r->res)))
+       if ((error = BUS_RELEASE_RESOURCE(dev, child, r->res)))
                return (error);

        free(r, M_BHND);
        return (0);
}

/**
 * Helper function for implementing BHND_BUS_ACTIVATE_RESOURCE().
 *
 * This implementation of BHND_BUS_ACTIVATE_RESOURCE() first calls the
 * BHND_BUS_ACTIVATE_RESOURCE() method of the parent of @p dev.
 *
 * If this fails, and if @p dev is the direct parent of @p child, standard
 * resource activation is attempted via bus_activate_resource(). This enables
 * direct use of the bhnd(4) resource APIs on devices that may not be attached
 * to a parent bhnd bus or bridge.
 */
int
bhnd_bus_generic_activate_resource(device_t dev, device_t child, int type,
    int rid, struct bhnd_resource *r)
{
        int     error;
        bool    passthrough;

        passthrough = (device_get_parent(child) != dev);

        /* Try to delegate to the parent */
        if (device_get_parent(dev) != NULL) {
                error = BHND_BUS_ACTIVATE_RESOURCE(device_get_parent(dev),
                    child, type, rid, r);
        } else {
                error = ENODEV;
        }

        /* If bhnd(4) activation has failed and we're the child's direct
         * parent, try falling back on standard resource activation.
         */
        if (error && !passthrough) {
                error = bus_activate_resource(child, type, rid, r->res);
                if (!error)
                        r->direct = true;
        }

        return (error);
}

/**
 * Helper function for implementing BHND_BUS_DEACTIVATE_RESOURCE().
 *
 * This implementation of BHND_BUS_DEACTIVATE_RESOURCE() simply calls the
 * BHND_BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev.
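 *
 * As with the other generic helpers, a subclass driver would typically map
 * this directly in its method table (hypothetical; method name assumed to
 * follow bhnd_bus_if.m):
 * @code
 * DEVMETHOD(bhnd_bus_deactivate_resource,
 *     bhnd_bus_generic_deactivate_resource),
 * @endcode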
*/ int bhnd_bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { if (device_get_parent(dev) != NULL) return (BHND_BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r)); return (EINVAL); } /** * Helper function for implementing BHND_BUS_GET_INTR_DOMAIN(). * * This implementation simply returns the address of nearest bhnd(4) bus, * which may be @p dev; this behavior may be incompatible with FDT/OFW targets. */ uintptr_t bhnd_bus_generic_get_intr_domain(device_t dev, device_t child, bool self) { return ((uintptr_t)dev); } diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index 5148c1c8452b..af62057690ac 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2301 +1,2300 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). 
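 *
 * Subclass drivers (e.g. the PCI-attached bridge driver) are expected to
 * call bhndb_attach() from their DEVICE_ATTACH() implementation; see
 * bhndb_attach() below.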
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). */ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against. 
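 *
 * @retval true if every hardware requirement in @p hw is matched by an
 * enabled core in @p cores.
 * @retval false otherwise.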
*/ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. */ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. 
 */
                        error = bhndb_add_resource_region(br, addr, size,
                            BHNDB_PRIORITY_HIGH, alloc_flags, regw);
                        if (error)
                                return (error);
                }
        }

        /*
         * Perform priority accounting and register bridge regions for all
         * ports defined in the priority table
         */
        for (u_int i = 0; i < ncores; i++) {
                struct bhnd_core_info   *core;
                struct bhnd_core_match   md;

                core = &cores[i];
                md = bhnd_core_get_match_desc(core);

                /*
                 * Skip priority accounting for cores that ...
                 */

                /* ... do not require bridge resources */
                if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core))
                        continue;

                /* ... do not have a priority table entry */
                hp = bhndb_hw_priority_find_core(table, core);
                if (hp == NULL)
                        continue;

                /* ... are explicitly disabled in the priority table. */
                if (hp->priority == BHNDB_PRIORITY_NONE)
                        continue;

                /* Determine the number of dynamic windows required and
                 * register their bus_region entries. */
                for (u_int i = 0; i < hp->num_ports; i++) {
                        const struct bhndb_port_priority *pp;

                        pp = &hp->ports[i];

                        /* Fetch the address+size of the mapped port. */
                        error = bhnd_erom_lookup_core_addr(erom, &md,
                            pp->type, pp->port, pp->region,
                            NULL, &addr, &size);
                        if (error) {
                                /* Skip ports not defined on this device */
                                if (error == ENOENT)
                                        continue;

                                return (error);
                        }

                        /* Skip ports with an existing static mapping */
                        if (bhndb_has_static_region_mapping(br, addr, size))
                                continue;

                        /* Define a dynamic region for this port */
                        error = bhndb_add_resource_region(br, addr, size,
                            pp->priority, pp->alloc_flags, NULL);
                        if (error)
                                return (error);

                        /* Update port mapping counts */
                        switch (pp->priority) {
                        case BHNDB_PRIORITY_NONE:
                                break;
                        case BHNDB_PRIORITY_LOW:
                                prio_low++;
                                break;
                        case BHNDB_PRIORITY_DEFAULT:
                                prio_default++;
                                break;
                        case BHNDB_PRIORITY_HIGH:
                                prio_high++;
                                break;
                        }
                }
        }

        /* Determine the minimum priority at which we'll allocate direct
         * register windows from our dynamic pool */
        size_t prio_total = prio_low + prio_default + prio_high;
        if (prio_total <= br->dwa_count) {
                /* low+default+high priority regions get windows */
                br->min_prio = BHNDB_PRIORITY_LOW;

        } else if (prio_default + prio_high <= br->dwa_count) {
                /* default+high priority regions get windows */
                br->min_prio = BHNDB_PRIORITY_DEFAULT;

        } else {
                /* high priority regions get windows */
                br->min_prio = BHNDB_PRIORITY_HIGH;
        }

        if (BHNDB_DEBUG(PRIO)) {
                struct bhndb_region     *region;
                const char              *direct_msg, *type_msg;
                bhndb_priority_t         prio, prio_min;
                uint32_t                 flags;

                prio_min = br->min_prio;
                device_printf(sc->dev, "min_prio: %d\n", prio_min);

                STAILQ_FOREACH(region, &br->bus_regions, link) {
                        prio = region->priority;
                        flags = region->alloc_flags;

                        direct_msg = prio >= prio_min ? "direct" : "indirect";
                        type_msg = region->static_regwin ? "static" :
                            "dynamic";

                        device_printf(sc->dev, "region 0x%llx+0x%llx priority "
                            "%u %s/%s", (unsigned long long) region->addr,
                            (unsigned long long) region->size,
                            region->priority, direct_msg, type_msg);

                        if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT)
                                printf(" [overcommit]\n");
                        else
                                printf("\n");
                }
        }

        return (0);
}

/**
 * Find a hardware specification for @p dev.
 *
 * @param sc The bhndb device state.
 * @param cores All cores enumerated on the bridged bhnd bus.
 * @param ncores The length of @p cores.
 * @param[out] hw On success, the hardware specification matched
 * with @p dev.
 *
 * @retval 0 success
 * @retval non-zero if an error occurs fetching device info for comparison.
 */
static int
bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores,
    u_int ncores, const struct bhndb_hw **hw)
{
        const struct bhndb_hw  *next, *hw_table;

        /* Search for the first matching hardware config.
*/ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge. * @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. */ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * Default 
bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Delete children */ if ((error = device_delete_children(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). 
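 *
 * bhndb(4) does not currently export any bus-specific instance variables;
 * ENOENT is returned for all requests.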
*/ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). 
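 *
 * Returns a pointer to the bridge's common service registry, as initialized
 * by bhndb_attach().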
*/ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. */ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); rman_set_type(rv, type); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int -bhndb_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bhndb_release_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ - if (bhndb_get_rman(sc, child, type) == NULL) { + if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), - child, type, rid, r)); + child, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), - type, rid); + rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). 
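 *
 * For locally managed resource types, an active resource may only be
 * adjusted within the limits of its current bridged mapping; see
 * bhndb_find_resource_limits().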
 */
static int
bhndb_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{
        struct bhndb_softc      *sc;
        struct rman             *rm;
        rman_res_t               mstart, mend;
        int                      error;

        sc = device_get_softc(dev);
        error = 0;

        /* Delegate to our parent device's bus if the requested resource type
         * isn't handled locally. */
        rm = bhndb_get_rman(sc, child, rman_get_type(r));
        if (rm == NULL) {
                return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev),
                    child, r, start, end));
        }

        /* Verify basic constraints */
        if (end <= start)
                return (EINVAL);

        if (!rman_is_region_manager(r, rm))
                return (ENXIO);

        BHNDB_LOCK(sc);

        /* If not active, allow any range permitted by the resource manager */
        if (!(rman_get_flags(r) & RF_ACTIVE))
                goto done;

        /* Otherwise, the range is limited by the bridged resource mapping */
        error = bhndb_find_resource_limits(sc->bus_res, r, &mstart, &mend);
        if (error)
                goto done;

        if (start < mstart || end > mend) {
                error = EINVAL;
                goto done;
        }

        /* Fall through */
done:
        if (!error)
                error = rman_adjust_resource(r, start, end);

        BHNDB_UNLOCK(sc);
        return (error);
}

/**
 * Initialize child resource @p r with a virtual address, tag, and handle
 * copied from @p parent, adjusted to contain only the range defined by
 * @p offset and @p size.
 *
 * @param r The resource to be initialized.
 * @param parent The parent bus resource that fully contains the subregion.
 * @param offset The subregion offset within @p parent.
 * @param size The subregion size.
 */
static int
bhndb_init_child_resource(struct resource *r,
    struct resource *parent, bhnd_size_t offset, bhnd_size_t size)
{
        bus_space_handle_t      bh, child_bh;
        bus_space_tag_t         bt;
        uintptr_t               vaddr;
        int                     error;

        /* Fetch the parent resource's real bus values */
        vaddr = (uintptr_t) rman_get_virtual(parent);
        bt = rman_get_bustag(parent);
        bh = rman_get_bushandle(parent);

        /* Configure child resource with window-adjusted real bus values */
        vaddr += offset;
        error = bus_space_subregion(bt, bh, offset, size, &child_bh);
        if (error)
                return (error);

        rman_set_virtual(r, (void *) vaddr);
        rman_set_bustag(r, bt);
        rman_set_bushandle(r, child_bh);

        return (0);
}

/**
 * Attempt activation of a fixed register window mapping for @p child.
 *
 * @param sc BHNDB device state.
 * @param region The static region definition capable of mapping @p r.
 * @param child A child requesting resource activation.
 * @param r Resource to be activated.
 *
 * @retval 0 if @p r was activated successfully
 * @retval ENOENT if no fixed register window was found.
 * @retval non-zero if @p r could not be activated.
 */
static int
bhndb_activate_static_region(struct bhndb_softc *sc,
    struct bhndb_region *region, device_t child, struct resource *r)
{
        struct resource                 *bridge_res;
        const struct bhndb_regwin       *win;
        bhnd_size_t                      parent_offset;
        rman_res_t                       r_start, r_size;
        int                              error;

        win = region->static_regwin;

        KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type),
            ("can't activate non-static region"));

        r_start = rman_get_start(r);
        r_size = rman_get_size(r);

        /* Find the corresponding bridge resource */
        bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win);
        if (bridge_res == NULL)
                return (ENXIO);

        /* Calculate subregion offset within the parent resource */
        parent_offset = r_start - region->addr;
        parent_offset += win->win_offset;

        /* Configure resource with its real bus values.
*/ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained. */ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. 
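 *
 * The caller must not hold the bridge state lock; it is acquired internally
 * when a dynamic register window must be retained.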
 */
static int
bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child,
    struct resource *r, bool *indirect)
{
        struct bhndb_region     *region;
        struct bhndb_dw_alloc   *dwa;
        bhndb_priority_t         dw_priority;
        rman_res_t               r_start, r_size;
        rman_res_t               parent_offset;
        int                      error, type;

        BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED);

        if (indirect != NULL)
                *indirect = false;

        type = rman_get_type(r);
        switch (type) {
        case SYS_RES_IRQ:
                /* IRQ resources are always directly mapped */
                return (rman_activate_resource(r));

        case SYS_RES_MEMORY:
                /* Handled below */
                break;

        default:
                device_printf(sc->dev, "unsupported resource type %d\n",
                    type);
                return (ENXIO);
        }

        /* Only MMIO resources can be mapped via register windows */
        KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type));

        r_start = rman_get_start(r);
        r_size = rman_get_size(r);

        /* Activate native addrspace resources using the host address space */
        if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) {
                struct resource *parent;

                /* Find the bridge resource referenced by the child */
                parent = bhndb_host_resource_for_range(sc->bus_res->res,
                    type, r_start, r_size);
                if (parent == NULL) {
                        device_printf(sc->dev, "host resource not found "
                            "for 0x%llx-0x%llx\n",
                            (unsigned long long) r_start,
                            (unsigned long long) r_start + r_size - 1);
                        return (ENOENT);
                }

                /* Initialize child resource with the real bus values */
                error = bhndb_init_child_resource(r, parent,
                    r_start - rman_get_start(parent), r_size);
                if (error)
                        return (error);

                /* Try to activate child resource */
                return (rman_activate_resource(r));
        }

        /* Default to low priority */
        dw_priority = BHNDB_PRIORITY_LOW;

        /* Look for a bus region matching the resource's address range */
        region = bhndb_find_resource_region(sc->bus_res, r_start, r_size);
        if (region != NULL)
                dw_priority = region->priority;

        /* Prefer static mappings over consuming a dynamic window. */
        if (region && region->static_regwin) {
                error = bhndb_activate_static_region(sc, region, child, r);
                if (error)
                        device_printf(sc->dev, "static window allocation "
                            "for 0x%llx-0x%llx failed\n",
                            (unsigned long long) r_start,
                            (unsigned long long) r_start + r_size - 1);
                return (error);
        }

        /* A dynamic window will be required; is this resource high enough
         * priority to be reserved a dynamic window? */
        if (dw_priority < sc->bus_res->min_prio) {
                if (indirect)
                        *indirect = true;

                return (ENOMEM);
        }

        /* Find and retain a usable window */
        BHNDB_LOCK(sc); {
                dwa = bhndb_retain_dynamic_window(sc, r);
        } BHNDB_UNLOCK(sc);

        if (dwa == NULL) {
                if (indirect)
                        *indirect = true;

                return (ENOMEM);
        }

        /* Configure resource with its real bus values. */
        parent_offset = dwa->win->win_offset;
        parent_offset += r_start - dwa->target;

        error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset,
            dwa->win->win_size);
        if (error)
                goto failed;

        /* Mark active */
        if ((error = rman_activate_resource(r)))
                goto failed;

        return (0);

failed:
        /* Release our region allocation. */
        BHNDB_LOCK(sc);
        bhndb_dw_release(sc->bus_res, dwa, r);
        BHNDB_UNLOCK(sc);

        return (error);
}

/**
 * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE().
 */
static int
bhndb_activate_resource(device_t dev, device_t child, struct resource *r)
{
        struct bhndb_softc *sc = device_get_softc(dev);

        /* Delegate directly to our parent device's bus if the requested
         * resource type isn't handled locally.
*/ if (bhndb_get_rman(sc, child, rman_get_type(r)) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, r)); } return (bhndb_try_activate_resource(sc, child, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error, type; sc = device_get_softc(dev); type = rman_get_type(r); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. 
*/ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. 
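 *
 * A window may only be borrowed if it fully contains the requested
 * range; a sketch of the containment test applied in the loop below
 * (an illustrative restatement, not the driver's literal code):
 *
 *	static bool
 *	dw_contains(bus_addr_t addr, bus_size_t size,
 *	    bus_addr_t target, bus_size_t win_size)
 *	{
 *		// is [addr, addr+size) within [target, target+win_size)?
 *		return (addr >= target &&
 *		    (addr - target) + size <= win_size);
 *	}
 *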
*/ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. 
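 *
 * Equivalently (an illustrative simplification of the test below):
 * retargeting is required whenever the request is not contained in
 * [target, target + win_size):
 *
 *	bool must_retarget =
 *	    addr < dwa->target ||
 *	    (addr - dwa->target) + size > dwa->win->win_size;
 *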
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). 
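 *
 * Like the generated read/write methods above, this relies on the
 * BHNDB_IO_COMMON_SETUP()/BHNDB_IO_COMMON_TEARDOWN() pair. For
 * reference, a rough hand-expansion of BHNDB_IO_READ(uint32_t, 4)
 * (illustrative; the preprocessor output is not formatted like this):
 *
 *	static uint32_t
 *	bhndb_bus_read_4(device_t dev, device_t child,
 *	    struct bhnd_resource *r, bus_size_t offset)
 *	{
 *		uint32_t v;
 *		// BHNDB_IO_COMMON_SETUP(sizeof(uint32_t)): lock the
 *		// bridge and map a register window covering the request
 *		v = bus_read_4(io_res, io_offset);
 *		// BHNDB_IO_COMMON_TEARDOWN(): return any stolen window
 *		// and unlock
 *		return (v);
 *	}
 *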
*/ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { u_int ivec; int error; /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). 
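 *
 * Children reach this through the standard interrupt API; a minimal
 * sketch of the usual sequence in a child driver (error handling
 * omitted, softc fields hypothetical):
 *
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
 *	    &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE);
 *	bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    NULL, foo_intr, sc, &sc->irq_cookie);
 *	bus_bind_intr(dev, sc->irq_res, cpu);	// dispatched here
 *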
*/ static int bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); isrc = NULL; /* Fetch the isrc corresponding to the child IRQ resource */ BHNDB_LOCK(sc); STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) { if (ih->ih_res == irq) { isrc = ih->ih_isrc; break; } } BHNDB_UNLOCK(sc); if (isrc == NULL) { panic("%s requested bind of invalid irq %#jx-%#jx", device_get_nameunit(child), rman_get_start(irq), rman_get_end(irq)); } /* Perform actual bind via the host isrc */ return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu)); } /** * Default bhndb(4) implementation of BUS_DESCRIBE_INTR(). */ static int bhndb_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; sc = device_get_softc(dev); /* Locate the interrupt handler entry; the caller owns the handler * reference, and thus our entry is guaranteed to remain valid after * we drop our lock below. */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookie); if (ih == NULL) { panic("%s requested invalid cookiep %p", device_get_nameunit(child), cookie); } isrc = ih->ih_isrc; BHNDB_UNLOCK(sc); /* Perform the actual request via the host isrc */ return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner), isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr)); } /** * Default bhndb(4) implementation of BUS_CONFIG_INTR(). */ static int bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BUS_REMAP_INTR(). */ static int bhndb_remap_intr(device_t dev, device_t child, u_int irq) { /* Unsupported */ return (ENXIO); } /** * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION(). */ static inline int bhndb_get_dma_translation(device_t dev, device_t child, u_int width, uint32_t flags, bus_dma_tag_t *dmat, struct bhnd_dma_translation *translation) { struct bhndb_softc *sc; const struct bhndb_hwcfg *hwcfg; const struct bhnd_dma_translation *match; bus_dma_tag_t match_dmat; bhnd_addr_t addr_mask, match_addr_mask; sc = device_get_softc(dev); hwcfg = sc->bus_res->cfg; /* Is DMA supported? */ if (sc->bus_res->res->dma_tags == NULL) return (ENODEV); /* Is the requested width supported? */ if (width > BHND_DMA_ADDR_32BIT) { /* Backplane must support 64-bit addressing */ if (!(sc->chipid.chip_caps & BHND_CAP_BP64)) width = BHND_DMA_ADDR_32BIT; } /* Find the best matching descriptor for the requested width */ addr_mask = BHND_DMA_ADDR_BITMASK(width); match = NULL; match_addr_mask = 0x0; match_dmat = NULL; for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) { const struct bhnd_dma_translation *dwin; bhnd_addr_t masked; dwin = &hwcfg->dma_translations[i]; /* The base address must be device addressable */ if ((dwin->base_addr & addr_mask) != dwin->base_addr) continue; /* The flags must match */ if ((dwin->flags & flags) != flags) continue; /* The window must cover at least part of our addressable * range */ masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask; if (masked == 0) continue; /* Is this a better match?
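 *
 * "Better" means the translation covers more of the device-addressable
 * range. An illustrative example (assuming BHND_DMA_ADDR_BITMASK(width)
 * yields a (2^width - 1) bitmask) for width = BHND_DMA_ADDR_32BIT:
 *
 *	// addr_mask = 0xFFFFFFFF
 *	// A: addr_mask 0x3FFFFFFF, addrext_mask 0xC0000000
 *	//    -> masked = 0xFFFFFFFF (full 4 GiB coverage)
 *	// B: addr_mask 0x3FFFFFFF, addrext_mask 0x0
 *	//    -> masked = 0x3FFFFFFF (1 GiB window)
 *	// A wins, since masked_A > masked_B
 *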
*/ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), 
DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index bdba61a2b942..60cb04400cb0 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1392 +1,1391 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include /* * Broadcom ChipCommon driver. * * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? 
*/ BHND_CHIP_QUIRK (4706, HWREV_ANY, CHIPC_QUIRK_4706_NFLASH), /* 4331 quirks */ BHND_CHIP_QUIRK (4331, HWREV_ANY, CHIPC_QUIRK_4331_EXTPA_MUX_SPROM), BHND_PKG_QUIRK (4331, TN, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TNA0, CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM), BHND_PKG_QUIRK (4331, TT, CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM), /* 4360 quirks */ BHND_CHIP_QUIRK (4352, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43460, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43462, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_CHIP_QUIRK (43602, HWREV_LTE(2), CHIPC_QUIRK_4360_FEM_MUX_SPROM), BHND_DEVICE_QUIRK_END }; static int chipc_add_children(struct chipc_softc *sc); static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps); static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps); static bool chipc_should_enable_muxed_sprom( struct chipc_softc *sc); static int chipc_enable_otp_power(struct chipc_softc *sc); static void chipc_disable_otp_power(struct chipc_softc *sc); static int chipc_enable_sprom_pins(struct chipc_softc *sc); static void chipc_disable_sprom_pins(struct chipc_softc *sc); static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct); static int chipc_init_rman(struct chipc_softc *sc); static void chipc_free_rman(struct chipc_softc *sc); static struct rman *chipc_get_rman(device_t dev, int type, u_int flags); /* quirk and capability flag convenience macros */ #define CHIPC_QUIRK(_sc, _name) \ ((_sc)->quirks & CHIPC_QUIRK_ ## _name) #define CHIPC_CAP(_sc, _name) \ ((_sc)->caps._name) #define CHIPC_ASSERT_QUIRK(_sc, name) \ KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(name) " not set")) #define CHIPC_ASSERT_CAP(_sc, name) \ KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(name) " not set")) static int chipc_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int chipc_attach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->quirks = bhnd_device_quirks(dev, chipc_devices, sizeof(chipc_devices[0])); sc->sprom_refcnt = 0; CHIPC_LOCK_INIT(sc); STAILQ_INIT(&sc->mem_regions); /* Set up resource management */ if ((error = chipc_init_rman(sc))) { device_printf(sc->dev, "failed to initialize chipc resource state: %d\n", error); goto failed; } /* Allocate the region containing the chipc register block */ if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) { error = ENXIO; goto failed; } error = chipc_retain_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); if (error) { sc->core_region = NULL; goto failed; } /* Save a direct reference to our chipc registers */ sc->core = sc->core_region->cr_res; /* Fetch and parse capability register(s) */ if ((error = chipc_read_caps(sc, &sc->caps))) goto failed; if (bootverbose) chipc_print_caps(sc->dev, &sc->caps); /* Attach all supported child devices */ if ((error = chipc_add_children(sc))) goto failed; /* * Register ourselves with the bus; we're fully initialized and can * respond to ChipCommon API requests. * * Since our children may need access to ChipCommon, this must be done * before attaching our children below (via bus_generic_attach).
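 *
 * For example (an illustrative consumer sketch, not part of this
 * driver), once registered, a child may retain the provider during
 * its own attach:
 *
 *	device_t cc;
 *
 *	cc = bhnd_retain_provider(dev, BHND_SERVICE_CHIPC);
 *	if (cc == NULL)
 *		return (ENXIO);
 *	// ... query capabilities, enable SPROM access, etc. ...
 *	bhnd_release_provider(dev, cc, BHND_SERVICE_CHIPC);
 *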
*/ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc. */ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, 
child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is always used in preference to internal OTP if found. */ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, 
CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. */ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return 
(ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The chipc device state. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) * @param flags Resource flags (e.g. RF_PREFETCHABLE) */ static struct rman * chipc_get_rman(device_t dev, int type, u_int flags) { struct chipc_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); }; } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(dev, type, flags); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. 
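 *
 * This is the path taken for "default" allocations, where the child
 * passes no explicit range and start/end/count are populated from the
 * resource list entry installed via chipc_set_mem_resource() when the
 * child was added. A typical child-side request (sketch):
 *
 *	int rid = 0;
 *	struct resource *res;
 *
 *	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *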
*/ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int -chipc_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +chipc_release_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ - rm = chipc_get_rman(dev, type, rman_get_flags(r)); + rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { - return (bus_generic_rl_release_resource(dev, child, type, rid, - r)); + return (bus_generic_rl_release_resource(dev, child, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ - error = bus_generic_rman_release_resource(dev, child, type, rid, r); + error = bus_generic_rman_release_resource(dev, child, r); if (error != 0) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ - rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); + rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), + rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? 
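 *
 * As in chipc_release_resource() above, the type and flags are
 * recovered from the resource itself rather than passed by the
 * caller:
 *
 *	int type = rman_get_type(r);		// e.g. SYS_RES_MEMORY
 *	u_int flags = rman_get_flags(r);	// e.g. RF_ACTIVE
 *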
*/ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param dev Driver instance state. * @param child Requesting child device. * @param r Resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(device_t dev, device_t child, struct resource *r, bool req_direct) { struct chipc_softc *sc = device_get_softc(dev); struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. */ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct rman *rm; int error; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, type, rman_get_flags(r->res)); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(dev, child, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, struct resource *r) { struct rman *rm; /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(dev, child, r, true)); } /** * Default chipc(4) implementation of BUS_DEACTIVATE_RESOURCE().
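 *
 * Deactivation drops the RF_ACTIVE region reference taken in
 * chipc_try_activate_resource(); conceptually the pairing is (sketch):
 *
 *	chipc_retain_region(sc, cr, RF_ACTIVE);		// activate
 *	// ... resource in use ...
 *	chipc_release_region(sc, cr, RF_ACTIVE);	// deactivate
 *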
*/ static int chipc_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); bus_topo_lock(); parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { bus_topo_unlock(); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); bus_topo_unlock(); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? */ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
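 *
 * (On devices without muxed SPROM pins this is a no-op.) Note that
 * these pin-mux helpers are only reached through the refcounted entry
 * points above; the expected consumer pairing is (sketch, hypothetical
 * caller):
 *
 *	if ((error = BHND_CHIPC_ENABLE_SPROM(cc)) != 0)
 *		return (error);
 *	// ... access SPROM/OTP ...
 *	BHND_CHIPC_DISABLE_SPROM(cc);
 *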
*/ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (0); /* Check whether bus is busy */ if (!chipc_should_enable_muxed_sprom(sc)) return (EBUSY); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return (0); } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } /* Refuse to proceed on unsupported devices with muxed SPROM pins */ device_printf(sc->dev, "muxed sprom lines on unrecognized device\n"); return (ENXIO); } /** * If required by this device, revert any GPIO/pin configuration applied * to allow SPROM access. * * @param sc chipc driver state. */ static void chipc_disable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use")); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); /* 4331 devices */ if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) { cctrl |= CHIPC_CCTRL4331_EXTPA_EN; if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5; if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM)) cctrl |= CHIPC_CCTRL4331_EXTPA_EN2; bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); return; } /* 4360 devices */ if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) { /* Unimplemented */ } } static uint32_t chipc_read_chipst(device_t dev) { struct chipc_softc *sc = device_get_softc(dev); return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST)); } static void chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask) { struct chipc_softc *sc; uint32_t cctrl; sc = device_get_softc(dev); CHIPC_LOCK(sc); cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL); cctrl = (cctrl & ~mask) | (value & mask); bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl); CHIPC_UNLOCK(sc); } static struct chipc_caps * chipc_get_caps(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); return (&sc->caps); } static device_method_t chipc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, chipc_probe), DEVMETHOD(device_attach, chipc_attach), DEVMETHOD(device_detach, chipc_detach), DEVMETHOD(device_suspend, chipc_suspend), DEVMETHOD(device_resume, chipc_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, chipc_probe_nomatch), DEVMETHOD(bus_print_child, chipc_print_child), DEVMETHOD(bus_add_child, chipc_add_child), DEVMETHOD(bus_child_deleted, chipc_child_deleted), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, chipc_alloc_resource), DEVMETHOD(bus_release_resource, chipc_release_resource), DEVMETHOD(bus_adjust_resource, chipc_adjust_resource), DEVMETHOD(bus_activate_resource, chipc_activate_resource), DEVMETHOD(bus_deactivate_resource, chipc_deactivate_resource), DEVMETHOD(bus_get_resource_list, chipc_get_resource_list), DEVMETHOD(bus_get_rman, chipc_get_rman), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), /* BHND bus interface */
DEVMETHOD(bhnd_bus_activate_resource, chipc_activate_bhnd_resource), /* ChipCommon interface */ DEVMETHOD(bhnd_chipc_read_chipst, chipc_read_chipst), DEVMETHOD(bhnd_chipc_write_chipctrl, chipc_write_chipctrl), DEVMETHOD(bhnd_chipc_enable_sprom, chipc_enable_sprom), DEVMETHOD(bhnd_chipc_disable_sprom, chipc_disable_sprom), DEVMETHOD(bhnd_chipc_get_caps, chipc_get_caps), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods, sizeof(struct chipc_softc)); EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1); MODULE_VERSION(bhnd_chipc, 1); diff --git a/sys/dev/bhnd/cores/usb/bhnd_usb.c b/sys/dev/bhnd/cores/usb/bhnd_usb.c index fa9e6d7ec31a..7a86db79731f 100644 --- a/sys/dev/bhnd/cores/usb/bhnd_usb.c +++ b/sys/dev/bhnd/cores/usb/bhnd_usb.c @@ -1,551 +1,549 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2010, Aleksandr Rybalko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * Ported version of BroadCom USB core driver from ZRouter project */ #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_usbvar.h" /****************************** Variables ************************************/ static const struct bhnd_device bhnd_usb_devs[] = { BHND_DEVICE(BCM, USB20H, "USB2.0 Host core", NULL), BHND_DEVICE_END }; /****************************** Prototypes ***********************************/ static int bhnd_usb_attach(device_t); static int bhnd_usb_probe(device_t); static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit); static int bhnd_usb_print_all_resources(device_t dev); static int bhnd_usb_print_child(device_t bus, device_t child); static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int bhnd_usb_release_resource(device_t dev, - device_t child, int type, int rid, - struct resource *r); + device_t child, struct resource *r); static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child); static int bhnd_usb_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, bhnd_usb_devs, sizeof(bhnd_usb_devs[0])); if (id == NULL) return (ENXIO); device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } static int bhnd_usb_attach(device_t dev) { struct bhnd_usb_softc *sc; int rid; uint32_t tmp; int tries, err; sc = device_get_softc(dev); bhnd_reset_hw(dev, 0, 0); /* * Allocate the resources which the parent bus has already * determined for us. * XXX: There are few windows (usually 2), RID should be chip-specific */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { BHND_ERROR_DEV(dev, "unable to allocate memory"); return (ENXIO); } sc->sc_bt = rman_get_bustag(sc->sc_mem); sc->sc_bh = rman_get_bushandle(sc->sc_mem); sc->sc_maddr = rman_get_start(sc->sc_mem); sc->sc_msize = rman_get_size(sc->sc_mem); rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { BHND_ERROR_DEV(dev, "unable to allocate IRQ"); return (ENXIO); } sc->sc_irqn = rman_get_start(sc->sc_irq); sc->mem_rman.rm_start = sc->sc_maddr; sc->mem_rman.rm_end = sc->sc_maddr + sc->sc_msize - 1; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "BHND USB core I/O memory addresses"; if (rman_init(&sc->mem_rman) != 0 || rman_manage_region(&sc->mem_rman, sc->mem_rman.rm_start, sc->mem_rman.rm_end) != 0) { panic("%s: sc->mem_rman", __func__); } /* TODO: macros for registers */ bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(100); #define OHCI_CONTROL 0x04 bus_write_4(sc->sc_mem, OHCI_CONTROL, 0); if ( bhnd_get_device(dev) == BHND_COREID_USB20H) { uint32_t rev = bhnd_get_hwrev(dev); BHND_INFO_DEV(dev, "USB HOST 2.0 setup for rev %d", rev); if (rev == 1/* ? 
== 2 */) { /* SiBa code */ /* Change Flush control reg */ tmp = bus_read_4(sc->sc_mem, 0x400) & ~0x8; bus_write_4(sc->sc_mem, 0x400, tmp); tmp = bus_read_4(sc->sc_mem, 0x400); BHND_DEBUG_DEV(dev, "USB20H fcr: 0x%x", tmp); /* Change Shim control reg */ tmp = bus_read_4(sc->sc_mem, 0x304) & ~0x100; bus_write_4(sc->sc_mem, 0x304, tmp); tmp = bus_read_4(sc->sc_mem, 0x304); BHND_DEBUG_DEV(dev, "USB20H shim: 0x%x", tmp); } else if (rev >= 5) { /* BCMA code */ err = bhnd_alloc_pmu(dev); if (err) { BHND_ERROR_DEV(dev, "can't alloc pmu: %d", err); return (err); } err = bhnd_request_ext_rsrc(dev, 1); if (err) { BHND_ERROR_DEV(dev, "can't req ext: %d", err); return (err); } /* Take out of resets */ bus_write_4(sc->sc_mem, 0x200, 0x4ff); DELAY(25); bus_write_4(sc->sc_mem, 0x200, 0x6ff); DELAY(25); /* Make sure digital and AFE are locked in USB PHY */ bus_write_4(sc->sc_mem, 0x524, 0x6b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0xab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x2b); DELAY(50); bus_read_4(sc->sc_mem, 0x524); DELAY(50); bus_write_4(sc->sc_mem, 0x524, 0x10ab); DELAY(50); bus_read_4(sc->sc_mem, 0x524); tries = 10000; for (;;) { DELAY(10); tmp = bus_read_4(sc->sc_mem, 0x528); if (tmp & 0xc000) break; if (--tries != 0) continue; tmp = bus_read_4(sc->sc_mem, 0x528); BHND_ERROR_DEV(dev, "USB20H mdio_rddata 0x%08x", tmp); } /* XXX: Puzzle code */ bus_write_4(sc->sc_mem, 0x528, 0x80000000); bus_read_4(sc->sc_mem, 0x314); DELAY(265); bus_write_4(sc->sc_mem, 0x200, 0x7ff); DELAY(10); /* Take USB and HSIC out of non-driving modes */ bus_write_4(sc->sc_mem, 0x510, 0); } } bus_generic_probe(dev); if (bhnd_get_device(dev) == BHND_COREID_USB20H && (bhnd_get_hwrev(dev) > 0)) bhnd_usb_add_child(dev, 0, "ehci", -1); bhnd_usb_add_child(dev, 1, "ohci", -1); bus_generic_attach(dev); return (0); } static struct rman * bhnd_usb_get_rman(device_t bus, int type, u_int flags) { struct bhnd_usb_softc *sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); default: return (NULL); } } static struct resource * bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int passthrough, isdefault; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); rle = NULL; if (!passthrough && isdefault) { BHND_INFO_DEV(bus, "trying to allocate def %d - %d for %s", type, *rid, device_get_nameunit(child)); rl = BUS_GET_RESOURCE_LIST(bus, child); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; end = rle->end; count = rle->count; } else { BHND_INFO_DEV(bus, "trying to allocate %d - %d (%jx-%jx) for %s", type, *rid, start, end, device_get_nameunit(child)); } /* * If the request is for a resource which we manage, * attempt to satisfy the allocation ourselves. */ if (type == SYS_RES_MEMORY) { rv = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); if (rv == NULL) { BHND_ERROR_DEV(bus, "could not allocate resource"); return (NULL); } return (rv); } /* * Pass the request to the parent.
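 *
 * Note: only SYS_RES_MEMORY is carved out of the local mem_rman set up in
 * attach; every other type (e.g. SYS_RES_IRQ) falls through to the parent
 * bhnd(4) bus by way of the child's resource list.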
*/ return (bus_generic_rl_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static struct resource_list * bhnd_usb_get_reslist(device_t dev, device_t child) { struct bhnd_usb_devinfo *sdi; sdi = device_get_ivars(child); return (&sdi->sdi_rl); } static int -bhnd_usb_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bhnd_usb_release_resource(device_t dev, device_t child, + struct resource *r) { struct bhnd_usb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ - if (type != SYS_RES_MEMORY) { - return (bus_generic_rl_release_resource(dev, child, type, rid, - r)); - } + if (rman_get_type(r) != SYS_RES_MEMORY) { + return (bus_generic_rl_release_resource(dev, child, r)); + } - error = bus_generic_rman_release_resource(dev, child, type, rid, r); + error = bus_generic_rman_release_resource(dev, child, r); if (error != 0) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), - type, rid); + rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; } return (0); } static int bhnd_usb_activate_resource(device_t dev, device_t child, struct resource *r) { if (rman_get_type(r) != SYS_RES_MEMORY) return (bus_generic_activate_resource(dev, child, r)); return (bus_generic_rman_activate_resource(dev, child, r)); } static int bhnd_usb_deactivate_resource(device_t dev, device_t child, struct resource *r) { if (rman_get_type(r) != SYS_RES_MEMORY) return (bus_generic_deactivate_resource(dev, child, r)); return (bus_generic_rman_deactivate_resource(dev, child, r)); } static int bhnd_usb_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct bhnd_usb_softc *sc = device_get_softc(dev); struct resource_map_request args; rman_res_t length, start; int error; if (type != SYS_RES_MEMORY) return (bus_generic_map_resource(dev, child, type, r, argsp, map)); /* Resources must be active to be mapped.
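 * (The RF_ACTIVE test below enforces this.)
 *
 * A sketch of the conversion pattern applied to each release method in
 * this series, for a hypothetical foo(4) driver; illustrative only and
 * compiled out:
 */
#if 0
static int
foo_release_resource(device_t dev, device_t child, struct resource *r)
{
	/* The type and rid formerly passed in now travel with the resource. */
	if (rman_get_type(r) != SYS_RES_MEMORY)
		return (bus_generic_rl_release_resource(dev, child, r));
	return (bus_generic_rman_release_resource(dev, child, r));
}
#endif
/* (sketch ends; mapping requires an active resource)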
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); args.offset = start - rman_get_start(sc->sc_mem); args.length = length; return (bus_generic_map_resource(dev, child, type, sc->sc_mem, &args, map)); } static int bhnd_usb_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { struct bhnd_usb_softc *sc = device_get_softc(dev); if (type == SYS_RES_MEMORY) r = sc->sc_mem; return (bus_generic_unmap_resource(dev, child, type, r, map)); } static int bhnd_usb_print_all_resources(device_t dev) { struct bhnd_usb_devinfo *sdi; struct resource_list *rl; int retval; retval = 0; sdi = device_get_ivars(dev); rl = &sdi->sdi_rl; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return (retval); } static int bhnd_usb_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += bhnd_usb_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhnd_usb_softc *sc; struct bhnd_usb_devinfo *sdi; device_t child; int error; sc = device_get_softc(dev); sdi = malloc(sizeof(struct bhnd_usb_devinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (sdi == NULL) return (NULL); resource_list_init(&sdi->sdi_rl); sdi->sdi_irq_mapped = false; if (strncmp(name, "ohci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x200; } else if (strncmp(name, "ehci", 4) == 0) { sdi->sdi_maddr = sc->sc_maddr + 0x000; sdi->sdi_msize = 0x1000; } else { panic("Unknown subdevice"); } /* Map the child's IRQ */ if ((error = bhnd_map_intr(dev, 0, &sdi->sdi_irq))) { BHND_ERROR_DEV(dev, "could not map %s interrupt: %d", name, error); goto failed; } sdi->sdi_irq_mapped = true; BHND_INFO_DEV(dev, "%s: irq=%ju maddr=0x%jx", name, sdi->sdi_irq, sdi->sdi_maddr); /* * Add memory window and irq to child's resource list. 
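 *
 * resource_list_add() takes (list, type, rid, start, end, count); the rid
 * of 0 used below must match the rid the ohci/ehci children later pass to
 * bus_alloc_resource_any().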
*/ resource_list_add(&sdi->sdi_rl, SYS_RES_MEMORY, 0, sdi->sdi_maddr, sdi->sdi_maddr + sdi->sdi_msize - 1, sdi->sdi_msize); resource_list_add(&sdi->sdi_rl, SYS_RES_IRQ, 0, sdi->sdi_irq, sdi->sdi_irq, 1); child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) { BHND_ERROR_DEV(dev, "could not add %s", name); goto failed; } device_set_ivars(child, sdi); return (child); failed: if (sdi->sdi_irq_mapped) bhnd_unmap_intr(dev, sdi->sdi_irq); resource_list_free(&sdi->sdi_rl); free(sdi, M_DEVBUF); return (NULL); } static void bhnd_usb_child_deleted(device_t dev, device_t child) { struct bhnd_usb_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == NULL) return; if (dinfo->sdi_irq_mapped) bhnd_unmap_intr(dev, dinfo->sdi_irq); resource_list_free(&dinfo->sdi_rl); free(dinfo, M_DEVBUF); } static device_method_t bhnd_usb_methods[] = { /* Device interface */ DEVMETHOD(device_attach, bhnd_usb_attach), DEVMETHOD(device_probe, bhnd_usb_probe), /* Bus interface */ DEVMETHOD(bus_add_child, bhnd_usb_add_child), DEVMETHOD(bus_child_deleted, bhnd_usb_child_deleted), DEVMETHOD(bus_alloc_resource, bhnd_usb_alloc_resource), DEVMETHOD(bus_get_resource_list, bhnd_usb_get_reslist), DEVMETHOD(bus_get_rman, bhnd_usb_get_rman), DEVMETHOD(bus_print_child, bhnd_usb_print_child), DEVMETHOD(bus_release_resource, bhnd_usb_release_resource), DEVMETHOD(bus_activate_resource, bhnd_usb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhnd_usb_deactivate_resource), DEVMETHOD(bus_map_resource, bhnd_usb_map_resource), DEVMETHOD(bus_unmap_resource, bhnd_usb_unmap_resource), /* Bus interface: generic part */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; DEFINE_CLASS_0(bhnd_usb, bhnd_usb_driver, bhnd_usb_methods, sizeof(struct bhnd_usb_softc)); DRIVER_MODULE(bhnd_usb, bhnd, bhnd_usb_driver, 0, 0); MODULE_VERSION(bhnd_usb, 1); diff --git a/sys/dev/dpaa/fman.c b/sys/dev/dpaa/fman.c index 2364df0be801..9dc4ac151789 100644 --- a/sys/dev/dpaa/fman.c +++ b/sys/dev/dpaa/fman.c @@ -1,554 +1,554 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #include #include #include #include #include "fman.h" static MALLOC_DEFINE(M_FMAN, "fman", "fman devices information"); /** * @group FMan private defines. * @{ */ enum fman_irq_enum { FMAN_IRQ_NUM = 0, FMAN_ERR_IRQ_NUM = 1 }; enum fman_mu_ram_map { FMAN_MURAM_OFF = 0x0, FMAN_MURAM_SIZE = 0x28000 }; struct fman_config { device_t fman_device; uintptr_t mem_base_addr; uintptr_t irq_num; uintptr_t err_irq_num; uint8_t fm_id; t_FmExceptionsCallback *exception_callback; t_FmBusErrorCallback *bus_error_callback; }; /** * @group FMan private methods/members. * @{ */ /** * Frame Manager firmware. * We use the same firmware for both P3041 and P2041 devices. */ const uint32_t fman_firmware[] = FMAN_UC_IMG; const uint32_t fman_firmware_size = sizeof(fman_firmware); int fman_activate_resource(device_t bus, device_t child, struct resource *res) { struct fman_softc *sc; bus_space_tag_t bt; bus_space_handle_t bh; int i, rv; sc = device_get_softc(bus); if (rman_get_type(res) != SYS_RES_IRQ) { for (i = 0; i < sc->sc_base.nranges; i++) { if (rman_is_region_manager(res, &sc->rman) != 0) { bt = rman_get_bustag(sc->mem_res); rv = bus_space_subregion(bt, rman_get_bushandle(sc->mem_res), rman_get_start(res) - rman_get_start(sc->mem_res), rman_get_size(res), &bh); if (rv != 0) return (rv); rman_set_bustag(res, bt); rman_set_bushandle(res, bh); return (rman_activate_resource(res)); } } return (EINVAL); } return (bus_generic_activate_resource(bus, child, res)); } int -fman_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +fman_release_resource(device_t bus, device_t child, struct resource *res) { struct resource_list *rl; struct resource_list_entry *rle; int passthrough, rv; passthrough = (device_get_parent(child) != bus); rl = BUS_GET_RESOURCE_LIST(bus, child); - if (type != SYS_RES_IRQ) { + if (rman_get_type(res) != SYS_RES_IRQ) { if ((rman_get_flags(res) & RF_ACTIVE) != 0 ){ - rv = bus_deactivate_resource(child, type, rid, res); + rv = bus_deactivate_resource(child, res); if (rv != 0) return (rv); } rv = rman_release_resource(res); if (rv != 0) return (rv); if (!passthrough) { - rle = resource_list_find(rl, type, rid); + rle = resource_list_find(rl, rman_get_type(res), + rman_get_rid(res)); KASSERT(rle != NULL, ("%s: resource entry not found!", __func__)); KASSERT(rle->res != NULL, ("%s: resource entry is not busy", __func__)); rle->res = NULL; } return (0); } - return (resource_list_release(rl, bus, child, type, rid, res)); + return (resource_list_release(rl, bus, child, res)); } struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fman_softc *sc; struct resource_list *rl; struct resource_list_entry *rle = NULL; struct resource *res; int i, isdefault, passthrough; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); sc = device_get_softc(bus); rl = BUS_GET_RESOURCE_LIST(bus, child); switch (type) { case SYS_RES_MEMORY: KASSERT(!(isdefault && passthrough), ("%s: passthrough of default allocation", __func__)); if (!passthrough) { rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); KASSERT(rle->res == NULL, ("%s: resource entry is busy", __func__)); if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } } res = NULL; /* 
Map fman ranges to nexus ranges. */ for (i = 0; i < sc->sc_base.nranges; i++) { if (start >= sc->sc_base.ranges[i].bus && end < sc->sc_base.ranges[i].bus + sc->sc_base.ranges[i].size) { start += rman_get_start(sc->mem_res); end += rman_get_start(sc->mem_res); res = rman_reserve_resource(&sc->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_type(res, type); if ((flags & RF_ACTIVE) != 0 && bus_activate_resource( child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } break; } } if (!passthrough) rle->res = res; return (res); case SYS_RES_IRQ: return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } return (NULL); } static int fman_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } static t_Handle fman_init(struct fman_softc *sc, struct fman_config *cfg) { phandle_t node; t_FmParams fm_params; t_Handle muram_handle, fm_handle; t_Error error; t_FmRevisionInfo revision_info; uint16_t clock; uint32_t tmp, mod; /* MURAM configuration */ muram_handle = FM_MURAM_ConfigAndInit(cfg->mem_base_addr + FMAN_MURAM_OFF, FMAN_MURAM_SIZE); if (muram_handle == NULL) { device_printf(cfg->fman_device, "couldn't init FM MURAM module" "\n"); return (NULL); } sc->muram_handle = muram_handle; /* Fill in FM configuration */ fm_params.fmId = cfg->fm_id; /* XXX we support only one partition thus each fman has master id */ fm_params.guestId = NCSW_MASTER_ID; fm_params.baseAddr = cfg->mem_base_addr; fm_params.h_FmMuram = muram_handle; /* Get FMan clock in Hz */ if ((tmp = fman_get_clock(sc)) == 0) return (NULL); /* Convert FMan clock to MHz */ clock = (uint16_t)(tmp / 1000000); mod = tmp % 1000000; if (mod >= 500000) ++clock; fm_params.fmClkFreq = clock; fm_params.f_Exception = cfg->exception_callback; fm_params.f_BusError = cfg->bus_error_callback; fm_params.h_App = cfg->fman_device; fm_params.irq = cfg->irq_num; fm_params.errIrq = cfg->err_irq_num; fm_params.firmware.size = fman_firmware_size; fm_params.firmware.p_Code = (uint32_t*)fman_firmware; fm_handle = FM_Config(&fm_params); if (fm_handle == NULL) { device_printf(cfg->fman_device, "couldn't configure FM " "module\n"); goto err; } FM_ConfigResetOnInit(fm_handle, TRUE); error = FM_Init(fm_handle); if (error != E_OK) { device_printf(cfg->fman_device, "couldn't init FM module\n"); goto err2; } error = FM_GetRevision(fm_handle, &revision_info); if 
(error != E_OK) { device_printf(cfg->fman_device, "couldn't get FM revision\n"); goto err2; } device_printf(cfg->fman_device, "Hardware version: %d.%d.\n", revision_info.majorRev, revision_info.minorRev); /* Initialize the simplebus part of things */ simplebus_init(sc->sc_base.dev, 0); node = ofw_bus_get_node(sc->sc_base.dev); fman_fill_ranges(node, &sc->sc_base); sc->rman.rm_type = RMAN_ARRAY; sc->rman.rm_descr = "FMan range"; rman_init_from_resource(&sc->rman, sc->mem_res); for (node = OF_child(node); node > 0; node = OF_peer(node)) { simplebus_add_device(sc->sc_base.dev, node, 0, NULL, -1, NULL); } return (fm_handle); err2: FM_Free(fm_handle); err: FM_MURAM_Free(muram_handle); return (NULL); } static void fman_exception_callback(t_Handle app_handle, e_FmExceptions exception) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan exception occurred.\n"); } static void fman_error_callback(t_Handle app_handle, e_FmPortType port_type, uint8_t port_id, uint64_t addr, uint8_t tnum, uint16_t liodn) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan error occurred.\n"); } /** @} */ /** * @group FMan driver interface. * @{ */ int fman_get_handle(device_t dev, t_Handle *fmh) { struct fman_softc *sc = device_get_softc(dev); *fmh = sc->fm_handle; return (0); } int fman_get_muram_handle(device_t dev, t_Handle *muramh) { struct fman_softc *sc = device_get_softc(dev); *muramh = sc->muram_handle; return (0); } int fman_get_bushandle(device_t dev, vm_offset_t *fm_base) { struct fman_softc *sc = device_get_softc(dev); *fm_base = rman_get_bushandle(sc->mem_res); return (0); } int fman_attach(device_t dev) { struct fman_softc *sc; struct fman_config cfg; pcell_t qchan_range[2]; phandle_t node; sc = device_get_softc(dev); sc->sc_base.dev = dev; /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) { device_printf(dev, "could not initialize smart allocator.\n"); return (ENXIO); } node = ofw_bus_get_node(dev); if (OF_getencprop(node, "fsl,qman-channel-range", qchan_range, sizeof(qchan_range)) <= 0) { device_printf(dev, "Missing QMan channel range property!\n"); return (ENXIO); } sc->qman_chan_base = qchan_range[0]; sc->qman_chan_count = qchan_range[1]; sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->mem_res) { device_printf(dev, "could not allocate memory.\n"); return (ENXIO); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "could not allocate interrupt.\n"); goto err; } sc->err_irq_rid = 1; sc->err_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->err_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->err_irq_res) { device_printf(dev, "could not allocate error interrupt.\n"); goto err; } /* Set FMan configuration */ cfg.fman_device = dev; cfg.fm_id = device_get_unit(dev); cfg.mem_base_addr = rman_get_bushandle(sc->mem_res); cfg.irq_num = (uintptr_t)sc->irq_res; cfg.err_irq_num = (uintptr_t)sc->err_irq_res; cfg.exception_callback = fman_exception_callback; cfg.bus_error_callback = fman_error_callback; sc->fm_handle = fman_init(sc, &cfg); if (sc->fm_handle == NULL) { device_printf(dev, "could not be configured\n"); goto err; } return (bus_generic_attach(dev)); err: fman_detach(dev); return (ENXIO); } int fman_detach(device_t dev) { struct fman_softc *sc; sc = device_get_softc(dev); if (sc->muram_handle) { FM_MURAM_Free(sc->muram_handle); } if (sc->fm_handle) { 
FM_Free(sc->fm_handle); } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if (sc->err_irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->err_irq_rid, sc->err_irq_res); } return (0); } int fman_suspend(device_t dev) { return (0); } int fman_resume_dev(device_t dev) { return (0); } int fman_shutdown(device_t dev) { return (0); } int fman_qman_channel_id(device_t dev, int port) { struct fman_softc *sc; int qman_port_id[] = {0x31, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}; int i; sc = device_get_softc(dev); for (i = 0; i < sc->qman_chan_count; i++) { if (qman_port_id[i] == port) return (sc->qman_chan_base + i); } return (0); } /** @} */ diff --git a/sys/dev/dpaa/fman.h b/sys/dev/dpaa/fman.h index b201b9fd9355..4c30a633ae3e 100644 --- a/sys/dev/dpaa/fman.h +++ b/sys/dev/dpaa/fman.h @@ -1,77 +1,76 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef FMAN_H_ #define FMAN_H_ #include /** * FMan driver instance data. */ struct fman_softc { struct simplebus_softc sc_base; struct resource *mem_res; struct resource *irq_res; struct resource *err_irq_res; struct rman rman; int mem_rid; int irq_rid; int err_irq_rid; int qman_chan_base; int qman_chan_count; t_Handle fm_handle; t_Handle muram_handle; }; /** * @group QMan bus interface.
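 *
 * These prototypes follow the updated kobj signatures: release takes only
 * the resource, with the old type and rid arguments recovered through
 * rman_get_type() and rman_get_rid() where still needed.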
* @{ */ struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int fman_activate_resource(device_t bus, device_t child, struct resource *res); -int fman_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res); +int fman_release_resource(device_t bus, device_t child, struct resource *res); int fman_attach(device_t dev); int fman_detach(device_t dev); int fman_suspend(device_t dev); int fman_resume_dev(device_t dev); int fman_shutdown(device_t dev); int fman_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); int fman_qman_channel_id(device_t, int); /** @} */ uint32_t fman_get_clock(struct fman_softc *sc); int fman_get_handle(device_t dev, t_Handle *fmh); int fman_get_muram_handle(device_t dev, t_Handle *muramh); int fman_get_bushandle(device_t dev, vm_offset_t *fm_base); #endif /* FMAN_H_ */ diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c index 3e0920cbf8a5..66867a18068c 100644 --- a/sys/dev/dpaa2/dpaa2_mc.c +++ b/sys/dev/dpaa2/dpaa2_mc.c @@ -1,888 +1,886 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? 
*/ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. */ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. 
*/ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. */ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } bus_generic_probe(dev); bus_generic_attach(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; bus_generic_detach(dev); sc = device_get_softc(dev); if (sc->rcdev) device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type, flags); if (rm == NULL) return (bus_generic_alloc_resource(mcdev, child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. 
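 *
 * (DPAA2 objects are modeled as one-unit rman regions whose start and end
 * are both the device_t value itself, so "allocating" a DPIO or DPBP is a
 * reservation of that single address.)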
*/ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, end, count, flags); if (res == NULL) goto fail; return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_adjust_resource(mcdev, child, r, start, end)); return (bus_generic_adjust_resource(mcdev, child, r, start, end)); } int -dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, - struct resource *r) +dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; - rm = dpaa2_mc_rman(mcdev, type, rman_get_flags(r)); + rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) - return (bus_generic_rman_release_resource(mcdev, child, type, - rid, r)); - return (bus_generic_release_resource(mcdev, child, type, rid, r)); + return (bus_generic_rman_release_resource(mcdev, child, r)); + return (bus_generic_release_resource(mcdev, child, r)); } int dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_activate_resource(mcdev, child, r)); return (bus_generic_activate_resource(mcdev, child, r)); } int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_deactivate_resource(mcdev, child, r)); return (bus_generic_deactivate_resource(mcdev, child, r)); } /* * For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. 
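 *
 * A brief usage sketch (hypothetical consumer, error handling elided):
 *
 *	device_t dpio_dev;
 *	dpaa2_mc_manage_dev(mcdev, dev, DPAA2_MC_DEV_ALLOCATABLE);
 *	dpaa2_mc_get_free_dev(mcdev, &dpio_dev, DPAA2_DEV_IO);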
*/ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); if (!di) return (ENOMEM); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype, 0); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. */ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } 
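	/* error remains ENOENT if no matching shareable device was found. */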
mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. */ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, &msi_parent, NULL); if (error) return (0); return ((u_int) msi_parent); } #endif } return (0); } /** * @internal */ static u_int dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) { struct dpaa2_devinfo *dinfo; #ifdef DEV_ACPI u_int xref, devid; int error; #endif dinfo = device_get_ivars(child); if (dinfo) { /* * The first named components from IORT table with the given * name (as a substring) will be used. */ #ifdef DEV_ACPI error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error == 0) *id = devid; else #endif *id = dinfo->icid; /* RID not in IORT, likely FW bug */ return (0); } return (ENXIO); } /** * @internal * @brief Obtain a resource manager based on the given type of the resource. */ struct rman * dpaa2_mc_rman(device_t mcdev, int type, u_int flags) { struct dpaa2_mc_softc *sc; sc = device_get_softc(mcdev); switch (type) { case DPAA2_DEV_IO: return (&sc->dpio_rman); case DPAA2_DEV_BP: return (&sc->dpbp_rman); case DPAA2_DEV_CON: return (&sc->dpcon_rman); case DPAA2_DEV_MCP: return (&sc->dpmcp_rman); default: break; } return (NULL); } #if defined(INTRNG) && !defined(IOMMU) /** * @internal * @brief Allocates requested number of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int msi_irqs[DPAA2_MC_MSI_COUNT]; int error; /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ if (!sc->msi_allocated) { error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); if (error) { device_printf(mcdev, "failed to pre-allocate %d MSIs: " "error=%d\n", DPAA2_MC_MSI_COUNT, error); return (error); } mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { sc->msi[i].child = NULL; sc->msi[i].irq = msi_irqs[i]; } sc->msi_owner = child; sc->msi_allocated = true; mtx_unlock(&sc->msi_lock); } error = ENOENT; /* Find the first free MSIs from the pre-allocated pool. 
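 * The scan is first-fit and hands out a contiguous run: the first slot
 * with a NULL child starts the run, and E2BIG is returned if the run
 * would pass DPAA2_MC_MSI_COUNT.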
*/ mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != NULL) continue; error = 0; for (int j = 0; j < count; j++) { if (i + j >= DPAA2_MC_MSI_COUNT) { device_printf(mcdev, "requested %d MSIs exceed " "limit of %d available\n", count, DPAA2_MC_MSI_COUNT); error = E2BIG; break; } sc->msi[i + j].child = child; irqs[j] = sc->msi[i + j].irq; } break; } mtx_unlock(&sc->msi_lock); return (error); } /** * @internal * @brief Marks IRQs as free in the pre-allocated pool of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. * NOTE: MSIs are kept allocated in the kernel as a part of the pool. */ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/dpaa2/dpaa2_mc.h b/sys/dev/dpaa2/dpaa2_mc.h index 7af3b2a4eb24..5ddac7aa2720 100644 --- a/sys/dev/dpaa2/dpaa2_mc.h +++ b/sys/dev/dpaa2/dpaa2_mc.h @@ -1,219 +1,219 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MC_H #define _DPAA2_MC_H #include #include #include #include #include #include "pci_if.h" #include "dpaa2_types.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_ni.h" #include "dpaa2_io.h" #include "dpaa2_mac.h" #include "dpaa2_con.h" #include "dpaa2_bp.h" /* * Maximum number of MSIs supported by the MC for its children without IOMMU. * * TODO: Should be much more with IOMMU translation. */ #define DPAA2_MC_MSI_COUNT 32 /* Flags for DPAA2 devices as resources. */ #define DPAA2_MC_DEV_ALLOCATABLE 0x01u /* to be managed by DPAA2-specific rman */ #define DPAA2_MC_DEV_ASSOCIATED 0x02u /* to obtain info about DPAA2 device */ #define DPAA2_MC_DEV_SHAREABLE 0x04u /* to be shared among DPAA2 devices */ struct dpaa2_mc_devinfo; /* about managed DPAA2 devices */ /** * @brief Software context for the DPAA2 Management Complex (MC) driver. * * dev: Device associated with this software context. * rcdev: Child device associated with the root resource container. * acpi_based: Attached using ACPI (true) or FDT (false). * ofw_node: FDT node of the Management Complex (acpi_based == false). * * res: Unmapped MC command portal and control registers resources. * map: Mapped MC command portal and control registers resources. * * dpio_rman: I/O objects resource manager. * dpbp_rman: Buffer Pools resource manager. * dpcon_rman: Concentrators resource manager. * dpmcp_rman: MC portals resource manager. */ struct dpaa2_mc_softc { device_t dev; device_t rcdev; bool acpi_based; phandle_t ofw_node; struct resource *res[2]; struct resource_map map[2]; /* For allocatable managed DPAA2 objects. */ struct rman dpio_rman; struct rman dpbp_rman; struct rman dpcon_rman; struct rman dpmcp_rman; /* For managed DPAA2 objects. */ struct mtx mdev_lock; STAILQ_HEAD(, dpaa2_mc_devinfo) mdev_list; /* NOTE: Workaround in case of no IOMMU available. */ #ifndef IOMMU device_t msi_owner; bool msi_allocated; struct mtx msi_lock; struct { device_t child; int irq; } msi[DPAA2_MC_MSI_COUNT]; #endif }; /** * @brief Software context for the DPAA2 Resource Container (RC) driver. * * dev: Device associated with this software context. * portal: Helper object to send commands to the MC portal. * unit: Helps to distinguish between root (0) and child DRPCs. * cont_id: Container ID. */ struct dpaa2_rc_softc { device_t dev; int unit; uint32_t cont_id; }; /** * @brief Information about MSI messages supported by the DPAA2 object. * * msi_msgnum: Number of MSI messages supported by the DPAA2 object. * msi_alloc: Number of MSI messages allocated for the DPAA2 object. * msi_handlers: Number of MSI message handlers configured. */ struct dpaa2_msinfo { uint8_t msi_msgnum; uint8_t msi_alloc; uint32_t msi_handlers; }; /** * @brief Information about DPAA2 device. * * pdev: Parent device. * dev: Device this devinfo is associated with. * * id: ID of a logical DPAA2 object resource. * portal_id: ID of the MC portal which belongs to the object's container. * icid: Isolation context ID of the DPAA2 object. 
It is shared * between a resource container and all of its children. * * dtype: Type of the DPAA2 object. * resources: Resources available for this DPAA2 device. * msi: Information about MSI messages supported by the DPAA2 object. */ struct dpaa2_devinfo { device_t pdev; device_t dev; uint32_t id; uint32_t portal_id; uint32_t icid; enum dpaa2_dev_type dtype; struct resource_list resources; struct dpaa2_msinfo msi; /* * DPAA2 object might or might not have its own portal allocated to * execute MC commands. If the portal has been allocated, it takes * precedence over the portal owned by the resource container. */ struct dpaa2_mcp *portal; }; DECLARE_CLASS(dpaa2_mc_driver); /* For device interface. */ int dpaa2_mc_attach(device_t dev); int dpaa2_mc_detach(device_t dev); /* For bus interface. */ struct rman *dpaa2_mc_rman(device_t mcdev, int type, u_int flags); struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); -int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, - int rid, struct resource *r); +int dpaa2_mc_release_resource(device_t mcdev, device_t child, + struct resource *r); int dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r); int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r); /* For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs); int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs); int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data); int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id); /* For DPAA2 MC bus interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags); int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id); int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype); #endif /* _DPAA2_MC_H */ diff --git a/sys/dev/dpaa2/dpaa2_rc.c b/sys/dev/dpaa2/dpaa2_rc.c index d14187e0c58f..49ed8944b64b 100644 --- a/sys/dev/dpaa2/dpaa2_rc.c +++ b/sys/dev/dpaa2/dpaa2_rc.c @@ -1,3560 +1,3559 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Resource Container (DPRC) bus driver. * * DPRC holds all the resources and object information that a software context * (kernel, virtual machine, etc.) can access or use. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_ni.h" #include "dpaa2_mc_if.h" #include "dpaa2_cmd_if.h" /* Timeouts to wait for a command response from MC. */ #define CMD_SPIN_TIMEOUT 100u /* us */ #define CMD_SPIN_ATTEMPTS 2000u /* max. 200 ms */ #define TYPE_LEN_MAX 16u #define LABEL_LEN_MAX 16u MALLOC_DEFINE(M_DPAA2_RC, "dpaa2_rc", "DPAA2 Resource Container"); /* Discover and add devices to the resource container. */ static int dpaa2_rc_discover(struct dpaa2_rc_softc *); static int dpaa2_rc_add_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); /* Helper routines. */ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *, struct dpaa2_cmd *, uint8_t, bool, uint16_t); static int dpaa2_rc_configure_irq(device_t, device_t, int, uint64_t, uint32_t); static int dpaa2_rc_add_res(device_t, device_t, enum dpaa2_dev_type, int *, int); static int dpaa2_rc_print_type(struct resource_list *, enum dpaa2_dev_type); static struct dpaa2_mcp *dpaa2_rc_select_portal(device_t, device_t); /* Routines to send commands to MC. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *, uint16_t); static int dpaa2_rc_send_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *); static int dpaa2_rc_probe(device_t dev) { /* DPRC device will be added by the parent DPRC or MC bus itself. */ device_set_desc(dev, "DPAA2 Resource Container"); return (BUS_PROBE_DEFAULT); } static int dpaa2_rc_detach(device_t dev) { struct dpaa2_devinfo *dinfo; int error; error = bus_generic_detach(dev); if (error) return (error); dinfo = device_get_ivars(dev); if (dinfo != NULL) { if (dinfo->portal != NULL) dpaa2_mcp_free_portal(dinfo->portal); free(dinfo, M_DPAA2_RC); } return (device_delete_children(dev)); } static int dpaa2_rc_attach(device_t dev) { device_t pdev; struct dpaa2_mc_softc *mcsc; struct dpaa2_rc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; sc = device_get_softc(dev); sc->dev = dev; sc->unit = device_get_unit(dev); if (sc->unit == 0) { /* Root DPRC should be attached directly to the MC bus. */ pdev = device_get_parent(dev); mcsc = device_get_softc(pdev); KASSERT(strcmp(device_get_name(pdev), "dpaa2_mc") == 0, ("root DPRC should be attached to the MC bus")); /* * Allocate devinfo to let the parent MC bus access ICID of the * DPRC object.
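* * Note: the M_WAITOK allocation below may sleep until memory becomes * available, but per malloc(9) it never returns NULL, so the result * needs no failure check.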
*/ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); device_set_ivars(dev, dinfo); dinfo->pdev = pdev; dinfo->dev = dev; dinfo->dtype = DPAA2_DEV_RC; dinfo->portal = NULL; /* Prepare helper portal object to send commands to MC. */ error = dpaa2_mcp_init_portal(&dinfo->portal, mcsc->res[0], &mcsc->map[0], DPAA2_PORTAL_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_mcp: " "error=%d\n", __func__, error); dpaa2_rc_detach(dev); return (ENXIO); } } else { /* TODO: Child DPRCs aren't supported yet. */ return (ENXIO); } /* Create DPAA2 devices for objects in this container. */ error = dpaa2_rc_discover(sc); if (error) { device_printf(dev, "%s: failed to discover objects in " "container: error=%d\n", __func__, error); dpaa2_rc_detach(dev); return (error); } return (0); } /* * Bus interface. */ static struct resource_list * dpaa2_rc_get_resource_list(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static void dpaa2_rc_delete_resource(device_t rcdev, device_t child, int type, int rid) { struct resource_list *rl; struct resource_list_entry *rle; struct dpaa2_devinfo *dinfo; if (device_get_parent(child) != rcdev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(rcdev, "%s: resource still owned by " "child: type=%d, rid=%d, start=%jx\n", __func__, type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, rcdev, child, type, rid); } resource_list_delete(rl, type, rid); } static struct resource * dpaa2_rc_alloc_multi_resource(device_t rcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list *rl; struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * By default, software portal interrupts are message-based, that is, * they are issued from QMan using a 4-byte write. * * TODO: However, this default behavior can be changed by programming one * or more software portals to issue their interrupts via a * dedicated software portal interrupt wire. * See registers SWP_INTW0_CFG to SWP_INTW3_CFG for details.
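* * Until then, requests for the wired interrupt (SYS_RES_IRQ with rid 0) * are refused below; only message-based interrupts, added by * dpaa2_rc_alloc_msi() at rids starting from 1, can be allocated.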
*/ if (type == SYS_RES_IRQ && *rid == 0) return (NULL); return (resource_list_alloc(rl, rcdev, child, type, rid, start, end, count, flags)); } static struct resource * dpaa2_rc_alloc_resource(device_t rcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { if (device_get_parent(child) != rcdev) return (BUS_ALLOC_RESOURCE(device_get_parent(rcdev), child, type, rid, start, end, count, flags)); return (dpaa2_rc_alloc_multi_resource(rcdev, child, type, rid, start, end, count, flags)); } static int -dpaa2_rc_release_resource(device_t rcdev, device_t child, int type, int rid, - struct resource *r) +dpaa2_rc_release_resource(device_t rcdev, device_t child, struct resource *r) { struct resource_list *rl; struct dpaa2_devinfo *dinfo; if (device_get_parent(child) != rcdev) return (BUS_RELEASE_RESOURCE(device_get_parent(rcdev), child, - type, rid, r)); + r)); dinfo = device_get_ivars(child); rl = &dinfo->resources; - return (resource_list_release(rl, rcdev, child, type, rid, r)); + return (resource_list_release(rl, rcdev, child, r)); } static void dpaa2_rc_child_deleted(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { device_printf(child, "%s: resource still owned: " "type=%d, rid=%d, addr=%lx\n", __func__, rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, rcdev, child, rle->type, rle->rid); } } resource_list_free(rl); if (dinfo) free(dinfo, M_DPAA2_RC); } static void dpaa2_rc_child_detached(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; if (resource_list_release_active(rl, rcdev, child, SYS_RES_IRQ) != 0) device_printf(child, "%s: leaked IRQ resources!\n", __func__); if (dinfo->msi.msi_alloc != 0) { device_printf(child, "%s: leaked %d MSI vectors!\n", __func__, dinfo->msi.msi_alloc); PCI_RELEASE_MSI(rcdev, child); } if (resource_list_release_active(rl, rcdev, child, SYS_RES_MEMORY) != 0) device_printf(child, "%s: leaked memory resources!\n", __func__); } static int dpaa2_rc_setup_intr(device_t rcdev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct dpaa2_devinfo *dinfo; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(rcdev, child, irq, flags, filter, intr, arg, &cookie); if (error) { device_printf(rcdev, "%s: bus_generic_setup_intr() failed: " "error=%d\n", __func__, error); return (error); } /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != rcdev) { *cookiep = cookie; return (0); } rid = rman_get_rid(irq); if (rid == 0) { if (bootverbose) device_printf(rcdev, "%s: cannot setup interrupt with " "rid=0: INTx are not supported by DPAA2 objects " "yet\n", __func__); return (EINVAL); } else { dinfo = device_get_ivars(child); KASSERT(dinfo->msi.msi_alloc > 0, ("No MSI interrupts allocated")); /* * Ask our parent to map the MSI and give us the address and * data register values. If we fail for some reason, teardown * the interrupt handler. 
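* * (The parent resolves the IRQ to the doorbell address/data pair that * raises this MSI; dpaa2_rc_configure_irq() is then expected to hand * that pair to the MC so it can be programmed into the DPAA2 object, * cf. dpaa2_rc_set_obj_irq() below.)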
*/ error = PCIB_MAP_MSI(device_get_parent(rcdev), child, rman_get_start(irq), &addr, &data); if (error) { device_printf(rcdev, "%s: PCIB_MAP_MSI failed: " "error=%d\n", __func__, error); (void)bus_generic_teardown_intr(rcdev, child, irq, cookie); return (error); } /* Configure MSI for this DPAA2 object. */ error = dpaa2_rc_configure_irq(rcdev, child, rid, addr, data); if (error) { device_printf(rcdev, "%s: failed to configure IRQ for " "DPAA2 object: rid=%d, type=%s, unit=%d\n", __func__, rid, dpaa2_ttos(dinfo->dtype), device_get_unit(child)); return (error); } dinfo->msi.msi_handlers++; } *cookiep = cookie; return (0); } static int dpaa2_rc_teardown_intr(device_t rcdev, device_t child, struct resource *irq, void *cookie) { struct resource_list_entry *rle; struct dpaa2_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != rcdev) return(bus_generic_teardown_intr(rcdev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { if (bootverbose) device_printf(rcdev, "%s: cannot teardown interrupt " "with rid=0: INTx are not supported by DPAA2 " "objects yet\n", __func__); return (EINVAL); } else { dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); dinfo->msi.msi_handlers--; } error = bus_generic_teardown_intr(rcdev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI", __func__)); return (error); } static int dpaa2_rc_print_child(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; int retval = 0; retval += bus_print_child_header(rcdev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); /* Print DPAA2-specific resources. */ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_IO); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_BP); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_CON); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_MCP); retval += printf(" at %s (id=%u)", dpaa2_ttos(dinfo->dtype), dinfo->id); retval += bus_print_child_domain(rcdev, child); retval += bus_print_child_footer(rcdev, child); return (retval); } /* * Pseudo-PCI interface. */ /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. * * NOTE: Implementation is similar to sys/dev/pci/pci.c. */ static int dpaa2_rc_alloc_msi(device_t rcdev, device_t child, int *count) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); int error, actual, i, run, irqs[32]; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* MSI should be allocated by the resource container. */ if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); /* Already have allocated messages? */ if (dinfo->msi.msi_alloc != 0) return (ENXIO); /* Don't ask for more than the device supports. */ actual = min(*count, dinfo->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. 
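* Fall back to the next smaller power of two on failure: e.g., a * request for 8 messages retries with 4, then 2, then 1 before * giving up.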
*/ error = PCIB_ALLOC_MSI(device_get_parent(rcdev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ resources in * the irqs[] array, so add new resources starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) { device_printf(child, "using IRQ %d for MSI\n", irqs[0]); } else { /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update counts of alloc'd messages. */ dinfo->msi.msi_alloc = actual; dinfo->msi.msi_handlers = 0; *count = actual; return (0); } /* * Release the MSI messages associated with this DPAA2 device. * * NOTE: Implementation is similar to sys/dev/pci/pci.c. */ static int dpaa2_rc_release_msi(device_t rcdev, device_t child) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource_list_entry *rle; int i, irqs[32]; /* MSI should be released by the resource container. */ if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); /* Do we have any messages to release? */ if (dinfo->msi.msi_alloc == 0) return (ENODEV); KASSERT(dinfo->msi.msi_alloc <= 32, ("more than 32 alloc'd MSI messages")); /* Make sure none of the resources are allocated. */ if (dinfo->msi.msi_handlers > 0) return (EBUSY); for (i = 0; i < dinfo->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(rcdev), child, dinfo->msi.msi_alloc, irqs); for (i = 0; i < dinfo->msi.msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ dinfo->msi.msi_alloc = 0; return (0); } /** * @brief Return the maximum number of the MSI supported by this DPAA2 device. */ static int dpaa2_rc_msi_count(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); return (dinfo->msi.msi_msgnum); } static int dpaa2_rc_get_id(device_t rcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); return (PCIB_GET_ID(device_get_parent(rcdev), child, type, id)); } /* * DPAA2 MC command interface. 
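* * Each wrapper below follows the same pattern: pick a portal via * dpaa2_rc_select_portal(), marshal the arguments into cmd->params[], * execute the command by its CMDID and unmarshal the response from the * same parameter area. A typical caller looks roughly like this (a * sketch only; DPAA2_CMD_INIT is assumed from dpaa2_mcp.h, and the * DPAA2_CMD_RC_* interface macros dispatch to the dpaa2_rc_* methods * below):
 *
 *	struct dpaa2_cmd cmd;
 *	uint16_t rc_token;
 *	int error;
 *
 *	DPAA2_CMD_INIT(&cmd);
 *	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, cont_id, &rc_token);
 *	if (error == 0) {
 *		... issue further commands under this token ...
 *		(void)DPAA2_CMD_RC_CLOSE(dev, child, &cmd);
 *	}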
*/ static int dpaa2_rc_mng_get_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *major, uint32_t *minor, uint32_t *rev) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL || rev == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_VER); if (!error) { *major = cmd->params[0] >> 32; *minor = cmd->params[1] & 0xFFFFFFFF; *rev = cmd->params[0] & 0xFFFFFFFF; } return (error); } static int dpaa2_rc_mng_get_soc_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *pvr, uint32_t *svr) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || pvr == NULL || svr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_SOC_VER); if (!error) { *pvr = cmd->params[0] >> 32; *svr = cmd->params[0] & 0xFFFFFFFF; } return (error); } static int dpaa2_rc_mng_get_container_id(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *cont_id) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cont_id == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_CONT_ID); if (!error) *cont_id = cmd->params[0] & 0xFFFFFFFF; return (error); } static int dpaa2_rc_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t cont_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = cont_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_CLOSE)); } static int dpaa2_rc_get_obj_count(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *obj_count) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj_count == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_COUNT); if (!error) *obj_count = (uint32_t)(cmd->params[0] >> 32); return (error); } static int dpaa2_rc_get_obj(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_idx, struct dpaa2_obj *obj) { struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = obj_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ); if (!error) { pobj = (struct dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, 
sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. */ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_obj_descriptor(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, enum dpaa2_dev_type dtype, struct dpaa2_obj *obj) { struct __packed get_obj_desc_args { uint32_t obj_id; uint32_t _reserved1; uint8_t type[16]; } *args; struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct get_obj_desc_args *) &cmd->params[0]; args->obj_id = obj_id; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_DESC); if (!error) { pobj = (struct dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. */ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_rc_attr *attr) { struct __packed dpaa2_rc_attr { uint32_t cont_id; uint32_t icid; uint32_t options; uint32_t portal_id; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_ATTR); if (!error) { pattr = (struct dpaa2_rc_attr *) &cmd->params[0]; attr->cont_id = pattr->cont_id; attr->portal_id = pattr->portal_id; attr->options = pattr->options; attr->icid = pattr->icid; } return (error); } static int dpaa2_rc_get_obj_region(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, uint8_t reg_idx, enum dpaa2_dev_type dtype, struct dpaa2_rc_obj_region *reg) { struct __packed obj_region_args { uint32_t obj_id; uint16_t _reserved1; uint8_t reg_idx; uint8_t _reserved2; uint64_t _reserved3; uint64_t _reserved4; uint8_t type[16]; } *args; struct __packed obj_region { uint64_t _reserved1; uint64_t base_offset; uint32_t size; uint32_t type; uint32_t flags; uint32_t _reserved2; uint64_t base_paddr; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); uint16_t cmdid, api_major, api_minor; const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || reg == NULL) return (DPAA2_CMD_STAT_ERR); /* * If the DPRC object version was not yet cached, cache it now. * Otherwise use the already cached value. */ if (!portal->rc_api_major && !portal->rc_api_minor) { error = DPAA2_CMD_RC_GET_API_VERSION(dev, child, cmd, &api_major, &api_minor); if (error) return (error); portal->rc_api_major = api_major; portal->rc_api_minor = api_minor; } else { api_major = portal->rc_api_major; api_minor = portal->rc_api_minor; } /* TODO: Remove magic numbers. 
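* For instance, an MC reporting API 6.5 selects CMDID_RC_GET_OBJ_REG_V2, * 6.6 or 7.0 selects CMDID_RC_GET_OBJ_REG_V3, and anything below 6.3 * falls back to the original CMDID_RC_GET_OBJ_REG.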
*/ if (api_major > 6u || (api_major == 6u && api_minor >= 6u)) /* * MC API version 6.6 changed the size of the MC portals and * software portals to 64K (as implemented by hardware). */ cmdid = CMDID_RC_GET_OBJ_REG_V3; else if (api_major == 6u && api_minor >= 3u) /* * MC API version 6.3 introduced a new field to the region * descriptor: base_address. */ cmdid = CMDID_RC_GET_OBJ_REG_V2; else cmdid = CMDID_RC_GET_OBJ_REG; args = (struct obj_region_args *) &cmd->params[0]; args->obj_id = obj_id; args->reg_idx = reg_idx; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, cmdid); if (!error) { resp = (struct obj_region *) &cmd->params[0]; reg->base_paddr = resp->base_paddr; reg->base_offset = resp->base_offset; reg->size = resp->size; reg->flags = resp->flags; reg->type = resp->type & 0xFu; } return (error); } static int dpaa2_rc_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor) { struct __packed rc_api_version { uint16_t major; uint16_t minor; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_API_VERSION); if (!error) { resp = (struct rc_api_version *) &cmd->params[0]; *major = resp->major; *minor = resp->minor; } return (error); } static int dpaa2_rc_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint8_t enable) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_enable_irq(portal, cmd, irq_idx, enable, CMDID_RC_SET_IRQ_ENABLE)); } static int dpaa2_rc_set_obj_irq(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint64_t addr, uint32_t data, uint32_t irq_usr, uint32_t obj_id, enum dpaa2_dev_type dtype) { struct __packed set_obj_irq_args { uint32_t data; uint8_t irq_idx; uint8_t _reserved1[3]; uint64_t addr; uint32_t irq_usr; uint32_t obj_id; uint8_t type[16]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); const char *type = dpaa2_ttos(dtype); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_obj_irq_args *) &cmd->params[0]; args->irq_idx = irq_idx; args->addr = addr; args->data = data; args->irq_usr = irq_usr; args->obj_id = obj_id; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_SET_OBJ_IRQ)); } static int dpaa2_rc_get_conn(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ep_desc *ep1_desc, struct dpaa2_ep_desc *ep2_desc, uint32_t *link_stat) { struct __packed get_conn_args { uint32_t ep1_id; uint32_t ep1_ifid; uint8_t ep1_type[16]; uint64_t _reserved[4]; } *args; struct __packed get_conn_resp { uint64_t _reserved1[3]; uint32_t ep2_id; uint32_t ep2_ifid; uint8_t ep2_type[16]; uint32_t link_stat; uint32_t _reserved2; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || ep1_desc == NULL || ep2_desc == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct get_conn_args *) &cmd->params[0]; args->ep1_id = ep1_desc->obj_id; args->ep1_ifid = ep1_desc->if_id; /* TODO: Remove magic number. 
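* (The 16 below is the width of the ep1_type[] field, i.e. TYPE_LEN_MAX.)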
*/ strncpy(args->ep1_type, dpaa2_ttos(ep1_desc->type), 16); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_CONN); if (!error) { resp = (struct get_conn_resp *) &cmd->params[0]; ep2_desc->obj_id = resp->ep2_id; ep2_desc->if_id = resp->ep2_ifid; ep2_desc->type = dpaa2_stot((const char *) resp->ep2_type); if (link_stat != NULL) *link_stat = resp->link_stat; } return (error); } static int dpaa2_rc_ni_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpni_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpni_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_ni_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLOSE)); } static int dpaa2_rc_ni_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ENABLE)); } static int dpaa2_rc_ni_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_DISABLE)); } static int dpaa2_rc_ni_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_API_VER); if (!error) { *major = cmd->params[0] & 0xFFFFU; *minor = (cmd->params[0] >> 16) & 0xFFFFU; } return (error); } static int dpaa2_rc_ni_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_RESET)); } static int dpaa2_rc_ni_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_attr *attr) { struct __packed ni_attr { uint32_t options; uint8_t num_queues; uint8_t num_rx_tcs; uint8_t mac_entries; uint8_t num_tx_tcs; uint8_t vlan_entries; uint8_t num_channels; uint8_t qos_entries; uint8_t _reserved1; uint16_t fs_entries; uint16_t _reserved2; uint8_t qos_key_size; uint8_t fs_key_size; uint16_t wriop_ver; uint8_t num_cgs; uint8_t _reserved3; uint16_t _reserved4; uint64_t _reserved5[4]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_ATTR); if (!error) { resp = (struct ni_attr *) &cmd->params[0]; attr->options = resp->options; attr->wriop_ver = resp->wriop_ver; attr->entries.fs = resp->fs_entries; attr->entries.mac = resp->mac_entries; attr->entries.vlan = resp->vlan_entries; attr->entries.qos = resp->qos_entries; attr->num.queues = resp->num_queues; attr->num.rx_tcs = resp->num_rx_tcs; attr->num.tx_tcs = 
resp->num_tx_tcs; attr->num.channels = resp->num_channels; attr->num.cgs = resp->num_cgs; attr->key_size.fs = resp->fs_key_size; attr->key_size.qos = resp->qos_key_size; } return (error); } static int dpaa2_rc_ni_set_buf_layout(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_buf_layout *bl) { struct __packed set_buf_layout_args { uint8_t queue_type; uint8_t _reserved1; uint16_t _reserved2; uint16_t options; uint8_t params; uint8_t _reserved3; uint16_t priv_data_size; uint16_t data_align; uint16_t head_room; uint16_t tail_room; uint64_t _reserved4[5]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || bl == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_buf_layout_args *) &cmd->params[0]; args->queue_type = (uint8_t) bl->queue_type; args->options = bl->options; args->params = 0; args->priv_data_size = bl->pd_size; args->data_align = bl->fd_align; args->head_room = bl->head_size; args->tail_room = bl->tail_size; args->params |= bl->pass_timestamp ? 1U : 0U; args->params |= bl->pass_parser_result ? 2U : 0U; args->params |= bl->pass_frame_status ? 4U : 0U; args->params |= bl->pass_sw_opaque ? 8U : 0U; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_BUF_LAYOUT)); } static int dpaa2_rc_ni_get_tx_data_offset(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *offset) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || offset == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_TX_DATA_OFF); if (!error) *offset = cmd->params[0] & 0xFFFFU; return (error); } static int dpaa2_rc_ni_get_port_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PORT_MAC_ADDR); if (!error) { mac[0] = (cmd->params[0] >> 56) & 0xFFU; mac[1] = (cmd->params[0] >> 48) & 0xFFU; mac[2] = (cmd->params[0] >> 40) & 0xFFU; mac[3] = (cmd->params[0] >> 32) & 0xFFU; mac[4] = (cmd->params[0] >> 24) & 0xFFU; mac[5] = (cmd->params[0] >> 16) & 0xFFU; } return (error); } static int dpaa2_rc_ni_set_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed set_prim_mac_args { uint8_t _reserved[2]; uint8_t mac[ETHER_ADDR_LEN]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); args = (struct set_prim_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_PRIM_MAC_ADDR)); } static int dpaa2_rc_ni_get_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed get_prim_mac_resp { uint8_t _reserved[2]; uint8_t mac[ETHER_ADDR_LEN]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PRIM_MAC_ADDR); if (!error) { resp = (struct get_prim_mac_resp *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) mac[ETHER_ADDR_LEN - i] = resp->mac[i - 1]; } return (error); } static int dpaa2_rc_ni_set_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct 
dpaa2_ni_link_cfg *cfg) { struct __packed link_cfg_args { uint64_t _reserved1; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t adv_speeds; uint64_t _reserved3[3]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); args = (struct link_cfg_args *) &cmd->params[0]; args->rate = cfg->rate; args->options = cfg->options; args->adv_speeds = cfg->adv_speeds; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_LINK_CFG)); } static int dpaa2_rc_ni_get_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_link_cfg *cfg) { struct __packed link_cfg_resp { uint64_t _reserved1; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t adv_speeds; uint64_t _reserved3[3]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_CFG); if (!error) { resp = (struct link_cfg_resp *) &cmd->params[0]; cfg->rate = resp->rate; cfg->options = resp->options; cfg->adv_speeds = resp->adv_speeds; } return (error); } static int dpaa2_rc_ni_get_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_link_state *state) { struct __packed link_state_resp { uint32_t _reserved1; uint32_t flags; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t supported; uint64_t advert; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || state == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_STATE); if (!error) { resp = (struct link_state_resp *) &cmd->params[0]; state->options = resp->options; state->adv_speeds = resp->advert; state->sup_speeds = resp->supported; state->rate = resp->rate; state->link_up = resp->flags & 0x1u ? true : false; state->state_valid = resp->flags & 0x2u ? true : false; } return (error); } static int dpaa2_rc_ni_set_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_qos_table *tbl) { struct __packed qos_table_args { uint32_t _reserved1; uint8_t default_tc; uint8_t options; uint16_t _reserved2; uint64_t _reserved[5]; uint64_t kcfg_busaddr; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || tbl == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct qos_table_args *) &cmd->params[0]; args->default_tc = tbl->default_tc; args->kcfg_busaddr = tbl->kcfg_busaddr; args->options |= tbl->discard_on_miss ? 1U : 0U; args->options |= tbl->keep_entries ? 
2U : 0U; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QOS_TABLE)); } static int dpaa2_rc_ni_clear_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_QOS_TABLE)); } static int dpaa2_rc_ni_set_pools(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_pools_cfg *cfg) { struct __packed set_pools_args { uint8_t pools_num; uint8_t backup_pool_mask; uint8_t _reserved1; uint8_t pool_as; /* assigning: 0 - QPRI, 1 - QDBIN */ uint32_t bp_obj_id[DPAA2_NI_MAX_POOLS]; uint16_t buf_sz[DPAA2_NI_MAX_POOLS]; uint32_t _reserved2; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_pools_args *) &cmd->params[0]; args->pools_num = cfg->pools_num < DPAA2_NI_MAX_POOLS ? cfg->pools_num : DPAA2_NI_MAX_POOLS; for (uint32_t i = 0; i < args->pools_num; i++) { args->bp_obj_id[i] = cfg->pools[i].bp_obj_id; args->buf_sz[i] = cfg->pools[i].buf_sz; args->backup_pool_mask |= (cfg->pools[i].backup_flag & 1) << i; } return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_POOLS)); } static int dpaa2_rc_ni_set_err_behavior(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_err_cfg *cfg) { struct __packed err_behavior_args { uint32_t err_mask; uint8_t flags; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct err_behavior_args *) &cmd->params[0]; args->err_mask = cfg->err_mask; args->flags |= cfg->set_err_fas ? 0x10u : 0u; args->flags |= ((uint8_t) cfg->action) & 0x0Fu; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_ERR_BEHAVIOR)); } static int dpaa2_rc_ni_get_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_queue_cfg *cfg) { struct __packed get_queue_args { uint8_t queue_type; uint8_t tc; uint8_t idx; uint8_t chan_id; } *args; struct __packed get_queue_resp { uint64_t _reserved1; uint32_t dest_id; uint16_t _reserved2; uint8_t priority; uint8_t flags; uint64_t flc; uint64_t user_ctx; uint32_t fqid; uint16_t qdbin; uint16_t _reserved3; uint8_t cgid; uint8_t _reserved[15]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_queue_args *) &cmd->params[0]; args->queue_type = (uint8_t) cfg->type; args->tc = cfg->tc; args->idx = cfg->idx; args->chan_id = cfg->chan_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QUEUE); if (!error) { resp = (struct get_queue_resp *) &cmd->params[0]; cfg->dest_id = resp->dest_id; cfg->priority = resp->priority; cfg->flow_ctx = resp->flc; cfg->user_ctx = resp->user_ctx; cfg->fqid = resp->fqid; cfg->qdbin = resp->qdbin; cfg->cgid = resp->cgid; cfg->dest_type = (enum dpaa2_ni_dest_type) resp->flags & 0x0Fu; cfg->cgid_valid = (resp->flags & 0x20u) > 0u ? true : false; cfg->stash_control = (resp->flags & 0x40u) > 0u ? true : false; cfg->hold_active = (resp->flags & 0x80u) > 0u ? 
true : false; } return (error); } static int dpaa2_rc_ni_set_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_queue_cfg *cfg) { struct __packed set_queue_args { uint8_t queue_type; uint8_t tc; uint8_t idx; uint8_t options; uint32_t _reserved1; uint32_t dest_id; uint16_t _reserved2; uint8_t priority; uint8_t flags; uint64_t flc; uint64_t user_ctx; uint8_t cgid; uint8_t chan_id; uint8_t _reserved[23]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_queue_args *) &cmd->params[0]; args->queue_type = (uint8_t) cfg->type; args->tc = cfg->tc; args->idx = cfg->idx; args->options = cfg->options; args->dest_id = cfg->dest_id; args->priority = cfg->priority; args->flc = cfg->flow_ctx; args->user_ctx = cfg->user_ctx; args->cgid = cfg->cgid; args->chan_id = cfg->chan_id; args->flags |= (uint8_t)(cfg->dest_type & 0x0Fu); args->flags |= cfg->stash_control ? 0x40u : 0u; args->flags |= cfg->hold_active ? 0x80u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QUEUE)); } static int dpaa2_rc_ni_get_qdid(device_t dev, device_t child, struct dpaa2_cmd *cmd, enum dpaa2_ni_queue_type type, uint16_t *qdid) { struct __packed get_qdid_args { uint8_t queue_type; } *args; struct __packed get_qdid_resp { uint16_t qdid; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || qdid == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_qdid_args *) &cmd->params[0]; args->queue_type = (uint8_t) type; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QDID); if (!error) { resp = (struct get_qdid_resp *) &cmd->params[0]; *qdid = resp->qdid; } return (error); } static int dpaa2_rc_ni_add_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed add_mac_args { uint8_t flags; uint8_t _reserved; uint8_t mac[ETHER_ADDR_LEN]; uint8_t tc_id; uint8_t fq_id; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct add_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ADD_MAC_ADDR)); } static int dpaa2_rc_ni_remove_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed rem_mac_args { uint16_t _reserved; uint8_t mac[ETHER_ADDR_LEN]; uint64_t _reserved1[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct rem_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_REMOVE_MAC_ADDR)); } static int dpaa2_rc_ni_clear_mac_filters(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool rm_uni, bool rm_multi) { struct __packed clear_mac_filters_args { uint8_t flags; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct clear_mac_filters_args *) &cmd->params[0]; args->flags |= rm_uni ? 0x1 : 0x0; args->flags |= rm_multi ? 
0x2 : 0x0; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_MAC_FILTERS)); } static int dpaa2_rc_ni_set_mfl(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t length) { struct __packed set_mfl_args { uint16_t length; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_mfl_args *) &cmd->params[0]; args->length = length; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MFL)); } static int dpaa2_rc_ni_set_offload(device_t dev, device_t child, struct dpaa2_cmd *cmd, enum dpaa2_ni_ofl_type ofl_type, bool en) { struct __packed set_ofl_args { uint8_t _reserved[3]; uint8_t ofl_type; uint32_t config; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_ofl_args *) &cmd->params[0]; args->ofl_type = (uint8_t) ofl_type; args->config = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_OFFLOAD)); } static int dpaa2_rc_ni_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_MASK)); } static int dpaa2_rc_ni_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_ENABLE)); } static int dpaa2_rc_ni_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_ni_set_uni_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en) { struct __packed set_uni_promisc_args { uint8_t en; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_uni_promisc_args *) &cmd->params[0]; args->en = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_UNI_PROMISC)); } static int dpaa2_rc_ni_set_multi_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en) { /* TODO: Implementation is the same as for ni_set_uni_promisc(). 
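* Only the command ID differs: CMDID_NI_SET_MULTI_PROMISC instead of * CMDID_NI_SET_UNI_PROMISC.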
*/ struct __packed set_multi_promisc_args { uint8_t en; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_multi_promisc_args *) &cmd->params[0]; args->en = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MULTI_PROMISC)); } static int dpaa2_rc_ni_get_statistics(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t page, uint16_t param, uint64_t *cnt) { struct __packed get_statistics_args { uint8_t page; uint16_t param; } *args; struct __packed get_statistics_resp { uint64_t cnt[7]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cnt == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_statistics_args *) &cmd->params[0]; args->page = page; args->param = param; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_STATISTICS); if (!error) { resp = (struct get_statistics_resp *) &cmd->params[0]; for (int i = 0; i < DPAA2_NI_STAT_COUNTERS; i++) cnt[i] = resp->cnt[i]; } return (error); } static int dpaa2_rc_ni_set_rx_tc_dist(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t dist_size, uint8_t tc, enum dpaa2_ni_dist_mode dist_mode, bus_addr_t key_cfg_buf) { struct __packed set_rx_tc_dist_args { uint16_t dist_size; uint8_t tc; uint8_t ma_dm; /* miss action + dist. mode */ uint32_t _reserved1; uint64_t _reserved2[5]; uint64_t key_cfg_iova; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_rx_tc_dist_args *) &cmd->params[0]; args->dist_size = dist_size; args->tc = tc; args->ma_dm = ((uint8_t) dist_mode) & 0x0Fu; args->key_cfg_iova = key_cfg_buf; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_RX_TC_DIST)); } static int dpaa2_rc_io_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpio_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpio_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_io_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_CLOSE)); } static int dpaa2_rc_io_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ENABLE)); } static int dpaa2_rc_io_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_DISABLE)); } static int dpaa2_rc_io_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_RESET)); } static int 
dpaa2_rc_io_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_io_attr *attr) { struct __packed dpaa2_io_attr { uint32_t id; uint16_t swp_id; uint8_t priors_num; uint8_t chan_mode; uint64_t swp_ce_paddr; uint64_t swp_ci_paddr; uint32_t swp_version; uint32_t _reserved1; uint32_t swp_clk; uint32_t _reserved2[5]; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_ATTR); if (!error) { pattr = (struct dpaa2_io_attr *) &cmd->params[0]; attr->swp_ce_paddr = pattr->swp_ce_paddr; attr->swp_ci_paddr = pattr->swp_ci_paddr; attr->swp_version = pattr->swp_version; attr->swp_clk = pattr->swp_clk; attr->id = pattr->id; attr->swp_id = pattr->swp_id; attr->priors_num = pattr->priors_num; attr->chan_mode = (enum dpaa2_io_chan_mode) pattr->chan_mode; } return (error); } static int dpaa2_rc_io_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { /* TODO: Extract similar *_set_irq_mask() into one function. */ struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_MASK)); } static int dpaa2_rc_io_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { /* TODO: Extract similar *_get_irq_status() into one function. */ struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_io_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { /* TODO: Extract similar *_set_irq_enable() into one function. */ struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 
1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_ENABLE)); } static int dpaa2_rc_io_add_static_dq_chan(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint8_t *chan_idx) { struct __packed add_static_dq_chan_args { uint32_t dpcon_id; } *args; struct __packed add_static_dq_chan_resp { uint8_t chan_idx; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || chan_idx == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct add_static_dq_chan_args *) &cmd->params[0]; args->dpcon_id = dpcon_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ADD_STATIC_DQ_CHAN); if (!error) { resp = (struct add_static_dq_chan_resp *) &cmd->params[0]; *chan_idx = resp->chan_idx; } return (error); } static int dpaa2_rc_bp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpbp_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpbp_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_bp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_CLOSE)); } static int dpaa2_rc_bp_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_ENABLE)); } static int dpaa2_rc_bp_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_DISABLE)); } static int dpaa2_rc_bp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_RESET)); } static int dpaa2_rc_bp_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_bp_attr *attr) { struct __packed dpaa2_bp_attr { uint16_t _reserved1; uint16_t bpid; uint32_t id; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_GET_ATTR); if (!error) { pattr = (struct dpaa2_bp_attr *) &cmd->params[0]; attr->id = pattr->id; attr->bpid = pattr->bpid; } return (error); } static int dpaa2_rc_mac_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmac_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpmac_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_mac_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { 
struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_CLOSE)); } static int dpaa2_rc_mac_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_RESET)); } static int dpaa2_rc_mac_mdio_read(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy, uint16_t reg, uint16_t *val) { struct __packed mdio_read_args { uint8_t clause; /* set to 0 by default */ uint8_t phy; uint16_t reg; uint32_t _reserved1; uint64_t _reserved2[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || val == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mdio_read_args *) &cmd->params[0]; args->phy = phy; args->reg = reg; args->clause = 0; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_READ); if (!error) *val = cmd->params[0] & 0xFFFF; return (error); } static int dpaa2_rc_mac_mdio_write(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy, uint16_t reg, uint16_t val) { struct __packed mdio_write_args { uint8_t clause; /* set to 0 by default */ uint8_t phy; uint16_t reg; uint16_t val; uint16_t _reserved1; uint64_t _reserved2[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mdio_write_args *) &cmd->params[0]; args->phy = phy; args->reg = reg; args->val = val; args->clause = 0; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_WRITE)); } static int dpaa2_rc_mac_get_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ADDR); if (!error) { mac[0] = (cmd->params[0] >> 56) & 0xFFU; mac[1] = (cmd->params[0] >> 48) & 0xFFU; mac[2] = (cmd->params[0] >> 40) & 0xFFU; mac[3] = (cmd->params[0] >> 32) & 0xFFU; mac[4] = (cmd->params[0] >> 24) & 0xFFU; mac[5] = (cmd->params[0] >> 16) & 0xFFU; } return (error); } static int dpaa2_rc_mac_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_mac_attr *attr) { struct __packed mac_attr_resp { uint8_t eth_if; uint8_t link_type; uint16_t id; uint32_t max_rate; uint8_t fec_mode; uint8_t ifg_mode; uint8_t ifg_len; uint8_t _reserved1; uint32_t _reserved2; uint8_t sgn_post_pre; uint8_t serdes_cfg_mode; uint8_t eq_amp_red; uint8_t eq_post1q; uint8_t eq_preq; uint8_t eq_type; uint16_t _reserved3; uint64_t _reserved[4]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ATTR); if (!error) { resp = (struct mac_attr_resp *) &cmd->params[0]; attr->id = resp->id; attr->max_rate = resp->max_rate; attr->eth_if = resp->eth_if; attr->link_type = resp->link_type; } return (error); } static int dpaa2_rc_mac_set_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_mac_link_state *state) { struct __packed mac_set_link_args { uint64_t options; uint32_t rate; uint32_t _reserved1; uint32_t flags; uint32_t _reserved2; uint64_t supported; uint64_t advert; } *args; struct 
dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || state == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct mac_set_link_args *) &cmd->params[0]; args->options = state->options; args->rate = state->rate; args->supported = state->supported; args->advert = state->advert; args->flags |= state->up ? 0x1u : 0u; args->flags |= state->state_valid ? 0x2u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_LINK_STATE)); } static int dpaa2_rc_mac_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { /* TODO: Implementation is the same as for ni_set_irq_mask(). */ struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_MASK)); } static int dpaa2_rc_mac_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { /* TODO: Implementation is the same as for ni_set_irq_enable(). */ struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_ENABLE)); } static int dpaa2_rc_mac_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { /* TODO: Implementation is the same as ni_get_irq_status(). 
*/ struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_con_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpcon_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_con_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_CLOSE)); } static int dpaa2_rc_con_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_RESET)); } static int dpaa2_rc_con_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_ENABLE)); } static int dpaa2_rc_con_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_DISABLE)); } static int dpaa2_rc_con_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_con_attr *attr) { struct __packed con_attr_resp { uint32_t id; uint16_t chan_id; uint8_t prior_num; uint8_t _reserved1; uint64_t _reserved2[6]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_GET_ATTR); if (!error) { resp = (struct con_attr_resp *) &cmd->params[0]; attr->id = resp->id; attr->chan_id = resp->chan_id; attr->prior_num = resp->prior_num; } return (error); } static int dpaa2_rc_con_set_notif(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_con_notif_cfg *cfg) { struct __packed set_notif_args { uint32_t dpio_id; uint8_t prior; uint8_t _reserved1; uint16_t _reserved2; uint64_t ctx; uint64_t _reserved3[5]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_notif_args *) &cmd->params[0]; args->dpio_id = cfg->dpio_id; args->prior = cfg->prior; args->ctx = cfg->qman_ctx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_SET_NOTIF)); } static int dpaa2_rc_mcp_create(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t 
portal_id, uint32_t options, uint32_t *dpmcp_id) { struct __packed mcp_create_args { uint32_t portal_id; uint32_t options; uint64_t _reserved[6]; } *args; struct __packed mcp_create_resp { uint32_t dpmcp_id; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || dpmcp_id == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mcp_create_args *) &cmd->params[0]; args->portal_id = portal_id; args->options = options; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CREATE); if (!error) { resp = (struct mcp_create_resp *) &cmd->params[0]; *dpmcp_id = resp->dpmcp_id; } return (error); } static int dpaa2_rc_mcp_destroy(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmcp_id) { struct __packed mcp_destroy_args { uint32_t dpmcp_id; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mcp_destroy_args *) &cmd->params[0]; args->dpmcp_id = dpmcp_id; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_DESTROY)); } static int dpaa2_rc_mcp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmcp_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpmcp_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_mcp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CLOSE)); } static int dpaa2_rc_mcp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_RESET)); } /** * @brief Create and add devices for DPAA2 objects in this resource container. */ static int dpaa2_rc_discover(struct dpaa2_rc_softc *sc) { device_t rcdev = sc->dev; device_t child = sc->dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_cmd cmd; struct dpaa2_rc_attr dprc_attr; struct dpaa2_obj obj; uint32_t major, minor, rev, obj_count; uint16_t rc_token; int rc; DPAA2_CMD_INIT(&cmd); /* Print MC firmware version. */ rc = DPAA2_CMD_MNG_GET_VERSION(rcdev, child, &cmd, &major, &minor, &rev); if (rc) { device_printf(rcdev, "%s: failed to get MC firmware version: " "error=%d\n", __func__, rc); return (ENXIO); } device_printf(rcdev, "MC firmware version: %u.%u.%u\n", major, minor, rev); /* Obtain container ID associated with a given MC portal. */ rc = DPAA2_CMD_MNG_GET_CONTAINER_ID(rcdev, child, &cmd, &sc->cont_id); if (rc) { device_printf(rcdev, "%s: failed to get container id: " "error=%d\n", __func__, rc); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Resource container ID: %u\n", sc->cont_id); } /* Open the resource container. */ rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, sc->cont_id, &rc_token); if (rc) { device_printf(rcdev, "%s: failed to open container: cont_id=%u, " "error=%d\n", __func__, sc->cont_id, rc); return (ENXIO); } /* Obtain a number of objects in this container. 
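 * The object count obtained below drives three discovery passes: DPMCP
 * portals are added and attached first, then the other managed objects
 * (DPIO, DPBP, DPCON, DPMAC), and finally everything else, e.g. network
 * interfaces (DPNI), once the devices they depend on are in place.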
*/ rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, &cmd, &obj_count); if (rc) { device_printf(rcdev, "%s: failed to count objects in container: " "cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); (void)DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Objects in container: %u\n", obj_count); } rc = DPAA2_CMD_RC_GET_ATTRIBUTES(rcdev, child, &cmd, &dprc_attr); if (rc) { device_printf(rcdev, "%s: failed to get attributes of the " "container: cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Isolation context ID: %u\n", dprc_attr.icid); } if (rcinfo) { rcinfo->id = dprc_attr.cont_id; rcinfo->portal_id = dprc_attr.portal_id; rcinfo->icid = dprc_attr.icid; } /* * Add MC portals before everything else. * TODO: Discover DPAA2 objects on-demand. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc) { continue; /* Skip silently for now. */ } if (obj.type != DPAA2_DEV_MCP) { continue; } dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach MC portals. */ bus_generic_probe(rcdev); rc = bus_generic_attach(rcdev); if (rc) { DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add managed devices (except DPMCPs) to the resource container. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc && bootverbose) { if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ) { device_printf(rcdev, "%s: skip unsupported " "DPAA2 object: idx=%u\n", __func__, i); continue; } else { device_printf(rcdev, "%s: failed to get " "information about DPAA2 object: idx=%u, " "error=%d\n", __func__, i, rc); continue; } } if (obj.type == DPAA2_DEV_MCP) { continue; /* Already added. */ } dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach managed devices properly. */ bus_generic_probe(rcdev); rc = bus_generic_attach(rcdev); if (rc) { DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add other devices to the resource container. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ && bootverbose) { device_printf(rcdev, "%s: skip unsupported DPAA2 " "object: idx=%u\n", __func__, i); continue; } else if (rc) { device_printf(rcdev, "%s: failed to get object: " "idx=%u, error=%d\n", __func__, i, rc); continue; } dpaa2_rc_add_child(sc, &cmd, &obj); } DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); /* Probe and attach the rest of devices. */ bus_generic_probe(rcdev); return (bus_generic_attach(rcdev)); } /** * @brief Add a new DPAA2 device to the resource container bus. */ static int dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd, struct dpaa2_obj *obj) { device_t rcdev, dev; struct dpaa2_devinfo *rcinfo; struct dpaa2_devinfo *dinfo; struct resource_spec *res_spec; const char *devclass; int dpio_n = 0; /* to limit DPIOs by # of CPUs */ int dpcon_n = 0; /* to limit DPCONs by # of CPUs */ int rid, error; rcdev = sc->dev; rcinfo = device_get_ivars(rcdev); switch (obj->type) { case DPAA2_DEV_NI: devclass = "dpaa2_ni"; res_spec = dpaa2_ni_spec; break; default: return (ENXIO); } /* Add a device for the DPAA2 object. */ dev = device_add_child(rcdev, devclass, -1); if (dev == NULL) { device_printf(rcdev, "%s: failed to add a device for DPAA2 " "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } /* Allocate devinfo for a child. 
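 * Note: malloc(9) with M_WAITOK sleeps until the allocation succeeds and
 * never returns NULL, so the NULL check below is purely defensive.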
*/ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo " "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = rcdev; dinfo->dev = dev; dinfo->id = obj->id; dinfo->dtype = obj->type; dinfo->portal = NULL; /* Children share their parent container's ICID and portal ID. */ dinfo->icid = rcinfo->icid; dinfo->portal_id = rcinfo->portal_id; /* MSI configuration */ dinfo->msi.msi_msgnum = obj->irq_count; dinfo->msi.msi_alloc = 0; dinfo->msi.msi_handlers = 0; /* Initialize a resource list for the child. */ resource_list_init(&dinfo->resources); /* Add DPAA2-specific resources to the resource list. */ for (; res_spec && res_spec->type != -1; res_spec++) { if (res_spec->type < DPAA2_DEV_MC) continue; /* Skip non-DPAA2 resource. */ rid = res_spec->rid; /* Limit DPIOs and DPCONs by number of CPUs. */ if (res_spec->type == DPAA2_DEV_IO && dpio_n >= mp_ncpus) { dpio_n++; continue; } if (res_spec->type == DPAA2_DEV_CON && dpcon_n >= mp_ncpus) { dpcon_n++; continue; } error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid, res_spec->flags); if (error) device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: " "error=%d\n", __func__, error); if (res_spec->type == DPAA2_DEV_IO) dpio_n++; if (res_spec->type == DPAA2_DEV_CON) dpcon_n++; } return (0); } /** * @brief Add a new managed DPAA2 device to the resource container bus. * * There are DPAA2 objects (DPIO, DPBP) which have their own drivers and can be * allocated as resources or associated with the other DPAA2 objects. This * function is supposed to discover such managed objects in the resource * container and add them as children to perform a proper initialization. * * NOTE: It must be called together with bus_generic_probe() and * bus_generic_attach() before dpaa2_rc_add_child(). */ static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd, struct dpaa2_obj *obj) { device_t rcdev, dev, child; struct dpaa2_devinfo *rcinfo, *dinfo; struct dpaa2_rc_obj_region reg; struct resource_spec *res_spec; const char *devclass; uint64_t start, end, count; uint32_t flags = 0; int rid, error; rcdev = sc->dev; child = sc->dev; rcinfo = device_get_ivars(rcdev); switch (obj->type) { case DPAA2_DEV_IO: devclass = "dpaa2_io"; res_spec = dpaa2_io_spec; flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE; break; case DPAA2_DEV_BP: devclass = "dpaa2_bp"; res_spec = dpaa2_bp_spec; flags = DPAA2_MC_DEV_ALLOCATABLE; break; case DPAA2_DEV_CON: devclass = "dpaa2_con"; res_spec = dpaa2_con_spec; flags = DPAA2_MC_DEV_ALLOCATABLE; break; case DPAA2_DEV_MAC: devclass = "dpaa2_mac"; res_spec = dpaa2_mac_spec; flags = DPAA2_MC_DEV_ASSOCIATED; break; case DPAA2_DEV_MCP: devclass = "dpaa2_mcp"; res_spec = NULL; flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE; break; default: /* Only managed devices above are supported. */ return (EINVAL); } /* Add a device for the DPAA2 object. */ dev = device_add_child(rcdev, devclass, -1); if (dev == NULL) { device_printf(rcdev, "%s: failed to add a device for DPAA2 " "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } /* Allocate devinfo for the child. 
*/ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo " "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = rcdev; dinfo->dev = dev; dinfo->id = obj->id; dinfo->dtype = obj->type; dinfo->portal = NULL; /* Children share their parent container's ICID and portal ID. */ dinfo->icid = rcinfo->icid; dinfo->portal_id = rcinfo->portal_id; /* MSI configuration */ dinfo->msi.msi_msgnum = obj->irq_count; dinfo->msi.msi_alloc = 0; dinfo->msi.msi_handlers = 0; /* Initialize a resource list for the child. */ resource_list_init(&dinfo->resources); /* Add memory regions to the resource list. */ for (uint8_t i = 0; i < obj->reg_count; i++) { error = DPAA2_CMD_RC_GET_OBJ_REGION(rcdev, child, cmd, obj->id, i, obj->type, ®); if (error) { device_printf(rcdev, "%s: failed to obtain memory " "region for type=%s, id=%u, reg_idx=%u: error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, i, error); continue; } count = reg.size; start = reg.base_paddr + reg.base_offset; end = reg.base_paddr + reg.base_offset + reg.size - 1; resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, start, end, count); } /* Add DPAA2-specific resources to the resource list. */ for (; res_spec && res_spec->type != -1; res_spec++) { if (res_spec->type < DPAA2_DEV_MC) continue; /* Skip non-DPAA2 resource. */ rid = res_spec->rid; error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid, res_spec->flags); if (error) device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: " "error=%d\n", __func__, error); } /* Inform MC about a new managed device. */ error = DPAA2_MC_MANAGE_DEV(rcdev, dev, flags); if (error) { device_printf(rcdev, "%s: failed to add a managed DPAA2 device: " "type=%s, id=%u, error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, error); return (ENXIO); } return (0); } /** * @brief Configure given IRQ using MC command interface. */ static int dpaa2_rc_configure_irq(device_t rcdev, device_t child, int rid, uint64_t addr, uint32_t data) { struct dpaa2_devinfo *rcinfo; struct dpaa2_devinfo *dinfo; struct dpaa2_cmd cmd; uint16_t rc_token; int rc = EINVAL; DPAA2_CMD_INIT(&cmd); if (device_get_parent(child) == rcdev && rid >= 1) { rcinfo = device_get_ivars(rcdev); dinfo = device_get_ivars(child); rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, rcinfo->id, &rc_token); if (rc) { device_printf(rcdev, "%s: failed to open DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } /* Set MSI address and value. */ rc = DPAA2_CMD_RC_SET_OBJ_IRQ(rcdev, child, &cmd, rid - 1, addr, data, rid, dinfo->id, dinfo->dtype); if (rc) { device_printf(rcdev, "%s: failed to setup IRQ: " "rid=%d, addr=%jx, data=%x, error=%d\n", __func__, rid, addr, data, rc); return (ENODEV); } rc = DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); if (rc) { device_printf(rcdev, "%s: failed to close DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } rc = 0; } return (rc); } /** * @brief General implementation of the MC command to enable IRQ. */ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool enable, uint16_t cmdid) { struct __packed enable_irq_args { uint8_t enable; uint8_t _reserved1; uint16_t _reserved2; uint8_t irq_idx; uint8_t _reserved3; uint16_t _reserved4; uint64_t _reserved5[6]; } *args; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); args = (struct enable_irq_args *) &cmd->params[0]; args->irq_idx = irq_idx; args->enable = enable == 0u ? 
0u : 1u; return (dpaa2_rc_exec_cmd(mcp, cmd, cmdid)); } /** * @brief Sends a command to MC and waits for response. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint16_t cmdid) { struct dpaa2_cmd_header *hdr; uint16_t flags; int error; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); /* Prepare a command for the MC hardware. */ hdr = (struct dpaa2_cmd_header *) &cmd->header; hdr->cmdid = cmdid; hdr->status = DPAA2_CMD_STAT_READY; DPAA2_MCP_LOCK(mcp, &flags); if (flags & DPAA2_PORTAL_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_INVALID_STATE); } /* Send a command to MC and wait for the result. */ dpaa2_rc_send_cmd(mcp, cmd); error = dpaa2_rc_wait_for_cmd(mcp, cmd); if (error) { DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_ERR); } if (hdr->status != DPAA2_CMD_STAT_OK) { DPAA2_MCP_UNLOCK(mcp); return (int)(hdr->status); } DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_OK); } /** * @brief Writes a command to the MC command portal. */ static int dpaa2_rc_send_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd) { /* Write command parameters. */ for (uint32_t i = 1; i <= DPAA2_CMD_PARAMS_N; i++) bus_write_8(mcp->map, sizeof(uint64_t) * i, cmd->params[i-1]); bus_barrier(mcp->map, 0, sizeof(struct dpaa2_cmd), BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Write command header to trigger execution. */ bus_write_8(mcp->map, 0, cmd->header); return (0); } /** * @brief Polls the MC command portal in order to receive a result of the * command execution. */ static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd) { struct dpaa2_cmd_header *hdr; uint64_t val; uint32_t i; /* Wait for a command execution result from the MC hardware. */ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) { val = bus_read_8(mcp->map, 0); hdr = (struct dpaa2_cmd_header *) &val; if (hdr->status != DPAA2_CMD_STAT_READY) { break; } DELAY(CMD_SPIN_TIMEOUT); } if (i > CMD_SPIN_ATTEMPTS) { /* Return an error on expired timeout. */ return (DPAA2_CMD_STAT_TIMEOUT); } else { /* Read command response. */ cmd->header = val; for (i = 1; i <= DPAA2_CMD_PARAMS_N; i++) { cmd->params[i-1] = bus_read_8(mcp->map, i * sizeof(uint64_t)); } } return (DPAA2_CMD_STAT_OK); } /** * @brief Reserve a DPAA2-specific device of the given devtype for the child. */ static int dpaa2_rc_add_res(device_t rcdev, device_t child, enum dpaa2_dev_type devtype, int *rid, int flags) { device_t dpaa2_dev; struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource *res; bool shared = false; int error; /* Request a free DPAA2 device of the given type from MC. */ error = DPAA2_MC_GET_FREE_DEV(rcdev, &dpaa2_dev, devtype); if (error && !(flags & RF_SHAREABLE)) { device_printf(rcdev, "%s: failed to obtain a free %s (rid=%d) " "for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } /* Request a shared DPAA2 device of the given type from MC. */ if (error) { error = DPAA2_MC_GET_SHARED_DEV(rcdev, &dpaa2_dev, devtype); if (error) { device_printf(rcdev, "%s: failed to obtain a shared " "%s (rid=%d) for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } shared = true; } /* Add DPAA2 device to the resource list of the child device. */ resource_list_add(&dinfo->resources, devtype, *rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1); /* Reserve a newly added DPAA2 resource. 
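 * Note that the device_t pointer itself serves as the resource's start
 * and end address (a range of length 1): whoever is handed this resource
 * later gets the backing DPAA2 device, not a memory range.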
*/ res = resource_list_reserve(&dinfo->resources, rcdev, child, devtype, rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1, flags & ~RF_ACTIVE); if (!res) { device_printf(rcdev, "%s: failed to reserve %s (rid=%d) for: %s " "(id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (EBUSY); } /* Reserve a shared DPAA2 device of the given type. */ if (shared) { error = DPAA2_MC_RESERVE_DEV(rcdev, dpaa2_dev, devtype); if (error) { device_printf(rcdev, "%s: failed to reserve a shared " "%s (rid=%d) for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } } return (0); } static int dpaa2_rc_print_type(struct resource_list *rl, enum dpaa2_dev_type type) { struct dpaa2_devinfo *dinfo; struct resource_list_entry *rle; uint32_t prev_id; int printed = 0, series = 0; int retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { dinfo = device_get_ivars((device_t) rle->start); if (printed == 0) { retval += printf(" %s (id=", dpaa2_ttos(dinfo->dtype)); } else { if (dinfo->id == prev_id + 1) { if (series == 0) { series = 1; retval += printf("-"); } } else { if (series == 1) { retval += printf("%u", prev_id); series = 0; } retval += printf(","); } } printed++; if (series == 0) retval += printf("%u", dinfo->id); prev_id = dinfo->id; } } if (printed) { if (series == 1) retval += printf("%u", prev_id); retval += printf(")"); } return (retval); } static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *cmd) { if (cmd != NULL) { memset(cmd->params, 0, sizeof(cmd->params[0]) * DPAA2_CMD_PARAMS_N); } return (0); } static struct dpaa2_mcp * dpaa2_rc_select_portal(device_t dev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *cinfo = device_get_ivars(child); if (cinfo == NULL || dinfo == NULL || dinfo->dtype != DPAA2_DEV_RC) return (NULL); return (cinfo->portal != NULL ? 
cinfo->portal : dinfo->portal); } static device_method_t dpaa2_rc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_rc_probe), DEVMETHOD(device_attach, dpaa2_rc_attach), DEVMETHOD(device_detach, dpaa2_rc_detach), /* Bus interface */ DEVMETHOD(bus_get_resource_list, dpaa2_rc_get_resource_list), DEVMETHOD(bus_delete_resource, dpaa2_rc_delete_resource), DEVMETHOD(bus_alloc_resource, dpaa2_rc_alloc_resource), DEVMETHOD(bus_release_resource, dpaa2_rc_release_resource), DEVMETHOD(bus_child_deleted, dpaa2_rc_child_deleted), DEVMETHOD(bus_child_detached, dpaa2_rc_child_detached), DEVMETHOD(bus_setup_intr, dpaa2_rc_setup_intr), DEVMETHOD(bus_teardown_intr, dpaa2_rc_teardown_intr), DEVMETHOD(bus_print_child, dpaa2_rc_print_child), DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), /* Pseudo-PCI interface */ DEVMETHOD(pci_alloc_msi, dpaa2_rc_alloc_msi), DEVMETHOD(pci_release_msi, dpaa2_rc_release_msi), DEVMETHOD(pci_msi_count, dpaa2_rc_msi_count), DEVMETHOD(pci_get_id, dpaa2_rc_get_id), /* DPAA2 MC command interface */ DEVMETHOD(dpaa2_cmd_mng_get_version, dpaa2_rc_mng_get_version), DEVMETHOD(dpaa2_cmd_mng_get_soc_version, dpaa2_rc_mng_get_soc_version), DEVMETHOD(dpaa2_cmd_mng_get_container_id, dpaa2_rc_mng_get_container_id), /* DPRC commands */ DEVMETHOD(dpaa2_cmd_rc_open, dpaa2_rc_open), DEVMETHOD(dpaa2_cmd_rc_close, dpaa2_rc_close), DEVMETHOD(dpaa2_cmd_rc_get_obj_count, dpaa2_rc_get_obj_count), DEVMETHOD(dpaa2_cmd_rc_get_obj, dpaa2_rc_get_obj), DEVMETHOD(dpaa2_cmd_rc_get_obj_descriptor, dpaa2_rc_get_obj_descriptor), DEVMETHOD(dpaa2_cmd_rc_get_attributes, dpaa2_rc_get_attributes), DEVMETHOD(dpaa2_cmd_rc_get_obj_region, dpaa2_rc_get_obj_region), DEVMETHOD(dpaa2_cmd_rc_get_api_version, dpaa2_rc_get_api_version), DEVMETHOD(dpaa2_cmd_rc_set_irq_enable, dpaa2_rc_set_irq_enable), DEVMETHOD(dpaa2_cmd_rc_set_obj_irq, dpaa2_rc_set_obj_irq), DEVMETHOD(dpaa2_cmd_rc_get_conn, dpaa2_rc_get_conn), /* DPNI commands */ DEVMETHOD(dpaa2_cmd_ni_open, dpaa2_rc_ni_open), DEVMETHOD(dpaa2_cmd_ni_close, dpaa2_rc_ni_close), DEVMETHOD(dpaa2_cmd_ni_enable, dpaa2_rc_ni_enable), DEVMETHOD(dpaa2_cmd_ni_disable, dpaa2_rc_ni_disable), DEVMETHOD(dpaa2_cmd_ni_get_api_version, dpaa2_rc_ni_get_api_version), DEVMETHOD(dpaa2_cmd_ni_reset, dpaa2_rc_ni_reset), DEVMETHOD(dpaa2_cmd_ni_get_attributes, dpaa2_rc_ni_get_attributes), DEVMETHOD(dpaa2_cmd_ni_set_buf_layout, dpaa2_rc_ni_set_buf_layout), DEVMETHOD(dpaa2_cmd_ni_get_tx_data_off, dpaa2_rc_ni_get_tx_data_offset), DEVMETHOD(dpaa2_cmd_ni_get_port_mac_addr, dpaa2_rc_ni_get_port_mac_addr), DEVMETHOD(dpaa2_cmd_ni_set_prim_mac_addr, dpaa2_rc_ni_set_prim_mac_addr), DEVMETHOD(dpaa2_cmd_ni_get_prim_mac_addr, dpaa2_rc_ni_get_prim_mac_addr), DEVMETHOD(dpaa2_cmd_ni_set_link_cfg, dpaa2_rc_ni_set_link_cfg), DEVMETHOD(dpaa2_cmd_ni_get_link_cfg, dpaa2_rc_ni_get_link_cfg), DEVMETHOD(dpaa2_cmd_ni_get_link_state, dpaa2_rc_ni_get_link_state), DEVMETHOD(dpaa2_cmd_ni_set_qos_table, dpaa2_rc_ni_set_qos_table), DEVMETHOD(dpaa2_cmd_ni_clear_qos_table, dpaa2_rc_ni_clear_qos_table), DEVMETHOD(dpaa2_cmd_ni_set_pools, dpaa2_rc_ni_set_pools), DEVMETHOD(dpaa2_cmd_ni_set_err_behavior,dpaa2_rc_ni_set_err_behavior), DEVMETHOD(dpaa2_cmd_ni_get_queue, dpaa2_rc_ni_get_queue), 
DEVMETHOD(dpaa2_cmd_ni_set_queue, dpaa2_rc_ni_set_queue), DEVMETHOD(dpaa2_cmd_ni_get_qdid, dpaa2_rc_ni_get_qdid), DEVMETHOD(dpaa2_cmd_ni_add_mac_addr, dpaa2_rc_ni_add_mac_addr), DEVMETHOD(dpaa2_cmd_ni_remove_mac_addr, dpaa2_rc_ni_remove_mac_addr), DEVMETHOD(dpaa2_cmd_ni_clear_mac_filters, dpaa2_rc_ni_clear_mac_filters), DEVMETHOD(dpaa2_cmd_ni_set_mfl, dpaa2_rc_ni_set_mfl), DEVMETHOD(dpaa2_cmd_ni_set_offload, dpaa2_rc_ni_set_offload), DEVMETHOD(dpaa2_cmd_ni_set_irq_mask, dpaa2_rc_ni_set_irq_mask), DEVMETHOD(dpaa2_cmd_ni_set_irq_enable, dpaa2_rc_ni_set_irq_enable), DEVMETHOD(dpaa2_cmd_ni_get_irq_status, dpaa2_rc_ni_get_irq_status), DEVMETHOD(dpaa2_cmd_ni_set_uni_promisc, dpaa2_rc_ni_set_uni_promisc), DEVMETHOD(dpaa2_cmd_ni_set_multi_promisc, dpaa2_rc_ni_set_multi_promisc), DEVMETHOD(dpaa2_cmd_ni_get_statistics, dpaa2_rc_ni_get_statistics), DEVMETHOD(dpaa2_cmd_ni_set_rx_tc_dist, dpaa2_rc_ni_set_rx_tc_dist), /* DPIO commands */ DEVMETHOD(dpaa2_cmd_io_open, dpaa2_rc_io_open), DEVMETHOD(dpaa2_cmd_io_close, dpaa2_rc_io_close), DEVMETHOD(dpaa2_cmd_io_enable, dpaa2_rc_io_enable), DEVMETHOD(dpaa2_cmd_io_disable, dpaa2_rc_io_disable), DEVMETHOD(dpaa2_cmd_io_reset, dpaa2_rc_io_reset), DEVMETHOD(dpaa2_cmd_io_get_attributes, dpaa2_rc_io_get_attributes), DEVMETHOD(dpaa2_cmd_io_set_irq_mask, dpaa2_rc_io_set_irq_mask), DEVMETHOD(dpaa2_cmd_io_get_irq_status, dpaa2_rc_io_get_irq_status), DEVMETHOD(dpaa2_cmd_io_set_irq_enable, dpaa2_rc_io_set_irq_enable), DEVMETHOD(dpaa2_cmd_io_add_static_dq_chan, dpaa2_rc_io_add_static_dq_chan), /* DPBP commands */ DEVMETHOD(dpaa2_cmd_bp_open, dpaa2_rc_bp_open), DEVMETHOD(dpaa2_cmd_bp_close, dpaa2_rc_bp_close), DEVMETHOD(dpaa2_cmd_bp_enable, dpaa2_rc_bp_enable), DEVMETHOD(dpaa2_cmd_bp_disable, dpaa2_rc_bp_disable), DEVMETHOD(dpaa2_cmd_bp_reset, dpaa2_rc_bp_reset), DEVMETHOD(dpaa2_cmd_bp_get_attributes, dpaa2_rc_bp_get_attributes), /* DPMAC commands */ DEVMETHOD(dpaa2_cmd_mac_open, dpaa2_rc_mac_open), DEVMETHOD(dpaa2_cmd_mac_close, dpaa2_rc_mac_close), DEVMETHOD(dpaa2_cmd_mac_reset, dpaa2_rc_mac_reset), DEVMETHOD(dpaa2_cmd_mac_mdio_read, dpaa2_rc_mac_mdio_read), DEVMETHOD(dpaa2_cmd_mac_mdio_write, dpaa2_rc_mac_mdio_write), DEVMETHOD(dpaa2_cmd_mac_get_addr, dpaa2_rc_mac_get_addr), DEVMETHOD(dpaa2_cmd_mac_get_attributes, dpaa2_rc_mac_get_attributes), DEVMETHOD(dpaa2_cmd_mac_set_link_state, dpaa2_rc_mac_set_link_state), DEVMETHOD(dpaa2_cmd_mac_set_irq_mask, dpaa2_rc_mac_set_irq_mask), DEVMETHOD(dpaa2_cmd_mac_set_irq_enable, dpaa2_rc_mac_set_irq_enable), DEVMETHOD(dpaa2_cmd_mac_get_irq_status, dpaa2_rc_mac_get_irq_status), /* DPCON commands */ DEVMETHOD(dpaa2_cmd_con_open, dpaa2_rc_con_open), DEVMETHOD(dpaa2_cmd_con_close, dpaa2_rc_con_close), DEVMETHOD(dpaa2_cmd_con_reset, dpaa2_rc_con_reset), DEVMETHOD(dpaa2_cmd_con_enable, dpaa2_rc_con_enable), DEVMETHOD(dpaa2_cmd_con_disable, dpaa2_rc_con_disable), DEVMETHOD(dpaa2_cmd_con_get_attributes, dpaa2_rc_con_get_attributes), DEVMETHOD(dpaa2_cmd_con_set_notif, dpaa2_rc_con_set_notif), /* DPMCP commands */ DEVMETHOD(dpaa2_cmd_mcp_create, dpaa2_rc_mcp_create), DEVMETHOD(dpaa2_cmd_mcp_destroy, dpaa2_rc_mcp_destroy), DEVMETHOD(dpaa2_cmd_mcp_open, dpaa2_rc_mcp_open), DEVMETHOD(dpaa2_cmd_mcp_close, dpaa2_rc_mcp_close), DEVMETHOD(dpaa2_cmd_mcp_reset, dpaa2_rc_mcp_reset), DEVMETHOD_END }; static driver_t dpaa2_rc_driver = { "dpaa2_rc", dpaa2_rc_methods, sizeof(struct dpaa2_rc_softc), }; /* For root container */ DRIVER_MODULE(dpaa2_rc, dpaa2_mc, dpaa2_rc_driver, 0, 0); /* For child containers */ DRIVER_MODULE(dpaa2_rc, dpaa2_rc, 
dpaa2_rc_driver, 0, 0); diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index 8069d3af69cf..37db238f2108 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -1,545 +1,533 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* * Bus interface. */ static int simplebus_probe(device_t dev); static struct resource *simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int simplebus_release_resource(device_t bus, device_t child, - int type, int rid, struct resource *r); static void simplebus_probe_nomatch(device_t bus, device_t child); static int simplebus_print_child(device_t bus, device_t child); static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit); static struct resource_list *simplebus_get_resource_list(device_t bus, device_t child); static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); /* * ofw_bus interface */ static const struct ofw_bus_devinfo *simplebus_get_devinfo(device_t bus, device_t child); /* * Driver methods. 
 */
static device_method_t simplebus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, simplebus_probe),
	DEVMETHOD(device_attach, simplebus_attach),
	DEVMETHOD(device_detach, simplebus_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),
	DEVMETHOD(device_suspend, bus_generic_suspend),
	DEVMETHOD(device_resume, bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child, simplebus_add_child),
	DEVMETHOD(bus_print_child, simplebus_print_child),
	DEVMETHOD(bus_probe_nomatch, simplebus_probe_nomatch),
	DEVMETHOD(bus_read_ivar, bus_generic_read_ivar),
	DEVMETHOD(bus_write_ivar, bus_generic_write_ivar),
	DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
	DEVMETHOD(bus_alloc_resource, simplebus_alloc_resource),
-	DEVMETHOD(bus_release_resource, simplebus_release_resource),
+	DEVMETHOD(bus_release_resource, bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
	DEVMETHOD(bus_map_resource, bus_generic_map_resource),
	DEVMETHOD(bus_unmap_resource, bus_generic_unmap_resource),
	DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource),
	DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo),
	DEVMETHOD(bus_get_resource_list, simplebus_get_resource_list),
	DEVMETHOD(bus_get_property, simplebus_get_property),
	DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo, simplebus_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type),

	DEVMETHOD_END
};

DEFINE_CLASS_0(simplebus, simplebus_driver, simplebus_methods,
    sizeof(struct simplebus_softc));

EARLY_DRIVER_MODULE(simplebus, ofwbus, simplebus_driver, 0, 0, BUS_PASS_BUS);
EARLY_DRIVER_MODULE(simplebus, simplebus, simplebus_driver, 0, 0,
    BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);

static int
simplebus_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	/*
	 * XXX We should attach only to a pure 'compatible = "simple-bus"',
	 * without any other compatible string.
	 * For now, filter only known cases:
	 * "syscon", "simple-bus"; is handled by fdt/syscon driver
	 * "simple-mfd", "simple-bus"; is handled by fdt/simple-mfd driver
	 */
	if (ofw_bus_is_compatible(dev, "syscon") ||
	    ofw_bus_is_compatible(dev, "simple-mfd"))
		return (ENXIO);

	/*
	 * FDT data puts a "simple-bus" compatible string on many things that
	 * have children but aren't really buses in our world. Without a
	 * ranges property we will fail to attach, so just fail to probe too.
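 *
 * For illustration, a hypothetical FDT node that would pass this probe
 * (a "simple-bus" compatible string plus a 'ranges' property):
 *
 *	soc {
 *		compatible = "simple-bus";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *	};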
*/ if (!(ofw_bus_is_compatible(dev, "simple-bus") && ofw_bus_has_prop(dev, "ranges")) && (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "soc") != 0)) return (ENXIO); device_set_desc(dev, "Flattened device tree simple bus"); return (BUS_PROBE_GENERIC); } int simplebus_attach_impl(device_t dev) { struct simplebus_softc *sc; phandle_t node; sc = device_get_softc(dev); simplebus_init(dev, 0); if ((sc->flags & SB_FLAG_NO_RANGES) == 0 && simplebus_fill_ranges(sc->node, sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } /* * In principle, simplebus could have an interrupt map, but ignore that * for now */ for (node = OF_child(sc->node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (0); } int simplebus_attach(device_t dev) { int rv; rv = simplebus_attach_impl(dev); if (rv != 0) return (rv); return (bus_generic_attach(dev)); } int simplebus_detach(device_t dev) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (sc->ranges != NULL) free(sc->ranges, M_DEVBUF); return (bus_generic_detach(dev)); } void simplebus_init(device_t dev, phandle_t node) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (node == 0) node = ofw_bus_get_node(dev); sc->dev = dev; sc->node = node; /* * Some important numbers */ sc->acells = 2; OF_getencprop(node, "#address-cells", &sc->acells, sizeof(sc->acells)); sc->scells = 1; OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells)); } int simplebus_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } struct simplebus_devinfo * simplebus_setup_dinfo(device_t dev, phandle_t node, struct simplebus_devinfo *di) { struct simplebus_softc *sc; struct simplebus_devinfo *ndi; sc = device_get_softc(dev); if (di == NULL) ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); else ndi = di; if (ofw_bus_gen_setup_devinfo(&ndi->obdinfo, node) != 0) { if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } resource_list_init(&ndi->rl); ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells, &ndi->rl); ofw_bus_intr_to_rl(dev, node, &ndi->rl, NULL); return (ndi); } device_t simplebus_add_device(device_t dev, phandle_t node, u_int order, const char *name, int unit, struct simplebus_devinfo *di) { struct simplebus_devinfo *ndi; device_t cdev; if ((ndi = simplebus_setup_dinfo(dev, node, di)) == NULL) return (NULL); cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == 
NULL) { device_printf(dev, "<%s>: device_add_child failed\n", ndi->obdinfo.obd_name); resource_list_free(&ndi->rl); ofw_bus_gen_destroy_devinfo(&ndi->obdinfo); if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } device_set_ivars(cdev, ndi); return(cdev); } static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t cdev; struct simplebus_devinfo *ndi; cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) return (NULL); ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); ndi->obdinfo.obd_node = -1; resource_list_init(&ndi->rl); device_set_ivars(cdev, ndi); return (cdev); } static const struct ofw_bus_devinfo * simplebus_get_devinfo(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->obdinfo); } static struct resource_list * simplebus_get_resource_list(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->rl); } static ssize_t simplebus_get_property(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { phandle_t node, xref; ssize_t ret, i; uint32_t *buffer; uint64_t val; switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: case DEVICE_PROP_HANDLE: break; default: return (-1); } node = ofw_bus_get_node(child); if (propvalue == NULL || size == 0) return (OF_getproplen(node, propname)); /* * Integer values are stored in BE format. * If caller declared that the underlying property type is uint32_t * we need to do the conversion to match host endianness. */ if (type == DEVICE_PROP_UINT32) return (OF_getencprop(node, propname, propvalue, size)); /* * uint64_t also requires endianness handling. * In FDT every 8 byte value is stored using two uint32_t variables * in BE format. Now, since the upper bits are stored as the first * of the pair, both halves require swapping. */ if (type == DEVICE_PROP_UINT64) { ret = OF_getencprop(node, propname, propvalue, size); if (ret <= 0) { return (ret); } buffer = (uint32_t *)propvalue; for (i = 0; i < size / 4; i += 2) { val = (uint64_t)buffer[i] << 32 | buffer[i + 1]; ((uint64_t *)buffer)[i / 2] = val; } return (ret); } if (type == DEVICE_PROP_HANDLE) { if (size < sizeof(node)) return (-1); ret = OF_getencprop(node, propname, &xref, sizeof(xref)); if (ret <= 0) return (ret); node = OF_node_from_xref(xref); if (propvalue != NULL) *(uint32_t *)propvalue = node; return (ret); } return (OF_getprop(node, propname, propvalue, size)); } static struct resource * simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct simplebus_softc *sc; struct simplebus_devinfo *di; struct resource_list_entry *rle; int j; sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
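 * RMAN_IS_DEFAULT_RANGE() is true when the caller left start/end
 * unconstrained, as a typical child driver does (hypothetical
 * illustration; 'sc' and 'rid' are assumed locals):
 *
 *	rid = 0;
 *	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *
 * The start/end/count then come from the 'reg'-derived entries that
 * ofw_bus_reg_to_rl() placed on the child's resource list.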
*/ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } -static int -simplebus_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) -{ - - if (type == SYS_RES_IOPORT) - type = SYS_RES_MEMORY; - return (bus_generic_release_resource(bus, child, type, rid, r)); -} - static int simplebus_print_res(struct simplebus_devinfo *di) { int rv; if (di == NULL) return (0); rv = 0; rv += resource_list_print_type(&di->rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(&di->rl, "irq", SYS_RES_IRQ, "%jd"); return (rv); } static void simplebus_probe_nomatch(device_t bus, device_t child) { const char *name, *type, *compat; if (!bootverbose) return; compat = ofw_bus_get_compat(child); if (compat == NULL) return; name = ofw_bus_get_name(child); type = ofw_bus_get_type(child); device_printf(bus, "<%s>", name != NULL ? name : "unknown"); simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) printf(" disabled"); if (type) printf(" type %s", type); printf(" compat %s (no driver attached)\n", compat); } static int simplebus_print_child(device_t bus, device_t child) { int rv; rv = bus_print_child_header(bus, child); rv += simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) rv += printf(" disabled"); rv += bus_print_child_footer(bus, child); return (rv); } diff --git a/sys/dev/hyperv/pcib/vmbus_pcib.c b/sys/dev/hyperv/pcib/vmbus_pcib.c index fd8a7feae984..f6237535cce3 100644 --- a/sys/dev/hyperv/pcib/vmbus_pcib.c +++ b/sys/dev/hyperv/pcib/vmbus_pcib.c @@ -1,2047 +1,2046 @@ /*- * Copyright (c) 2016-2017 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #ifdef NEW_PCIB #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #endif #if defined(__aarch64__) #include #include #include #include #endif #include #include #include #include #include "vmbus_if.h" struct completion { unsigned int done; struct mtx lock; }; static void init_completion(struct completion *c) { memset(c, 0, sizeof(*c)); mtx_init(&c->lock, "hvcmpl", NULL, MTX_DEF); c->done = 0; } static void reinit_completion(struct completion *c) { c->done = 0; } static void free_completion(struct completion *c) { mtx_destroy(&c->lock); } static void complete(struct completion *c) { mtx_lock(&c->lock); c->done++; mtx_unlock(&c->lock); wakeup(c); } static void wait_for_completion(struct completion *c) { mtx_lock(&c->lock); while (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", 0); c->done--; mtx_unlock(&c->lock); } /* * Return: 0 if completed, a non-zero value if timed out. 
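 *
 * Usage sketch for these completion helpers (hypothetical illustration;
 * send_request() stands in for any operation whose handler eventually
 * calls complete() on the same object):
 *
 *	struct completion c;
 *
 *	init_completion(&c);
 *	send_request(&c);
 *	if (wait_for_completion_timeout(&c, 5 * hz) != 0)
 *		printf("request timed out\n");
 *	free_completion(&c);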
*/ static int wait_for_completion_timeout(struct completion *c, int timeout) { int ret; mtx_lock(&c->lock); if (c->done == 0) mtx_sleep(c, &c->lock, 0, "hvwfc", timeout); if (c->done > 0) { c->done--; ret = 0; } else { ret = 1; } mtx_unlock(&c->lock); return (ret); } #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define PCI_MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor))) enum pci_protocol_version_t { PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4), }; static enum pci_protocol_version_t pci_protocol_versions[] = { PCI_PROTOCOL_VERSION_1_4, PCI_PROTOCOL_VERSION_1_1, }; #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) /* * Message Types */ enum pci_message_type { /* * Version 1.1 */ PCI_MESSAGE_BASE = 0x42490000, PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, PCI_EJECT = PCI_MESSAGE_BASE + 0xB, PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19, PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A, PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B, PCI_MESSAGE_MAXIMUM }; #define STATUS_REVISION_MISMATCH 0xC0000059 /* * Structures defining the virtual PCI Express protocol. */ union pci_version { struct { uint16_t minor_version; uint16_t major_version; } parts; uint32_t version; } __packed; /* * This representation is the one used in Windows, which is * what is expected when sending this back and forth with * the Hyper-V parent partition. 
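 *
 * On a little-endian host the first-declared bitfield occupies the
 * lowest bits, so e.g. slot 2, function 1 encodes as
 * val = 2 | (1 << 5) = 0x22.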
*/ union win_slot_encoding { struct { uint32_t slot:5; uint32_t func:3; uint32_t reserved:24; } bits; uint32_t val; } __packed; struct pci_func_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ } __packed; struct pci_func_desc2 { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; uint16_t reserved; } __packed; struct hv_msi_desc { uint8_t vector; uint8_t delivery_mode; uint16_t vector_count; uint32_t reserved; uint64_t cpu_mask; } __packed; struct hv_msi_desc3 { uint32_t vector; uint8_t delivery_mode; uint8_t reserved; uint16_t vector_count; uint16_t processor_count; uint16_t processor_array[32]; } __packed; struct tran_int_desc { uint16_t reserved; uint16_t vector_count; uint32_t data; uint64_t address; } __packed; struct pci_message { uint32_t type; } __packed; struct pci_child_message { struct pci_message message_type; union win_slot_encoding wslot; } __packed; struct pci_incoming_message { struct vmbus_chanpkt_hdr hdr; struct pci_message message_type; } __packed; struct pci_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ } __packed; struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; struct pci_message message[0]; }; /* * Specific message types supporting the PCI protocol. */ struct pci_version_request { struct pci_message message_type; uint32_t protocol_version; uint32_t reservedz:31; } __packed; struct pci_bus_d0_entry { struct pci_message message_type; uint32_t reserved; uint64_t mmio_base; } __packed; struct pci_bus_relations { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc func[0]; } __packed; struct pci_bus_relations2 { struct pci_incoming_message incoming; uint32_t device_count; struct pci_func_desc2 func[0]; } __packed; #define MAX_NUM_BARS (PCIR_MAX_BAR_0 + 1) struct pci_q_res_req_response { struct vmbus_chanpkt_hdr hdr; int32_t status; /* negative values are failures */ uint32_t probed_bar[MAX_NUM_BARS]; } __packed; struct pci_resources_assigned { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][MAX_NUM_BARS]; /* unused here */ uint32_t msi_descriptors; uint32_t reserved[4]; } __packed; struct pci_resources_assigned2 { struct pci_message message_type; union win_slot_encoding wslot; uint8_t memory_range[0x14][6]; /* not used here */ uint32_t msi_descriptor_count; uint8_t reserved[70]; } __packed; struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc int_desc; } __packed; struct pci_create_interrupt3 { struct pci_message message_type; union win_slot_encoding wslot; struct hv_msi_desc3 int_desc; } __packed; struct pci_create_int_response { struct pci_response response; uint32_t reserved; struct tran_int_desc int_desc; } __packed; struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; struct tran_int_desc int_desc; } __packed; struct pci_dev_incoming { struct pci_incoming_message incoming; union win_slot_encoding wslot; } __packed; struct pci_eject_response { struct pci_message message_type; union 
win_slot_encoding wslot; uint32_t status; } __packed; /* * Driver specific state. */ enum hv_pcibus_state { hv_pcibus_init = 0, hv_pcibus_installed, }; struct hv_pcibus { device_t pcib; device_t pci_bus; struct vmbus_pcib_softc *sc; uint16_t pci_domain; enum hv_pcibus_state state; struct resource *cfg_res; struct completion query_completion, *query_comp; struct mtx config_lock; /* Avoid two threads writing index page */ struct mtx device_list_lock; /* Protect lists below */ uint32_t protocol_version; TAILQ_HEAD(, hv_pci_dev) children; TAILQ_HEAD(, hv_dr_state) dr_list; volatile int detaching; }; struct hv_pcidev_desc { uint16_t v_id; /* vendor ID */ uint16_t d_id; /* device ID */ uint8_t rev; uint8_t prog_intf; uint8_t subclass; uint8_t base_class; uint32_t subsystem_id; union win_slot_encoding wslot; uint32_t ser; /* serial number */ uint32_t flags; uint16_t virtual_numa_node; } __packed; struct hv_pci_dev { TAILQ_ENTRY(hv_pci_dev) link; struct hv_pcidev_desc desc; bool reported_missing; struct hv_pcibus *hbus; struct task eject_task; TAILQ_HEAD(, hv_irq_desc) irq_desc_list; /* * What would be observed if one wrote 0xFFFFFFFF to a BAR and then * read it back, for each of the BAR offsets within config space. */ uint32_t probed_bar[MAX_NUM_BARS]; }; /* * Tracks "Device Relations" messages from the host, which must be both * processed in order. */ struct hv_dr_work { struct task task; struct hv_pcibus *bus; }; struct hv_dr_state { TAILQ_ENTRY(hv_dr_state) link; uint32_t device_count; struct hv_pcidev_desc func[0]; }; struct hv_irq_desc { TAILQ_ENTRY(hv_irq_desc) link; struct tran_int_desc desc; int irq; }; #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) static uint32_t devfn_to_wslot(unsigned int devfn) { union win_slot_encoding wslot; wslot.val = 0; wslot.bits.slot = PCI_SLOT(devfn); wslot.bits.func = PCI_FUNC(devfn); return (wslot.val); } static unsigned int wslot_to_devfn(uint32_t wslot) { union win_slot_encoding encoding; unsigned int slot; unsigned int func; encoding.val = wslot; slot = encoding.bits.slot; func = encoding.bits.func; return (PCI_DEVFN(slot, func)); } struct vmbus_pcib_softc { struct vmbus_channel *chan; void *rx_buf; struct taskqueue *taskq; struct hv_pcibus *hbus; }; /* {44C4F61D-4444-4400-9D52-802E27EDE19F} */ static const struct hyperv_guid g_pass_through_dev_type = { .hv_guid = {0x1D, 0xF6, 0xC4, 0x44, 0x44, 0x44, 0x00, 0x44, 0x9D, 0x52, 0x80, 0x2E, 0x27, 0xED, 0xE1, 0x9F} }; struct hv_pci_compl { struct completion host_event; int32_t completion_status; }; struct q_res_req_compl { struct completion host_event; struct hv_pci_dev *hpdev; }; struct compose_comp_ctxt { struct hv_pci_compl comp_pkt; struct tran_int_desc int_desc; }; /* * It is possible the device is revoked during initialization. * Check if this happens during wait. * Return: 0 if response arrived, ENODEV if device revoked. 
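 * The wait below is done in hz/10 slices so that a channel revoked while
 * we sleep is noticed promptly instead of blocking the thread forever.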
*/ static int wait_for_response(struct hv_pcibus *hbus, struct completion *c) { do { if (vmbus_chan_is_revoked(hbus->sc->chan)) { device_printf(hbus->pcib, "The device is revoked.\n"); return (ENODEV); } } while (wait_for_completion_timeout(c, hz /10) != 0); return 0; } static void hv_pci_generic_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct hv_pci_compl *comp_pkt = context; if (resp_packet_size >= sizeof(struct pci_response)) comp_pkt->completion_status = resp->status; else comp_pkt->completion_status = -1; complete(&comp_pkt->host_event); } static void q_resource_requirements(void *context, struct pci_response *resp, int resp_packet_size) { struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; int i; if (resp->status < 0) { printf("vmbus_pcib: failed to query resource requirements\n"); } else { for (i = 0; i < MAX_NUM_BARS; i++) completion->hpdev->probed_bar[i] = q_res_req->probed_bar[i]; } complete(&completion->host_event); } static void hv_pci_compose_compl(void *context, struct pci_response *resp, int resp_packet_size) { struct compose_comp_ctxt *comp_pkt = context; struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; complete(&comp_pkt->comp_pkt.host_event); } static void hv_int_desc_free(struct hv_pci_dev *hpdev, struct hv_irq_desc *hid) { struct pci_delete_interrupt *int_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.val = hpdev->desc.wslot.val; int_pkt->int_desc = hid->desc; vmbus_chan_send(hpdev->hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, int_pkt, sizeof(*int_pkt), 0); free(hid, M_DEVBUF); } static void hv_pci_delete_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct hv_irq_desc *hid, *tmp_hid; device_t pci_dev; int devfn; devfn = wslot_to_devfn(hpdev->desc.wslot.val); bus_topo_lock(); pci_dev = pci_find_dbsf(hbus->pci_domain, 0, PCI_SLOT(devfn), PCI_FUNC(devfn)); if (pci_dev) device_delete_child(hbus->pci_bus, pci_dev); bus_topo_unlock(); mtx_lock(&hbus->device_list_lock); TAILQ_REMOVE(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) hv_int_desc_free(hpdev, hid); free(hpdev, M_DEVBUF); } static struct hv_pci_dev * new_pcichild_device(struct hv_pcibus *hbus, struct hv_pcidev_desc *desc) { struct hv_pci_dev *hpdev; struct pci_child_message *res_req; struct q_res_req_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_child_message)]; } ctxt; int ret; hpdev = malloc(sizeof(*hpdev), M_DEVBUF, M_WAITOK | M_ZERO); hpdev->hbus = hbus; TAILQ_INIT(&hpdev->irq_desc_list); init_completion(&comp_pkt.host_event); comp_pkt.hpdev = hpdev; ctxt.pkt.compl_ctxt = &comp_pkt; ctxt.pkt.completion_func = q_resource_requirements; res_req = (struct pci_child_message *)&ctxt.pkt.message; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.val = desc->wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, res_req, sizeof(*res_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) goto err; if (wait_for_response(hbus, &comp_pkt.host_event)) goto err; 
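/*
 * Editorial sketch (not part of the driver): the transaction pattern used
 * above and throughout this file. The xactid passed to vmbus_chan_send()
 * carries the pci_packet pointer back in the VMBUS_CHANPKT_TYPE_COMP
 * packet, where the channel callback invokes completion_func. Names such
 * as req and some_compl are placeholders:
 *
 *	init_completion(&comp.host_event);
 *	ctxt.pkt.completion_func = some_compl;	(runs in channel callback)
 *	ctxt.pkt.compl_ctxt = &comp;
 *	vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
 *	    VMBUS_CHANPKT_FLAG_RC, req, sizeof(*req),
 *	    (uint64_t)(uintptr_t)&ctxt.pkt);
 *	wait_for_response(hbus, &comp.host_event);
 *	free_completion(&comp.host_event);
 */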
free_completion(&comp_pkt.host_event); hpdev->desc = *desc; mtx_lock(&hbus->device_list_lock); if (TAILQ_EMPTY(&hbus->children)) hbus->pci_domain = desc->ser & 0xFFFF; TAILQ_INSERT_TAIL(&hbus->children, hpdev, link); mtx_unlock(&hbus->device_list_lock); return (hpdev); err: free_completion(&comp_pkt.host_event); free(hpdev, M_DEVBUF); return (NULL); } static int pci_rescan(device_t dev) { return (BUS_RESCAN(dev)); } static void pci_devices_present_work(void *arg, int pending __unused) { struct hv_dr_work *dr_wrk = arg; struct hv_dr_state *dr = NULL; struct hv_pcibus *hbus; uint32_t child_no; bool found; struct hv_pcidev_desc *new_desc; struct hv_pci_dev *hpdev, *tmp_hpdev; struct completion *query_comp; bool need_rescan = false; hbus = dr_wrk->bus; free(dr_wrk, M_DEVBUF); /* Pull this off the queue and process it if it was the last one. */ mtx_lock(&hbus->device_list_lock); while (!TAILQ_EMPTY(&hbus->dr_list)) { dr = TAILQ_FIRST(&hbus->dr_list); TAILQ_REMOVE(&hbus->dr_list, dr, link); /* Throw this away if the list still has stuff in it. */ if (!TAILQ_EMPTY(&hbus->dr_list)) { free(dr, M_DEVBUF); continue; } } mtx_unlock(&hbus->device_list_lock); if (!dr) return; /* First, mark all existing children as reported missing. */ mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) hpdev->reported_missing = true; mtx_unlock(&hbus->device_list_lock); /* Next, add back any reported devices. */ for (child_no = 0; child_no < dr->device_count; child_no++) { found = false; new_desc = &dr->func[child_no]; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if ((hpdev->desc.wslot.val == new_desc->wslot.val) && (hpdev->desc.v_id == new_desc->v_id) && (hpdev->desc.d_id == new_desc->d_id) && (hpdev->desc.ser == new_desc->ser)) { hpdev->reported_missing = false; found = true; break; } } mtx_unlock(&hbus->device_list_lock); if (!found) { if (!need_rescan) need_rescan = true; hpdev = new_pcichild_device(hbus, new_desc); if (!hpdev) printf("vmbus_pcib: failed to add a child\n"); } } /* Remove missing device(s), if any */ TAILQ_FOREACH_SAFE(hpdev, &hbus->children, link, tmp_hpdev) { if (hpdev->reported_missing) hv_pci_delete_device(hpdev); } /* Rescan the bus to find any new device, if necessary. */ if (hbus->state == hv_pcibus_installed && need_rescan) pci_rescan(hbus->pci_bus); /* Wake up hv_pci_query_relations(), if it's waiting. 
*/ query_comp = hbus->query_comp; if (query_comp) { hbus->query_comp = NULL; complete(query_comp); } free(dr, M_DEVBUF); } static struct hv_pci_dev * get_pcichild_wslot(struct hv_pcibus *hbus, uint32_t wslot) { struct hv_pci_dev *hpdev, *ret = NULL; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { if (hpdev->desc.wslot.val == wslot) { ret = hpdev; break; } } mtx_unlock(&hbus->device_list_lock); return (ret); } static void hv_pci_devices_present(struct hv_pcibus *hbus, struct pci_bus_relations *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct hv_pcidev_desc) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_pci_devices_present2(struct hv_pcibus *hbus, struct pci_bus_relations2 *relations) { struct hv_dr_state *dr; struct hv_dr_work *dr_wrk; unsigned long dr_size; if (hbus->detaching && relations->device_count > 0) return; dr_size = offsetof(struct hv_dr_state, func) + (sizeof(struct pci_func_desc2) * relations->device_count); dr = malloc(dr_size, M_DEVBUF, M_WAITOK | M_ZERO); dr->device_count = relations->device_count; if (dr->device_count != 0) memcpy(dr->func, relations->func, sizeof(struct pci_func_desc2) * dr->device_count); mtx_lock(&hbus->device_list_lock); TAILQ_INSERT_TAIL(&hbus->dr_list, dr, link); mtx_unlock(&hbus->device_list_lock); dr_wrk = malloc(sizeof(*dr_wrk), M_DEVBUF, M_WAITOK | M_ZERO); dr_wrk->bus = hbus; TASK_INIT(&dr_wrk->task, 0, pci_devices_present_work, dr_wrk); taskqueue_enqueue(hbus->sc->taskq, &dr_wrk->task); } static void hv_eject_device_work(void *arg, int pending __unused) { struct hv_pci_dev *hpdev = arg; union win_slot_encoding wslot = hpdev->desc.wslot; struct hv_pcibus *hbus = hpdev->hbus; struct pci_eject_response *eject_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_eject_response)]; } ctxt; hv_pci_delete_device(hpdev); memset(&ctxt, 0, sizeof(ctxt)); eject_pkt = (struct pci_eject_response *)&ctxt.pkt.message; eject_pkt->message_type.type = PCI_EJECTION_COMPLETE; eject_pkt->wslot.val = wslot.val; vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, eject_pkt, sizeof(*eject_pkt), 0); } static void hv_pci_eject_device(struct hv_pci_dev *hpdev) { struct hv_pcibus *hbus = hpdev->hbus; struct taskqueue *taskq; if (hbus->detaching) return; /* * Push this task into the same taskqueue on which * vmbus_pcib_attach() runs, so we're sure this task can't run * concurrently with vmbus_pcib_attach(). 
*/ TASK_INIT(&hpdev->eject_task, 0, hv_eject_device_work, hpdev); taskq = vmbus_chan_mgmt_tq(hbus->sc->chan); taskqueue_enqueue(taskq, &hpdev->eject_task); } #define PCIB_PACKET_SIZE 0x100 static void vmbus_pcib_on_channel_callback(struct vmbus_channel *chan, void *arg) { struct vmbus_pcib_softc *sc = arg; struct hv_pcibus *hbus = sc->hbus; void *buffer; int bufferlen = PCIB_PACKET_SIZE; struct pci_packet *comp_packet; struct pci_response *response; struct pci_incoming_message *new_msg; struct pci_bus_relations *bus_rel; struct pci_bus_relations2 *bus_rel2; struct pci_dev_incoming *dev_msg; struct hv_pci_dev *hpdev; buffer = sc->rx_buf; do { struct vmbus_chanpkt_hdr *pkt = buffer; uint32_t bytes_rxed; int ret; bytes_rxed = bufferlen; ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed); if (ret == ENOBUFS) { /* Handle large packet */ if (bufferlen > PCIB_PACKET_SIZE) { free(buffer, M_DEVBUF); buffer = NULL; } /* alloc new buffer */ buffer = malloc(bytes_rxed, M_DEVBUF, M_WAITOK | M_ZERO); bufferlen = bytes_rxed; continue; } if (ret != 0) { /* ignore EIO or EAGAIN */ break; } if (bytes_rxed <= sizeof(struct pci_response)) continue; switch (pkt->cph_type) { case VMBUS_CHANPKT_TYPE_COMP: comp_packet = (struct pci_packet *)(uintptr_t)pkt->cph_xactid; response = (struct pci_response *)pkt; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_rxed); break; case VMBUS_CHANPKT_TYPE_INBAND: new_msg = (struct pci_incoming_message *)buffer; switch (new_msg->message_type.type) { case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; if (bus_rel->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations, func) + (sizeof(struct pci_func_desc) * (bus_rel->device_count))) break; hv_pci_devices_present(hbus, bus_rel); break; case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; if (bus_rel2->device_count == 0) break; if (bytes_rxed < offsetof(struct pci_bus_relations2, func) + (sizeof(struct pci_func_desc2) * (bus_rel2->device_count))) break; hv_pci_devices_present2(hbus, bus_rel2); break; case PCI_EJECT: dev_msg = (struct pci_dev_incoming *)buffer; hpdev = get_pcichild_wslot(hbus, dev_msg->wslot.val); if (hpdev) hv_pci_eject_device(hpdev); break; default: printf("vmbus_pcib: Unknown msg type 0x%x\n", new_msg->message_type.type); break; } break; default: printf("vmbus_pcib: Unknown VMBus msg type %hd\n", pkt->cph_type); break; } } while (1); if (bufferlen > PCIB_PACKET_SIZE) free(buffer, M_DEVBUF); } static int hv_pci_protocol_negotiation(struct hv_pcibus *hbus, enum pci_protocol_version_t version[], int num_version) { struct pci_version_request *version_req; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_version_request)]; } ctxt; int ret; int i; init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&ctxt.pkt.message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; for (i = 0; i < num_version; i++) { version_req->protocol_version = version[i]; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, version_req, sizeof(*version_req), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) { device_printf(hbus->pcib, "vmbus_pcib failed to request version: %d\n", ret); goto out; } if (comp_pkt.completion_status >= 0) { hbus->protocol_version = version[i]; device_printf(hbus->pcib, "PCI VMBus using version
0x%x\n", hbus->protocol_version); ret = 0; goto out; } if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { device_printf(hbus->pcib, "vmbus_pcib version negotiation failed: %x\n", comp_pkt.completion_status); ret = EPROTO; goto out; } reinit_completion(&comp_pkt.host_event); } device_printf(hbus->pcib, "PCI pass-trhpugh VSP failed to find supported version\n"); out: free_completion(&comp_pkt.host_event); return (ret); } /* Ask the host to send along the list of child devices */ static int hv_pci_query_relations(struct hv_pcibus *hbus) { struct pci_message message; int ret; message.type = PCI_QUERY_BUS_RELATIONS; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &message, sizeof(message), 0); return (ret); } static int hv_pci_enter_d0(struct hv_pcibus *hbus) { struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct { struct pci_packet pkt; uint8_t buffer[sizeof(struct pci_bus_d0_entry)]; } ctxt; int ret; /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region * of memory-mapped I/O space has been chosen for configuration space * access. */ init_completion(&comp_pkt.host_event); ctxt.pkt.completion_func = hv_pci_generic_compl; ctxt.pkt.compl_ctxt = &comp_pkt; d0_entry = (struct pci_bus_d0_entry *)&ctxt.pkt.message; memset(d0_entry, 0, sizeof(*d0_entry)); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = rman_get_start(hbus->cfg_res); ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, d0_entry, sizeof(*d0_entry), (uint64_t)(uintptr_t)&ctxt.pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); if (ret) goto out; if (comp_pkt.completion_status < 0) { device_printf(hbus->pcib, "vmbus_pcib failed to enable D0\n"); ret = EPROTO; } else { ret = 0; } out: free_completion(&comp_pkt.host_event); return (ret); } /* * It looks this is only needed by Windows VM, but let's send the message too * just to make the host happy. */ static int hv_send_resources_allocated(struct hv_pcibus *hbus) { struct pci_resources_assigned *res_assigned; struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; uint32_t wslot; int ret = 0; size_t size_res; size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) ? 
sizeof(*res_assigned) : sizeof(*res_assigned2); pkt = malloc(sizeof(*pkt) + size_res, M_DEVBUF, M_WAITOK | M_ZERO); for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; init_completion(&comp_pkt.host_event); memset(pkt, 0, sizeof(*pkt) + size_res); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4) { res_assigned = (struct pci_resources_assigned *)&pkt->message; res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.val = hpdev->desc.wslot.val; } else { res_assigned2 = (struct pci_resources_assigned2 *)&pkt->message; res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.val = hpdev->desc.wslot.val; } ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &pkt->message, size_res, (uint64_t)(uintptr_t)pkt); if (!ret) ret = wait_for_response(hbus, &comp_pkt.host_event); free_completion(&comp_pkt.host_event); if (ret) break; if (comp_pkt.completion_status < 0) { ret = EPROTO; device_printf(hbus->pcib, "failed to send PCI_RESOURCES_ASSIGNED\n"); break; } } free(pkt, M_DEVBUF); return (ret); } static int hv_send_resources_released(struct hv_pcibus *hbus) { struct pci_child_message pkt; struct hv_pci_dev *hpdev; uint32_t wslot; int ret; for (wslot = 0; wslot < 256; wslot++) { hpdev = get_pcichild_wslot(hbus, wslot); if (!hpdev) continue; pkt.message_type.type = PCI_RESOURCES_RELEASED; pkt.wslot.val = hpdev->desc.wslot.val; ret = vmbus_chan_send(hbus->sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &pkt, sizeof(pkt), 0); if (ret) return (ret); } return (0); } #define hv_cfg_read(x, s) \ static inline uint##x##_t hv_cfg_read_##s(struct hv_pcibus *bus, \ bus_size_t offset) \ { \ return (bus_read_##s(bus->cfg_res, offset)); \ } #define hv_cfg_write(x, s) \ static inline void hv_cfg_write_##s(struct hv_pcibus *bus, \ bus_size_t offset, uint##x##_t val) \ { \ return (bus_write_##s(bus->cfg_res, offset, val)); \ } hv_cfg_read(8, 1) hv_cfg_read(16, 2) hv_cfg_read(32, 4) hv_cfg_write(8, 1) hv_cfg_write(16, 2) hv_cfg_write(32, 4) static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t *val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* * If the attempt is to read the IDs or the ROM BAR, simulate that. */ if (where + size <= PCIR_COMMAND) { memcpy(val, ((uint8_t *)&hpdev->desc.v_id) + where, size); } else if (where >= PCIR_REVID && where + size <= PCIR_CACHELNSZ) { memcpy(val, ((uint8_t *)&hpdev->desc.rev) + where - PCIR_REVID, size); } else if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_BIOS) { memcpy(val, (uint8_t *)&hpdev->desc.subsystem_id + where - PCIR_SUBVEND_0, size); } else if (where >= PCIR_BIOS && where + size <= PCIR_CAP_PTR) { /* ROM BARs are unimplemented */ *val = 0; } else if ((where >= PCIR_INTLINE && where + size <= PCIR_INTPIN) ||(where == PCIR_INTPIN && size == 1)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled * interrupts. */ *val = 0; } else if (where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be read. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start reading.*/ mb(); /* Read from that function's config space. 
*/ switch (size) { case 1: *((uint8_t *)val) = hv_cfg_read_1(hbus, addr); break; case 2: *((uint16_t *)val) = hv_cfg_read_2(hbus, addr); break; default: *((uint32_t *)val) = hv_cfg_read_4(hbus, addr); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config read: it's unlikely to reach here. */ memset(val, 0, size); } } static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, int size, uint32_t val) { struct hv_pcibus *hbus = hpdev->hbus; bus_size_t addr = CFG_PAGE_OFFSET + where; /* SSIDs and ROM BARs are read-only */ if (where >= PCIR_SUBVEND_0 && where + size <= PCIR_CAP_PTR) return; if (where >= PCIR_COMMAND && where + size <= CFG_PAGE_SIZE) { mtx_lock(&hbus->config_lock); /* Choose the function to be written. */ hv_cfg_write_4(hbus, 0, hpdev->desc.wslot.val); /* Make sure the function was chosen before we start writing.*/ wmb(); /* Write to that function's config space. */ switch (size) { case 1: hv_cfg_write_1(hbus, addr, (uint8_t)val); break; case 2: hv_cfg_write_2(hbus, addr, (uint16_t)val); break; default: hv_cfg_write_4(hbus, addr, (uint32_t)val); break; } /* * Make sure the write was done before we release the lock, * allowing consecutive reads/writes. */ mb(); mtx_unlock(&hbus->config_lock); } else { /* Invalid config write: it's unlikely to reach here. */ return; } } /* * The vPCI in some Hyper-V releases do not initialize the last 4 * bit of BAR registers. This could result weird problems causing PCI * code fail to configure BAR correctly. * * Just write all 1's to those BARs whose probed values are not zero. * This seems to make the Hyper-V vPCI and pci_write_bar() to cooperate * correctly. */ static void vmbus_pcib_prepopulate_bars(struct hv_pcibus *hbus) { struct hv_pci_dev *hpdev; int i; mtx_lock(&hbus->device_list_lock); TAILQ_FOREACH(hpdev, &hbus->children, link) { for (i = 0; i < 6; i++) { /* Ignore empty bar */ if (hpdev->probed_bar[i] == 0) continue; uint32_t bar_val = 0; _hv_pcifront_read_config(hpdev, PCIR_BAR(i), 4, &bar_val); if (hpdev->probed_bar[i] != bar_val) { if (bootverbose) printf("vmbus_pcib: initialize bar %d " "by writing all 1s\n", i); _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, 0xffffffff); /* Now write the original value back */ _hv_pcifront_write_config(hpdev, PCIR_BAR(i), 4, bar_val); } } } mtx_unlock(&hbus->device_list_lock); } static void vmbus_pcib_set_detaching(void *arg, int pending __unused) { struct hv_pcibus *hbus = arg; atomic_set_int(&hbus->detaching, 1); } static void vmbus_pcib_pre_detach(struct hv_pcibus *hbus) { struct task task; TASK_INIT(&task, 0, vmbus_pcib_set_detaching, hbus); /* * Make sure the channel callback won't push any possible new * PCI_BUS_RELATIONS and PCI_EJECT tasks to sc->taskq. */ vmbus_chan_run_task(hbus->sc->chan, &task); taskqueue_drain_all(hbus->sc->taskq); } /* * Standard probe entry point. * */ static int vmbus_pcib_probe(device_t dev) { if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &g_pass_through_dev_type) == 0) { device_set_desc(dev, "Hyper-V PCI Express Pass Through"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Standard attach entry point. 
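* Editorial note: attach proceeds in the order visible below: allocate the
* configuration MMIO window, open the VMBus channel, negotiate a protocol
* version, query the child relations and wait for the resulting
* PCI_BUS_RELATIONS message, enter D0, report resources as assigned,
* pre-populate the BARs, and only then add and attach the "pci" child bus.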
* */ static int vmbus_pcib_attach(device_t dev) { const int pci_ring_size = (4 * PAGE_SIZE); const struct hyperv_guid *inst_guid; struct vmbus_channel *channel; struct vmbus_pcib_softc *sc; struct hv_pcibus *hbus; int rid = 0; int ret; hbus = malloc(sizeof(*hbus), M_DEVBUF, M_WAITOK | M_ZERO); hbus->pcib = dev; channel = vmbus_get_channel(dev); inst_guid = vmbus_chan_guid_inst(channel); hbus->pci_domain = inst_guid->hv_guid[9] | (inst_guid->hv_guid[8] << 8); mtx_init(&hbus->config_lock, "hbcfg", NULL, MTX_DEF); mtx_init(&hbus->device_list_lock, "hbdl", NULL, MTX_DEF); TAILQ_INIT(&hbus->children); TAILQ_INIT(&hbus->dr_list); hbus->cfg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, RM_MAX_END, PCI_CONFIG_MMIO_LENGTH, RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE)); if (!hbus->cfg_res) { device_printf(dev, "failed to get resource for cfg window\n"); ret = ENXIO; goto free_bus; } sc = device_get_softc(dev); sc->chan = channel; sc->rx_buf = malloc(PCIB_PACKET_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); sc->hbus = hbus; /* * The taskq is used to handle PCI_BUS_RELATIONS and PCI_EJECT * messages. NB: we can't handle the messages in the channel callback * directly, because the message handlers need to send new messages * to the host and waits for the host's completion messages, which * must also be handled by the channel callback. */ sc->taskq = taskqueue_create("vmbus_pcib_tq", M_WAITOK, taskqueue_thread_enqueue, &sc->taskq); taskqueue_start_threads(&sc->taskq, 1, PI_NET, "vmbus_pcib_tq"); hbus->sc = sc; init_completion(&hbus->query_completion); hbus->query_comp = &hbus->query_completion; ret = vmbus_chan_open(sc->chan, pci_ring_size, pci_ring_size, NULL, 0, vmbus_pcib_on_channel_callback, sc); if (ret) goto free_res; ret = hv_pci_protocol_negotiation(hbus, pci_protocol_versions, ARRAY_SIZE(pci_protocol_versions)); if (ret) goto vmbus_close; ret = hv_pci_query_relations(hbus); if (!ret) ret = wait_for_response(hbus, hbus->query_comp); if (ret) goto vmbus_close; ret = hv_pci_enter_d0(hbus); if (ret) goto vmbus_close; ret = hv_send_resources_allocated(hbus); if (ret) goto vmbus_close; vmbus_pcib_prepopulate_bars(hbus); hbus->pci_bus = device_add_child(dev, "pci", -1); if (!hbus->pci_bus) { device_printf(dev, "failed to create pci bus\n"); ret = ENXIO; goto vmbus_close; } bus_generic_attach(dev); hbus->state = hv_pcibus_installed; return (0); vmbus_close: vmbus_pcib_pre_detach(hbus); vmbus_chan_close(sc->chan); free_res: taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); free_bus: mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (ret); } /* * Standard detach entry point */ static int vmbus_pcib_detach(device_t dev) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pcibus *hbus = sc->hbus; struct pci_message teardown_packet; struct pci_bus_relations relations; int ret; vmbus_pcib_pre_detach(hbus); if (hbus->state == hv_pcibus_installed) bus_generic_detach(dev); /* Delete any children which might still exist. 
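* (An all-zero pci_bus_relations has device_count == 0, so every child
* still on the list is marked missing and deleted by
* pci_devices_present_work().)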
*/ memset(&relations, 0, sizeof(relations)); hv_pci_devices_present(hbus, &relations); ret = hv_send_resources_released(hbus); if (ret) device_printf(dev, "failed to send PCI_RESOURCES_RELEASED\n"); teardown_packet.type = PCI_BUS_D0EXIT; ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, 0, &teardown_packet, sizeof(struct pci_message), 0); if (ret) device_printf(dev, "failed to send PCI_BUS_D0EXIT\n"); taskqueue_drain_all(hbus->sc->taskq); vmbus_chan_close(sc->chan); taskqueue_free(sc->taskq); free_completion(&hbus->query_completion); free(sc->rx_buf, M_DEVBUF); bus_release_resource(dev, SYS_RES_MEMORY, 0, hbus->cfg_res); mtx_destroy(&hbus->device_list_lock); mtx_destroy(&hbus->config_lock); free(hbus, M_DEVBUF); return (0); } static int vmbus_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *val) { struct vmbus_pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *val = sc->hbus->pci_domain; return (0); case PCIB_IVAR_BUS: /* There is only bus 0. */ *val = 0; return (0); } return (ENOENT); } static int vmbus_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t val) { return (ENOENT); } static struct resource * vmbus_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { unsigned int bar_no; struct hv_pci_dev *hpdev; struct vmbus_pcib_softc *sc = device_get_softc(dev); struct resource *res; unsigned int devfn; if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->hbus->pci_domain, child, rid, start, end, count, flags)); /* Devices with port I/O BAR are not supported. */ if (type == SYS_RES_IOPORT) return (NULL); if (type == SYS_RES_MEMORY) { devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (NULL); bar_no = PCI_RID2BAR(*rid); if (bar_no >= MAX_NUM_BARS) return (NULL); /* Make sure a 32-bit BAR gets a 32-bit address */ if (!(hpdev->probed_bar[bar_no] & PCIM_BAR_MEM_64)) end = ulmin(end, 0xFFFFFFFF); } res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); /* * If this is a request for a specific range, assume it is * correct and pass it up to the parent. 
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); if (res == NULL) device_printf(dev, "vmbus_pcib_alloc_resource failed\n"); return (res); } static int vmbus_pcib_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->hbus->pci_domain, child, r, start, end)); return (bus_generic_adjust_resource(dev, child, r, start, end)); } static int -vmbus_pcib_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmbus_pcib_release_resource(device_t dev, device_t child, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); - if (type == PCI_RES_BUS) - return (pci_domain_release_bus(sc->hbus->pci_domain, child, - rid, r)); - - if (type == SYS_RES_IOPORT) + switch (rman_get_type(r)) { + case PCI_RES_BUS: + return (pci_domain_release_bus(sc->hbus->pci_domain, child, r)); + case SYS_RES_IOPORT: return (EINVAL); - - return (bus_generic_release_resource(dev, child, type, rid, r)); + default: + return (bus_generic_release_resource(dev, child, r)); + } } static int vmbus_pcib_activate_resource(device_t dev, device_t child, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_activate_bus(sc->hbus->pci_domain, child, r)); return (bus_generic_activate_resource(dev, child, r)); } static int vmbus_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct vmbus_pcib_softc *sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->hbus->pci_domain, child, r)); return (bus_generic_deactivate_resource(dev, child, r)); } static int vmbus_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { return (bus_get_cpus(pcib, op, setsize, cpuset)); } static uint32_t vmbus_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); uint32_t data = 0; KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (~0); _hv_pcifront_read_config(hpdev, reg, bytes, &data); return (data); } static void vmbus_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct vmbus_pcib_softc *sc = device_get_softc(dev); struct hv_pci_dev *hpdev; unsigned int devfn = PCI_DEVFN(slot, func); KASSERT(bus == 0, ("bus should be 0, but is %u", bus)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return; _hv_pcifront_write_config(hpdev, reg, bytes, data); } static int vmbus_pcib_route_intr(device_t pcib, device_t dev, int pin) { /* We only support MSI/MSI-X and don't support INTx interrupt. 
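* Returning PCI_INVALID_IRQ makes legacy INTx routing fail, so child
* devices must use MSI or MSI-X instead.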
*/ return (PCI_INVALID_IRQ); } static int vmbus_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_ALLOC_MSI(device_get_parent(pcib), dev, count, maxcount, irqs)); #endif #if defined(__aarch64__) return (intr_alloc_msi(pcib, dev, ACPI_MSI_XREF, count, maxcount, irqs)); #endif } static int vmbus_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { #if defined(__amd64__) || defined(__i386__) return (PCIB_RELEASE_MSI(device_get_parent(pcib), dev, count, irqs)); #endif #if defined(__aarch64__) return(intr_release_msi(pcib, dev, ACPI_MSI_XREF, count, irqs)); #endif } static int vmbus_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { #if defined(__aarch64__) int ret; #if defined(INTRNG) ret = intr_alloc_msix(pcib, dev, ACPI_MSI_XREF, irq); return ret; #else return (ENXIO); #endif #else return (PCIB_ALLOC_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } static int vmbus_pcib_release_msix(device_t pcib, device_t dev, int irq) { #if defined(__aarch64__) return (intr_release_msix(pcib, dev, ACPI_MSI_XREF, irq)); #else return (PCIB_RELEASE_MSIX(device_get_parent(pcib), dev, irq)); #endif /* __aarch64__ */ } #if defined(__aarch64__) #define MSI_INTEL_ADDR_DEST 0x00000000 #define MSI_INTEL_DATA_DELFIXED 0x0 #endif #if defined(__amd64__) || defined(__i386__) #define MSI_INTEL_ADDR_DEST 0x000ff000 #define MSI_INTEL_DATA_INTVEC IOART_INTVEC /* Interrupt vector. */ #define MSI_INTEL_DATA_DELFIXED IOART_DELFIXED #endif static int vmbus_pcib_map_msi(device_t pcib, device_t child, int irq, uint64_t *addr, uint32_t *data) { unsigned int devfn; struct hv_pci_dev *hpdev; uint64_t v_addr; uint32_t v_data; struct hv_irq_desc *hid, *tmp_hid; unsigned int cpu, vcpu_id; unsigned int vector; struct vmbus_pcib_softc *sc = device_get_softc(pcib); struct compose_comp_ctxt comp; struct { struct pci_packet pkt; union { struct pci_create_interrupt v1; struct pci_create_interrupt3 v3; }int_pkts; } ctxt; int ret; uint32_t size; devfn = PCI_DEVFN(pci_get_slot(child), pci_get_function(child)); hpdev = get_pcichild_wslot(sc->hbus, devfn_to_wslot(devfn)); if (!hpdev) return (ENOENT); #if defined(__aarch64__) ret = intr_map_msi(pcib, child, ACPI_MSI_XREF, irq, &v_addr, &v_data); #else ret = PCIB_MAP_MSI(device_get_parent(pcib), child, irq, &v_addr, &v_data); #endif if (ret) return (ret); TAILQ_FOREACH_SAFE(hid, &hpdev->irq_desc_list, link, tmp_hid) { if (hid->irq == irq) { TAILQ_REMOVE(&hpdev->irq_desc_list, hid, link); hv_int_desc_free(hpdev, hid); break; } } #if defined(__aarch64__) cpu = 0; vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data; #else cpu = apic_cpuid((v_addr & MSI_INTEL_ADDR_DEST) >> 12); vcpu_id = VMBUS_GET_VCPU_ID(device_get_parent(pcib), pcib, cpu); vector = v_data & MSI_INTEL_DATA_INTVEC; #endif if (hpdev->hbus->protocol_version < PCI_PROTOCOL_VERSION_1_4 && vcpu_id > 63) { /* We only support vcpu_id < 64 before vPCI version 1.4 */ device_printf(pcib, "Error: " "vcpu_id %u overflowed on PCI VMBus version 0x%x\n", vcpu_id, hpdev->hbus->protocol_version); return (ENODEV); } init_completion(&comp.comp_pkt.host_event); memset(&ctxt, 0, sizeof(ctxt)); ctxt.pkt.completion_func = hv_pci_compose_compl; ctxt.pkt.compl_ctxt = ∁ switch (hpdev->hbus->protocol_version) { case PCI_PROTOCOL_VERSION_1_1: ctxt.int_pkts.v1.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; ctxt.int_pkts.v1.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v1.int_desc.vector = vector; 
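/*
 * Editorial note: protocol 1.1 can only target vCPUs through the 64-bit
 * cpu_mask set below, which is why vcpu_id values above 63 were rejected
 * earlier; protocol 1.4 passes explicit vCPU IDs in processor_array[]
 * instead.
 */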
ctxt.int_pkts.v1.int_desc.vector_count = 1; ctxt.int_pkts.v1.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v1.int_desc.cpu_mask = 1ULL << vcpu_id; size = sizeof(ctxt.int_pkts.v1); break; case PCI_PROTOCOL_VERSION_1_4: ctxt.int_pkts.v3.message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3; ctxt.int_pkts.v3.wslot.val = hpdev->desc.wslot.val; ctxt.int_pkts.v3.int_desc.vector = vector; ctxt.int_pkts.v3.int_desc.vector_count = 1; ctxt.int_pkts.v3.int_desc.reserved = 0; ctxt.int_pkts.v3.int_desc.delivery_mode = MSI_INTEL_DATA_DELFIXED; ctxt.int_pkts.v3.int_desc.processor_count = 1; ctxt.int_pkts.v3.int_desc.processor_array[0] = vcpu_id; size = sizeof(ctxt.int_pkts.v3); break; } ret = vmbus_chan_send(sc->chan, VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC, &ctxt.int_pkts, size, (uint64_t)(uintptr_t)&ctxt.pkt); if (ret) { free_completion(&comp.comp_pkt.host_event); return (ret); } wait_for_completion(&comp.comp_pkt.host_event); free_completion(&comp.comp_pkt.host_event); if (comp.comp_pkt.completion_status < 0) { device_printf(pcib, "vmbus_pcib_map_msi completion_status %d\n", comp.comp_pkt.completion_status); return (EPROTO); } *addr = comp.int_desc.address; *data = comp.int_desc.data; hid = malloc(sizeof(struct hv_irq_desc), M_DEVBUF, M_WAITOK | M_ZERO); hid->irq = irq; hid->desc = comp.int_desc; TAILQ_INSERT_TAIL(&hpdev->irq_desc_list, hid, link); return (0); } static device_method_t vmbus_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmbus_pcib_probe), DEVMETHOD(device_attach, vmbus_pcib_attach), DEVMETHOD(device_detach, vmbus_pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, vmbus_pcib_read_ivar), DEVMETHOD(bus_write_ivar, vmbus_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmbus_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, vmbus_pcib_adjust_resource), DEVMETHOD(bus_release_resource, vmbus_pcib_release_resource), DEVMETHOD(bus_activate_resource, vmbus_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, vmbus_pcib_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, vmbus_pcib_get_cpus), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmbus_pcib_read_config), DEVMETHOD(pcib_write_config, vmbus_pcib_write_config), DEVMETHOD(pcib_route_interrupt, vmbus_pcib_route_intr), DEVMETHOD(pcib_alloc_msi, vmbus_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, vmbus_pcib_release_msi), DEVMETHOD(pcib_alloc_msix, vmbus_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, vmbus_pcib_release_msix), DEVMETHOD(pcib_map_msi, vmbus_pcib_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods, sizeof(struct vmbus_pcib_softc)); DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, 0, 0); MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1); MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1); #endif /* NEW_PCIB */ diff --git a/sys/dev/mvs/mvs_pci.c b/sys/dev/mvs/mvs_pci.c index 9578c5ea5e7e..be9351403a0d 100644 --- a/sys/dev/mvs/mvs_pci.c +++ b/sys/dev/mvs/mvs_pci.c @@ -1,524 +1,523 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2010 Alexander Motin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {0x504011ab, 0x00, "Marvell 88SX5040", 4, MVS_Q_GENI}, {0x504111ab, 0x00, "Marvell 88SX5041", 4, MVS_Q_GENI}, {0x508011ab, 0x00, "Marvell 88SX5080", 8, MVS_Q_GENI}, {0x508111ab, 0x00, "Marvell 88SX5081", 8, MVS_Q_GENI}, {0x604011ab, 0x00, "Marvell 88SX6040", 4, MVS_Q_GENII}, {0x604111ab, 0x00, "Marvell 88SX6041", 4, MVS_Q_GENII}, {0x604211ab, 0x00, "Marvell 88SX6042", 4, MVS_Q_GENIIE}, {0x608011ab, 0x00, "Marvell 88SX6080", 8, MVS_Q_GENII}, {0x608111ab, 0x00, "Marvell 88SX6081", 8, MVS_Q_GENII}, {0x704211ab, 0x00, "Marvell 88SX7042", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x02419005, 0x00, "Adaptec 1420SA", 4, MVS_Q_GENII}, {0x02439005, 0x00, "Adaptec 1430SA", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x00000000, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) 
ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = PCIR_BAR(0); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); mvs_ctlr_setup(dev); /* Setup interrupts. */ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int i, ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); /* Clear PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIC, 0x00000000); if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; /* Configure chip-global CCC */ if (ctlr->channels > 4 && (ctlr->quirks & MVS_Q_GENI) == 0) { ATA_OUTL(ctlr->r_mem, CHIP_ICT, cccc); ATA_OUTL(ctlr->r_mem, CHIP_ITT, ccc); ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); if (ccc) ccim |= IC_ALL_PORTS_COAL_DONE; ccc = 0; cccc = 0; } for (i = 0; i < ctlr->channels / 4; i++) { /* Configure per-HC CCC */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ITT, ccc); if (ccc) ccim |= (IC_HC0_COAL_DONE << (i * IC_HC_SHIFT)); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_IC, 0x00000000); } /* Enable chip interrupts */ ctlr->gmim = (ccim ? 
ccim : (IC_DONE_HC0 | IC_DONE_HC1)) | IC_ERR_HC0 | IC_ERR_HC1; ctlr->mim = ctlr->gmim | ctlr->pmim; ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); /* Enable PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x007fffff); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2 + unit / 4) ; if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. */ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ctlr->mim = ctlr->gmim | ctlr->pmim; if (!ctlr->msia) ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int msi = 0; /* Process hints. */ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; ctlr->msi = msi; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_MIC); if (ctlr->msi) { /* We have to mask MSI during processing. */ mtx_lock(&ctlr->mtx); ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0); ctlr->msia = 1; /* Deny MIM update during processing. */ mtx_unlock(&ctlr->mtx); } else if (ic == 0) return; /* Acknowledge all-ports CCC interrupt. */ if (ic & IC_ALL_PORTS_COAL_DONE) ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); for (p = 0; p < ctlr->channels; p++) { if ((p & 3) == 0) { if (p != 0) ic >>= 1; if ((ic & IC_HC0) == 0) { p += 3; ic >>= 8; continue; } /* Acknowledge interrupts of this HC. */ aic = 0; if (ic & (IC_DONE_IRQ << 0)) aic |= HC_IC_DONE(0) | HC_IC_DEV(0); if (ic & (IC_DONE_IRQ << 2)) aic |= HC_IC_DONE(1) | HC_IC_DEV(1); if (ic & (IC_DONE_IRQ << 4)) aic |= HC_IC_DONE(2) | HC_IC_DEV(2); if (ic & (IC_DONE_IRQ << 6)) aic |= HC_IC_DONE(3) | HC_IC_DEV(3); if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_BASE(p == 4) + HC_IC, ~aic); } /* Call per-port interrupt handler. */ arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } if (ctlr->msi) { /* Unmasking MSI triggers next interrupt, if needed. 
*/ mtx_lock(&ctlr->mtx); ctlr->msia = 0; /* Allow MIM update. */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = HC_BASE(unit >> 2) + PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int -mvs_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +mvs_release_resource(device_t dev, device_t child, struct resource *r) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: - if (rid != ATA_IRQ_RID) + if (rman_get_rid(r) != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int mvs_child_location(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), DEVMETHOD(bus_child_location, mvs_child_location), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, pci, mvs_driver, 0, 0); MODULE_PNP_INFO("W32:vendor/device", pci, mvs, mvs_ids, nitems(mvs_ids) - 1); 
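/*
 * Editorial note: "W32:vendor/device" exports each 32-bit mvs_ids[].id for
 * PNP matching, with the PCI device ID in the upper 16 bits and the vendor
 * ID in the lower 16 bits (e.g. 0x02439005 is device 0x0243, vendor 0x9005,
 * the Adaptec 1430SA); nitems(mvs_ids) - 1 excludes the all-zero terminator.
 */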
MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); diff --git a/sys/dev/mvs/mvs_soc.c b/sys/dev/mvs/mvs_soc.c index 36b3f5222cdc..696b65d54359 100644 --- a/sys/dev/mvs/mvs_soc.c +++ b/sys/dev/mvs/mvs_soc.c @@ -1,465 +1,464 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {MV_DEV_88F5182, 0x00, "Marvell 88F5182", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6281, 0x00, "Marvell 88F6281", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6282, 0x00, "Marvell 88F6282", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100, 0x00, "Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100_Z0, 0x00,"Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78260, 0x00, "Marvell MV78260", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78460, 0x00, "Marvell MV78460", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {0, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid, revid; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "mrvl,sata")) return (ENXIO); soc_id(&devid, &revid); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid, revid; soc_id(&devid, &revid); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", 
&ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = 0; if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; if (ATA_INL(ctlr->r_mem, PORT_BASE(0) + SATA_PHYCFG_OFS) != 0) ctlr->quirks |= MVS_Q_SOC65; /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } mvs_ctlr_setup(dev); /* Setup interrupts. */ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int ccc = ctlr->ccc, cccc = ctlr->cccc; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_IC, 0x00000000); /* Clear chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIC, 0); /* Configure per-HC CCC */ if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; ATA_OUTL(ctlr->r_mem, HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_ITT, ccc); /* Enable chip interrupts */ ctlr->gmim = ((ccc ? 
IC_HC0_COAL_DONE : (IC_DONE_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))) | (IC_ERR_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))); ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2); if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. */ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p, chan_num; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_SOC_MIC); if ((ic & IC_HC0) == 0) return; /* Acknowledge interrupts of this HC. */ aic = 0; /* Processing interrupts from each initialized channel */ for (chan_num = 0; chan_num < ctlr->channels; chan_num++) { if (ic & (IC_DONE_IRQ << (chan_num * 2))) aic |= HC_IC_DONE(chan_num) | HC_IC_DEV(chan_num); } if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_IC, ~aic); /* Call per-port interrupt handler. 
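 *
 * (A minimal sketch of the receiving end, with hypothetical names,
 * not part of this driver: each channel's two status bits land in
 * arg.cause, already shifted down by the loop's "ic >>= 2", so a
 * channel handler only has to test them:
 *
 *	static void
 *	example_chan_intr(void *data)
 *	{
 *		struct mvs_intr_arg *arg = data;
 *
 *		if (arg->cause & IC_ERR_IRQ)
 *			;		(recover from the channel error)
 *		if (arg->cause & IC_DONE_IRQ)
 *			;		(harvest completed commands)
 *	}
 *
 * mvs_setup_intr() below is where the per-channel function and
 * argument consumed by this loop are recorded.)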
*/ for (p = 0; p < ctlr->channels; p++) { arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int -mvs_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +mvs_release_resource(device_t dev, device_t child, struct resource *r) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int mvs_child_location(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), DEVMETHOD(bus_child_location, mvs_child_location), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, simplebus, mvs_driver, 0, 0); MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, 
cam, 1, 1, 1); diff --git a/sys/dev/ofw/ofw_pcib.c b/sys/dev/ofw/ofw_pcib.c index f9f17b21cdc2..ebc09fccd93e 100644 --- a/sys/dev/ofw/ofw_pcib.c +++ b/sys/dev/ofw/ofw_pcib.c @@ -1,750 +1,745 @@ /*- * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* * If it is necessary to set another value of this for * some platforms it should be set at fdt.h file */ #ifndef PCI_MAP_INTR #define PCI_MAP_INTR 4 #endif #define PCI_INTR_PINS 4 /* * bus interface. */ static struct rman *ofw_pcib_get_rman(device_t, int, u_int); static struct resource * ofw_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int ofw_pcib_release_resource(device_t, device_t, int, int, - struct resource *); +static int ofw_pcib_release_resource(device_t, device_t, struct resource *); static int ofw_pcib_activate_resource(device_t, device_t, struct resource *); static int ofw_pcib_deactivate_resource(device_t, device_t, struct resource *); static int ofw_pcib_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int ofw_pcib_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int ofw_pcib_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart); #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t, device_t); #endif /* * pcib interface */ static int ofw_pcib_maxslots(device_t); /* * ofw_bus interface */ static phandle_t ofw_pcib_get_node(device_t, device_t); /* * local methods */ static int ofw_pcib_fill_ranges(phandle_t, struct ofw_pci_range *); /* * Driver methods. 
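*/

/*
 * A minimal sketch of the pattern this change applies throughout
 * (hypothetical method name, not part of this driver):
 * BUS_RELEASE_RESOURCE implementations no longer receive "type" and
 * "rid" arguments and instead recover them from the resource itself
 * via rman_get_type() and, where the rid is needed, rman_get_rid().
 */
static int
example_release_resource(device_t bus, device_t child, struct resource *r)
{

	switch (rman_get_type(r)) {
	case SYS_RES_MEMORY:
	case SYS_RES_IOPORT:
		/* Resources carved out of this bridge's local rmans. */
		return (bus_generic_rman_release_resource(bus, child, r));
	default:
		/* Anything else is passed up to the parent bus. */
		return (bus_generic_release_resource(bus, child, r));
	}
}

/*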
*/ static device_method_t ofw_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_attach, ofw_pcib_attach), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, ofw_pcib_read_ivar), DEVMETHOD(bus_write_ivar, ofw_pcib_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, ofw_pcib_get_rman), DEVMETHOD(bus_alloc_resource, ofw_pcib_alloc_resource), DEVMETHOD(bus_release_resource, ofw_pcib_release_resource), DEVMETHOD(bus_activate_resource, ofw_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, ofw_pcib_deactivate_resource), DEVMETHOD(bus_adjust_resource, ofw_pcib_adjust_resource), DEVMETHOD(bus_map_resource, ofw_pcib_map_resource), DEVMETHOD(bus_unmap_resource, ofw_pcib_unmap_resource), DEVMETHOD(bus_translate_resource, ofw_pcib_translate_resource), #ifdef __powerpc__ DEVMETHOD(bus_get_bus_tag, ofw_pcib_bus_get_bus_tag), #endif /* pcib interface */ DEVMETHOD(pcib_maxslots, ofw_pcib_maxslots), DEVMETHOD(pcib_route_interrupt, ofw_pcib_route_interrupt), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ofw_pcib_get_node), DEVMETHOD_END }; DEFINE_CLASS_0(ofw_pcib, ofw_pcib_driver, ofw_pcib_methods, 0); int ofw_pcib_init(device_t dev) { struct ofw_pci_softc *sc; phandle_t node; u_int32_t busrange[2]; struct ofw_pci_range *rp; int i, error; struct ofw_pci_cell_info *cell_info; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->sc_initialized = 1; sc->sc_range = NULL; sc->sc_pci_domain = device_get_unit(dev); cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_cell_info = cell_info; if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8) busrange[0] = 0; sc->sc_dev = dev; sc->sc_node = node; sc->sc_bus = busrange[0]; if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { phandle_t c; int n, i; sc->sc_nrange = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_nranges(c, cell_info); if (n > 0) sc->sc_nrange += n; } if (sc->sc_nrange == 0) { error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); i = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pcib_fill_ranges(c, &sc->sc_range[i]); if (n > 0) i += n; } KASSERT(i == sc->sc_nrange, ("range count mismatch")); } else { sc->sc_nrange = ofw_pcib_nranges(node, cell_info); if (sc->sc_nrange <= 0) { device_printf(dev, "could not getranges\n"); error = ENXIO; goto out; } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); ofw_pcib_fill_ranges(node, sc->sc_range); } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "PCI I/O Ports"; error = rman_init(&sc->sc_io_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "PCI Non Prefetchable Memory"; error = rman_init(&sc->sc_mem_rman); if (error != 0) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto out_mem_rman; } sc->sc_pmem_rman.rm_type = RMAN_ARRAY; sc->sc_pmem_rman.rm_descr = "PCI Prefetchable Memory"; error = rman_init(&sc->sc_pmem_rman); if (error != 0) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); goto out_pmem_rman; } for (i = 0; i < sc->sc_nrange; i++) { error = 0; rp = sc->sc_range + i; if (sc->sc_range_mask & ((uint64_t)1 << i)) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_CONFIG: break; case OFW_PCI_PHYS_HI_SPACE_IO: error = rman_manage_region(&sc->sc_io_rman, rp->pci, rp->pci + rp->size - 1); break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: if (rp->pci_hi & OFW_PCI_PHYS_HI_PREFETCHABLE) { sc->sc_have_pmem = 1; error = rman_manage_region(&sc->sc_pmem_rman, rp->pci, rp->pci + rp->size - 1); } else { error = rman_manage_region(&sc->sc_mem_rman, rp->pci, rp->pci + rp->size - 1); } break; } if (error != 0) { device_printf(dev, "rman_manage_region(%x, %#jx, %#jx) failed. " "error = %d\n", rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, rp->pci + rp->size - 1, error); goto out_full; } } ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); return (0); out_full: rman_fini(&sc->sc_pmem_rman); out_pmem_rman: rman_fini(&sc->sc_mem_rman); out_mem_rman: rman_fini(&sc->sc_io_rman); out: free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); return (error); } void ofw_pcib_fini(device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); free(sc->sc_cell_info, M_DEVBUF); free(sc->sc_range, M_DEVBUF); rman_fini(&sc->sc_io_rman); rman_fini(&sc->sc_mem_rman); rman_fini(&sc->sc_pmem_rman); } int ofw_pcib_attach(device_t dev) { struct ofw_pci_softc *sc; int error; sc = device_get_softc(dev); if (!sc->sc_initialized) { error = ofw_pcib_init(dev); if (error != 0) return (error); } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ofw_pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } int ofw_pcib_route_interrupt(device_t bus, device_t dev, int pin) { struct ofw_pci_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[PCI_MAP_INTR]; int intrcells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells != 0) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } /* * Maybe it's a real interrupt, not an intpin */ if (pin > PCI_INTR_PINS) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } int ofw_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->sc_pci_domain; return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); default: break; } return (ENOENT); } int ofw_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_bus = value; return (0); default: break; } return (ENOENT); } int ofw_pcib_nranges(phandle_t node, struct ofw_pci_cell_info *info) { ssize_t nbase_ranges; if (info == NULL) return (-1); info->host_address_cells = 1; info->size_cells = 2; info->pci_address_cell = 3; OF_getencprop(OF_parent(node), "#address-cells", 
&(info->host_address_cells), sizeof(info->host_address_cells)); OF_getencprop(node, "#address-cells", &(info->pci_address_cell), sizeof(info->pci_address_cell)); OF_getencprop(node, "#size-cells", &(info->size_cells), sizeof(info->size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); return (nbase_ranges / sizeof(cell_t) / (info->pci_address_cell + info->host_address_cells + info->size_cells)); } static struct resource * ofw_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_alloc_bus(sc->sc_pci_domain, child, rid, start, end, count, flags)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); default: return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } } static int -ofw_pcib_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +ofw_pcib_release_resource(device_t bus, device_t child, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_release_bus(sc->sc_pci_domain, child, rid, - res)); + return (pci_domain_release_bus(sc->sc_pci_domain, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rman_release_resource(bus, child, res)); default: - return (bus_generic_release_resource(bus, child, type, rid, - res)); + return (bus_generic_release_resource(bus, child, res)); } } static int ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart) { struct ofw_pci_softc *sc; struct ofw_pci_range *rp; int space; sc = device_get_softc(bus); /* * Map this through the ranges list */ for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (type == space) { start += (rp->host - rp->pci); *newstart = start; return (0); } } return (ENOENT); } static int ofw_pcib_activate_resource(device_t bus, device_t child, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->sc_pci_domain, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_activate_resource(bus, child, res)); default: return (bus_generic_activate_resource(bus, child, res)); } } static int ofw_pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct ofw_pci_softc *sc; struct ofw_pci_range *rp; rman_res_t length, start; int error, space; /* 
Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); /* * Map this through the ranges list */ sc = device_get_softc(dev); for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (rman_get_type(r) == space) { start += (rp->host - rp->pci); break; } } if (bootverbose) printf("ofw_pci mapdev: start %jx, len %jd\n", start, length); map->r_bustag = BUS_GET_BUS_TAG(child, child); if (map->r_bustag == NULL) return (ENOMEM); error = bus_space_map(map->r_bustag, start, length, 0, &map->r_bushandle); if (error != 0) return (error); /* XXX for powerpc only? */ map->r_vaddr = (void *)map->r_bushandle; map->r_size = length; return (0); } static int ofw_pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); return (0); default: return (EINVAL); } } #ifdef __powerpc__ static bus_space_tag_t ofw_pcib_bus_get_bus_tag(device_t bus, device_t child) { return (&bs_le_tag); } #endif static int ofw_pcib_deactivate_resource(device_t bus, device_t child, struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->sc_pci_domain, child, res)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_deactivate_resource(bus, child, res)); default: return (bus_generic_deactivate_resource(bus, child, res)); } } static int ofw_pcib_adjust_resource(device_t bus, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct ofw_pci_softc *sc; sc = device_get_softc(bus); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->sc_pci_domain, child, res, start, end)); #endif case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(bus, child, res, start, end)); default: return (bus_generic_adjust_resource(bus, child, res, start, end)); } } static phandle_t ofw_pcib_get_node(device_t bus, device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); /* We only have one child, the PCI bus, which needs our own node. 
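 *
 * (An illustrative aside: consumers reach this method through the
 * generic accessor, e.g. a hypothetical child driver can recover its
 * firmware node with
 *
 *	phandle_t node = ofw_bus_get_node(dev);
 *
 * which resolves here when this bridge is the provider.)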
*/ return (sc->sc_node); } static int ofw_pcib_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) { int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; cell_t *base_ranges; ssize_t nbase_ranges; int nranges; int i, j, k; OF_getencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); OF_getencprop(node, "#address-cells", &pci_address_cells, sizeof(pci_address_cells)); OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); nranges = nbase_ranges / sizeof(cell_t) / (pci_address_cells + host_address_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < nranges; i++) { ranges[i].pci_hi = base_ranges[j++]; ranges[i].pci = 0; for (k = 0; k < pci_address_cells - 1; k++) { ranges[i].pci <<= 32; ranges[i].pci |= base_ranges[j++]; } ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { ranges[i].host <<= 32; ranges[i].host |= base_ranges[j++]; } ranges[i].size = 0; for (k = 0; k < size_cells; k++) { ranges[i].size <<= 32; ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (nranges); } static struct rman * ofw_pcib_get_rman(device_t bus, int type, u_int flags) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IOPORT: return (&sc->sc_io_rman); case SYS_RES_MEMORY: if (sc->sc_have_pmem && (flags & RF_PREFETCHABLE)) return (&sc->sc_pmem_rman); else return (&sc->sc_mem_rman); default: break; } return (NULL); } diff --git a/sys/dev/ofw/ofwbus.c b/sys/dev/ofw/ofwbus.c index 48e865b5b52d..51e6072ad4ba 100644 --- a/sys/dev/ofw/ofwbus.c +++ b/sys/dev/ofw/ofwbus.c @@ -1,190 +1,189 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * Copyright 2001 by Thomas Moestl . * Copyright 2006 by Marius Strobl . * All rights reserved. * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: FreeBSD: src/sys/i386/i386/nexus.c,v 1.43 2001/02/09 */ #include #include #include #include #include #include #include #include #include #include #include #include /* * The ofwbus (which is a pseudo-bus actually) iterates over the nodes that * hang from the Open Firmware root node and adds them as devices to this bus * (except some special nodes which are excluded) so that drivers can be * attached to them. There should be only one ofwbus in the system, added * directly as a child of nexus0. */ static device_probe_t ofwbus_probe; static device_attach_t ofwbus_attach; static bus_alloc_resource_t ofwbus_alloc_resource; static bus_release_resource_t ofwbus_release_resource; static device_method_t ofwbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofwbus_probe), DEVMETHOD(device_attach, ofwbus_attach), /* Bus interface */ DEVMETHOD(bus_alloc_resource, ofwbus_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, ofwbus_release_resource), DEVMETHOD_END }; DEFINE_CLASS_1(ofwbus, ofwbus_driver, ofwbus_methods, sizeof(struct simplebus_softc), simplebus_driver); EARLY_DRIVER_MODULE(ofwbus, nexus, ofwbus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ofwbus, 1); static int ofwbus_probe(device_t dev) { if (OF_peer(0) == 0) return (ENXIO); /* Only one instance of ofwbus. */ if (device_get_unit(dev) != 0) panic("ofwbus added with non-zero unit number: %d\n", device_get_unit(dev)); device_set_desc(dev, "Open Firmware Device Tree"); return (BUS_PROBE_NOWILDCARD); } static int ofwbus_attach(device_t dev) { phandle_t node; node = OF_peer(0); /* * If no Open Firmware, bail early */ if (node == -1) return (ENXIO); /* * ofwbus bus starts on unamed node in FDT, so we cannot make * ofw_bus_devinfo from it. Pass node to simplebus_init directly. */ simplebus_init(dev, node); /* * Allow devices to identify. */ bus_generic_probe(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); } static struct resource * ofwbus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct resource_list_entry *rle; bool isdefault, passthrough; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); rle = NULL; if (!passthrough && isdefault) { rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; count = ummax(count, rle->count); end = ummax(rle->end, start + count - 1); } /* Let nexus handle the allocation. 
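 *
 * (An illustrative child-side sketch, hypothetical code: a wildcard
 * allocation such as
 *
 *	rid = 0;
 *	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *	...
 *	bus_release_resource(dev, res);
 *
 * arrives here as a default-range request, so start and count above
 * were filled in from the child's resource list; the release uses the
 * resource-only calling form this change documents.)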
*/ rv = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); if (rv == NULL) return (NULL); if (!passthrough && rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rle->end - rle->start + 1; } return (rv); } static int -ofwbus_release_resource(device_t bus, device_t child, int type, - int rid, struct resource *r) +ofwbus_release_resource(device_t bus, device_t child, struct resource *r) { struct resource_list_entry *rle; bool passthrough; passthrough = (device_get_parent(child) != bus); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), - type, rid); + rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; } /* Let nexus handle the release. */ - return (bus_generic_release_resource(bus, child, type, rid, r)); + return (bus_generic_release_resource(bus, child, r)); } diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c index 302b9d75c864..c40261be4724 100644 --- a/sys/dev/pccbb/pccbb.c +++ b/sys/dev/pccbb/pccbb.c @@ -1,1577 +1,1574 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000-2001 Jonathan Chen All rights reserved. * Copyright (c) 2002-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) #define CBB_CARD_PRESENT(s) ((s & CBB_STATE_CD) == 0) #define CBB_START_MEM 0x88000000 #define CBB_START_32_IO 0x1000 #define CBB_START_16_IO 0x100 /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, cbb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CBB parameters"); /* There's no way to say TUNEABLE_LONG to get the right types */ u_long cbb_start_mem = CBB_START_MEM; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_memory, CTLFLAG_RWTUN, &cbb_start_mem, CBB_START_MEM, "Starting address for memory allocations"); u_long cbb_start_16_io = CBB_START_16_IO; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_16_io, CTLFLAG_RWTUN, &cbb_start_16_io, CBB_START_16_IO, "Starting ioport for 16-bit cards"); u_long cbb_start_32_io = CBB_START_32_IO; SYSCTL_ULONG(_hw_cbb, OID_AUTO, start_32_io, CTLFLAG_RWTUN, &cbb_start_32_io, CBB_START_32_IO, "Starting ioport for 32-bit cards"); int cbb_debug = 0; SYSCTL_INT(_hw_cbb, OID_AUTO, debug, CTLFLAG_RWTUN, &cbb_debug, 0, "Verbose cardbus bridge debugging"); static void cbb_insert(struct cbb_softc *sc); static void cbb_removal(struct cbb_softc *sc); static uint32_t cbb_detect_voltage(device_t brdev); static int cbb_cardbus_reset_power(device_t brdev, device_t child, int on); static int cbb_cardbus_io_open(device_t brdev, int win, uint32_t start, uint32_t end); static int cbb_cardbus_mem_open(device_t brdev, int win, uint32_t start, uint32_t end); static void cbb_cardbus_auto_open(struct cbb_softc *sc, int type); static int cbb_cardbus_activate_resource(device_t brdev, device_t child, struct resource *res); static int cbb_cardbus_deactivate_resource(device_t brdev, device_t child, struct resource *res); static struct 
resource *cbb_cardbus_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int cbb_cardbus_release_resource(device_t brdev, device_t child, - int type, int rid, struct resource *res); + struct resource *res); static int cbb_cardbus_power_enable_socket(device_t brdev, device_t child); static int cbb_cardbus_power_disable_socket(device_t brdev, device_t child); static int cbb_func_filt(void *arg); static void cbb_func_intr(void *arg); static void cbb_remove_res(struct cbb_softc *sc, struct resource *res) { struct cbb_reslist *rle; SLIST_FOREACH(rle, &sc->rl, link) { if (rle->res == res) { SLIST_REMOVE(&sc->rl, rle, cbb_reslist, link); free(rle, M_DEVBUF); return; } } } static struct resource * cbb_find_res(struct cbb_softc *sc, int type, int rid) { struct cbb_reslist *rle; SLIST_FOREACH(rle, &sc->rl, link) if (SYS_RES_MEMORY == rle->type && rid == rle->rid) return (rle->res); return (NULL); } static void cbb_insert_res(struct cbb_softc *sc, struct resource *res, int type, int rid) { struct cbb_reslist *rle; /* * Need to record allocated resource so we can iterate through * it later. */ rle = malloc(sizeof(struct cbb_reslist), M_DEVBUF, M_NOWAIT); if (rle == NULL) panic("cbb_cardbus_alloc_resource: can't record entry!"); rle->res = res; rle->type = type; rle->rid = rid; SLIST_INSERT_HEAD(&sc->rl, rle, link); } static void cbb_destroy_res(struct cbb_softc *sc) { struct cbb_reslist *rle; while ((rle = SLIST_FIRST(&sc->rl)) != NULL) { device_printf(sc->dev, "Danger Will Robinson: Resource " "left allocated! This is a bug... " "(rid=%x, type=%d, addr=%jx)\n", rle->rid, rle->type, rman_get_start(rle->res)); SLIST_REMOVE_HEAD(&sc->rl, link); free(rle, M_DEVBUF); } } /* * Disable function interrupts by telling the bridge to generate IRQ1 * interrupts. These interrupts aren't really generated by the chip, since * IRQ1 is reserved. Some chipsets assert INTA# inappropriately during * initialization, so this helps to work around the problem. * * XXX We can't do this workaround for all chipsets, because this * XXX causes interference with the keyboard because somechipsets will * XXX actually signal IRQ1 over their serial interrupt connections to * XXX the south bridge. Disable it it for now. */ void cbb_disable_func_intr(struct cbb_softc *sc) { #if 0 uint8_t reg; reg = (exca_getb(&sc->exca, EXCA_INTR) & ~EXCA_INTR_IRQ_MASK) | EXCA_INTR_IRQ_RESERVED1; exca_putb(&sc->exca, EXCA_INTR, reg); #endif } /* * Enable function interrupts. We turn on function interrupts when the card * requests an interrupt. The PCMCIA standard says that we should set * the lower 4 bits to 0 to route via PCI. Note: we call this for both * CardBus and R2 (PC Card) cases, but it should have no effect on CardBus * cards. */ static void cbb_enable_func_intr(struct cbb_softc *sc) { uint8_t reg; reg = (exca_getb(&sc->exca, EXCA_INTR) & ~EXCA_INTR_IRQ_MASK) | EXCA_INTR_IRQ_NONE; PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2); exca_putb(&sc->exca, EXCA_INTR, reg); } int cbb_detach(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); device_t *devlist; int tmp, tries, error, numdevs; /* * Before we delete the children (which we have to do because * attach doesn't check for children busses correctly), we have * to detach the children. Even if we didn't need to delete the * children, we have to detach them. 
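 *
 * (Where the low-memory retry dance below is not needed, the
 * device_delete_children() helper used by mvs_detach() earlier in
 * this patch performs the detach-then-delete sequence for every
 * child in one call:
 *
 *	error = device_delete_children(dev);
 *	if (error != 0)
 *		return (error);
 *
 * either way, the point is that kldunload followed by kldload must
 * not leave duplicate child devices behind.)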
*/ error = bus_generic_detach(brdev); if (error != 0) return (error); /* * Since the attach routine doesn't search for children before it * attaches them to this device, we must delete them here in order * for the kldload/unload case to work. If we failed to do that, then * we'd get duplicate devices when cbb.ko was reloaded. */ tries = 10; do { error = device_get_children(brdev, &devlist, &numdevs); if (error == 0) break; /* * Try hard to cope with low memory. */ if (error == ENOMEM) { pause("cbbnomem", 1); continue; } } while (tries-- > 0); for (tmp = 0; tmp < numdevs; tmp++) device_delete_child(brdev, devlist[tmp]); free(devlist, M_TEMP); /* Turn off the interrupts */ cbb_set(sc, CBB_SOCKET_MASK, 0); /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* Ack the interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff); /* * Wait for the thread to die. kproc_exit will do a wakeup * on the event thread's struct proc * so that we know it is * safe to proceed. IF the thread is running, set the please * die flag and wait for it to comply. Since the wakeup on * the event thread happens only in kproc_exit, we don't * need to loop here. */ bus_teardown_intr(brdev, sc->irq_res, sc->intrhand); mtx_lock(&sc->mtx); sc->flags |= CBB_KTHREAD_DONE; while (sc->flags & CBB_KTHREAD_RUNNING) { DEVPRINTF((sc->dev, "Waiting for thread to die\n")); wakeup(&sc->intrhand); msleep(sc->event_thread, &sc->mtx, PWAIT, "cbbun", 0); } mtx_unlock(&sc->mtx); bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); mtx_destroy(&sc->mtx); return (0); } int cbb_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct cbb_intrhand *ih; struct cbb_softc *sc = device_get_softc(dev); int err; if (filt == NULL && intr == NULL) return (EINVAL); ih = malloc(sizeof(struct cbb_intrhand), M_DEVBUF, M_NOWAIT); if (ih == NULL) return (ENOMEM); *cookiep = ih; ih->filt = filt; ih->intr = intr; ih->arg = arg; ih->sc = sc; /* * XXX need to turn on ISA interrupts, if we ever support them, but * XXX for now that's all we need to do. */ err = BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, filt ? cbb_func_filt : NULL, intr ? cbb_func_intr : NULL, ih, &ih->cookie); if (err != 0) { free(ih, M_DEVBUF); return (err); } cbb_enable_func_intr(sc); sc->cardok = 1; return 0; } int cbb_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct cbb_intrhand *ih; int err; /* XXX Need to do different things for ISA interrupts. 
*/ ih = (struct cbb_intrhand *) cookie; err = BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, ih->cookie); if (err != 0) return (err); free(ih, M_DEVBUF); return (0); } void cbb_driver_added(device_t brdev, driver_t *driver) { struct cbb_softc *sc = device_get_softc(brdev); device_t *devlist; device_t dev; int tmp; int numdevs; int wake = 0; DEVICE_IDENTIFY(driver, brdev); tmp = device_get_children(brdev, &devlist, &numdevs); if (tmp != 0) { device_printf(brdev, "Cannot get children list, no reprobe\n"); return; } for (tmp = 0; tmp < numdevs; tmp++) { dev = devlist[tmp]; if (device_get_state(dev) == DS_NOTPRESENT && device_probe_and_attach(dev) == 0) wake++; } free(devlist, M_TEMP); if (wake > 0) wakeup(&sc->intrhand); } void cbb_child_detached(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); /* I'm not sure we even need this */ if (child != sc->cbdev && child != sc->exca.pccarddev) device_printf(brdev, "Unknown child detached: %s\n", device_get_nameunit(child)); } /************************************************************************/ /* Kthreads */ /************************************************************************/ void cbb_event_thread(void *arg) { struct cbb_softc *sc = arg; uint32_t status; int err; int not_a_card = 0; /* * We need to act as a power sequencer on startup. Delay 2s/channel * to ensure the other channels have had a chance to come up. We likely * should add a lock that's shared on a per-slot basis so that only * one power event can happen per slot at a time. */ pause("cbbstart", hz * device_get_unit(sc->dev) * 2); mtx_lock(&sc->mtx); sc->flags |= CBB_KTHREAD_RUNNING; while ((sc->flags & CBB_KTHREAD_DONE) == 0) { mtx_unlock(&sc->mtx); status = cbb_get(sc, CBB_SOCKET_STATE); DPRINTF(("Status is 0x%x\n", status)); if (!CBB_CARD_PRESENT(status)) { not_a_card = 0; /* We know card type */ cbb_removal(sc); } else if (status & CBB_STATE_NOT_A_CARD) { /* * Up to 10 times, try to rescan the card when we see * NOT_A_CARD. 10 is somehwat arbitrary. When this * pathology hits, there's a ~40% chance each try will * fail. 10 tries takes about 5s and results in a * 99.99% certainty of the results. */ if (not_a_card++ < 10) { DEVPRINTF((sc->dev, "Not a card bit set, rescanning\n")); cbb_setb(sc, CBB_SOCKET_FORCE, CBB_FORCE_CV_TEST); } else { device_printf(sc->dev, "Can't determine card type\n"); } } else { not_a_card = 0; /* We know card type */ cbb_insert(sc); } /* * First time through we need to tell mountroot that we're * done. */ if (sc->sc_root_token) { root_mount_rel(sc->sc_root_token); sc->sc_root_token = NULL; } /* * Wait until it has been 250ms since the last time we * get an interrupt. We handle the rest of the interrupt * at the top of the loop. Although we clear the bit in the * ISR, we signal sc->cv from the detach path after we've * set the CBB_KTHREAD_DONE bit, so we can't do a simple * 250ms sleep here. * * In our ISR, we turn off the card changed interrupt. Turn * them back on here before we wait for them to happen. We * turn them on/off so that we can tolerate a large latency * between the time we signal cbb_event_thread and it gets * a chance to run. 
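 *
 * (A sketch of the interrupt-handler half of that handshake,
 * abbreviated and with the details hypothetical: on a card-change
 * event the ISR masks further CD interrupts and pokes this thread,
 *
 *	cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD);
 *	wakeup(&sc->intrhand);
 *
 * and the cbb_setb() below is the matching re-enable.)
 */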
*/ mtx_lock(&sc->mtx); cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD | CBB_SOCKET_MASK_CSTS); msleep(&sc->intrhand, &sc->mtx, 0, "-", 0); err = 0; while (err != EWOULDBLOCK && (sc->flags & CBB_KTHREAD_DONE) == 0) err = msleep(&sc->intrhand, &sc->mtx, 0, "-", hz / 5); } DEVPRINTF((sc->dev, "Thread terminating\n")); sc->flags &= ~CBB_KTHREAD_RUNNING; mtx_unlock(&sc->mtx); kproc_exit(0); } /************************************************************************/ /* Insert/removal */ /************************************************************************/ static void cbb_insert(struct cbb_softc *sc) { uint32_t sockevent, sockstate; sockevent = cbb_get(sc, CBB_SOCKET_EVENT); sockstate = cbb_get(sc, CBB_SOCKET_STATE); DEVPRINTF((sc->dev, "card inserted: event=0x%08x, state=%08x\n", sockevent, sockstate)); if (sockstate & CBB_STATE_R2_CARD) { if (device_is_attached(sc->exca.pccarddev)) { sc->flags |= CBB_16BIT_CARD; exca_insert(&sc->exca); } else { device_printf(sc->dev, "16-bit card inserted, but no pccard bus.\n"); } } else if (sockstate & CBB_STATE_CB_CARD) { if (device_is_attached(sc->cbdev)) { sc->flags &= ~CBB_16BIT_CARD; CARD_ATTACH_CARD(sc->cbdev); } else { device_printf(sc->dev, "CardBus card inserted, but no cardbus bus.\n"); } } else { /* * We should power the card down, and try again a couple of * times if this happens. XXX */ device_printf(sc->dev, "Unsupported card type detected\n"); } } static void cbb_removal(struct cbb_softc *sc) { sc->cardok = 0; if (sc->flags & CBB_16BIT_CARD) { exca_removal(&sc->exca); } else { if (device_is_attached(sc->cbdev)) CARD_DETACH_CARD(sc->cbdev); } cbb_destroy_res(sc); } /************************************************************************/ /* Interrupt Handler */ /************************************************************************/ static int cbb_func_filt(void *arg) { struct cbb_intrhand *ih = (struct cbb_intrhand *)arg; struct cbb_softc *sc = ih->sc; /* * Make sure that the card is really there. */ if (!sc->cardok) return (FILTER_STRAY); if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) { sc->cardok = 0; return (FILTER_HANDLED); } return ((*ih->filt)(ih->arg)); } static void cbb_func_intr(void *arg) { struct cbb_intrhand *ih = (struct cbb_intrhand *)arg; struct cbb_softc *sc = ih->sc; /* * While this check may seem redundant, it helps close a race * condition. If the card is ejected after the filter runs, but * before this ISR can be scheduled, then we need to do the same * filtering to prevent the card's ISR from being called. One could * argue that the card's ISR should be able to cope, but experience * has shown they can't always. This mitigates the problem by making * the race quite a bit smaller. Properly written client ISRs should * cope with the card going away in the middle of the ISR. We assume * that drivers that are sophisticated enough to use filters don't * need our protection. This also allows us to ensure they *ARE* * called if their filter said they needed to be called. 
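 *
 * (An illustrative aside, hypothetical client code: a filter can also
 * add its own cheap card-present probe, because reads from a removed
 * CardBus card float to all-ones:
 *
 *	status = bus_read_4(sc->res, EXAMPLE_STATUS);
 *	if (status == 0xffffffff)
 *		return (FILTER_STRAY);
 *
 * with EXAMPLE_STATUS standing in for any register that can never
 * legitimately read back as 0xffffffff.)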
*/ if (ih->filt == NULL) { if (!sc->cardok) return; if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) { sc->cardok = 0; return; } } ih->intr(ih->arg); } /************************************************************************/ /* Generic Power functions */ /************************************************************************/ static uint32_t cbb_detect_voltage(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); uint32_t psr; uint32_t vol = CARD_UKN_CARD; psr = cbb_get(sc, CBB_SOCKET_STATE); if (psr & CBB_STATE_5VCARD && psr & CBB_STATE_5VSOCK) vol |= CARD_5V_CARD; if (psr & CBB_STATE_3VCARD && psr & CBB_STATE_3VSOCK) vol |= CARD_3V_CARD; if (psr & CBB_STATE_XVCARD && psr & CBB_STATE_XVSOCK) vol |= CARD_XV_CARD; if (psr & CBB_STATE_YVCARD && psr & CBB_STATE_YVSOCK) vol |= CARD_YV_CARD; return (vol); } static uint8_t cbb_o2micro_power_hack(struct cbb_softc *sc) { uint8_t reg; /* * Issue #2: INT# not qualified with IRQ Routing Bit. An * unexpected PCI INT# may be generated during PC Card * initialization even with the IRQ Routing Bit Set with some * PC Cards. * * This is a two part issue. The first part is that some of * our older controllers have an issue in which the slot's PCI * INT# is NOT qualified by the IRQ routing bit (PCI reg. 3Eh * bit 7). Regardless of the IRQ routing bit, if NO ISA IRQ * is selected (ExCA register 03h bits 3:0, of the slot, are * cleared) we will generate INT# if IREQ# is asserted. The * second part is because some PC Cards prematurally assert * IREQ# before the ExCA registers are fully programmed. This * in turn asserts INT# because ExCA register 03h bits 3:0 * (ISA IRQ Select) are not yet programmed. * * The fix for this issue, which will work for any controller * (old or new), is to set ExCA register 03h bits 3:0 = 0001b * (select IRQ1), of the slot, before turning on slot power. * Selecting IRQ1 will result in INT# NOT being asserted * (because IRQ1 is selected), and IRQ1 won't be asserted * because our controllers don't generate IRQ1. * * Other, non O2Micro controllers will generate irq 1 in some * situations, so we can't do this hack for everybody. Reports of * keyboard controller's interrupts being suppressed occurred when * we did this. */ reg = exca_getb(&sc->exca, EXCA_INTR); exca_putb(&sc->exca, EXCA_INTR, (reg & 0xf0) | 1); return (reg); } /* * Restore the damage that cbb_o2micro_power_hack does to EXCA_INTR so * we don't have an interrupt storm on power on. This has the effect of * disabling card status change interrupts for the duration of poweron. 
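 *
 * (The pairing, as cbb_power() below uses it around a voltage change:
 *
 *	reg = cbb_o2micro_power_hack(sc);	(before power-on)
 *	...
 *	cbb_o2micro_power_hack2(sc, reg);	(once power is stable)
 *
 * so the EXCA_INTR mangling is strictly scoped to the power
 * transition.)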
*/ static void cbb_o2micro_power_hack2(struct cbb_softc *sc, uint8_t reg) { exca_putb(&sc->exca, EXCA_INTR, reg); } int cbb_power(device_t brdev, int volts) { uint32_t status, sock_ctrl, reg_ctrl, mask; struct cbb_softc *sc = device_get_softc(brdev); int cnt, sane; int retval = 0; int on = 0; uint8_t reg = 0; sock_ctrl = cbb_get(sc, CBB_SOCKET_CONTROL); sock_ctrl &= ~CBB_SOCKET_CTRL_VCCMASK; switch (volts & CARD_VCCMASK) { case 5: sock_ctrl |= CBB_SOCKET_CTRL_VCC_5V; on++; break; case 3: sock_ctrl |= CBB_SOCKET_CTRL_VCC_3V; on++; break; case XV: sock_ctrl |= CBB_SOCKET_CTRL_VCC_XV; on++; break; case YV: sock_ctrl |= CBB_SOCKET_CTRL_VCC_YV; on++; break; case 0: break; default: return (0); /* power NEVER changed */ } /* VPP == VCC */ sock_ctrl &= ~CBB_SOCKET_CTRL_VPPMASK; sock_ctrl |= ((sock_ctrl >> 4) & 0x07); if (cbb_get(sc, CBB_SOCKET_CONTROL) == sock_ctrl) return (1); /* no change necessary */ DEVPRINTF((sc->dev, "cbb_power: %dV\n", volts)); if (volts != 0 && sc->chipset == CB_O2MICRO) reg = cbb_o2micro_power_hack(sc); /* * We have to mask the card change detect interrupt while we're * messing with the power. It is allowed to bounce while we're * messing with power as things settle down. In addition, we mask off * the card's function interrupt by routing it via the ISA bus. This * bit generally only affects 16-bit cards. Some bridges allow one to * set another bit to have it also affect 32-bit cards. Since 32-bit * cards are required to be better behaved, we don't bother to get * into those bridge specific features. * * XXX I wonder if we need to enable the READY bit interrupt in the * EXCA CSC register for 16-bit cards, and disable the CD bit? */ mask = cbb_get(sc, CBB_SOCKET_MASK); mask |= CBB_SOCKET_MASK_POWER; mask &= ~CBB_SOCKET_MASK_CD; cbb_set(sc, CBB_SOCKET_MASK, mask); PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN, 2); cbb_set(sc, CBB_SOCKET_CONTROL, sock_ctrl); if (on) { mtx_lock(&sc->mtx); cnt = sc->powerintr; /* * We have a shortish timeout of 500ms here. Some bridges do * not generate a POWER_CYCLE event for 16-bit cards. In * those cases, we have to cope the best we can, and having * only a short delay is better than the alternatives. Others * raise the power cycle a smidge before it is really ready. * We deal with those below. */ sane = 10; while (!(cbb_get(sc, CBB_SOCKET_STATE) & CBB_STATE_POWER_CYCLE) && cnt == sc->powerintr && sane-- > 0) msleep(&sc->powerintr, &sc->mtx, 0, "-", hz / 20); mtx_unlock(&sc->mtx); /* * Relax for 100ms. Some bridges appear to assert this signal * right away, but before the card has stabilized. Other * cards need need more time to cope up reliabily. * Experiments with troublesome setups show this to be a * "cheap" way to enhance reliabilty. We need not do this for * "off" since we don't touch the card after we turn it off. */ pause("cbbPwr", min(hz / 10, 1)); /* * The TOPIC95B requires a little bit extra time to get its * act together, so delay for an additional 100ms. Also as * documented below, it doesn't seem to set the POWER_CYCLE * bit, so don't whine if it never came on. */ if (sc->chipset == CB_TOPIC95) pause("cbb95B", hz / 10); else if (sane <= 0) device_printf(sc->dev, "power timeout, doom?\n"); } /* * After the power is good, we can turn off the power interrupt. * However, the PC Card standard says that we must delay turning the * CD bit back on for a bit to allow for bouncyness on power down * (recall that we don't wait above for a power down, since we don't * get an interrupt for that). 
We're called either from the suspend * code in which case we don't want to turn card change on again, or * we're called from the card insertion code, in which case the cbb * thread will turn it on for us before it waits to be woken by a * change event. * * NB: Topic95B doesn't set the power cycle bit. we assume that * both it and the TOPIC95 behave the same. */ cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_POWER); status = cbb_get(sc, CBB_SOCKET_STATE); if (on && sc->chipset != CB_TOPIC95) { if ((status & CBB_STATE_POWER_CYCLE) == 0) device_printf(sc->dev, "Power not on?\n"); } if (status & CBB_STATE_BAD_VCC_REQ) { device_printf(sc->dev, "Bad Vcc requested\n"); /* * Turn off the power, and try again. Retrigger other * active interrupts via force register. From NetBSD * PR 36652, coded by me to description there. */ sock_ctrl &= ~CBB_SOCKET_CTRL_VCCMASK; sock_ctrl &= ~CBB_SOCKET_CTRL_VPPMASK; cbb_set(sc, CBB_SOCKET_CONTROL, sock_ctrl); status &= ~CBB_STATE_BAD_VCC_REQ; status &= ~CBB_STATE_DATA_LOST; status |= CBB_FORCE_CV_TEST; cbb_set(sc, CBB_SOCKET_FORCE, status); goto done; } if (sc->chipset == CB_TOPIC97) { reg_ctrl = pci_read_config(sc->dev, TOPIC_REG_CTRL, 4); reg_ctrl &= ~TOPIC97_REG_CTRL_TESTMODE; if (on) reg_ctrl |= TOPIC97_REG_CTRL_CLKRUN_ENA; else reg_ctrl &= ~TOPIC97_REG_CTRL_CLKRUN_ENA; pci_write_config(sc->dev, TOPIC_REG_CTRL, reg_ctrl, 4); } retval = 1; done:; if (volts != 0 && sc->chipset == CB_O2MICRO) cbb_o2micro_power_hack2(sc, reg); return (retval); } static int cbb_current_voltage(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); uint32_t ctrl; ctrl = cbb_get(sc, CBB_SOCKET_CONTROL); switch (ctrl & CBB_SOCKET_CTRL_VCCMASK) { case CBB_SOCKET_CTRL_VCC_5V: return CARD_5V_CARD; case CBB_SOCKET_CTRL_VCC_3V: return CARD_3V_CARD; case CBB_SOCKET_CTRL_VCC_XV: return CARD_XV_CARD; case CBB_SOCKET_CTRL_VCC_YV: return CARD_YV_CARD; } return 0; } /* * detect the voltage for the card, and set it. Since the power * used is the square of the voltage, lower voltages is a big win * and what Windows does (and what Microsoft prefers). The MS paper * also talks about preferring the CIS entry as well, but that has * to be done elsewhere. We also optimize power sequencing here * and don't change things if we're already powered up at a supported * voltage. * * In addition, we power up with OE disabled. We'll set it later * in the power up sequence. 
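 *
 * (The arithmetic behind "lower voltages is a big win" above: power
 * scales roughly with the square of the supply voltage, so a card
 * run at 3.3V draws about (3.3/5)^2, i.e. 44%, of what it would at
 * 5V; hence cbb_do_power() below tries YV first, then XV, then 3V,
 * and only then 5V.)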
*/ static int cbb_do_power(device_t brdev) { struct cbb_softc *sc = device_get_softc(brdev); uint32_t voltage, curpwr; uint32_t status; /* Don't enable OE (output enable) until power stable */ exca_clrb(&sc->exca, EXCA_PWRCTL, EXCA_PWRCTL_OE); voltage = cbb_detect_voltage(brdev); curpwr = cbb_current_voltage(brdev); status = cbb_get(sc, CBB_SOCKET_STATE); if ((status & CBB_STATE_POWER_CYCLE) && (voltage & curpwr)) return 0; /* Prefer lowest voltage supported */ cbb_power(brdev, CARD_OFF); if (voltage & CARD_YV_CARD) cbb_power(brdev, CARD_VCC(YV)); else if (voltage & CARD_XV_CARD) cbb_power(brdev, CARD_VCC(XV)); else if (voltage & CARD_3V_CARD) cbb_power(brdev, CARD_VCC(3)); else if (voltage & CARD_5V_CARD) cbb_power(brdev, CARD_VCC(5)); else { device_printf(brdev, "Unknown card voltage\n"); return (ENXIO); } return (0); } /************************************************************************/ /* CardBus power functions */ /************************************************************************/ static int cbb_cardbus_reset_power(device_t brdev, device_t child, int on) { struct cbb_softc *sc = device_get_softc(brdev); uint32_t b, h; int delay, count, zero_seen, func; /* * Asserting reset for 20ms is necessary for most bridges. For some * reason, the Ricoh RF5C47x bridges need it asserted for 400ms. The * root cause of this is unknown, and NetBSD does the same thing. */ delay = sc->chipset == CB_RF5C47X ? 400 : 20; PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2); pause("cbbP3", hz * delay / 1000); /* * If a card exists and we're turning it on, take it out of reset. * After clearing reset, wait up to 1.1s for the first configuration * register (vendor/product) configuration register of device 0.0 to * become != 0xffffffff. The PCMCIA PC Card Host System Specification * says that when powering up the card, the PCI Spec v2.1 must be * followed. In PCI spec v2.2 Table 4-6, Trhfa (Reset High to first * Config Access) is at most 2^25 clocks, or just over 1s. Section * 2.2.1 states any card not ready to participate in bus transactions * must tristate its outputs. Therefore, any access to its * configuration registers must be ignored. In that state, the config * reg will read 0xffffffff. Section 6.2.1 states a vendor id of * 0xffff is invalid, so this can never match a real card. Print a * warning if it never returns a real id. The PCMCIA PC Card * Electrical Spec Section 5.2.7.1 implies only device 0 is present on * a cardbus bus, so that's the only register we check here. */ if (on && CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) { PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, &~CBBM_BRIDGECTRL_RESET, 2); b = pcib_get_bus(child); count = 1100 / 20; do { pause("cbbP4", hz * 2 / 100); } while (PCIB_READ_CONFIG(brdev, b, 0, 0, PCIR_DEVVENDOR, 4) == 0xfffffffful && --count >= 0); if (count < 0) device_printf(brdev, "Warning: Bus reset timeout\n"); /* * Some cards (so far just an atheros card I have) seem to * come out of reset in a funky state. They report they are * multi-function cards, but have nonsense for some of the * higher functions. So if the card claims to be MFDEV, and * any of the higher functions' ID is 0, then we've hit the * bug and we'll try again. 
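 *
 * (A side note on the Trhfa figure in the timing comment above: at
 * the standard 33 MHz PCI clock, 2^25 clocks is 33554432 / 33000000
 * seconds, roughly 1.017 s, which the polling loop above rounds up
 * to 1.1 s as 55 iterations of 20 ms.)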
*/ h = PCIB_READ_CONFIG(brdev, b, 0, 0, PCIR_HDRTYPE, 1); if ((h & PCIM_MFDEV) == 0) return 0; zero_seen = 0; for (func = 1; func < 8; func++) { h = PCIB_READ_CONFIG(brdev, b, 0, func, PCIR_DEVVENDOR, 4); if (h == 0) zero_seen++; } if (!zero_seen) return 0; return (EINVAL); } return 0; } static int cbb_cardbus_power_disable_socket(device_t brdev, device_t child) { cbb_power(brdev, CARD_OFF); cbb_cardbus_reset_power(brdev, child, 0); return (0); } static int cbb_cardbus_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); int err, count; if (!CBB_CARD_PRESENT(cbb_get(sc, CBB_SOCKET_STATE))) return (ENODEV); count = 10; do { err = cbb_do_power(brdev); if (err) return (err); err = cbb_cardbus_reset_power(brdev, child, 1); if (err) { device_printf(brdev, "Reset failed, trying again.\n"); cbb_cardbus_power_disable_socket(brdev, child); pause("cbbErr1", hz / 10); /* wait 100ms */ } } while (err != 0 && count-- > 0); return (0); } /************************************************************************/ /* CardBus Resource */ /************************************************************************/ static void cbb_activate_window(device_t brdev, int type) { PCI_ENABLE_IO(device_get_parent(brdev), brdev, type); } static int cbb_cardbus_io_open(device_t brdev, int win, uint32_t start, uint32_t end) { int basereg; int limitreg; if ((win < 0) || (win > 1)) { DEVPRINTF((brdev, "cbb_cardbus_io_open: window out of range %d\n", win)); return (EINVAL); } basereg = win * 8 + CBBR_IOBASE0; limitreg = win * 8 + CBBR_IOLIMIT0; pci_write_config(brdev, basereg, start, 4); pci_write_config(brdev, limitreg, end, 4); cbb_activate_window(brdev, SYS_RES_IOPORT); return (0); } static int cbb_cardbus_mem_open(device_t brdev, int win, uint32_t start, uint32_t end) { int basereg; int limitreg; if ((win < 0) || (win > 1)) { DEVPRINTF((brdev, "cbb_cardbus_mem_open: window out of range %d\n", win)); return (EINVAL); } basereg = win * 8 + CBBR_MEMBASE0; limitreg = win * 8 + CBBR_MEMLIMIT0; pci_write_config(brdev, basereg, start, 4); pci_write_config(brdev, limitreg, end, 4); cbb_activate_window(brdev, SYS_RES_MEMORY); return (0); } #define START_NONE 0xffffffff #define END_NONE 0 static void cbb_cardbus_auto_open(struct cbb_softc *sc, int type) { uint32_t starts[2]; uint32_t ends[2]; struct cbb_reslist *rle; int align, i; uint32_t reg; starts[0] = starts[1] = START_NONE; ends[0] = ends[1] = END_NONE; if (type == SYS_RES_MEMORY) align = CBB_MEMALIGN; else if (type == SYS_RES_IOPORT) align = CBB_IOALIGN; else align = 1; SLIST_FOREACH(rle, &sc->rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if (!(rman_get_flags(rle->res) & RF_ACTIVE)) continue; if (rman_get_flags(rle->res) & RF_PREFETCHABLE) i = 1; else i = 0; if (rman_get_start(rle->res) < starts[i]) starts[i] = rman_get_start(rle->res); if (rman_get_end(rle->res) > ends[i]) ends[i] = rman_get_end(rle->res); } for (i = 0; i < 2; i++) { if (starts[i] == START_NONE) continue; starts[i] &= ~(align - 1); ends[i] = roundup2(ends[i], align) - 1; } if (starts[0] != START_NONE && starts[1] != START_NONE) { if (starts[0] < starts[1]) { if (ends[0] > starts[1]) { device_printf(sc->dev, "Overlapping ranges" " for prefetch and non-prefetch memory\n"); return; } } else { if (ends[1] > starts[0]) { device_printf(sc->dev, "Overlapping ranges" " for prefetch and non-prefetch memory\n"); return; } } } if (type == SYS_RES_MEMORY) { cbb_cardbus_mem_open(sc->dev, 0, starts[0], ends[0]); cbb_cardbus_mem_open(sc->dev, 
1, starts[1], ends[1]); reg = pci_read_config(sc->dev, CBBR_BRIDGECTRL, 2); reg &= ~(CBBM_BRIDGECTRL_PREFETCH_0 | CBBM_BRIDGECTRL_PREFETCH_1); if (starts[1] != START_NONE) reg |= CBBM_BRIDGECTRL_PREFETCH_1; pci_write_config(sc->dev, CBBR_BRIDGECTRL, reg, 2); if (bootverbose) { device_printf(sc->dev, "Opening memory:\n"); if (starts[0] != START_NONE) device_printf(sc->dev, "Normal: %#x-%#x\n", starts[0], ends[0]); if (starts[1] != START_NONE) device_printf(sc->dev, "Prefetch: %#x-%#x\n", starts[1], ends[1]); } } else if (type == SYS_RES_IOPORT) { cbb_cardbus_io_open(sc->dev, 0, starts[0], ends[0]); cbb_cardbus_io_open(sc->dev, 1, starts[1], ends[1]); if (bootverbose && starts[0] != START_NONE) device_printf(sc->dev, "Opening I/O: %#x-%#x\n", starts[0], ends[0]); } } static int cbb_cardbus_activate_resource(device_t brdev, device_t child, struct resource *res) { int ret; ret = BUS_ACTIVATE_RESOURCE(device_get_parent(brdev), child, res); if (ret != 0) return (ret); cbb_cardbus_auto_open(device_get_softc(brdev), rman_get_type(res)); return (0); } static int cbb_cardbus_deactivate_resource(device_t brdev, device_t child, struct resource *res) { int ret; ret = BUS_DEACTIVATE_RESOURCE(device_get_parent(brdev), child, res); if (ret != 0) return (ret); cbb_cardbus_auto_open(device_get_softc(brdev), rman_get_type(res)); return (0); } static struct resource * cbb_cardbus_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc = device_get_softc(brdev); int tmp; struct resource *res; rman_res_t align; switch (type) { case SYS_RES_IRQ: tmp = rman_get_start(sc->irq_res); if (start > tmp || end < tmp || count != 1) { device_printf(child, "requested interrupt %jd-%jd," "count = %jd not supported by cbb\n", start, end, count); return (NULL); } start = end = tmp; flags |= RF_SHAREABLE; break; case SYS_RES_IOPORT: if (start <= cbb_start_32_io) start = cbb_start_32_io; if (end < start) end = start; if (count > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(count); break; case SYS_RES_MEMORY: if (start <= cbb_start_mem) start = cbb_start_mem; if (end < start) end = start; if (count < CBB_MEMALIGN) align = CBB_MEMALIGN; else align = count; if (align > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(align); break; } res = BUS_ALLOC_RESOURCE(device_get_parent(brdev), child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { printf("cbb alloc res fail type %d rid %x\n", type, *rid); return (NULL); } cbb_insert_res(sc, res, type, *rid); if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res) != 0) { bus_release_resource(child, type, *rid, res); return (NULL); } return (res); } static int -cbb_cardbus_release_resource(device_t brdev, device_t child, int type, - int rid, struct resource *res) +cbb_cardbus_release_resource(device_t brdev, device_t child, + struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; if (rman_get_flags(res) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, rid, res); + error = bus_deactivate_resource(child, res); if (error != 0) return (error); } cbb_remove_res(sc, res); return (BUS_RELEASE_RESOURCE(device_get_parent(brdev), child, - type, rid, res)); + res)); } /************************************************************************/ /* PC Card Power Functions */ 
/************************************************************************/ static int cbb_pcic_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); int err; DPRINTF(("cbb_pcic_socket_enable:\n")); /* power down/up the socket to reset */ err = cbb_do_power(brdev); if (err) return (err); exca_reset(&sc->exca, child); return (0); } static int cbb_pcic_power_disable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); DPRINTF(("cbb_pcic_socket_disable\n")); /* Turn off the card's interrupt and leave it in reset, wait 10ms */ exca_putb(&sc->exca, EXCA_INTR, 0); pause("cbbP1", hz / 100); /* power down the socket */ cbb_power(brdev, CARD_OFF); exca_putb(&sc->exca, EXCA_PWRCTL, 0); /* wait 300ms until power fails (Tpf). */ pause("cbbP2", hz * 300 / 1000); /* enable CSC interrupts */ exca_putb(&sc->exca, EXCA_INTR, EXCA_INTR_ENABLE); return (0); } /************************************************************************/ /* POWER methods */ /************************************************************************/ int cbb_power_enable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_power_enable_socket(brdev, child)); return (cbb_cardbus_power_enable_socket(brdev, child)); } int cbb_power_disable_socket(device_t brdev, device_t child) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_power_disable_socket(brdev, child)); return (cbb_cardbus_power_disable_socket(brdev, child)); } static int cbb_pcic_activate_resource(device_t brdev, device_t child, struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; error = exca_activate_resource(&sc->exca, child, res); if (error == 0) cbb_activate_window(brdev, rman_get_type(res)); return (error); } static int cbb_pcic_deactivate_resource(device_t brdev, device_t child, struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); return (exca_deactivate_resource(&sc->exca, child, res)); } static struct resource * cbb_pcic_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res = NULL; struct cbb_softc *sc = device_get_softc(brdev); int align; int tmp; switch (type) { case SYS_RES_MEMORY: if (start < cbb_start_mem) start = cbb_start_mem; if (end < start) end = start; if (count < CBB_MEMALIGN) align = CBB_MEMALIGN; else align = count; if (align > (1 << RF_ALIGNMENT(flags))) flags = (flags & ~RF_ALIGNMENT_MASK) | rman_make_alignment_flags(align); break; case SYS_RES_IOPORT: if (start < cbb_start_16_io) start = cbb_start_16_io; if (end < start) end = start; break; case SYS_RES_IRQ: tmp = rman_get_start(sc->irq_res); if (start > tmp || end < tmp || count != 1) { device_printf(child, "requested interrupt %jd-%jd," "count = %jd not supported by cbb\n", start, end, count); return (NULL); } flags |= RF_SHAREABLE; start = end = rman_get_start(sc->irq_res); break; } res = BUS_ALLOC_RESOURCE(device_get_parent(brdev), child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) return (NULL); cbb_insert_res(sc, res, type, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { bus_release_resource(child, type, *rid, res); return (NULL); } } return (res); } static int -cbb_pcic_release_resource(device_t brdev, device_t child, int type, - int rid, struct resource *res) 
+cbb_pcic_release_resource(device_t brdev, device_t child, + struct resource *res) { struct cbb_softc *sc = device_get_softc(brdev); int error; if (rman_get_flags(res) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, rid, res); + error = bus_deactivate_resource(child, res); if (error != 0) return (error); } cbb_remove_res(sc, res); return (BUS_RELEASE_RESOURCE(device_get_parent(brdev), child, - type, rid, res)); + res)); } /************************************************************************/ /* PC Card methods */ /************************************************************************/ int cbb_pcic_set_res_flags(device_t brdev, device_t child, int type, int rid, u_long flags) { struct cbb_softc *sc = device_get_softc(brdev); struct resource *res; if (type != SYS_RES_MEMORY) return (EINVAL); res = cbb_find_res(sc, type, rid); if (res == NULL) { device_printf(brdev, "set_res_flags: specified rid not found\n"); return (ENOENT); } return (exca_mem_set_flags(&sc->exca, res, flags)); } int cbb_pcic_set_memory_offset(device_t brdev, device_t child, int rid, uint32_t cardaddr, uint32_t *deltap) { struct cbb_softc *sc = device_get_softc(brdev); struct resource *res; res = cbb_find_res(sc, SYS_RES_MEMORY, rid); if (res == NULL) { device_printf(brdev, "set_memory_offset: specified rid not found\n"); return (ENOENT); } return (exca_mem_set_offset(&sc->exca, res, cardaddr, deltap)); } /************************************************************************/ /* BUS Methods */ /************************************************************************/ int cbb_activate_resource(device_t brdev, device_t child, struct resource *r) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_activate_resource(brdev, child, r)); else return (cbb_cardbus_activate_resource(brdev, child, r)); } int cbb_deactivate_resource(device_t brdev, device_t child, struct resource *r) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_deactivate_resource(brdev, child, r)); else return (cbb_cardbus_deactivate_resource(brdev, child, r)); } struct resource * cbb_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) return (cbb_pcic_alloc_resource(brdev, child, type, rid, start, end, count, flags)); else return (cbb_cardbus_alloc_resource(brdev, child, type, rid, start, end, count, flags)); } int -cbb_release_resource(device_t brdev, device_t child, int type, int rid, - struct resource *r) +cbb_release_resource(device_t brdev, device_t child, struct resource *r) { struct cbb_softc *sc = device_get_softc(brdev); if (sc->flags & CBB_16BIT_CARD) - return (cbb_pcic_release_resource(brdev, child, type, - rid, r)); + return (cbb_pcic_release_resource(brdev, child, r)); else - return (cbb_cardbus_release_resource(brdev, child, type, - rid, r)); + return (cbb_cardbus_release_resource(brdev, child, r)); } int cbb_read_ivar(device_t brdev, device_t child, int which, uintptr_t *result) { struct cbb_softc *sc = device_get_softc(brdev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return (0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return (0); case EXCA_IVAR_SLOT: *result = 0; return (0); } return (ENOENT); } int cbb_write_ivar(device_t brdev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case 
PCIB_IVAR_BUS: return (EINVAL); case EXCA_IVAR_SLOT: return (EINVAL); } return (ENOENT); } int cbb_child_present(device_t parent, device_t child) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(parent); uint32_t sockstate; sockstate = cbb_get(sc, CBB_SOCKET_STATE); return (CBB_CARD_PRESENT(sockstate) && sc->cardok); } diff --git a/sys/dev/pccbb/pccbb_pci.c b/sys/dev/pccbb/pccbb_pci.c index 08014fb210ed..3b66b3df3852 100644 --- a/sys/dev/pccbb/pccbb_pci.c +++ b/sys/dev/pccbb/pccbb_pci.c @@ -1,977 +1,976 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000-2001 Jonathan Chen All rights reserved. * Copyright (c) 2002-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) static void cbb_chipinit(struct cbb_softc *sc); static int cbb_pci_filt(void *arg); static struct yenta_chipinfo { uint32_t yc_id; const char *yc_name; int yc_chiptype; } yc_chipsets[] = { /* Texas Instruments chips */ {PCIC_ID_TI1031, "TI1031 PCI-PC Card Bridge", CB_TI113X}, {PCIC_ID_TI1130, "TI1130 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1131, "TI1131 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1210, "TI1210 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1211, "TI1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1220, "TI1220 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1221, "TI1221 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1225, "TI1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1250, "TI1250 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251, "TI1251 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251B,"TI1251B PCI-CardBus Bridge",CB_TI125X}, {PCIC_ID_TI1260, "TI1260 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1260B,"TI1260B PCI-CardBus Bridge",CB_TI12XX}, {PCIC_ID_TI1410, "TI1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1420, "TI1420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1421, "TI1421 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1450, "TI1450 PCI-CardBus Bridge", CB_TI125X}, /*SIC!*/ {PCIC_ID_TI1451, "TI1451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1510, "TI1510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1520, "TI1520 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4410, "TI4410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4450, "TI4450 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4451, "TI4451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4510, "TI4510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6411, "TI6411 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420SC, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7410, "TI7410 PCI-CardBus Bridge", CB_TI12XX}, 
{PCIC_ID_TI7510, "TI7510 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_TI7610, "TI7610 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_TI7610M, "TI7610 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_TI7610SD, "TI7610 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_TI7610MS, "TI7610 PCI-CardBus Bridge", CB_TI12XX},
/* ENE */
{PCIC_ID_ENE_CB710, "ENE CB710 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_ENE_CB720, "ENE CB720 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_ENE_CB1211, "ENE CB1211 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_ENE_CB1225, "ENE CB1225 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_ENE_CB1410, "ENE CB1410 PCI-CardBus Bridge", CB_TI12XX},
{PCIC_ID_ENE_CB1420, "ENE CB1420 PCI-CardBus Bridge", CB_TI12XX},
/* Ricoh chips */
{PCIC_ID_RICOH_RL5C465, "RF5C465 PCI-CardBus Bridge", CB_RF5C46X},
{PCIC_ID_RICOH_RL5C466, "RF5C466 PCI-CardBus Bridge", CB_RF5C46X},
{PCIC_ID_RICOH_RL5C475, "RF5C475 PCI-CardBus Bridge", CB_RF5C47X},
{PCIC_ID_RICOH_RL5C476, "RF5C476 PCI-CardBus Bridge", CB_RF5C47X},
{PCIC_ID_RICOH_RL5C477, "RF5C477 PCI-CardBus Bridge", CB_RF5C47X},
{PCIC_ID_RICOH_RL5C478, "RF5C478 PCI-CardBus Bridge", CB_RF5C47X},
/* Toshiba products */
{PCIC_ID_TOPIC95, "ToPIC95 PCI-CardBus Bridge", CB_TOPIC95},
{PCIC_ID_TOPIC95B, "ToPIC95B PCI-CardBus Bridge", CB_TOPIC95},
{PCIC_ID_TOPIC97, "ToPIC97 PCI-CardBus Bridge", CB_TOPIC97},
{PCIC_ID_TOPIC100, "ToPIC100 PCI-CardBus Bridge", CB_TOPIC97},
/* Cirrus Logic */
{PCIC_ID_CLPD6832, "CLPD6832 PCI-CardBus Bridge", CB_CIRRUS},
{PCIC_ID_CLPD6833, "CLPD6833 PCI-CardBus Bridge", CB_CIRRUS},
{PCIC_ID_CLPD6834, "CLPD6834 PCI-CardBus Bridge", CB_CIRRUS},
/* O2Micro */
{PCIC_ID_OZ6832, "O2Micro OZ6832/6833 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ6860, "O2Micro OZ6836/6860 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ6872, "O2Micro OZ6812/6872 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ6912, "O2Micro OZ6912/6972 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ6922, "O2Micro OZ6922 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ6933, "O2Micro OZ6933 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711E1, "O2Micro OZ711E1 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711EC1, "O2Micro OZ711EC1/M1 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711E2, "O2Micro OZ711E2 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711M1, "O2Micro OZ711M1 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711M2, "O2Micro OZ711M2 PCI-CardBus Bridge", CB_O2MICRO},
{PCIC_ID_OZ711M3, "O2Micro OZ711M3 PCI-CardBus Bridge", CB_O2MICRO},
/* SMC */
{PCIC_ID_SMC_34C90, "SMC 34C90 PCI-CardBus Bridge", CB_CIRRUS},
/* sentinel */
{0 /* null id */, "unknown", CB_UNKNOWN},
};

/************************************************************************/
/* Probe/Attach */
/************************************************************************/

static int
cbb_chipset(uint32_t pci_id, const char **namep)
{
struct yenta_chipinfo *ycp;

for (ycp = yc_chipsets; ycp->yc_id != 0 && pci_id != ycp->yc_id; ++ycp)
continue;
if (namep != NULL)
*namep = ycp->yc_name;
return (ycp->yc_chiptype);
}

static int
cbb_pci_probe(device_t brdev)
{
const char *name;
uint32_t progif;
uint32_t baseclass;
uint32_t subclass;

/*
 * Do we know that we support the chipset?  If so, then we
 * accept the device.
 */
if (cbb_chipset(pci_get_devid(brdev), &name) != CB_UNKNOWN) {
device_set_desc(brdev, name);
return (BUS_PROBE_DEFAULT);
}

/*
 * We do support generic CardBus bridges.  All that we've seen
 * to date have progif 0 (the Yenta spec and its successors
 * mandate this).
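 * Returning BUS_PROBE_GENERIC for this catch-all match lets a driver
 * with a more specific probe (including the known-chipset case above,
 * which returns the higher-priority BUS_PROBE_DEFAULT) win the device.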
*/ baseclass = pci_get_class(brdev); subclass = pci_get_subclass(brdev); progif = pci_get_progif(brdev); if (baseclass == PCIC_BRIDGE && subclass == PCIS_BRIDGE_CARDBUS && progif == 0) { device_set_desc(brdev, "PCI-CardBus Bridge"); return (BUS_PROBE_GENERIC); } return (ENXIO); } /* * Print out the config space */ static void cbb_print_config(device_t dev) { int i; device_printf(dev, "PCI Configuration space:"); for (i = 0; i < 256; i += 4) { if (i % 16 == 0) printf("\n 0x%02x: ", i); printf("0x%08x ", pci_read_config(dev, i, 4)); } printf("\n"); } static int cbb_pci_attach(device_t brdev) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */ uint32_t pribus; #endif struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int rid; device_t parent; parent = device_get_parent(brdev); mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF); sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL); sc->dev = brdev; sc->cbdev = NULL; sc->domain = pci_get_domain(brdev); sc->pribus = pcib_get_bus(parent); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); pcib_setup_secbus(brdev, &sc->bus, 1); #else sc->bus.sec = pci_read_config(brdev, PCIR_SECBUS_2, 1); sc->bus.sub = pci_read_config(brdev, PCIR_SUBBUS_2, 1); #endif SLIST_INIT(&sc->rl); rid = CBBR_SOCKBASE; sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->base_res) { device_printf(brdev, "Could not map register memory\n"); mtx_destroy(&sc->mtx); return (ENOMEM); } else { DEVPRINTF((brdev, "Found memory at %jx\n", rman_get_start(sc->base_res))); } /* attach children */ sc->cbdev = device_add_child(brdev, "cardbus", -1); if (sc->cbdev == NULL) DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n")); else if (device_probe_and_attach(sc->cbdev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n")); sc->bst = rman_get_bustag(sc->base_res); sc->bsh = rman_get_bushandle(sc->base_res); exca_init(&sc->exca, brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET); sc->exca.flags |= EXCA_HAS_MEMREG_WIN; sc->exca.chipset = EXCA_CARDBUS; sc->chipinit = cbb_chipinit; sc->chipinit(sc); /*Sysctls*/ sctx = device_get_sysctl_ctx(brdev); soid = device_get_sysctl_tree(brdev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); #if 0 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory", CTLFLAG_RD, &sc->subbus, 0, "Memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem", CTLFLAG_RD, &sc->subbus, 0, "Prefetch memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1", CTLFLAG_RD, &sc->subbus, 0, "io range 1 open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2", CTLFLAG_RD, &sc->subbus, 0, "io range 2 open"); #endif #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* * This is a gross hack. 
We should be scanning the entire pci * tree, assigning bus numbers in a way such that we (1) can * reserve 1 extra bus just in case and (2) all sub buses * are in an appropriate range. */ DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec)); pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1); if (sc->bus.sec == 0 || sc->pribus != pribus) { if (curr_bus_number <= sc->pribus) curr_bus_number = sc->pribus + 1; if (pribus != sc->pribus) { DEVPRINTF((brdev, "Setting primary bus to %d\n", sc->pribus)); pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); } sc->bus.sec = curr_bus_number++; sc->bus.sub = curr_bus_number++; DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n", sc->bus.sec, sc->bus.sub)); pci_write_config(brdev, PCIR_SECBUS_2, sc->bus.sec, 1); pci_write_config(brdev, PCIR_SUBBUS_2, sc->bus.sub, 1); } #endif /* Map and establish the interrupt. */ rid = 0; sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(brdev, "Unable to map IRQ...\n"); goto err; } if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE, cbb_pci_filt, NULL, sc, &sc->intrhand)) { device_printf(brdev, "couldn't establish interrupt\n"); goto err; } /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* reset interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT)); if (bootverbose) cbb_print_config(brdev); /* Start the thread */ if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0, "%s event thread", device_get_nameunit(brdev))) { device_printf(brdev, "unable to create event thread.\n"); panic("cbb_create_event_thread"); } sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev)); return (0); err: if (sc->irq_res) bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->base_res) { bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); } mtx_destroy(&sc->mtx); return (ENOMEM); } static int cbb_pci_detach(device_t brdev) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct cbb_softc *sc = device_get_softc(brdev); #endif int error; error = cbb_detach(brdev); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (error == 0) pcib_free_secbus(brdev, &sc->bus); #endif return (error); } static void cbb_chipinit(struct cbb_softc *sc) { uint32_t mux, sysctrl, reg; /* Set CardBus latency timer */ if (pci_read_config(sc->dev, PCIR_SECLAT_2, 1) < 0x20) pci_write_config(sc->dev, PCIR_SECLAT_2, 0x20, 1); /* Set PCI latency timer */ if (pci_read_config(sc->dev, PCIR_LATTIMER, 1) < 0x20) pci_write_config(sc->dev, PCIR_LATTIMER, 0x20, 1); /* Enable DMA, memory access for this card and I/O access for children */ pci_enable_busmaster(sc->dev); pci_enable_io(sc->dev, SYS_RES_IOPORT); pci_enable_io(sc->dev, SYS_RES_MEMORY); /* disable Legacy IO */ switch (sc->chipset) { case CB_RF5C46X: PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_RL_3E0_EN | CBBM_BRIDGECTRL_RL_3E2_EN), 2); break; default: pci_write_config(sc->dev, CBBR_LEGACY, 0x0, 4); break; } /* Use PCI interrupt for interrupt routing */ PCI_MASK2_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_MASTER_ABORT | CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN), | CBBM_BRIDGECTRL_WRITE_POST_EN, 2); /* * XXX this should be a function table, ala OLDCARD. This means * that we could more easily support ISA interrupts for pccard * cards if we had to. 
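 *
 * A hypothetical sketch of such a table (an editor's illustration,
 * not part of this change):
 *
 *	struct cbb_chip_ops {
 *		void (*chipinit)(struct cbb_softc *);
 *		void (*route_csc)(struct cbb_softc *, int on);
 *	};
 *
 * The switch below would then collapse to one indirect call per hook.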
 */
switch (sc->chipset) {
case CB_TI113X:
/*
 * The TI 1031, TI 1130 and TI 1131 all require another bit
 * be set to enable PCI routing of interrupts, and then
 * a bit for each of the CSC and Function interrupts we
 * want routed.
 */
PCI_MASK_CONFIG(sc->dev, CBBR_CBCTRL,
| CBBM_CBCTRL_113X_PCI_INTR
| CBBM_CBCTRL_113X_PCI_CSC
| CBBM_CBCTRL_113X_PCI_IRQ_EN, 1);
PCI_MASK_CONFIG(sc->dev, CBBR_DEVCTRL,
& ~(CBBM_DEVCTRL_INT_SERIAL |
CBBM_DEVCTRL_INT_PCI), 1);
break;
case CB_TI12XX:
/*
 * Some TI 12xx (and [14][45]xx) based PCI cards
 * sometimes have issues with the MFUNC register not
 * being initialized due to a bad EEPROM on board.
 * Laptops where this matters have this register
 * properly initialized.
 *
 * The TI125X parts have a different register.
 *
 * Note: Only the lower two nibbles matter.  When set
 * to 0, the MFUNC{0,1} pins are GPIO, which isn't
 * going to work out too well because we specifically
 * program these parts to parallel interrupt signalling
 * elsewhere.  We preserve the upper bits of this
 * register since changing them has subtle side effects
 * for different variants of the card that are
 * extremely difficult to exhaustively test.
 *
 * Also, the TI 1510/1520 changed the default for the MFUNC
 * register from 0x0 to 0x1000 to enable IRQSER by default.
 * We want to be careful to avoid overriding that, and the
 * below test will do that.  Should this check prove to be
 * too permissive, we should just check against 0 and 0x1000
 * and not touch it otherwise.
 */
mux = pci_read_config(sc->dev, CBBR_MFUNC, 4);
sysctrl = pci_read_config(sc->dev, CBBR_SYSCTRL, 4);
if ((mux & (CBBM_MFUNC_PIN0 | CBBM_MFUNC_PIN1)) == 0) {
mux = (mux & ~CBBM_MFUNC_PIN0) | CBBM_MFUNC_PIN0_INTA;
if ((sysctrl & CBBM_SYSCTRL_INTRTIE) == 0)
mux = (mux & ~CBBM_MFUNC_PIN1) | CBBM_MFUNC_PIN1_INTB;
pci_write_config(sc->dev, CBBR_MFUNC, mux, 4);
}
/*FALLTHROUGH*/
case CB_TI125X:
/*
 * Disable zoom video.  Some machines initialize this
 * improperly and experience has shown that this helps
 * prevent strange behavior.  We don't support zoom
 * video anyway, so no harm can come from this.
 */
pci_write_config(sc->dev, CBBR_MMCTRL, 0, 4);
break;
case CB_O2MICRO:
/*
 * Issue #1: INT# generated at the same time as
 * selected ISA IRQ.  When IREQ# or STSCHG# is active,
 * in addition to the ISA IRQ being generated, INT#
 * will also be generated at the same time.
 *
 * Some of the older controllers have an issue in
 * which the slot's PCI INT# will be asserted whenever
 * IREQ# or STSCHG# is asserted even if ExCA registers
 * 03h or 05h have an ISA IRQ selected.
 *
 * The fix for this issue, which will work for any
 * controller (old or new), is to set ExCA registers
 * 3Ah (slot 0) & 7Ah (slot 1) bits 7:4 = 1010b.
 * These bits are undocumented.  By setting this
 * register (of each slot) to '1010xxxxb' a routing of
 * IREQ# to INTC# and STSCHG# to INTC# is selected.
 * Since INTC# isn't connected there will be no
 * unexpected PCI INT when IREQ# or STSCHG# is active.
 * However, INTA# (slot 0) or INTB# (slot 1) will
 * still be correctly generated if NO ISA IRQ is
 * selected (ExCA regs 03h or 05h are cleared).
 */
reg = exca_getb(&sc->exca, EXCA_O2MICRO_CTRL_C);
reg = (reg & 0x0f) |
EXCA_O2CC_IREQ_INTC | EXCA_O2CC_STSCHG_INTC;
exca_putb(&sc->exca, EXCA_O2MICRO_CTRL_C, reg);
break;
case CB_TOPIC97:
/*
 * Disable Zoom Video, ToPIC 97, 100.
 */
pci_write_config(sc->dev, TOPIC97_ZV_CONTROL, 0, 1);
/*
 * ToPIC 97, 100
 * At offset 0xa1: INTERRUPT CONTROL register
 * 0x1: Turn on INT interrupts.
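 * (TOPIC97_INTCTRL_INTIRQSEL, OR'd in just below, is presumably that
 * 0x1 bit.)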
 */
PCI_MASK_CONFIG(sc->dev, TOPIC_INTCTRL,
| TOPIC97_INTCTRL_INTIRQSEL, 1);
/*
 * ToPIC97, 100
 * Need to assert support for low voltage cards
 */
exca_setb(&sc->exca, EXCA_TOPIC97_CTRL,
EXCA_TOPIC97_CTRL_LV_MASK);
goto topic_common;
case CB_TOPIC95:
/*
 * SOCKETCTRL appears to be TOPIC 95/B specific
 */
PCI_MASK_CONFIG(sc->dev, TOPIC95_SOCKETCTRL,
| TOPIC95_SOCKETCTRL_SCR_IRQSEL, 4);
topic_common:;
/*
 * At offset 0xa0: SLOT CONTROL
 * 0x80 Enable CardBus Functionality
 * 0x40 Enable CardBus and PC Card registers
 * 0x20 Lock ID in exca regs
 * 0x10 Write protect ID in config regs
 * Clear the rest of the bits, which defaults the slot
 * in legacy mode to 0x3e0 and offset 0. (legacy
 * mode is determined elsewhere)
 */
pci_write_config(sc->dev, TOPIC_SLOTCTRL,
TOPIC_SLOTCTRL_SLOTON | TOPIC_SLOTCTRL_SLOTEN |
TOPIC_SLOTCTRL_ID_LOCK | TOPIC_SLOTCTRL_ID_WP, 1);
/*
 * At offset 0xa3: Card Detect Control Register
 * 0x80 CardBus enable
 * 0x01 Cleared for hardware change detect
 */
PCI_MASK2_CONFIG(sc->dev, TOPIC_CDC,
| TOPIC_CDC_CARDBUS, & ~TOPIC_CDC_SWDETECT, 4);
break;
}

/*
 * Need to tell the ExCA registers to route CSC interrupts via PCI
 * interrupts.  There are two ways to do this.  One is to set
 * INTR_ENABLE and the other is to set CSC to 0.  Since both
 * methods are mutually compatible, we do both.
 */
exca_putb(&sc->exca, EXCA_INTR, EXCA_INTR_ENABLE);
exca_putb(&sc->exca, EXCA_CSC_INTR, 0);
cbb_disable_func_intr(sc);

/* close all memory and io windows */
pci_write_config(sc->dev, CBBR_MEMBASE0, 0xffffffff, 4);
pci_write_config(sc->dev, CBBR_MEMLIMIT0, 0, 4);
pci_write_config(sc->dev, CBBR_MEMBASE1, 0xffffffff, 4);
pci_write_config(sc->dev, CBBR_MEMLIMIT1, 0, 4);
pci_write_config(sc->dev, CBBR_IOBASE0, 0xffffffff, 4);
pci_write_config(sc->dev, CBBR_IOLIMIT0, 0, 4);
pci_write_config(sc->dev, CBBR_IOBASE1, 0xffffffff, 4);
pci_write_config(sc->dev, CBBR_IOLIMIT1, 0, 4);
}

static int
cbb_route_interrupt(device_t pcib, device_t dev, int pin)
{
struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(pcib);

return (rman_get_start(sc->irq_res));
}

static int
cbb_pci_shutdown(device_t brdev)
{
struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev);

/*
 * We're about to pull the rug out from the card, so mark it as
 * gone to prevent harm.
 */
sc->cardok = 0;

/*
 * Place the cards in reset, turn off the interrupts and power
 * down the socket.
 */
PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2);
exca_clrb(&sc->exca, EXCA_INTR, EXCA_INTR_RESET);
cbb_set(sc, CBB_SOCKET_MASK, 0);
cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff);
cbb_power(brdev, CARD_OFF);

/*
 * For paranoia, turn off all address decoding.  Really not needed,
 * it seems, but it can't hurt.
 */
exca_putb(&sc->exca, EXCA_ADDRWIN_ENABLE, 0);
pci_write_config(brdev, CBBR_MEMBASE0, 0, 4);
pci_write_config(brdev, CBBR_MEMLIMIT0, 0, 4);
pci_write_config(brdev, CBBR_MEMBASE1, 0, 4);
pci_write_config(brdev, CBBR_MEMLIMIT1, 0, 4);
pci_write_config(brdev, CBBR_IOBASE0, 0, 4);
pci_write_config(brdev, CBBR_IOLIMIT0, 0, 4);
pci_write_config(brdev, CBBR_IOBASE1, 0, 4);
pci_write_config(brdev, CBBR_IOLIMIT1, 0, 4);
return (0);
}

static int
cbb_pci_filt(void *arg)
{
struct cbb_softc *sc = arg;
uint32_t sockevent;
uint8_t csc;
int retval = FILTER_STRAY;

/*
 * Some chips also require us to read the old ExCA register for card
 * status change when we route CSC via PCI.  This isn't supposed to be
 * required, but it clears the interrupt state on some chipsets.
 * Maybe there's a setting that would obviate its need.
Maybe we
 * should test the status bits and deal with them, but so far we've
 * not found any machines that don't also give us the socket status
 * indication above.
 *
 * This call used to be unconditional.  However, further research
 * suggests that we hit this condition when the card READY interrupt
 * fired.  So now we only read it for 16-bit cards, and we only claim
 * the interrupt if READY is set.  If this still causes problems, then
 * the next step would be to read this if we have a 16-bit card *OR*
 * we have no card.  We treat the READY signal as if it were the power
 * completion signal.  Some bridges may double signal things here, but
 * signalling twice should be OK since we only sleep on the powerintr
 * in one place and a double wakeup would be benign there.
 */
if (sc->flags & CBB_16BIT_CARD) {
csc = exca_getb(&sc->exca, EXCA_CSC);
if (csc & EXCA_CSC_READY) {
atomic_add_int(&sc->powerintr, 1);
wakeup((void *)&sc->powerintr);
retval = FILTER_HANDLED;
}
}

/*
 * Read the socket event.  Sometimes, the theory goes, the PCI bus is
 * so loaded that it cannot satisfy the read request, and we get
 * garbage back from the following read.  We have to filter out the
 * garbage so that we don't spontaneously reset the card under high
 * load.  PCI isn't supposed to act like this.  No doubt this is a bug
 * in the PCI bridge chipset (or cbb bridge) that's being used in
 * certain amd64 laptops today.  Work around the issue by assuming
 * that if any bits we don't know about are set, we got garbage.
 */
sockevent = cbb_get(sc, CBB_SOCKET_EVENT);
if (sockevent != 0 && (sockevent & ~CBB_SOCKET_EVENT_VALID_MASK) == 0) {
/*
 * If anything has happened to the socket, we assume that the
 * card is no longer OK, and we shouldn't call its ISR.  We
 * set cardok as soon as we've attached the card.  This helps
 * in a noisy eject, which happens all too often when users
 * are ejecting their PC Cards.
 *
 * We use this method in preference to checking to see if the
 * card is still there because the check suffers from a race
 * condition in the bouncing case.
 */
#define DELTA (CBB_SOCKET_MASK_CD)
if (sockevent & DELTA) {
cbb_clrb(sc, CBB_SOCKET_MASK, DELTA);
cbb_set(sc, CBB_SOCKET_EVENT, DELTA);
sc->cardok = 0;
cbb_disable_func_intr(sc);
wakeup(&sc->intrhand);
}
#undef DELTA

/*
 * Wake up anybody waiting for a power interrupt.  We have to
 * use atomic_add_int for wakeups on other cores.
 */
if (sockevent & CBB_SOCKET_EVENT_POWER) {
cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_EVENT_POWER);
cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_POWER);
atomic_add_int(&sc->powerintr, 1);
wakeup((void *)&sc->powerintr);
}

/*
 * Status change interrupts aren't presently used in the
 * rest of the driver.  For now, just ACK them.
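 * (ACKing here means writing the event bit back to CBB_SOCKET_EVENT,
 * which this driver uses as a write-one-to-clear register throughout.)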
 */
if (sockevent & CBB_SOCKET_EVENT_CSTS)
cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_CSTS);
retval = FILTER_HANDLED;
}
return retval;
}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static struct resource *
cbb_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct cbb_softc *sc;

sc = device_get_softc(bus);
if (type == PCI_RES_BUS)
return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
count, flags));
return (cbb_alloc_resource(bus, child, type, rid, start, end, count,
flags));
}

static int
cbb_pci_adjust_resource(device_t bus, device_t child,
struct resource *r, rman_res_t start, rman_res_t end)
{
struct cbb_softc *sc;

sc = device_get_softc(bus);
if (rman_get_type(r) == PCI_RES_BUS) {
if (!rman_is_region_manager(r, &sc->bus.rman))
return (EINVAL);
return (rman_adjust_resource(r, start, end));
}
return (bus_generic_adjust_resource(bus, child, r, start, end));
}

static int
-cbb_pci_release_resource(device_t bus, device_t child, int type, int rid,
- struct resource *r)
+cbb_pci_release_resource(device_t bus, device_t child, struct resource *r)
{
struct cbb_softc *sc;
int error;

sc = device_get_softc(bus);
- if (type == PCI_RES_BUS) {
+ if (rman_get_type(r) == PCI_RES_BUS) {
if (!rman_is_region_manager(r, &sc->bus.rman))
return (EINVAL);
if (rman_get_flags(r) & RF_ACTIVE) {
- error = bus_deactivate_resource(child, type, rid, r);
+ error = bus_deactivate_resource(child, r);
if (error)
return (error);
}
return (rman_release_resource(r));
}
- return (cbb_release_resource(bus, child, type, rid, r));
+ return (cbb_release_resource(bus, child, r));
}
#endif

/************************************************************************/
/* PCI compat methods */
/************************************************************************/

static int
cbb_maxslots(device_t brdev)
{
return (0);
}

static uint32_t
cbb_read_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, int width)
{
/*
 * Pass through to the next ppb up the chain (i.e. our grandparent).
 */
return (PCIB_READ_CONFIG(device_get_parent(device_get_parent(brdev)),
b, s, f, reg, width));
}

static void
cbb_write_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg,
uint32_t val, int width)
{
/*
 * Pass through to the next ppb up the chain (i.e. our grandparent).
 */
PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(brdev)),
b, s, f, reg, val, width);
}

static int
cbb_pci_suspend(device_t brdev)
{
int error = 0;
struct cbb_softc *sc = device_get_softc(brdev);

error = bus_generic_suspend(brdev);
if (error != 0)
return (error);
cbb_set(sc, CBB_SOCKET_MASK, 0);	/* Quiet hardware */
sc->cardok = 0;				/* Card is bogus now */
return (0);
}

static int
cbb_pci_resume(device_t brdev)
{
int error = 0;
struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev);
uint32_t tmp;

/*
 * In the APM and early ACPI era, BIOSes saved the PCI config
 * registers.  As chips became more complicated, that functionality
 * moved into the ACPI code / tables.  We must therefore restore the
 * settings we made here to make sure the device comes back.
 * Transitions from D0 to Dx and back to D0 cause the bridge to lose
 * its config space, so we must make sure all the bus mappings and
 * such are preserved.
 *
 * The PCI layer handles standard PCI registers like the
 * command register and BARs, but cbb-specific registers are
 * handled here.
 */
sc->chipinit(sc);

/* reset interrupt -- Do we really need to do this?
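 * (Writing the just-read event bits back to CBB_SOCKET_EVENT below
 * acknowledges any events latched across suspend, matching the
 * write-one-to-clear usage elsewhere in this driver.)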
*/ tmp = cbb_get(sc, CBB_SOCKET_EVENT); cbb_set(sc, CBB_SOCKET_EVENT, tmp); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* Signal the thread to wakeup. */ wakeup(&sc->intrhand); error = bus_generic_resume(brdev); return (error); } static device_method_t cbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cbb_pci_probe), DEVMETHOD(device_attach, cbb_pci_attach), DEVMETHOD(device_detach, cbb_pci_detach), DEVMETHOD(device_shutdown, cbb_pci_shutdown), DEVMETHOD(device_suspend, cbb_pci_suspend), DEVMETHOD(device_resume, cbb_pci_resume), /* bus methods */ DEVMETHOD(bus_read_ivar, cbb_read_ivar), DEVMETHOD(bus_write_ivar, cbb_write_ivar), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_alloc_resource, cbb_pci_alloc_resource), DEVMETHOD(bus_adjust_resource, cbb_pci_adjust_resource), DEVMETHOD(bus_release_resource, cbb_pci_release_resource), #else DEVMETHOD(bus_alloc_resource, cbb_alloc_resource), DEVMETHOD(bus_release_resource, cbb_release_resource), #endif DEVMETHOD(bus_activate_resource, cbb_activate_resource), DEVMETHOD(bus_deactivate_resource, cbb_deactivate_resource), DEVMETHOD(bus_driver_added, cbb_driver_added), DEVMETHOD(bus_child_detached, cbb_child_detached), DEVMETHOD(bus_setup_intr, cbb_setup_intr), DEVMETHOD(bus_teardown_intr, cbb_teardown_intr), DEVMETHOD(bus_child_present, cbb_child_present), /* 16-bit card interface */ DEVMETHOD(card_set_res_flags, cbb_pcic_set_res_flags), DEVMETHOD(card_set_memory_offset, cbb_pcic_set_memory_offset), /* power interface */ DEVMETHOD(power_enable_socket, cbb_power_enable_socket), DEVMETHOD(power_disable_socket, cbb_power_disable_socket), /* pcib compatibility interface */ DEVMETHOD(pcib_maxslots, cbb_maxslots), DEVMETHOD(pcib_read_config, cbb_read_config), DEVMETHOD(pcib_write_config, cbb_write_config), DEVMETHOD(pcib_route_interrupt, cbb_route_interrupt), DEVMETHOD_END }; static driver_t cbb_driver = { "cbb", cbb_methods, sizeof(struct cbb_softc) }; DRIVER_MODULE(cbb, pci, cbb_driver, 0, 0); MODULE_PNP_INFO("W32:vendor/device;D:#", pci, cbb, yc_chipsets, nitems(yc_chipsets) - 1); MODULE_DEPEND(cbb, exca, 1, 1, 1); diff --git a/sys/dev/pccbb/pccbbvar.h b/sys/dev/pccbb/pccbbvar.h index 5d6529070a12..95ffc06ef439 100644 --- a/sys/dev/pccbb/pccbbvar.h +++ b/sys/dev/pccbb/pccbbvar.h @@ -1,166 +1,166 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000,2001 Jonathan Chen All rights reserved. * Copyright (c) 2003-2004 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Structure definitions for the Cardbus Bridge driver */ struct cbb_intrhand { driver_filter_t *filt; driver_intr_t *intr; void *arg; struct cbb_softc *sc; void *cookie; }; struct cbb_reslist { SLIST_ENTRY(cbb_reslist) link; struct resource *res; int type; int rid; /* note: unlike the regular resource list, there can be * duplicate rid's in the same list. However, the * combination of rid and res->r_dev should be unique. */ bus_addr_t cardaddr; /* for 16-bit pccard memory */ }; #define CBB_AUTO_OPEN_SMALLHOLE 0x100 struct cbb_softc { device_t dev; struct exca_softc exca; struct resource *base_res; struct resource *irq_res; void *intrhand; bus_space_tag_t bst; bus_space_handle_t bsh; uint32_t domain; unsigned int pribus; struct pcib_secbus bus; struct mtx mtx; int cardok; u_int32_t flags; #define CBB_16BIT_CARD 0x20000000 #define CBB_KTHREAD_RUNNING 0x40000000 #define CBB_KTHREAD_DONE 0x80000000 int chipset; /* chipset id */ #define CB_UNKNOWN 0 /* NOT Cardbus-PCI bridge */ #define CB_TI113X 1 /* TI PCI1130/1131 */ #define CB_TI12XX 2 /* TI PCI12xx/14xx/44xx/15xx/45xx */ #define CB_TI125X 3 /* TI PCI1250/1251(B)/1450 */ #define CB_RF5C47X 4 /* RICOH RF5C475/476/477 */ #define CB_RF5C46X 5 /* RICOH RF5C465/466/467 */ #define CB_CIRRUS 6 /* Cirrus Logic CLPD683x */ #define CB_TOPIC95 7 /* Toshiba ToPIC95 */ #define CB_TOPIC97 8 /* Toshiba ToPIC97/100 */ #define CB_O2MICRO 9 /* O2Micro chips */ SLIST_HEAD(, cbb_reslist) rl; device_t cbdev; struct proc *event_thread; void (*chipinit)(struct cbb_softc *); int powerintr; struct root_hold_token *sc_root_token; }; /* result of detect_card */ #define CARD_UKN_CARD 0x00 #define CARD_5V_CARD 0x01 #define CARD_3V_CARD 0x02 #define CARD_XV_CARD 0x04 #define CARD_YV_CARD 0x08 /* for power_socket */ #define CARD_VCC(X) (X) #define CARD_VPP_VCC 0xf0 #define CARD_VCCMASK 0xf #define CARD_VCCSHIFT 0 #define XV 2 #define YV 1 #define CARD_OFF (CARD_VCC(0)) extern int cbb_debug; int cbb_activate_resource(device_t brdev, device_t child, struct resource *r); struct resource *cbb_alloc_resource(device_t brdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); void cbb_child_detached(device_t brdev, device_t child); int cbb_child_present(device_t parent, device_t child); int cbb_deactivate_resource(device_t brdev, device_t child, struct resource *r); int cbb_detach(device_t brdev); void cbb_disable_func_intr(struct cbb_softc *sc); void cbb_driver_added(device_t brdev, driver_t *driver); void cbb_event_thread(void *arg); int cbb_pcic_set_memory_offset(device_t brdev, device_t child, int rid, uint32_t cardaddr, uint32_t *deltap); int cbb_pcic_set_res_flags(device_t brdev, device_t child, int type, int rid, u_long flags); int cbb_power(device_t brdev, int volts); int cbb_power_enable_socket(device_t brdev, device_t child); int cbb_power_disable_socket(device_t brdev, device_t child); int cbb_read_ivar(device_t brdev, device_t child, int which, uintptr_t *result); int cbb_release_resource(device_t 
brdev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); int cbb_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); int cbb_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int cbb_write_ivar(device_t brdev, device_t child, int which, uintptr_t value); /* */ static __inline void cbb_set(struct cbb_softc *sc, uint32_t reg, uint32_t val) { bus_space_write_4(sc->bst, sc->bsh, reg, val); } static __inline uint32_t cbb_get(struct cbb_softc *sc, uint32_t reg) { return (bus_space_read_4(sc->bst, sc->bsh, reg)); } static __inline void cbb_setb(struct cbb_softc *sc, uint32_t reg, uint32_t bits) { cbb_set(sc, reg, cbb_get(sc, reg) | bits); } static __inline void cbb_clrb(struct cbb_softc *sc, uint32_t reg, uint32_t bits) { cbb_set(sc, reg, cbb_get(sc, reg) & ~bits); } diff --git a/sys/dev/pci/hostb_pci.c b/sys/dev/pci/hostb_pci.c index 20e18a0e5f1c..3f6abf31b236 100644 --- a/sys/dev/pci/hostb_pci.c +++ b/sys/dev/pci/hostb_pci.c @@ -1,288 +1,287 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 1997, Stefan Esser * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include /* * Provide a device to "eat" the host->pci bridge devices that show up * on PCI buses and stop them showing up twice on the probes. This also * stops them showing up as 'none' in pciconf -l. If the host bridge * provides an AGP capability then we create a child agp device for the * agp GART driver to attach to. */ static int pci_hostb_probe(device_t dev) { u_int32_t id; id = pci_get_devid(dev); switch (id) { /* VIA VT82C596 Power Management Function */ case 0x30501106: return (ENXIO); default: break; } if (pci_get_class(dev) == PCIC_BRIDGE && pci_get_subclass(dev) == PCIS_BRIDGE_HOST) { device_set_desc(dev, "Host to PCI bridge"); device_quiet(dev); return (-10000); } return (ENXIO); } static int pci_hostb_attach(device_t dev) { bus_generic_probe(dev); /* * If AGP capabilities are present on this device, then create * an AGP child. */ if (pci_find_cap(dev, PCIY_AGP, NULL) == 0) device_add_child(dev, "agp", -1); bus_generic_attach(dev); return (0); } /* Bus interface. 
*/ static int pci_hostb_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } static int pci_hostb_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct resource * pci_hostb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, start, end, count, flags)); } static int -pci_hostb_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pci_hostb_release_resource(device_t dev, device_t child, struct resource *r) { - return (bus_release_resource(dev, type, rid, r)); + return (bus_release_resource(dev, r)); } /* PCI interface. */ static uint32_t pci_hostb_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } static void pci_hostb_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } static int pci_hostb_enable_busmaster(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_enable_busmaster\n", device_get_nameunit(child)); return (pci_enable_busmaster(dev)); } static int pci_hostb_disable_busmaster(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_disable_busmaster\n", device_get_nameunit(child)); return (pci_disable_busmaster(dev)); } static int pci_hostb_enable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_enable_io\n", device_get_nameunit(child)); return (pci_enable_io(dev, space)); } static int pci_hostb_disable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_disable_io\n", device_get_nameunit(child)); return (pci_disable_io(dev, space)); } static int pci_hostb_set_powerstate(device_t dev, device_t child, int state) { device_printf(dev, "child %s requested pci_set_powerstate\n", device_get_nameunit(child)); return (pci_set_powerstate(dev, state)); } static int pci_hostb_get_powerstate(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_get_powerstate\n", device_get_nameunit(child)); return (pci_get_powerstate(dev)); } static int pci_hostb_assign_interrupt(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_assign_interrupt\n", device_get_nameunit(child)); return (PCI_ASSIGN_INTERRUPT(device_get_parent(dev), dev)); } static int pci_hostb_find_cap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_cap(dev, capability, capreg)); } static int pci_hostb_find_next_cap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_cap(dev, capability, start, capreg)); } static int pci_hostb_find_extcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_extcap(dev, capability, capreg)); } static int pci_hostb_find_next_extcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_extcap(dev, capability, start, capreg)); } static int pci_hostb_find_htcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_htcap(dev, capability, capreg)); } static int pci_hostb_find_next_htcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_htcap(dev, capability, start, capreg)); } static device_method_t pci_hostb_methods[] = { /* Device interface 
*/ DEVMETHOD(device_probe, pci_hostb_probe), DEVMETHOD(device_attach, pci_hostb_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, pci_hostb_read_ivar), DEVMETHOD(bus_write_ivar, pci_hostb_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, pci_hostb_alloc_resource), DEVMETHOD(bus_release_resource, pci_hostb_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), /* PCI interface */ DEVMETHOD(pci_read_config, pci_hostb_read_config), DEVMETHOD(pci_write_config, pci_hostb_write_config), DEVMETHOD(pci_enable_busmaster, pci_hostb_enable_busmaster), DEVMETHOD(pci_disable_busmaster, pci_hostb_disable_busmaster), DEVMETHOD(pci_enable_io, pci_hostb_enable_io), DEVMETHOD(pci_disable_io, pci_hostb_disable_io), DEVMETHOD(pci_get_powerstate, pci_hostb_get_powerstate), DEVMETHOD(pci_set_powerstate, pci_hostb_set_powerstate), DEVMETHOD(pci_assign_interrupt, pci_hostb_assign_interrupt), DEVMETHOD(pci_find_cap, pci_hostb_find_cap), DEVMETHOD(pci_find_next_cap, pci_hostb_find_next_cap), DEVMETHOD(pci_find_extcap, pci_hostb_find_extcap), DEVMETHOD(pci_find_next_extcap, pci_hostb_find_next_extcap), DEVMETHOD(pci_find_htcap, pci_hostb_find_htcap), DEVMETHOD(pci_find_next_htcap, pci_hostb_find_next_htcap), { 0, 0 } }; static driver_t pci_hostb_driver = { "hostb", pci_hostb_methods, 1, }; DRIVER_MODULE(hostb, pci, pci_hostb_driver, 0, 0); diff --git a/sys/dev/pci/isa_pci.c b/sys/dev/pci/isa_pci.c index a703a9e76169..c8ad277edb41 100644 --- a/sys/dev/pci/isa_pci.c +++ b/sys/dev/pci/isa_pci.c @@ -1,242 +1,240 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * PCI:ISA bridge support */ #include #include #include #include #include #include #include #include #include #include #include static int isab_pci_probe(device_t dev); static int isab_pci_attach(device_t dev); static struct resource * isab_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int isab_pci_release_resource(device_t dev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); static device_method_t isab_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isab_pci_probe), DEVMETHOD(device_attach, isab_pci_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, isab_pci_alloc_resource), DEVMETHOD(bus_release_resource, isab_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; struct isab_pci_resource { struct resource *ip_res; int ip_refs; }; struct isab_pci_softc { struct isab_pci_resource isab_pci_res[PCIR_MAX_BAR_0 + 1]; }; static driver_t isab_driver = { "isab", isab_methods, sizeof(struct isab_pci_softc), }; DRIVER_MODULE(isab, pci, isab_driver, 0, 0); /* * XXX we need to add a quirk list here for bridges that don't correctly * report themselves. */ static int isab_pci_probe(device_t dev) { int matched = 0; /* * Try for a generic match based on class/subclass. */ if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_ISA)) { matched = 1; } else { /* * These are devices that we *know* are PCI:ISA bridges. * Sometimes, however, they don't report themselves as * such. Check in case one of them is pretending to be * something else. 
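 		 * Each devid in the switch below packs the 16-bit device
 		 * id in the upper half and the vendor id in the lower
 		 * half, which is what pci_get_devid(9) returns, i.e.:
 		 *
 		 *	devid = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
 		 *
 		 * so 0x122e8086 is device 0x122e from vendor 0x8086 (Intel).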
 		 */
 		switch (pci_get_devid(dev)) {
 		case 0x04848086:	/* Intel 82378ZB/82378IB */
 		case 0x122e8086:	/* Intel 82371FB */
 		case 0x70008086:	/* Intel 82371SB */
 		case 0x71108086:	/* Intel 82371AB */
 		case 0x71988086:	/* Intel 82443MX */
 		case 0x24108086:	/* Intel 82801AA (ICH) */
 		case 0x24208086:	/* Intel 82801AB (ICH0) */
 		case 0x24408086:	/* Intel 82801AB (ICH2) */
 		case 0x00061004:	/* VLSI 82C593 */
 		case 0x05861106:	/* VIA 82C586 */
 		case 0x05961106:	/* VIA 82C596 */
 		case 0x06861106:	/* VIA 82C686 */
 		case 0x153310b9:	/* AcerLabs M1533 */
 		case 0x154310b9:	/* AcerLabs M1543 */
 		case 0x00081039:	/* SiS 85c503 */
 		case 0x00001078:	/* Cyrix Cx5510 */
 		case 0x01001078:	/* Cyrix Cx5530 */
 		case 0xc7001045:	/* OPTi 82C700 (FireStar) */
 		case 0x886a1060:	/* UMC UM8886 ISA */
 		case 0x02001166:	/* ServerWorks IB6566 PCI */
 			if (bootverbose)
 				printf("PCI-ISA bridge with incorrect subclass 0x%x\n",
 				    pci_get_subclass(dev));
 			matched = 1;
 			break;
 		default:
 			break;
 		}
 	}
 
 	if (matched) {
 		device_set_desc(dev, "PCI-ISA bridge");
 		return(-10000);
 	}
 	return(ENXIO);
 }
 
 static int
 isab_pci_attach(device_t dev)
 {
 
 	bus_generic_probe(dev);
 	return (isab_attach(dev));
 }
 
 static struct resource *
 isab_pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
 {
 	struct isab_pci_softc *sc;
 	int bar;
 
 	if (device_get_parent(child) != dev)
 		return bus_generic_alloc_resource(dev, child, type, rid, start,
 		    end, count, flags);
 
 	switch (type) {
 	case SYS_RES_MEMORY:
 	case SYS_RES_IOPORT:
 		/*
 		 * For BARs, we cache the resource so that we only allocate it
 		 * from the PCI bus once.
 		 */
 		bar = PCI_RID2BAR(*rid);
 		if (bar < 0 || bar > PCIR_MAX_BAR_0)
 			return (NULL);
 		sc = device_get_softc(dev);
 		if (sc->isab_pci_res[bar].ip_res == NULL)
 			sc->isab_pci_res[bar].ip_res = bus_alloc_resource(dev,
 			    type, rid, start, end, count, flags);
 		if (sc->isab_pci_res[bar].ip_res != NULL)
 			sc->isab_pci_res[bar].ip_refs++;
 		return (sc->isab_pci_res[bar].ip_res);
 	}
 	return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
 	    start, end, count, flags));
 }
 
 static int
-isab_pci_release_resource(device_t dev, device_t child, int type, int rid,
-    struct resource *r)
+isab_pci_release_resource(device_t dev, device_t child, struct resource *r)
 {
 	struct isab_pci_softc *sc;
 	int bar, error;
 
 	if (device_get_parent(child) != dev)
-		return bus_generic_release_resource(dev, child, type, rid, r);
+		return bus_generic_release_resource(dev, child, r);
 
-	switch (type) {
+	switch (rman_get_type(r)) {
 	case SYS_RES_MEMORY:
 	case SYS_RES_IOPORT:
 		/*
 		 * For BARs, we release the resource from the PCI bus
 		 * when the last child reference goes away.
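 		 * With the two-argument bus_release_resource(9) the type
 		 * and rid are no longer passed in; they are recovered from
 		 * the resource itself. A minimal sketch of the pattern
 		 * used below:
 		 *
 		 *	rid = rman_get_rid(r);
 		 *	bar = PCI_RID2BAR(rid);
 		 *	switch (rman_get_type(r)) ...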
 		 */
-		bar = PCI_RID2BAR(rid);
+		bar = PCI_RID2BAR(rman_get_rid(r));
 		if (bar < 0 || bar > PCIR_MAX_BAR_0)
 			return (EINVAL);
 		sc = device_get_softc(dev);
 		if (sc->isab_pci_res[bar].ip_res == NULL)
 			return (EINVAL);
 		KASSERT(sc->isab_pci_res[bar].ip_res == r,
 		    ("isa_pci resource mismatch"));
 		if (sc->isab_pci_res[bar].ip_refs > 1) {
 			sc->isab_pci_res[bar].ip_refs--;
 			return (0);
 		}
 		KASSERT(sc->isab_pci_res[bar].ip_refs > 0,
 		    ("isa_pci resource reference count underflow"));
-		error = bus_release_resource(dev, type, rid, r);
+		error = bus_release_resource(dev, r);
 		if (error == 0) {
 			sc->isab_pci_res[bar].ip_res = NULL;
 			sc->isab_pci_res[bar].ip_refs = 0;
 		}
 		return (error);
 	}
 
-	return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type,
-	    rid, r));
+	return (bus_generic_release_resource(dev, child, r));
 }
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 2797bded62fd..cbab4b50392f 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -1,6838 +1,6835 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
  * Copyright (c) 1997, Stefan Esser
  * Copyright (c) 2000, Michael Smith
  * Copyright (c) 2000, BSDi
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include #include "opt_acpi.h" #include "opt_iommu.h" #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static int pci_probe(device_t dev); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); #endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq); static void pci_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static int pci_reset_post(device_t dev, device_t child); static int pci_reset_prepare(device_t dev, device_t child); static int pci_reset_child(device_t dev, device_t child, int flags); static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *rid); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), DEVMETHOD(bus_reset_prepare, 
pci_reset_prepare), DEVMETHOD(bus_reset_post, pci_reset_post), DEVMETHOD(bus_reset_child, pci_reset_child), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method), DEVMETHOD(bus_child_location, pci_child_location_method), DEVMETHOD(bus_get_device_path, pci_get_device_path_method), DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method), DEVMETHOD(pci_find_extcap, pci_find_extcap_method), DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_id, pci_get_id_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc)); EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_modevent, NULL, BUS_PASS_BUS); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define PCI_QUIRK_ENABLE_MSI_VM 3 /* 
Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ #define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). */ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. */ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit * of the command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ /* * HPE Gen 10 VGA has a memory range that can't be allocated in the * expected place. 
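 	 * A quirk entry packs { devid, type, arg1, arg2 }; a hypothetical
 	 * new entry would look like (sketch only, made-up device id):
 	 *
 	 *	{ 0xdead8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },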
*/ { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 }, { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 1; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means" " disable. 1 means conservatively place devices into D3 state. 2 means" " aggressively place devices into D3 state. 3 means put absolutely" " everything in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_msix_rewrite_table = 0; SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN, &pci_msix_rewrite_table, 0, "Rewrite entire MSI-X table when updating MSI-X entries"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers. 
Disable this if you depend on" " BIOS emulation of USB devices, that is you use USB devices (like" " keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); #endif static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); int pci_enable_aspm = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm, 0, "Enable support for PCIe Active State Power Management"); static int pci_clear_aer_on_attach = 0; SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN, &pci_clear_aer_on_attach, 0, "Clear port and device AER state on driver attach"); static bool pci_enable_mps_tune = true; SYSCTL_BOOL(_hw_pci, OID_AUTO, enable_mps_tune, CTLFLAG_RWTUN, &pci_enable_mps_tune, 1, "Enable tuning of MPS(maximum payload size)." ); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo = NULL; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { break; } } return (dinfo != NULL ? dinfo->cfg.dev : NULL); } /* Find a device_t by vendor/device ID */ device_t pci_find_device(uint16_t vendor, uint16_t device) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.vendor == vendor) && (dinfo->cfg.device == device)) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class(uint8_t class, uint8_t subclass) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class_from(uint8_t class, uint8_t subclass, device_t from) { struct pci_devinfo *dinfo; bool found = false; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (from != NULL && found == false) { if (from != dinfo->cfg.dev) continue; found = true; continue; } if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } static int pci_printf(pcicfgregs *cfg, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, cfg->func); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /* return base address of memory or port map */ static pci_addr_t pci_mapbase(uint64_t mapreg) { if (PCI_BAR_MEM(mapreg)) return (mapreg & PCIM_BAR_MEM_BASE); else return (mapreg & PCIM_BAR_IO_BASE); } /* return map type of memory or port map */ static const char * pci_maptype(uint64_t mapreg) { if (PCI_BAR_IO(mapreg)) return ("I/O Port"); if (mapreg & PCIM_BAR_MEM_PREFETCH) return ("Prefetchable Memory"); return ("Memory"); } /* return log2 of map size decoded for memory or port map */ int pci_mapsize(uint64_t testval) { int ln2size; testval = pci_mapbase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return base address of device ROM */ static pci_addr_t pci_rombase(uint64_t mapreg) { return (mapreg & PCIM_BIOS_ADDR_MASK); } /* return log2 of map size decided for device ROM */ static int pci_romsize(uint64_t testval) { int ln2size; testval = pci_rombase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return log2 of address range supported by map register */ static int pci_maprange(uint64_t mapreg) { int ln2range = 0; if (PCI_BAR_IO(mapreg)) ln2range = 32; else switch (mapreg & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_32: ln2range = 32; break; case PCIM_BAR_MEM_1MB: ln2range = 20; break; case PCIM_BAR_MEM_64: ln2range = 64; break; } return (ln2range); } /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); if (vid == PCIV_INVALID) return (NULL); did = REG(PCIR_DEVICE, 2); return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return 
(malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = cfg->progif; devlist_entry->conf.pc_revid = cfg->revid; pci_numdevs++; pci_generation++; return (devlist_entry); } #undef REG static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ cfg->ea.ea_location + (n), w) int num_ent; int ptr; int a, b; uint32_t val; int ent_size; uint32_t dw[4]; uint64_t base, max_offset; struct pci_ea_entry *eae; if (cfg->ea.ea_location == 0) return; STAILQ_INIT(&cfg->ea.ea_entries); /* Determine the number of entries */ num_ent = REG(PCIR_EA_NUM_ENT, 2); num_ent &= PCIM_EA_NUM_ENT_MASK; /* Find the first entry to care of */ ptr = PCIR_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) ptr += 4; for (a = 0; a < num_ent; a++) { eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO); eae->eae_cfg_offset = cfg->ea.ea_location + ptr; /* Read a number of dwords in the entry */ val = REG(ptr, 4); ptr += 4; ent_size = (val & PCIM_EA_ES); for (b = 0; b < ent_size; b++) { dw[b] = REG(ptr, 4); ptr += 4; } eae->eae_flags = val; eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; base = dw[0] & PCIM_EA_FIELD_MASK; max_offset = dw[1] | ~PCIM_EA_FIELD_MASK; b = 2; if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { base |= (uint64_t)dw[b] << 32UL; b++; } if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { max_offset |= (uint64_t)dw[b] << 32UL; b++; } eae->eae_base = base; eae->eae_max_offset = max_offset; STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); if (bootverbose) { printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n", cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); } } } #undef REG static void pci_read_cap(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) 
PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) uint64_t addr; #endif uint32_t val; int ptr, nextptr, ptrptr; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptrptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ break; default: return; /* no extended capabilities support */ } nextptr = REG(ptrptr, 1); /* sanity check? */ /* * Read capability entries. */ while (nextptr != 0) { /* Sanity check */ if (nextptr > 255) { printf("illegal PCI extended capability offset %d\n", nextptr); return; } /* Find the next entry */ ptr = nextptr; nextptr = REG(ptr + PCICAP_NEXTPTR, 1); /* Process this entry */ switch (REG(ptr + PCICAP_ID, 1)) { case PCIY_PMG: /* PCI power management */ if (cfg->pp.pp_cap == 0) { cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; cfg->pp.pp_bse = ptr + PCIR_POWER_BSE; if ((nextptr - ptr) > PCIR_POWER_DATA) cfg->pp.pp_data = ptr + PCIR_POWER_DATA; } break; case PCIY_HT: /* HyperTransport */ /* Determine HT-specific capability type. */ val = REG(ptr + PCIR_HT_COMMAND, 2); if ((val & 0xe000) == PCIM_HTCAP_SLAVE) cfg->ht.ht_slave = ptr; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) switch (val & PCIM_HTCMD_CAP_MASK) { case PCIM_HTCAP_MSI_MAPPING: if (!(val & PCIM_HTCMD_MSI_FIXED)) { /* Sanity check the mapping window. */ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & PCIM_MSICTRL_MMC_MASK)>>1); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1; val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. 
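 		 * Drivers normally retrieve the offset cached here later
 		 * via pci_find_cap(9); a minimal sketch:
 		 *
 		 *	int ptr;
 		 *
 		 *	if (pci_find_cap(dev, PCIY_EXPRESS, &ptr) == 0)
 		 *		val = pci_read_config(dev, ptr + PCIER_FLAGS, 2);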
*/ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). */ if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { device_printf(pcib, "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, 2); } #endif /* REG and WREG use carry through to next functions */ } /* * PCI Vital Product Data */ #define PCI_VPD_TIMEOUT 1000000 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); return (0); } #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } return (0); } #endif #undef PCI_VPD_TIMEOUT struct vpd_readstate { device_t pcib; pcicfgregs *cfg; uint32_t val; int bytesinval; int off; uint8_t cksum; }; /* return 0 and one byte in *data if no read error, -1 else */ static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) { uint32_t reg; uint8_t byte; if (vrs->bytesinval == 0) { if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®)) return (-1); vrs->val = le32toh(reg); vrs->off += 4; byte = vrs->val & 0xff; vrs->bytesinval = 3; } else { vrs->val = vrs->val >> 8; byte = vrs->val & 0xff; vrs->bytesinval--; } vrs->cksum += byte; *data = byte; return (0); } /* return 0 on match, -1 and "unget" byte on no match */ static int vpd_expectbyte(struct vpd_readstate *vrs, uint8_t expected) { uint8_t data; if (vpd_nextbyte(vrs, &data) != 0) return (-1); if (data == expected) return (0); vrs->cksum -= data; vrs->val = (vrs->val << 8) + data; vrs->bytesinval++; return (-1); } /* return size if tag matches, -1 on no match, -2 on read error */ static int vpd_read_tag_size(struct vpd_readstate *vrs, uint8_t vpd_tag) { uint8_t byte1, byte2; if (vpd_expectbyte(vrs, vpd_tag) != 0) return (-1); if ((vpd_tag & 0x80) == 0) return (vpd_tag & 0x07); if (vpd_nextbyte(vrs, &byte1) != 0) return (-2); if (vpd_nextbyte(vrs, &byte2) != 0) return (-2); return ((byte2 << 8) + byte1); } /* (re)allocate buffer in multiples of 8 elements */ static void* alloc_buffer(void* buffer, size_t element_size, int needed) { int alloc, new_alloc; alloc = roundup2(needed, 8); new_alloc = roundup2(needed + 1, 8); if (alloc != new_alloc) { buffer = reallocf(buffer, new_alloc * element_size, M_DEVBUF, M_WAITOK | M_ZERO); } return (buffer); } /* read VPD keyword and return element size, return -1 on read error */ static int 
vpd_read_elem_head(struct vpd_readstate *vrs, char keyword[2]) { uint8_t data; if (vpd_nextbyte(vrs, &keyword[0]) != 0) return (-1); if (vpd_nextbyte(vrs, &keyword[1]) != 0) return (-1); if (vpd_nextbyte(vrs, &data) != 0) return (-1); return (data); } /* read VPD data element of given size into allocated buffer */ static char * vpd_read_value(struct vpd_readstate *vrs, int size) { int i; char char1; char *value; value = malloc(size + 1, M_DEVBUF, M_WAITOK); for (i = 0; i < size; i++) { if (vpd_nextbyte(vrs, &char1) != 0) { free(value, M_DEVBUF); return (NULL); } value[i] = char1; } value[size] = '\0'; return (value); } /* read VPD into *keyword and *value, return length of data element */ static int vpd_read_elem_data(struct vpd_readstate *vrs, char keyword[2], char **value, int maxlen) { int len; len = vpd_read_elem_head(vrs, keyword); if (len > maxlen) return (-1); *value = vpd_read_value(vrs, len); return (len); } /* subtract all data following first byte from checksum of RV element */ static void vpd_fixup_cksum(struct vpd_readstate *vrs, char *rvstring, int len) { int i; uint8_t fixup; fixup = 0; for (i = 1; i < len; i++) fixup += rvstring[i]; vrs->cksum -= fixup; } /* fetch one read-only element and return size of heading + data */ static size_t next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_readonly *vpd_ros; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt); vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt]; maxsize -= 3; len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize); if (vpd_ros->value == NULL) return (-1); vpd_ros->len = len; if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') { vpd_fixup_cksum(vrs, vpd_ros->value, len); if (vrs->cksum != 0) { pci_printf(cfg, "invalid VPD checksum %#hhx\n", vrs->cksum); return (-1); } } vpd->vpd_rocnt++; return (len + 3); } /* fetch one writable element and return size of heading + data */ static size_t next_vpd_rw_elem(struct vpd_readstate *vrs, int maxsize) { struct pcicfg_vpd *vpd; pcicfgregs *cfg; struct vpd_write *vpd_w; int len; cfg = vrs->cfg; vpd = &cfg->vpd; if (maxsize < 3) return (-1); vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt); if (vpd->vpd_w == NULL) { pci_printf(cfg, "out of memory"); return (-1); } vpd_w = &vpd->vpd_w[vpd->vpd_wcnt]; maxsize -= 3; vpd_w->start = vrs->off + 3 - vrs->bytesinval; len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize); if (vpd_w->value == NULL) return (-1); vpd_w->len = len; vpd->vpd_wcnt++; return (len + 3); } /* free all memory allocated for VPD data */ static void vpd_free(struct pcicfg_vpd *vpd) { int i; free(vpd->vpd_ident, M_DEVBUF); for (i = 0; i < vpd->vpd_rocnt; i++) free(vpd->vpd_ros[i].value, M_DEVBUF); free(vpd->vpd_ros, M_DEVBUF); vpd->vpd_rocnt = 0; for (i = 0; i < vpd->vpd_wcnt; i++) free(vpd->vpd_w[i].value, M_DEVBUF); free(vpd->vpd_w, M_DEVBUF); vpd->vpd_wcnt = 0; } #define VPD_TAG_END ((0x0f << 3) | 0) /* small tag, len == 0 */ #define VPD_TAG_IDENT (0x02 | 0x80) /* large tag */ #define VPD_TAG_RO (0x10 | 0x80) /* large tag */ #define VPD_TAG_RW (0x11 | 0x80) /* large tag */ static int pci_parse_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int cksumvalid; int size, elem_size; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; /* read VPD ident element - mandatory */ size = 
vpd_read_tag_size(&vrs, VPD_TAG_IDENT); if (size <= 0) { pci_printf(cfg, "no VPD ident found\n"); return (0); } cfg->vpd.vpd_ident = vpd_read_value(&vrs, size); if (cfg->vpd.vpd_ident == NULL) { pci_printf(cfg, "error accessing VPD ident data\n"); return (0); } /* read VPD RO elements - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_RO); if (size <= 0) { pci_printf(cfg, "no read-only VPD data found\n"); return (0); } while (size > 0) { elem_size = next_vpd_ro_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing read-only VPD data\n"); return (-1); } size -= elem_size; } cksumvalid = (vrs.cksum == 0); if (!cksumvalid) return (-1); /* read VPD RW elements - optional */ size = vpd_read_tag_size(&vrs, VPD_TAG_RW); if (size == -2) return (-1); while (size > 0) { elem_size = next_vpd_rw_elem(&vrs, size); if (elem_size < 0) { pci_printf(cfg, "error accessing writeable VPD data\n"); return (-1); } size -= elem_size; } /* read empty END tag - mandatory */ size = vpd_read_tag_size(&vrs, VPD_TAG_END); if (size != 0) { pci_printf(cfg, "No valid VPD end tag found\n"); } return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { int status; status = pci_parse_vpd(pcib, cfg); if (status < 0) vpd_free(&cfg->vpd); cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; } return (ENOENT); } /* * Find the next requested HyperTransport capability after start and return * the offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. 
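 * A typical caller iterates every matching capability with the
 * find/find_next pair, e.g. (a sketch using the pci(9) wrappers):
 *
 *	for (error = pci_find_htcap(dev, PCIM_HTCAP_MSI_MAPPING, &ptr);
 *	    error == 0;
 *	    error = pci_find_next_htcap(dev, PCIM_HTCAP_MSI_MAPPING,
 *	    ptr, &ptr))
 *		process(ptr);
 *
 * where process() stands in for driver-specific work.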
*/ int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { int ptr; uint16_t val; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT, ("start capability is not HyperTransport capability")); ptr = start; /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t status; uint8_t ptr; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. */ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. */ while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the next requested capability after start and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg) { uint8_t ptr; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability, ("start capability is not expected capability")); ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1); while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Find the next requested extended capability after start and return the * offset in configuration space via the pointer provided. 
The function * returns 0 on success and an error code otherwise. */ int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ecap = pci_read_config(child, start, 4); KASSERT(PCI_EXTCAP_ID(ecap) == capability, ("start extended capability is not expected capability")); ptr = PCI_EXTCAP_NEXTPTR(ecap); while (ptr != 0) { ecap = pci_read_config(child, ptr, 4); if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); } return (ENOENT); } /* * Support for MSI-X message interrupts. */ static void pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16; bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); bus_write_4(msix->msix_table_res, offset + 4, address >> 32); bus_write_4(msix->msix_table_res, offset + 8, data); } void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data) { if (pci_msix_rewrite_table) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; /* * Some VM hosts require MSIX to be disabled in the * control register before updating the MSIX table * entries are allowed. It is not enough to only * disable MSIX while updating a single entry. MSIX * must be disabled while updating all entries in the * table. */ pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); pci_resume_msix(child); } else pci_write_msix_entry(child, index, address, data); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_mask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_msgnum > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val |= PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } void pci_unmask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val &= ~PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } int pci_pending_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, bit; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_pba_offset + (index / 32) * 4; bit = 1 << index % 32; return (bus_read_4(msix->msix_pba_res, offset) & bit); } /* * Restore MSI-X registers and table during resume. 
If MSI-X is * enabled then walk the virtual table to restore the actual MSI-X * table. */ static void pci_resume_msix(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct msix_table_entry *mte; struct msix_vector *mv; int i; if (msix->msix_alloc > 0) { /* First, mask all vectors. */ for (i = 0; i < msix->msix_msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. */ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_write_msix_entry(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irq, max; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, cfg->msix.msix_msgnum); max = min(*count, cfg->msix.msix_msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { int run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = 0; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = 1; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = 0; } /* Start new range. */ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? 
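 	 * (Typical driver usage of this allocator, a sketch assuming
 	 * the MSI-X table BAR was already activated:
 	 *
 	 *	count = pci_msix_count(dev);
 	 *	error = pci_alloc_msix(dev, &count);
 	 *
 	 * after which, on success, SYS_RES_IRQ rids 1..count may be
 	 * allocated with bus_alloc_resource_any(9); pci_msix_count()
 	 * and pci_alloc_msix() are the pci(9) wrappers for the methods
 	 * defined here.)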
*/ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* Mask all vectors. */ for (i = 0; i < cfg->msix.msix_msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. */ cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual, M_DEVBUF, M_WAITOK | M_ZERO); cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); cfg->msix.msix_vectors[i].mv_irq = rle->start; cfg->msix.msix_table[i].mte_vector = i + 1; } /* Update control register to enable MSI-X. */ cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, cfg->msix.msix_ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msix.msix_alloc = actual; cfg->msix.msix_table_len = actual; *count = actual; return (0); } /* * By default, pci_alloc_msix() will assign the allocated IRQ * resources consecutively to the first N messages in the MSI-X table. * However, device drivers may want to use different layouts if they * either receive fewer messages than they asked for, or they wish to * populate the MSI-X table sparsely. This method allows the driver * to specify what layout it wants. It must be called after a * successful pci_alloc_msix() but before any of the associated * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). * * The 'vectors' array contains 'count' message vectors. The array * maps directly to the MSI-X table in that index 0 in the array * specifies the vector for the first message in the MSI-X table, etc. * The vector value in each array index can either be 0 to indicate * that no vector should be assigned to a message slot, or it can be a * number from 1 to N (where N is the count returned from a * succcessful call to pci_alloc_msix()) to indicate which message * vector (IRQ) to be used for the corresponding message. * * On successful return, each message with a non-zero vector will have * an associated SYS_RES_IRQ whose rid is equal to the array index + * 1. Additionally, if any of the IRQs allocated via the previous * call to pci_alloc_msix() are not used in the mapping, those IRQs * will be freed back to the system automatically. * * For example, suppose a driver has a MSI-X table with 6 messages and * asks for 6 messages, but pci_alloc_msix() only returns a count of * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and * C. After the call to pci_alloc_msix(), the device will be setup to * have an MSI-X table of ABC--- (where - means no vector assigned). * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, * then the MSI-X table will look like A-AB-B, and the 'C' vector will * be freed back to the system. This device will also have valid * SYS_RES_IRQ rids of 1, 3, 4, and 6. * * In any case, the SYS_RES_IRQ rid X will always map to the message * at MSI-X table index X - 1 and will only be valid if a vector is * assigned to that table entry. */ int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i, irq, j, *used; /* * Have to have at least one message in the table but the * table can't be bigger than the actual MSI-X table in the * device. */ if (count == 0 || count > msix->msix_msgnum) return (EINVAL); /* Sanity check the vectors. 
*/ for (i = 0; i < count; i++) if (vectors[i] > msix->msix_alloc) return (EINVAL); /* * Make sure there aren't any holes in the vectors to be used. * It's a big pain to support it, and it doesn't really make * sense anyway. Also, at least one vector must be used. */ used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = 1; for (i = 0; i < msix->msix_alloc - 1; i++) if (used[i] == 0 && used[i + 1] == 1) { free(used, M_DEVBUF); return (EINVAL); } if (used[0] != 1) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (used[j] == 0) { struct msix_vector *vec; while (used[j] == 0) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. 
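 *
 * (Editorial aside, not part of the original source: the EBUSY checks
 * above imply the teardown order a driver must follow before releasing
 * its messages; a hedged sketch, with "sc" a hypothetical softc and
 * using the two-argument bus_release_resource() form:
 *
 *	for (i = 0; i < sc->nvec; i++) {
 *		bus_teardown_intr(dev, sc->irq_res[i], sc->irq_tag[i]);
 *		bus_release_resource(dev, sc->irq_res[i]);
 *	}
 *	pci_release_msi(dev);
 *
 * pci_release_msi(9) reaches this function first, as seen later in
 * pci_release_msi_method().)
 *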
*/ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. */ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the maximum number of MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_msgnum); return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping.
*/ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_relaxed_ordering_enabled(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_RELAXED_ORD_ENABLE; return (val != 0); } int pci_get_max_payload(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_PAYLOAD; val >>= 5; return (1 << (val + 7)); } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in (value & mask). Bits not set in mask are * not adjusted. * * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. 
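 *
 * (Editorial aside, not part of the original source: as a usage note
 * for the pcie_adjust_config() helper defined a little earlier, a
 * driver that wants to turn off relaxed ordering could do, e.g.:
 *
 *	old = pcie_adjust_config(dev, PCIER_DEVICE_CTL,
 *	    PCIEM_CTL_RELAXED_ORD_ENABLE, 0, 2);
 *
 * i.e. clear the masked bit, set nothing, and get the previous
 * register value back.)
 *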
*/ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; int error, i, j; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. */ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { bool found = false; for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } found = true; } } return (found ? 0 : ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. 
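 *
 * (Editorial aside, not part of the original source: the
 * pci_honor_msi_blacklist knob tested below is conventionally exposed
 * as a loader tunable; assuming the standard name, it would be
 * overridden from /boot/loader.conf as:
 *
 *	hw.pci.honor_msi_blacklist="0"
 * )
 *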
In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irqs[32]; uint16_t ctrl; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%d supported)\n", *count, cfg->msi.msi_msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, cfg->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { int run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges.
'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl = cfg->msi.msi_ctrl; ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; int error, i, irqs[32]; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. */ KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), ("%s: MSI still enabled", __func__)); msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); for (i = 0; i < msi->msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ msi->msi_alloc = 0; msi->msi_addr = 0; msi->msi_data = 0; return (0); } /* * Return the maximum number of MSI messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msi() can return. * Thus, it is subject to the tunables, etc. */ int pci_msi_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; if (pci_do_msi && msi->msi_location != 0) return (msi->msi_msgnum); return (0); } /* free pcicfgregs structure and all dependent data structures */ int pci_freecfg(struct pci_devinfo *dinfo) { struct devlist *devlist_head; struct pci_map *pm, *next; devlist_head = &pci_devq; if (dinfo->cfg.vpd.vpd_reg) vpd_free(&dinfo->cfg.vpd); STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { free(pm, M_DEVBUF); } STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); free(dinfo, M_DEVBUF); /* increment the generation count */ pci_generation++; /* we're losing one device */ pci_numdevs--; return (0); } /* * PCI power management */ int pci_set_powerstate_method(device_t dev, device_t child, int state) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int oldstate, highest, delay; if (cfg->pp.pp_cap == 0) return (EOPNOTSUPP); /* * Optimize a no state change request away.
While it would be OK to * write to the hardware in theory, some devices have shown odd * behavior when going from D3 -> D3. */ oldstate = pci_get_powerstate(child); if (oldstate == state) return (0); /* * The PCI power management specification states that after a state * transition between PCI power states, system software must * guarantee a minimal delay before the function accesses the device. * Compute the worst case delay that we need to guarantee before we * access the device. Many devices will be responsive much more * quickly than this delay, but there are some that don't respond * instantly to state changes. Transitions to/from D3 state require * 10ms, while D2 requires 200us, and D0/1 require none. The delay * is done below with DELAY rather than a sleeper function because * this function can be called from contexts where we cannot sleep. */ highest = (oldstate > state) ? oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, state); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_cap != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* * Some convenience functions for PCI device drivers. 
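 *
 * (Editorial aside, not part of the original source: a typical driver
 * attach routine uses the wrappers below along the lines of:
 *
 *	pci_enable_busmaster(dev);
 *	pci_enable_io(dev, SYS_RES_MEMORY);
 *
 * which set PCIM_CMD_BUSMASTEREN and PCIM_CMD_MEMEN respectively in
 * the command register.)
 *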
*/ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. */ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_cap) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { int ctrl; ctrl = cfg->msi.msi_ctrl; printf("\tMSI supports %d message%s%s%s\n", cfg->msi.msi_msgnum, (cfg->msi.msi_msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { printf("\tMSI-X supports %d message%s ", cfg->msix.msix_msgnum, (cfg->msix.msix_msgnum == 1) ? 
"" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. * * NB: according to the PCI Local Bus Specification, rev. 3.0: * "Software writes 0FFFFFFFFh to both registers, reads them back, * and combines the result into a 64-bit value." (section 6.2.5.1) * * Writes to both registers must be performed before attempting to * read back the size value. */ testval = 0; pci_write_config(dev, reg, 0xffffffff, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } testval |= pci_read_config(dev, reg, 4); /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. */ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. 
*/ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } struct pci_map * pci_first_bar(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); return (STAILQ_FIRST(&dinfo->cfg.maps)); } struct pci_map * pci_next_bar(struct pci_map *pm) { return (STAILQ_NEXT(pm, pm_link)); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); #ifdef PCI_IOV if ((dinfo->cfg.flags & PCICFG_VF) != 0) { struct pcicfg_iov *iov; iov = dinfo->cfg.iov; cmd = pci_read_config(iov->iov_pf, iov->iov_pos + PCIR_SRIOV_CTL, 2); return ((cmd & PCIM_SRIOV_VF_MSE) != 0); } #endif cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. */ static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch) { struct pci_map *pm; pci_addr_t base, map, testval; pci_addr_t start, end, count; int barlen, basezero, flags, maprange, mapsize, type; uint16_t cmd; struct resource *res; /* * The BAR may already exist if the device is a CardBus card * whose CIS is stored in this BAR. */ pm = pci_find_bar(dev, reg); if (pm != NULL) { maprange = pci_maprange(pm->pm_value); barlen = maprange == 64 ? 
2 : 1; return (barlen); } pci_read_bar(dev, reg, &map, &testval, NULL); if (PCI_BAR_MEM(map)) { type = SYS_RES_MEMORY; if (map & PCIM_BAR_MEM_PREFETCH) prefetch = 1; } else type = SYS_RES_IOPORT; mapsize = pci_mapsize(testval); base = pci_mapbase(map); #ifdef __PCI_BAR_ZERO_VALID basezero = 0; #else basezero = base == 0; #endif maprange = pci_maprange(map); barlen = maprange == 64 ? 2 : 1; /* * For I/O registers, if bottom bit is set, and the next bit up * isn't clear, we know we have a BAR that doesn't conform to the * spec, so ignore it. Also, sanity check the size of the data * areas against the type of memory involved. Memory must be at least * 16 bytes in size, while I/O ranges must be at least 4. */ if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) return (barlen); if ((type == SYS_RES_MEMORY && mapsize < 4) || (type == SYS_RES_IOPORT && mapsize < 2)) return (barlen); /* Save a record of this BAR. */ pm = pci_add_bar(dev, reg, map, mapsize); if (bootverbose) { printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); if (type == SYS_RES_IOPORT && !pci_porten(dev)) printf(", port disabled\n"); else if (type == SYS_RES_MEMORY && !pci_memen(dev)) printf(", memory disabled\n"); else printf(", enabled\n"); } /* * If base is 0, then we have problems if this architecture does * not allow that. It is best to ignore such entries for the * moment. These will be allocated later if the driver specifically * requests them. However, some removable buses look better when * all resources are allocated, so allow '0' to be overridden. * * Similarly treat maps whose value is the same as the test value * read back. These maps have had all f's written to them by the * BIOS in an attempt to disable the resources. */ if (!force && (basezero || map == testval)) return (barlen); if ((u_long)base != base) { device_printf(bus, "pci%d:%d:%d:%d bar %#x too many address bits\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); return (barlen); } /* * This code theoretically does the right thing, but has * undesirable side effects in some cases where peripherals * respond oddly to having these bits enabled. Let the user * be able to turn them off (since pci_enable_io_modes is 1 by * default). */ if (pci_enable_io_modes) { /* Turn on resources that have been left off by a lazy BIOS */ if (type == SYS_RES_IOPORT && !pci_porten(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } if (type == SYS_RES_MEMORY && !pci_memen(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } } else { if (type == SYS_RES_IOPORT && !pci_porten(dev)) return (barlen); if (type == SYS_RES_MEMORY && !pci_memen(dev)) return (barlen); } count = (pci_addr_t)1 << mapsize; flags = RF_ALIGNMENT_LOG2(mapsize); if (prefetch) flags |= RF_PREFETCHABLE; if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { start = 0; /* Let the parent decide. */ end = ~0; } else { start = base; end = base + count - 1; } resource_list_add(rl, type, reg, start, end, count); /* * Try to allocate the resource for this BAR from our parent * so that this resource range is already reserved. The * driver for this device will later inherit this resource in * pci_alloc_resource().
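 *
 * (Editorial aside, not part of the original source: "inherit" here
 * means that the child driver's ordinary allocation later finds this
 * pre-reserved range; a minimal sketch of that driver side, with a
 * hypothetical softc field:
 *
 *	rid = PCIR_BAR(0);
 *	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 *	    &rid, RF_ACTIVE);
 * )
 *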
*/ res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count, flags); if ((pci_do_realloc_bars || pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_REALLOC_BAR)) && res == NULL && (start != 0 || end != ~0)) { /* * If the allocation fails, try to allocate a resource for * this BAR using any available range. The firmware felt * it was important enough to assign a resource, so don't * disable decoding if we can help it. */ resource_list_delete(rl, type, reg); resource_list_add(rl, type, reg, 0, ~0, count); res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0, count, flags); } if (res == NULL) { /* * If the allocation fails, delete the resource list entry * and disable decoding for this device. * * If the driver requests this resource in the future, * pci_reserve_map() will try to allocate a fresh * resource range. */ resource_list_delete(rl, type, reg); pci_disable_io(dev, type); if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d bar %#x failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); } else { start = rman_get_start(res); pci_write_bar(dev, pm, start); } return (barlen); } /* * For ATA devices we need to decide early what addressing mode to use. * Legacy demands that the primary and secondary ATA ports sit on the * same addresses that old ISA hardware did. This dictates that we use * those addresses and ignore the BARs if we cannot set PCI native * addressing mode. */ static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask) { int rid, type, progif; #if 0 /* if this device supports PCI native addressing use it */ progif = pci_read_config(dev, PCIR_PROGIF, 1); if ((progif & 0x8a) == 0x8a) { if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { printf("Trying ATA native PCI addressing mode\n"); pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); } } #endif progif = pci_read_config(dev, PCIR_PROGIF, 1); type = SYS_RES_IOPORT; if (progif & PCIP_STORAGE_IDE_MODEPRIM) { pci_add_map(bus, dev, PCIR_BAR(0), rl, force, prefetchmask & (1 << 0)); pci_add_map(bus, dev, PCIR_BAR(1), rl, force, prefetchmask & (1 << 1)); } else { rid = PCIR_BAR(0); resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 0); rid = PCIR_BAR(1); resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 0); } if (progif & PCIP_STORAGE_IDE_MODESEC) { pci_add_map(bus, dev, PCIR_BAR(2), rl, force, prefetchmask & (1 << 2)); pci_add_map(bus, dev, PCIR_BAR(3), rl, force, prefetchmask & (1 << 3)); } else { rid = PCIR_BAR(2); resource_list_add(rl, type, rid, 0x170, 0x177, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 0); rid = PCIR_BAR(3); resource_list_add(rl, type, rid, 0x376, 0x376, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 0); } pci_add_map(bus, dev, PCIR_BAR(4), rl, force, prefetchmask & (1 << 4)); pci_add_map(bus, dev, PCIR_BAR(5), rl, force, prefetchmask & (1 << 5)); } static void pci_assign_interrupt(device_t bus, device_t dev, int force_route) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; char tunable_name[64]; int irq; /* Has to have an intpin to have an interrupt. */ if (cfg->intpin == 0) return; /* Let the user override the IRQ with a tunable.
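 *
 * (Editorial aside, not part of the original source: given the format
 * string built just below, the tunable overriding INTA of, say,
 * domain 0, bus 3, slot 4 would be set from /boot/loader.conf as:
 *
 *	hw.pci0.3.4.INTA.irq="10"
 * )
 *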
*/ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. */ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us. XXX: Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller.
*/ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_read_config(self, eecp, 4); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { continue; } bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); if (bios_sem == 0) { continue; } if (bootverbose) printf("ehci early: " "SMM active, request owner change\n"); pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); for (i = 0; (i < 100) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); } if (bios_sem != 0) { if (bootverbose) printf("ehci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); bus_write_4(res, offs + EHCI_USBINTR, 0); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early XHCI takeover from SMM. */ static void xhci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, XHCI_HCSPARAMS0); eec = -1; /* Synchronise with the BIOS if it owns the controller. */ for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) { eec = bus_read_4(res, eecp); if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) continue; bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); if (bios_sem == 0) continue; if (bootverbose) printf("xhci early: " "SMM active, request owner change\n"); bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); /* wait a maximum of 5 seconds */ for (i = 0; (i < 5000) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); } if (bios_sem != 0) { if (bootverbose) printf("xhci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = bus_read_1(res, XHCI_CAPLENGTH); bus_write_4(res, offs + XHCI_USBCMD, 0); bus_read_4(res, offs + XHCI_USBSTS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static void pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, struct resource_list *rl) { struct resource *res; char *cp; rman_res_t start, end, count; int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return; } /* * If the existing bus range is valid, attempt to reserve it * from our parent. If this fails for any reason, clear the * secbus and subbus registers. * * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? * This would at least preserve the existing sec_bus if it is * valid. */ sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); /* Quirk handling. */ switch (pci_get_devid(dev)) { case 0x12258086: /* Intel 82454KX/GX (Orion) */ sup_bus = pci_read_config(dev, 0x41, 1); if (sup_bus != 0xff) { sec_bus = sup_bus + 1; sub_bus = sup_bus + 1; PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; case 0x00dd10de: /* Compaq R3000 BIOS sets wrong subordinate bus number. */
*/ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size. */ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ? 
"" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } #endif static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore EA-BAR if is not enabled. * Currently the EA implementation supports * only situation, where EA structure contains * predefined entries. In case they are not enabled * leave them unallocated and proceed with * a legacy-BAR mechanism. */ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, disable entry. 
* Another allocation attempt will be performed * later, this time using the legacy BAR registers. */ tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); tmp &= ~PCIM_EA_ENABLE; pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); /* * Disabling the entry might fail if it is hardwired. * Read flags again to match current status. */ ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); continue; } /* As per specification, fill BAR with zeros */ pci_write_config(dev, rid, 0, 4); } } void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; const struct pci_quirk *q; uint32_t devid; int i; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; rl = &dinfo->resources; devid = (cfg->device << 16) | cfg->vendor; /* Allocate resources using Enhanced Allocation */ pci_add_resources_ea(bus, dev, 0); /* ATA devices need special map treatment */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(2), 4))) ) pci_ata_maps(bus, dev, rl, force, prefetchmask); else for (i = 0; i < cfg->nummaps;) { /* Skip resources already managed by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) || pci_ea_is_enabled(dev, PCIR_BAR(i))) { i++; continue; } /* * Skip quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_UNMAP_REG && q->arg1 == PCIR_BAR(i)) break; if (q->devid != 0) { i++; continue; } i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, prefetchmask & (1 << i)); } /* * Add additional, quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) pci_add_map(bus, dev, q->arg1, rl, force, 0); if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { #ifdef __PCI_REROUTE_INTERRUPT /* * Try to re-route interrupts. Sometimes the BIOS or * firmware may leave bogus values in these registers. * If the re-route fails, then just stick with what we * have. */ pci_assign_interrupt(bus, dev, 1); #else pci_assign_interrupt(bus, dev, 0); #endif } if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) xhci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) ehci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) ohci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) uhci_early_takeover(dev); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * Reserve resources for secondary bus ranges behind bridge * devices. */ pci_reserve_secbus(bus, dev, cfg, rl); #endif } static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } void pci_add_children(device_t dev, int domain, int busno) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; int first_func; /* * Try to detect a device at slot 0, function 0. If it exists, try to * enable ARI.
We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); /* If function 0 is not present, skip to the next slot. */ if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == PCIV_INVALID) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. 
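 *
 * (Editorial aside, not part of the original source: this rescan path
 * is normally driven from userland; assuming the standard devctl(8)
 * utility, for example:
 *
 *	devctl rescan pci0
 * )
 *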
*/ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *vf_dinfo; device_t pcib; int busno, slot, func; pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif /* * For a PCIe device, set the Max_Payload_Size to match the PCIe root's. */ static void pcie_setup_mps(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); device_t root; uint16_t rmps, mmps, mps; if (dinfo->cfg.pcie.pcie_location == 0) return; root = pci_find_pcie_root_port(dev); if (root == NULL) return; /* Check whether the MPS is already configured. */ rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; if (mps == rmps) return; /* Check whether the device is capable of the root's MPS. */ mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) & PCIEM_CAP_MAX_PAYLOAD) << 5; if (rmps > mmps) { /* * The device is unable to handle the root's MPS. Limit the root. * XXX: We should traverse the whole tree, applying * it to all the devices.
*/ pcie_adjust_config(root, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, mmps, 2); } else { pcie_adjust_config(dev, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, rmps, 2); } } static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo) { int aer; uint32_t r; uint16_t r2; if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, 2); r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR | PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, r2, 2); } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER UC 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4); r &= ~(PCIM_AER_UC_TRAINING_ERROR | PCIM_AER_UC_DL_PROTOCOL_ERROR | PCIM_AER_UC_SURPRISE_LINK_DOWN | PCIM_AER_UC_POISONED_TLP | PCIM_AER_UC_FC_PROTOCOL_ERROR | PCIM_AER_UC_COMPLETION_TIMEOUT | PCIM_AER_UC_COMPLETER_ABORT | PCIM_AER_UC_UNEXPECTED_COMPLETION | PCIM_AER_UC_RECEIVER_OVERFLOW | PCIM_AER_UC_MALFORMED_TLP | PCIM_AER_UC_ECRC_ERROR | PCIM_AER_UC_UNSUPPORTED_REQUEST | PCIM_AER_UC_ACS_VIOLATION | PCIM_AER_UC_INTERNAL_ERROR | PCIM_AER_UC_MC_BLOCKED_TLP | PCIM_AER_UC_ATOMIC_EGRESS_BLK | PCIM_AER_UC_TLP_PREFIX_BLOCKED); pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER COR 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4); r &= ~(PCIM_AER_COR_RECEIVER_ERROR | PCIM_AER_COR_BAD_TLP | PCIM_AER_COR_BAD_DLLP | PCIM_AER_COR_REPLAY_ROLLOVER | PCIM_AER_COR_REPLAY_TIMEOUT | PCIM_AER_COR_ADVISORY_NF_ERROR | PCIM_AER_COR_INTERNAL_ERROR | PCIM_AER_COR_HEADER_LOG_OVFLOW); pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4); r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2); r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE; pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, r, 2); } } void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { device_t dev; dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1); device_set_ivars(dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dev, dinfo, 0); pci_cfg_restore(dev, dinfo); pci_print_verbose(dinfo); pci_add_resources(bus, dev, 0, 0); if (pci_enable_mps_tune) pcie_setup_mps(dev); pci_child_added(dinfo->cfg.dev); if (pci_clear_aer_on_attach) pci_add_child_clear_aer(dev, dinfo); EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. 
*/ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } #endif if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * buses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. */ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); return (bus_generic_attach(dev)); } int pci_detach(device_t dev) { #ifdef PCI_RES_BUS struct pci_softc *sc; #endif int error; error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_RES_BUS sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); if (error) return (error); #endif return (device_delete_children(dev)); } static void pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp) { int line, unit; const char *at; char me1[24], me2[32]; uint8_t b, s, f; uint32_t d; device_location_cache_t *cache; d = pci_get_domain(child); b = pci_get_bus(child); s = pci_get_slot(child); f = pci_get_function(child); snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f); snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f); line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { resource_string_value(name, unit, "at", &at); if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0) { *unitp = unit; break; } if (dev_wired_cache_match(cache, child, at)) { *unitp = unit; break; } } dev_wired_cache_fini(cache); } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. */ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. */ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) { /* * Make sure this device's interrupt handler is not invoked * in the case the device uses a shared interrupt that can * be raised by some other device. * This is applicable only to regular (legacy) PCI interrupts * as MSI/MSI-X interrupts are never shared. 
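 *
 * Illustrative pairing, assuming the same resource-list entry is
 * found on both paths: the call below is undone by the matching call
 * in pci_resume_child(), so a shared INTx handler is quiesced for
 * exactly the suspend window:
 *
 *	suspend:	(void)bus_suspend_intr(child, rle->res);
 *	resume:		(void)bus_resume_intr(child, rle->res);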
*/ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_suspend_intr(child, rle->res); pci_set_power_child(dev, child, PCI_POWERSTATE_D3); } return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); /* * Allow interrupts only after fully resuming the driver and hardware. */ if (pci_do_power_suspend) { /* See pci_suspend_child for details. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_resume_intr(child, rle->res); } return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. 
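 *
 * For reference, a minimal sketch of the driver-side sequence that
 * arrives here with rid != 0 (hypothetical foo_intr and softc names;
 * MSI vectors use rids starting at 1):
 *
 *	count = 1;
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		rid = 1;
 *	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
 *	bus_setup_intr(dev, irq, INTR_TYPE_MISC | INTR_MPSAFE, NULL,
 *	    foo_intr, sc, &sc->intrhand);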
*/ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } /* * The MSIX table entry must be made valid by * incrementing the mte_handlers before * calling pci_enable_msix() and * pci_resume_msix(). Else the MSIX rewrite * table quirk will not work as expected. */ mte->mte_handlers++; if (mte->mte_handlers == 1) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
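 *
 * The driver-side teardown is expected to mirror setup in reverse
 * order (illustrative; IRQ resources must be released before the MSI
 * vectors, see pci_child_detached()):
 *
 *	bus_teardown_intr(dev, irq, sc->intrhand);
 *	bus_release_resource(dev, irq);
 *	pci_release_msi(dev);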
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"}, {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"}, {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"}, {PCIC_INSTRUMENT, -1, 0, "non-essential instrumentation"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? 
cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { if (dinfo->cfg.msi.msi_alloc != 0) pci_printf(&dinfo->cfg, "Device leaked %d MSI " "vectors\n", dinfo->cfg.msi.msi_alloc); else pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " "vectors\n", dinfo->cfg.msix.msix_alloc); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); #ifdef PCI_RES_BUS if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); #endif pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
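 *
 * For reference, entries in that database look like this
 * (illustrative vendor/device values, TAB-separated):
 *
 *	8086	Intel Corporation
 *		1237	82441FX PMC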
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include #include /* * List resources based on pci map registers, used for within ddb */ DB_SHOW_COMMAND_FLAGS(pciregs, db_pci_dump, DB_CMD_MEMSAFE) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = 
STAILQ_FIRST(devlist_head); (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); p = &dinfo->conf; db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " "chip=0x%08x rev=0x%02x hdr=0x%02x\n", (name && *name) ? name : "none", (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, p->pc_sel.pc_func, (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif, (p->pc_subdevice << 16) | p->pc_subvendor, (p->pc_device << 16) | p->pc_vendor, p->pc_revid, p->pc_hdr); } } #endif /* DDB */ struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags) { struct pci_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; struct resource *res; struct pci_map *pm; uint16_t cmd; pci_addr_t map, testval; int mapsize; res = NULL; /* If rid is managed by EA, ignore it */ if (pci_ea_is_enabled(child, *rid)) goto out; pm = pci_find_bar(child, *rid); if (pm != NULL) { /* This is a BAR that we failed to allocate earlier. */ mapsize = pm->pm_size; map = pm->pm_value; } else { /* * Weed out the bogons, and figure out how large the * BAR/map is. BARs that read back 0 here are bogus * and unimplemented. Note: atapci in legacy mode is * special and handled elsewhere in the code. If you * have an atapci device in legacy mode and it fails * here, that other code is broken. */ pci_read_bar(child, *rid, &map, &testval, NULL); /* * Determine the size of the BAR and ignore BARs with a size * of 0. Device ROM BARs use a different mask value. */ if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) mapsize = pci_romsize(testval); else mapsize = pci_mapsize(testval); if (mapsize == 0) goto out; pm = pci_add_bar(child, *rid, map, mapsize); } if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { if (type != SYS_RES_MEMORY) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is a memio\n", device_get_nameunit(child), type, *rid); goto out; } } else { if (type != SYS_RES_IOPORT) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an ioport\n", device_get_nameunit(child), type, *rid); goto out; } } /* * For real BARs, we need to override the size that * the driver requests, because that's what the BAR * actually uses and we would otherwise have a * situation where we might allocate the excess to * another driver, which won't work. */ count = ((pci_addr_t)1 << mapsize) * num; if (RF_ALIGNMENT(flags) < mapsize) flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize); if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) flags |= RF_PREFETCHABLE; /* * Allocate enough resource, and then write back the * appropriate BAR for that resource.
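 *
 * Worked example: a memory BAR whose size probe yields mapsize 12
 * decodes 1 << 12 = 4096 bytes, so count below becomes 4 KB and the
 * alignment is forced up to RF_ALIGNMENT_LOG2(12), i.e. 4 KB-aligned,
 * regardless of what the caller asked for.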
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); /* Disable decoding via the CMD register before updating the BAR */ cmd = pci_read_config(child, PCIR_COMMAND, 2); pci_write_config(child, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); map = rman_get_start(res); pci_write_bar(child, pm, map); /* Restore the original value of the CMD register */ pci_write_config(child, PCIR_COMMAND, cmd, 2); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); #endif case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } #endif /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. 
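 *
 * Non-VF requests (and other resource types) fall through to
 * pci_alloc_multi_resource() below; a typical driver-side request
 * that lands here looks like (illustrative):
 *
 *	rid = PCIR_BAR(0);
 *	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);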
*/ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int -pci_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pci_release_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg __unused; if (device_get_parent(child) != dev) - return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, - type, rid, r)); + return (bus_generic_release_resource(dev, child, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (cfg->flags & PCICFG_VF) { - switch (type) { + switch (rman_get_type(r)) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: - return (pci_vf_release_mem_resource(dev, child, rid, - r)); + return (pci_vf_release_mem_resource(dev, child, r)); } /* Fall through for other types of resource allocations. */ } #endif #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && - (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) { - switch (rid) { + (rman_get_type(r) == SYS_RES_IOPORT || + rman_get_type(r) == SYS_RES_MEMORY)) { + switch (rman_get_rid(r)) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: - return (bus_generic_release_resource(dev, child, type, - rid, r)); + return (bus_generic_release_resource(dev, child, r)); } } #endif rl = &dinfo->resources; - return (resource_list_release(rl, dev, child, type, rid, r)); + return (resource_list_release(rl, dev, child, r)); } int pci_activate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; int error, rid, type; error = bus_generic_activate_resource(dev, child, r); if (error) return (error); /* Enable decoding in the command register when activating BARs. */ if (device_get_parent(child) == dev) { /* Device ROMs need their decoding explicitly enabled. */ dinfo = device_get_ivars(child); rid = rman_get_rid(r); type = rman_get_type(r); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } } return (error); } int pci_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; int error, rid, type; error = bus_generic_deactivate_resource(dev, child, r); if (error) return (error); /* Disable decoding for device ROMs. 
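 *
 * (The activate path above sets PCIM_BIOS_ENABLE in the ROM BAR;
 * writing back the bare start address below clears that bit again.)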
*/ if (device_get_parent(child) == dev) { dinfo = device_get_ivars(child); rid = rman_get_rid(r); type = rman_get_type(r); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); } return (0); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; EVENTHANDLER_INVOKE(pci_delete_device, child); /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. " "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. " "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } #ifdef IOMMU bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { bus_dma_tag_t tag; struct pci_softc *sc; if (device_get_parent(dev) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, dev); } else tag = NULL; if (tag == NULL) { sc = device_get_softc(bus); tag = sc->sc_dma_tag; } return (tag); } #else bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } #endif uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. 
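 *
 * For example, a 4-byte read at PCIR_VENDOR (offset 0) returns
 * device << 16 | vendor above, while a 4-byte read at PCIR_DEVICE
 * (offset 2) would be unaligned, so there is no width-4 case here and
 * the default below returns 0xffffffff.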
*/ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int pci_child_location_method(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", pci_get_slot(child), pci_get_function(child), pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_get_device_path_method(device_t bus, device_t child, const char *locator, struct sbuf *sb) { device_t parent = device_get_parent(bus); int rv; if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { rv = bus_generic_get_device_path(parent, bus, locator, sb); if (rv == 0) { sbuf_printf(sb, "/Pci(0x%x,0x%x)", pci_get_slot(child), pci_get_function(child)); } return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. 
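 *
 * For example (illustrative selectors):
 *
 *	pci1:2:0:3	domain 1, bus 2, slot 0, function 3
 *	pci2:0:3	domain 0, bus 2, slot 0, function 3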
*/ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. 
*/ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. */ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); #endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. 
The COMMAND register is modified by the * bus w/o updating the cache. This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. */ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); #endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
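 *
 * A minimal sketch of the intended use around a device reset
 * (hypothetical driver code; pcie_flr() below requires the caller to
 * save and restore state):
 *
 *	pci_save_state(dev);
 *	if (!pcie_flr(dev, 1000, false))
 *		pci_power_reset(dev);
 *	pci_restore_state(dev);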
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { return (PCIB_GET_ID(device_get_parent(dev), child, type, id)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } /* * Wait for pending transactions to complete on a PCI-express function. * * The maximum delay is specified in milliseconds in max_delay. Note * that this function may sleep. * * Returns true if the function is idle and false if the timeout is * exceeded. If dev is not a PCI-express function, this returns true. */ bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t sta; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (true); sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); while (sta & PCIEM_STA_TRANSACTION_PND) { if (max_delay == 0) return (false); /* Poll once every 100 milliseconds up to the timeout. */ if (max_delay > 100) { pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK); max_delay -= 100; } else { pause_sbt("pcietp", max_delay * SBT_1MS, 0, C_HARDCLOCK); max_delay = 0; } sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); } return (true); } /* * Determine the maximum Completion Timeout in microseconds. * * For non-PCI-express functions this returns 0. */ int pcie_get_max_completion_timeout(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); /* * Functions using the 1.x spec use the default timeout range of * 50 microseconds to 50 milliseconds. Functions that do not * support programmable timeouts also use this range. 
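 *
 * (Callers typically convert to milliseconds, e.g. pcie_flr() below
 * pads its post-reset delay with
 * pcie_get_max_completion_timeout(dev) / 1000.)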
*/ if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) & PCIEM_CAP2_COMP_TIMO_RANGES) == 0) return (50 * 1000); switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) & PCIEM_CTL2_COMP_TIMO_VAL) { case PCIEM_CTL2_COMP_TIMO_100US: return (100); case PCIEM_CTL2_COMP_TIMO_10MS: return (10 * 1000); case PCIEM_CTL2_COMP_TIMO_55MS: return (55 * 1000); case PCIEM_CTL2_COMP_TIMO_210MS: return (210 * 1000); case PCIEM_CTL2_COMP_TIMO_900MS: return (900 * 1000); case PCIEM_CTL2_COMP_TIMO_3500MS: return (3500 * 1000); case PCIEM_CTL2_COMP_TIMO_13S: return (13 * 1000 * 1000); case PCIEM_CTL2_COMP_TIMO_64S: return (64 * 1000 * 1000); default: return (50 * 1000); } } void pcie_apei_error(device_t dev, int sev, uint8_t *aerp) { struct pci_devinfo *dinfo = device_get_ivars(dev); const char *s; int aer; uint32_t r, r1; uint16_t rs; if (sev == PCIEM_STA_CORRECTABLE_ERROR) s = "Correctable"; else if (sev == PCIEM_STA_NON_FATAL_ERROR) s = "Uncorrectable (Non-Fatal)"; else s = "Uncorrectable (Fatal)"; device_printf(dev, "%s PCIe error reported by APEI\n", s); if (aerp) { if (sev == PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_COR_STATUS); r1 = le32dec(aerp + PCIR_AER_COR_MASK); } else { r = le32dec(aerp + PCIR_AER_UC_STATUS); r1 = le32dec(aerp + PCIR_AER_UC_MASK); } device_printf(dev, "status 0x%08x mask 0x%08x", r, r1); if (sev != PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_UC_SEVERITY); rs = le16dec(aerp + PCIR_AER_CAP_CONTROL); printf(" severity 0x%08x first %d\n", r, rs & 0x1f); } else printf("\n"); } /* As kind of recovery just report and clear the error statuses. */ if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); device_printf(dev, "Clearing UC AER errors 0x%08x\n", r); } r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); device_printf(dev, "Clearing COR AER errors 0x%08x\n", r); } } if (dinfo->cfg.pcie.pcie_location != 0) { rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((rs & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, rs, 2); device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs); } } } /* * Perform a Function Level Reset (FLR) on a device. * * This function first waits for any pending transactions to complete * within the timeout specified by max_delay. If transactions are * still pending, the function will return false without attempting a * reset. * * If dev is not a PCI-express function or does not support FLR, this * function returns false. * * Note that no registers are saved or restored. The caller is * responsible for saving and restoring any registers including * PCI-standard registers via pci_save_state() and * pci_restore_state(). */ bool pcie_flr(device_t dev, u_int max_delay, bool force) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t cmd, ctl; int compl_delay; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (false); if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR)) return (false); /* * Disable busmastering to prevent generation of new * transactions while waiting for the device to go idle. 
If * the idle timeout fails, the command register is restored * which will re-enable busmastering. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2); if (!pcie_wait_for_pending_transactions(dev, max_delay)) { if (!force) { pci_write_config(dev, PCIR_COMMAND, cmd, 2); return (false); } pci_printf(&dinfo->cfg, "Resetting with transactions pending after %d ms\n", max_delay); /* * Extend the post-FLR delay to cover the maximum * Completion Timeout delay of anything in flight * during the FLR delay. Enforce a minimum delay of * at least 10ms. */ compl_delay = pcie_get_max_completion_timeout(dev) / 1000; if (compl_delay < 10) compl_delay = 10; } else compl_delay = 0; /* Initiate the reset. */ ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl | PCIEM_CTL_INITIATE_FLR, 2); /* Wait for 100ms. */ pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK); if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND) pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); return (true); } /* * Attempt a power-management reset by cycling the device in/out of D3 * state. PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ int pci_power_reset(device_t dev) { int ps; ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); return (0); } /* * Try link drop and retrain of the downstream port of the upstream * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must * cause Conventional Hot reset of the device in the slot. * An alternative, for PCIe, could be the secondary bus reset initiated * on the upstream switch, PCIR_BRIDGECTL_1, bit 6. */ int pcie_link_reset(device_t port, int pcie_location) { uint16_t v; v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2); v |= PCIEM_LINK_CTL_LINK_DIS; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier1", mstosbt(20), 0, 0); v &= ~PCIEM_LINK_CTL_LINK_DIS; v |= PCIEM_LINK_CTL_RETRAIN_LINK; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */ v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2); return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0); } static int pci_reset_post(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_restore_state(child); return (0); } static int pci_reset_prepare(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_save_state(child); return (0); } static int pci_reset_child(device_t dev, device_t child, int flags) { int error; if (dev == NULL || device_get_parent(child) != dev) return (0); if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ?
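/*
 * Illustration (hypothetical caller, following the contract stated in
 * the comment above pcie_flr()): save and restore config state around
 * the reset, falling back to a power-management reset when FLR is
 * unavailable:
 *
 *	pci_save_state(child);
 *	if (!pcie_flr(child, 1000, false))
 *		pci_power_reset(child);
 *	pci_restore_state(child);
 */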
device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { if (!pcie_flr(child, 1000, false)) { error = BUS_RESET_PREPARE(dev, child); if (error == 0) pci_power_reset(child); BUS_RESET_POST(dev, child); } if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } return (error); } const struct pci_device_table * pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt) { bool match; uint16_t vendor, device, subvendor, subdevice, class, subclass, revid; vendor = pci_get_vendor(child); device = pci_get_device(child); subvendor = pci_get_subvendor(child); subdevice = pci_get_subdevice(child); class = pci_get_class(child); subclass = pci_get_subclass(child); revid = pci_get_revid(child); while (nelt-- > 0) { match = true; if (id->match_flag_vendor) match &= vendor == id->vendor; if (id->match_flag_device) match &= device == id->device; if (id->match_flag_subvendor) match &= subvendor == id->subvendor; if (id->match_flag_subdevice) match &= subdevice == id->subdevice; if (id->match_flag_class) match &= class == id->class_id; if (id->match_flag_subclass) match &= subclass == id->subclass; if (id->match_flag_revid) match &= revid == id->revid; if (match) return (id); id++; } return (NULL); } static void pci_print_faulted_dev_name(const struct pci_devinfo *dinfo) { const char *dev_name; device_t dev; dev = dinfo->cfg.dev; printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func); dev_name = device_get_name(dev); if (dev_name != NULL) printf(" (%s%d)", dev_name, device_get_unit(dev)); } void pci_print_faulted_dev(void) { struct pci_devinfo *dinfo; device_t dev; int aer, i; uint32_t r1, r2; uint16_t status; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status = pci_read_config(dev, PCIR_STATUS, 2); status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status != 0) { pci_print_faulted_dev_name(dinfo); printf(" error 0x%04x\n", status); } if (dinfo->cfg.pcie.pcie_location != 0) { status = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((status & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_print_faulted_dev_name(dinfo); printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n", pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2), status); } } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r1 != 0 || r2 != 0) { pci_print_faulted_dev_name(dinfo); printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n" " COR 0x%08x Mask 0x%08x Ctl 0x%08x\n", r1, pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4), pci_read_config(dev, aer + PCIR_AER_UC_SEVERITY, 4), r2, pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4), pci_read_config(dev, aer + PCIR_AER_CAP_CONTROL, 4)); for (i = 0; i < 4; i++) { r1 = pci_read_config(dev, aer + PCIR_AER_HEADER_LOG + i * 4, 4); printf(" HL%d: 0x%08x\n", i, r1); } } } } } #ifdef DDB DB_SHOW_COMMAND_FLAGS(pcierr, pci_print_faulted_dev_db, DB_CMD_MEMSAFE) { pci_print_faulted_dev(); } static void db_clear_pcie_errors(const struct pci_devinfo *dinfo) { device_t dev; int aer; uint32_t r; dev = dinfo->cfg.dev; r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); pci_write_config(dev, 
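/*
 * Illustration (hypothetical driver and device IDs): pci_match_device()
 * is normally reached through the PCI_MATCH() wrapper from a probe
 * routine:
 *
 *	static const struct pci_device_table foo_devs[] = {
 *		{ PCI_DEV(0x1234, 0x5678), PCI_DESCR("Hypothetical widget") }
 *	};
 *
 *	const struct pci_device_table *id;
 *
 *	id = PCI_MATCH(dev, foo_devs);
 *	if (id == NULL)
 *		return (ENXIO);
 *	device_set_desc(dev, id->descr);
 *	return (BUS_PROBE_DEFAULT);
 */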
dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, r, 2); if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0) return; r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); } DB_COMMAND_FLAGS(pci_clearerr, db_pci_clearerr, DB_CMD_MEMSAFE) { struct pci_devinfo *dinfo; device_t dev; uint16_t status, status1; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status1 = status = pci_read_config(dev, PCIR_STATUS, 2); status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status1 != 0) { status &= ~status1; pci_write_config(dev, PCIR_STATUS, status, 2); } if (dinfo->cfg.pcie.pcie_location != 0) db_clear_pcie_errors(dinfo); } } #endif diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c index 5831b0f9a8d9..0bdc121f9727 100644 --- a/sys/dev/pci/pci_host_generic.c +++ b/sys/dev/pci/pci_host_generic.c @@ -1,742 +1,740 @@ /*- * Copyright (c) 2015, 2020 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* Generic ECAM PCIe driver */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #if defined(VM_MEMATTR_DEVICE_NP) #define PCI_UNMAPPED #define PCI_RF_FLAGS RF_UNMAPPED #else #define PCI_RF_FLAGS 0 #endif /* Forward prototypes */ static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes); static int generic_pcie_maxslots(device_t dev); static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value); int pci_host_generic_core_attach(device_t dev) { #ifdef PCI_UNMAPPED struct resource_map_request req; struct resource_map map; #endif struct generic_pcie_core_softc *sc; uint64_t phys_base; uint64_t pci_base; uint64_t size; const char *range_descr; char buf[64]; int domain, error; int flags, rid, tuple, type; sc = device_get_softc(dev); sc->dev = dev; /* Create the parent DMA tag to pass down the coherent flag */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->dmat); if (error != 0) return (error); /* * Attempt to set the domain. If it's missing, or we are unable to * set it then memory allocations may be placed in the wrong domain. */ if (bus_get_domain(dev, &domain) == 0) (void)bus_dma_tag_set_domain(sc->dmat, domain); if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) { rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, PCI_RF_FLAGS | RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory.\n"); error = ENXIO; goto err_resource; } #ifdef PCI_UNMAPPED resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE_NP; error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, &map); if (error != 0) { device_printf(dev, "could not map memory.\n"); return (error); } rman_set_mapping(sc->res, &map); #endif } sc->has_pmem = false; sc->pmem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s prefetch window", device_get_nameunit(dev)); sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->mem_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF); sc->io_rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s I/O port window", device_get_nameunit(dev)); sc->io_rman.rm_descr = strdup(buf, M_DEVBUF); /* Initialize rman and allocate memory regions */ error = rman_init(&sc->pmem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_pmem_rman; } error = rman_init(&sc->mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); goto err_mem_rman; } error = rman_init(&sc->io_rman); if (error) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); goto err_io_rman; } for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { phys_base = sc->ranges[tuple].phys_base; pci_base = sc->ranges[tuple].pci_base; size = sc->ranges[tuple].size; rid = tuple + 1; if (size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: sc->has_pmem = true; range_descr = "prefetch"; flags = RF_PREFETCHABLE; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->pmem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_MEM: range_descr = "memory"; flags = 0; type = SYS_RES_MEMORY; error = rman_manage_region(&sc->mem_rman, pci_base, pci_base + size - 1); break; case FLAG_TYPE_IO: range_descr = "I/O port"; flags = 0; type = SYS_RES_IOPORT; error = rman_manage_region(&sc->io_rman, pci_base, pci_base + size - 1); break; default: continue; } if (error) { device_printf(dev, "rman_manage_region() failed." "error = %d\n", error); goto err_rman_manage; } error = bus_set_resource(dev, type, rid, phys_base, size); if (error != 0) { device_printf(dev, "failed to set resource for range %d: %d\n", tuple, error); goto err_rman_manage; } sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE | RF_UNMAPPED | flags); if (sc->ranges[tuple].res == NULL) { device_printf(dev, "failed to allocate resource for range %d\n", tuple); error = ENXIO; goto err_rman_manage; } if (bootverbose) device_printf(dev, "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n", pci_base, phys_base, size, range_descr); } return (0); err_rman_manage: for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); err_io_rman: rman_fini(&sc->mem_rman); err_mem_rman: rman_fini(&sc->pmem_rman); err_pmem_rman: free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); err_resource: bus_dma_tag_destroy(sc->dmat); return (error); } int pci_host_generic_core_detach(device_t dev) { struct generic_pcie_core_softc *sc; int error, tuple, type; sc = device_get_softc(dev); error = bus_generic_detach(dev); if (error != 0) return (error); for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { if (sc->ranges[tuple].size == 0) continue; /* empty range element */ switch (FLAG_TYPE(sc->ranges[tuple].flags)) { case FLAG_TYPE_PMEM: case FLAG_TYPE_MEM: type = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: type = SYS_RES_IOPORT; break; default: continue; } if (sc->ranges[tuple].res != NULL) bus_release_resource(dev, type, tuple + 1, sc->ranges[tuple].res); bus_delete_resource(dev, type, tuple + 1); } rman_fini(&sc->io_rman); rman_fini(&sc->mem_rman); rman_fini(&sc->pmem_rman); free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); if (sc->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); bus_dma_tag_destroy(sc->dmat); return (0); } static uint32_t generic_pcie_read_config(device_t dev, u_int bus, 
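/*
 * ECAM worked example (hypothetical BDF; bus_start assumed to be 0):
 * bus 2, slot 3, function 1, register 0x10 decodes to
 *
 *	(2 << 20) | (3 << 15) | (1 << 12) | 0x10 == 0x219010
 *
 * per PCIE_ADDR_OFFSET() in pci_host_generic.h.
 */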
u_int slot, u_int func, u_int reg, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; uint32_t data; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > sc->bus_end)) return (~0U); if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return (~0U); if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) return (~0U); offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: data = bus_read_1(sc->res, offset); break; case 2: data = le16toh(bus_read_2(sc->res, offset)); break; case 4: data = le32toh(bus_read_4(sc->res, offset)); break; default: return (~0U); } return (data); } static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int bytes) { struct generic_pcie_core_softc *sc; uint64_t offset; sc = device_get_softc(dev); if ((bus < sc->bus_start) || (bus > sc->bus_end)) return; if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX)) return; offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); switch (bytes) { case 1: bus_write_1(sc->res, offset, val); break; case 2: bus_write_2(sc->res, offset, htole16(val)); break; case 4: bus_write_4(sc->res, offset, htole32(val)); break; default: return; } } static int generic_pcie_maxslots(device_t dev) { return (31); /* max slots per bus acc. to standard */ } static int generic_pcie_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); if (index == PCIB_IVAR_BUS) { *result = sc->bus_start; return (0); } if (index == PCIB_IVAR_DOMAIN) { *result = sc->ecam; return (0); } if (bootverbose) device_printf(dev, "ERROR: Unknown index %d.\n", index); return (ENOENT); } static int generic_pcie_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } static struct rman * generic_pcie_get_rman(device_t dev, int type, u_int flags) { struct generic_pcie_core_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_IOPORT: return (&sc->io_rman); case SYS_RES_MEMORY: if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) return (&sc->pmem_rman); return (&sc->mem_rman); default: break; } return (NULL); } int -pci_host_generic_core_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *res) +pci_host_generic_core_release_resource(device_t dev, device_t child, + struct resource *res) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif - switch (type) { + switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: - return (pci_domain_release_bus(sc->ecam, child, rid, res)); + return (pci_domain_release_bus(sc->ecam, child, res)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_release_resource(dev, child, type, rid, - res)); + return (bus_generic_rman_release_resource(dev, child, res)); default: - return (bus_generic_release_resource(dev, child, type, rid, - res)); + return (bus_generic_release_resource(dev, child, res)); } } static struct pcie_range * generic_pcie_containing_range(device_t dev, int type, rman_res_t start, rman_res_t end) { struct generic_pcie_core_softc *sc = device_get_softc(dev); uint64_t pci_base; uint64_t size; int i, space; switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (NULL); } for (i = 0; i < MAX_RANGES_TUPLES; i++) { pci_base = sc->ranges[i].pci_base; size = 
sc->ranges[i].size; if (size == 0) continue; /* empty range element */ if (start < pci_base || end >= pci_base + size) continue; switch (FLAG_TYPE(sc->ranges[i].flags)) { case FLAG_TYPE_MEM: case FLAG_TYPE_PMEM: space = SYS_RES_MEMORY; break; case FLAG_TYPE_IO: space = SYS_RES_IOPORT; break; default: continue; } if (type == space) return (&sc->ranges[i]); } return (NULL); } static int generic_pcie_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *new_start) { struct pcie_range *range; /* Translate the address from a PCI address to a physical address */ switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, start, start); if (range == NULL) return (ENOENT); *new_start = start - range->pci_base + range->phys_base; break; default: /* No translation for non-memory types */ *new_start = start; break; } return (0); } struct resource * pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; #endif struct resource *res; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) sc = device_get_softc(dev); #endif switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end, count, flags); break; #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); break; default: res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); break; } if (res == NULL) { device_printf(dev, "%s FAIL: type=%d, rid=%d, " "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); } return (res); } static int generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_activate_bus(sc->ecam, child, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(dev, child, r)); default: return (bus_generic_activate_resource(dev, child, r)); } } static int generic_pcie_deactivate_resource(device_t dev, device_t child, struct resource *r) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(r)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_deactivate_bus(sc->ecam, child, r)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(dev, child, r)); default: return (bus_generic_deactivate_resource(dev, child, r)); } } static int generic_pcie_adjust_resource(device_t dev, device_t child, struct resource *res, rman_res_t start, rman_res_t end) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); #endif switch (rman_get_type(res)) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_domain_adjust_bus(sc->ecam, child, res, start, end)); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, res, start, end)); default: return (bus_generic_adjust_resource(dev, child, res, start, end)); } } static int 
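/*
 * Illustration of the narrowed release interface above (hypothetical
 * child softc fields): since the type is now recovered from the
 * resource itself via rman_get_type(), a caller releases with just the
 * resource,
 *
 *	bus_release_resource(dev, sc->mem_res);
 *
 * instead of repeating SYS_RES_MEMORY and the rid it allocated with.
 */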
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct pcie_range *range; rman_res_t length, start; int error, type; type = rman_get_type(r); switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (bus_generic_map_resource(dev, child, r, argsp, map)); } /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); args.offset = start - range->pci_base; args.length = length; return (bus_generic_map_resource(dev, child, range->res, &args, map)); } static int generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct pcie_range *range; int type; type = rman_get_type(r); switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (EINVAL); #endif case SYS_RES_IOPORT: case SYS_RES_MEMORY: range = generic_pcie_containing_range(dev, type, rman_get_start(r), rman_get_end(r)); if (range == NULL || range->res == NULL) return (ENOENT); r = range->res; break; default: break; } return (bus_generic_unmap_resource(dev, child, r, map)); } static bus_dma_tag_t generic_pcie_get_dma_tag(device_t dev, device_t child) { struct generic_pcie_core_softc *sc; sc = device_get_softc(dev); return (sc->dmat); } static device_method_t generic_pcie_methods[] = { DEVMETHOD(device_attach, pci_host_generic_core_attach), DEVMETHOD(device_detach, pci_host_generic_core_detach), DEVMETHOD(bus_get_rman, generic_pcie_get_rman), DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), DEVMETHOD(bus_map_resource, generic_pcie_map_resource), DEVMETHOD(bus_unmap_resource, generic_pcie_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), DEVMETHOD(pcib_read_config, generic_pcie_read_config), DEVMETHOD(pcib_write_config, generic_pcie_write_config), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, generic_pcie_core_driver, generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); diff --git a/sys/dev/pci/pci_host_generic.h b/sys/dev/pci/pci_host_generic.h index 2d7583b861c8..688345e92db5 100644 --- a/sys/dev/pci/pci_host_generic.h +++ b/sys/dev/pci/pci_host_generic.h @@ -1,101 +1,101 @@ /* * Copyright (c) 2015, 2020 Ruslan Bukin * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #ifndef __PCI_HOST_GENERIC_H_ #define __PCI_HOST_GENERIC_H_ #include "pci_if.h" /* Assembling ECAM Configuration Address */ #define PCIE_BUS_SHIFT 20 #define PCIE_SLOT_SHIFT 15 #define PCIE_FUNC_SHIFT 12 #define PCIE_BUS_MASK 0xFF #define PCIE_SLOT_MASK 0x1F #define PCIE_FUNC_MASK 0x07 #define PCIE_REG_MASK 0xFFF #define PCIE_ADDR_OFFSET(bus, slot, func, reg) \ ((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT) | \ (((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT) | \ (((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT) | \ ((reg) & PCIE_REG_MASK)) #define MAX_RANGES_TUPLES 16 #define MIN_RANGES_TUPLES 2 struct pcie_range { uint64_t pci_base; uint64_t phys_base; uint64_t size; uint64_t flags; #define FLAG_TYPE(x) ((x) & FLAG_TYPE_MASK) #define FLAG_TYPE_MASK 0x3 #define FLAG_TYPE_INVALID 0x0 #define FLAG_TYPE_IO 0x1 #define FLAG_TYPE_MEM 0x2 #define FLAG_TYPE_PMEM 0x3 struct resource *res; }; struct generic_pcie_core_softc { struct pcie_range ranges[MAX_RANGES_TUPLES]; int nranges; int coherent; bool has_pmem; struct rman pmem_rman; struct rman mem_rman; struct rman io_rman; struct resource *res; int bus_start; int bus_end; int ecam; device_t dev; bus_space_handle_t ioh; bus_dma_tag_t dmat; uint32_t quirks; }; /* Quirks */ #define PCIE_ECAM_DESIGNWARE_QUIRK (1 << 0) /* Child will map resources to access config registers */ #define PCIE_CUSTOM_CONFIG_SPACE_QUIRK (1 << 1) DECLARE_CLASS(generic_pcie_core_driver); int pci_host_generic_core_attach(device_t); int pci_host_generic_core_detach(device_t); struct resource *pci_host_generic_core_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -int pci_host_generic_core_release_resource(device_t, device_t, int, int, +int pci_host_generic_core_release_resource(device_t, device_t, struct resource *); #endif /* __PCI_HOST_GENERIC_H_ */ diff --git a/sys/dev/pci/pci_iov.c b/sys/dev/pci/pci_iov.c index d52e534b9ab5..c8e139f043c9 100644 --- a/sys/dev/pci/pci_iov.c +++ b/sys/dev/pci/pci_iov.c @@ -1,1092 +1,1088 @@ /*- * Copyright (c) 2013-2015 Sandvine Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static MALLOC_DEFINE(M_SRIOV, "sr_iov", "PCI SR-IOV allocations"); static d_ioctl_t pci_iov_ioctl; static struct cdevsw iov_cdevsw = { .d_version = D_VERSION, .d_name = "iov", .d_ioctl = pci_iov_ioctl }; SYSCTL_DECL(_hw_pci); /* * The maximum amount of memory we will allocate for user configuration of an * SR-IOV device. 1MB ought to be enough for anyone, but leave this * configurable just in case. */ static u_long pci_iov_max_config = 1024 * 1024; SYSCTL_ULONG(_hw_pci, OID_AUTO, iov_max_config, CTLFLAG_RWTUN, &pci_iov_max_config, 0, "Maximum allowed size of SR-IOV configuration."); #define IOV_READ(d, r, w) \ pci_read_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, w) #define IOV_WRITE(d, r, v, w) \ pci_write_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, v, w) static nvlist_t *pci_iov_build_schema(nvlist_t **pf_schema, nvlist_t **vf_schema); static void pci_iov_build_pf_schema(nvlist_t *schema, nvlist_t **driver_schema); static void pci_iov_build_vf_schema(nvlist_t *schema, nvlist_t **driver_schema); static int pci_iov_delete_iov_children(struct pci_devinfo *dinfo); static nvlist_t *pci_iov_get_pf_subsystem_schema(void); static nvlist_t *pci_iov_get_vf_subsystem_schema(void); int pci_iov_attach_name(device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema, const char *fmt, ...) 
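/*
 * Illustration (hypothetical PF driver): build minimal schemas and
 * register the /dev/iov node under a driver-chosen name:
 *
 *	pf_schema = pci_iov_schema_alloc_node();
 *	vf_schema = pci_iov_schema_alloc_node();
 *	pci_iov_schema_add_bool(vf_schema, "enable-foo",
 *	    IOV_SCHEMA_HASDEFAULT, 0);
 *	error = pci_iov_attach_name(dev, pf_schema, vf_schema,
 *	    "foo%d", device_get_unit(dev));
 */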
{ char buf[NAME_MAX + 1]; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); return (PCI_IOV_ATTACH(device_get_parent(dev), dev, pf_schema, vf_schema, buf)); } int pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema, nvlist_t *vf_schema, const char *name) { struct pci_devinfo *dinfo; struct pcicfg_iov *iov; nvlist_t *schema; uint32_t version; int error; int iov_pos; dinfo = device_get_ivars(dev); schema = NULL; error = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos); if (error != 0) return (error); version = pci_read_config(dev, iov_pos, 4); if (PCI_EXTCAP_VER(version) != 1) { if (bootverbose) device_printf(dev, "Unsupported version of SR-IOV (%d) detected\n", PCI_EXTCAP_VER(version)); return (ENXIO); } iov = malloc(sizeof(*dinfo->cfg.iov), M_SRIOV, M_WAITOK | M_ZERO); mtx_lock(&Giant); if (dinfo->cfg.iov != NULL) { error = EBUSY; goto cleanup; } iov->iov_pf = dev; iov->iov_pos = iov_pos; schema = pci_iov_build_schema(&pf_schema, &vf_schema); if (schema == NULL) { error = ENOMEM; goto cleanup; } error = pci_iov_validate_schema(schema); if (error != 0) goto cleanup; iov->iov_schema = schema; iov->iov_cdev = make_dev(&iov_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "iov/%s", name); if (iov->iov_cdev == NULL) { error = ENOMEM; goto cleanup; } dinfo->cfg.iov = iov; iov->iov_cdev->si_drv1 = dinfo; mtx_unlock(&Giant); return (0); cleanup: nvlist_destroy(schema); nvlist_destroy(pf_schema); nvlist_destroy(vf_schema); free(iov, M_SRIOV); mtx_unlock(&Giant); return (error); } int pci_iov_detach_method(device_t bus, device_t dev) { struct pci_devinfo *dinfo; struct pcicfg_iov *iov; int error; mtx_lock(&Giant); dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov == NULL) { mtx_unlock(&Giant); return (0); } if ((iov->iov_flags & IOV_BUSY) != 0) { mtx_unlock(&Giant); return (EBUSY); } error = pci_iov_delete_iov_children(dinfo); if (error != 0) { mtx_unlock(&Giant); return (error); } dinfo->cfg.iov = NULL; if (iov->iov_cdev) { destroy_dev(iov->iov_cdev); iov->iov_cdev = NULL; } nvlist_destroy(iov->iov_schema); free(iov, M_SRIOV); mtx_unlock(&Giant); return (0); } static nvlist_t * pci_iov_build_schema(nvlist_t **pf, nvlist_t **vf) { nvlist_t *schema, *pf_driver, *vf_driver; /* We always take ownership of the schemas. */ pf_driver = *pf; *pf = NULL; vf_driver = *vf; *vf = NULL; schema = pci_iov_schema_alloc_node(); if (schema == NULL) goto cleanup; pci_iov_build_pf_schema(schema, &pf_driver); pci_iov_build_vf_schema(schema, &vf_driver); if (nvlist_error(schema) != 0) goto cleanup; return (schema); cleanup: nvlist_destroy(schema); nvlist_destroy(pf_driver); nvlist_destroy(vf_driver); return (NULL); } static void pci_iov_build_pf_schema(nvlist_t *schema, nvlist_t **driver_schema) { nvlist_t *pf_schema, *iov_schema; pf_schema = pci_iov_schema_alloc_node(); if (pf_schema == NULL) { nvlist_set_error(schema, ENOMEM); return; } iov_schema = pci_iov_get_pf_subsystem_schema(); /* * Note that if either *driver_schema or iov_schema is NULL, then * nvlist_move_nvlist will put the schema in the error state and * SR-IOV will fail to initialize later, so we don't have to explicitly * handle that case. 
*/ nvlist_move_nvlist(pf_schema, DRIVER_CONFIG_NAME, *driver_schema); nvlist_move_nvlist(pf_schema, IOV_CONFIG_NAME, iov_schema); nvlist_move_nvlist(schema, PF_CONFIG_NAME, pf_schema); *driver_schema = NULL; } static void pci_iov_build_vf_schema(nvlist_t *schema, nvlist_t **driver_schema) { nvlist_t *vf_schema, *iov_schema; vf_schema = pci_iov_schema_alloc_node(); if (vf_schema == NULL) { nvlist_set_error(schema, ENOMEM); return; } iov_schema = pci_iov_get_vf_subsystem_schema(); /* * Note that if either *driver_schema or iov_schema is NULL, then * nvlist_move_nvlist will put the schema in the error state and * SR-IOV will fail to initialize later, so we don't have to explicitly * handle that case. */ nvlist_move_nvlist(vf_schema, DRIVER_CONFIG_NAME, *driver_schema); nvlist_move_nvlist(vf_schema, IOV_CONFIG_NAME, iov_schema); nvlist_move_nvlist(schema, VF_SCHEMA_NAME, vf_schema); *driver_schema = NULL; } static nvlist_t * pci_iov_get_pf_subsystem_schema(void) { nvlist_t *pf; pf = pci_iov_schema_alloc_node(); if (pf == NULL) return (NULL); pci_iov_schema_add_uint16(pf, "num_vfs", IOV_SCHEMA_REQUIRED, -1); pci_iov_schema_add_string(pf, "device", IOV_SCHEMA_REQUIRED, NULL); return (pf); } static nvlist_t * pci_iov_get_vf_subsystem_schema(void) { nvlist_t *vf; vf = pci_iov_schema_alloc_node(); if (vf == NULL) return (NULL); pci_iov_schema_add_bool(vf, "passthrough", IOV_SCHEMA_HASDEFAULT, 0); return (vf); } static int pci_iov_alloc_bar(struct pci_devinfo *dinfo, int bar, pci_addr_t bar_shift) { struct resource *res; struct pcicfg_iov *iov; device_t dev, bus; rman_res_t start, end; pci_addr_t bar_size; int rid; iov = dinfo->cfg.iov; dev = dinfo->cfg.dev; bus = device_get_parent(dev); rid = iov->iov_pos + PCIR_SRIOV_BAR(bar); bar_size = 1 << bar_shift; res = pci_alloc_multi_resource(bus, dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, iov->iov_num_vfs, RF_ACTIVE); if (res == NULL) return (ENXIO); iov->iov_bar[bar].res = res; iov->iov_bar[bar].bar_size = bar_size; iov->iov_bar[bar].bar_shift = bar_shift; start = rman_get_start(res); end = rman_get_end(res); return (rman_manage_region(&iov->rman, start, end)); } static void pci_iov_add_bars(struct pcicfg_iov *iov, struct pci_devinfo *dinfo) { struct pci_iov_bar *bar; uint64_t bar_start; int i; for (i = 0; i <= PCIR_MAX_BAR_0; i++) { bar = &iov->iov_bar[i]; if (bar->res != NULL) { bar_start = rman_get_start(bar->res) + dinfo->cfg.vf.index * bar->bar_size; pci_add_bar(dinfo->cfg.dev, PCIR_BAR(i), bar_start, bar->bar_shift); } } } static int pci_iov_parse_config(struct pcicfg_iov *iov, struct pci_iov_arg *arg, nvlist_t **ret) { void *packed_config; nvlist_t *config; int error; config = NULL; packed_config = NULL; if (arg->len > pci_iov_max_config) { error = EMSGSIZE; goto out; } packed_config = malloc(arg->len, M_SRIOV, M_WAITOK); error = copyin(arg->config, packed_config, arg->len); if (error != 0) goto out; config = nvlist_unpack(packed_config, arg->len, NV_FLAG_IGNORE_CASE); if (config == NULL) { error = EINVAL; goto out; } error = pci_iov_schema_validate_config(iov->iov_schema, config); if (error != 0) goto out; error = nvlist_error(config); if (error != 0) goto out; *ret = config; config = NULL; out: nvlist_destroy(config); free(packed_config, M_SRIOV); return (error); } /* * Set the ARI_EN bit in the lowest-numbered PCI function with the SR-IOV * capability. This bit is only writeable on the lowest-numbered PF but * affects all PFs on the device. 
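 *
 * With ARI enabled, the slot bits of a routing ID are repurposed so a
 * bus carries a flat 8-bit function number space (functions 0-255);
 * without ARI a VF RID must keep slot 0, which is why pci_iov_config()
 * applies the PCI_RID2SLOT(last_rid) check only in the non-ARI case.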
*/ static int pci_iov_set_ari(device_t bus, bool *ari_enabled) { device_t lowest; device_t *devlist; int i, error, devcount, lowest_func, lowest_pos, iov_pos, dev_func; uint16_t iov_ctl; /* If ARI is disabled on the downstream port there is nothing to do. */ if (!PCIB_ARI_ENABLED(device_get_parent(bus))) { *ari_enabled = false; return (0); } error = device_get_children(bus, &devlist, &devcount); if (error != 0) return (error); lowest = NULL; for (i = 0; i < devcount; i++) { if (pci_find_extcap(devlist[i], PCIZ_SRIOV, &iov_pos) == 0) { dev_func = pci_get_function(devlist[i]); if (lowest == NULL || dev_func < lowest_func) { lowest = devlist[i]; lowest_func = dev_func; lowest_pos = iov_pos; } } } free(devlist, M_TEMP); /* * If we called this function some device must have the SR-IOV * capability. */ KASSERT(lowest != NULL, ("Could not find child of %s with SR-IOV capability", device_get_nameunit(bus))); iov_ctl = pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2); iov_ctl |= PCIM_SRIOV_ARI_EN; pci_write_config(lowest, lowest_pos + PCIR_SRIOV_CTL, iov_ctl, 2); if ((pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2) & PCIM_SRIOV_ARI_EN) == 0) { device_printf(lowest, "failed to enable ARI\n"); return (ENXIO); } *ari_enabled = true; return (0); } static int pci_iov_config_page_size(struct pci_devinfo *dinfo) { uint32_t page_cap, page_size; page_cap = IOV_READ(dinfo, PCIR_SRIOV_PAGE_CAP, 4); /* * If the system page size is less than the smallest SR-IOV page size * then round up to the smallest SR-IOV page size. */ if (PAGE_SHIFT < PCI_SRIOV_BASE_PAGE_SHIFT) page_size = (1 << 0); else page_size = (1 << (PAGE_SHIFT - PCI_SRIOV_BASE_PAGE_SHIFT)); /* Check that the device supports the system page size. */ if (!(page_size & page_cap)) return (ENXIO); IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, page_size, 4); return (0); } static int pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *config) { const nvlist_t *device, *driver_config; device = nvlist_get_nvlist(config, PF_CONFIG_NAME); driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME); return (PCI_IOV_INIT(dev, num_vfs, driver_config)); } static int pci_iov_init_rman(device_t pf, struct pcicfg_iov *iov) { int error; iov->rman.rm_start = 0; iov->rman.rm_end = ~0; iov->rman.rm_type = RMAN_ARRAY; snprintf(iov->rman_name, sizeof(iov->rman_name), "%s VF I/O memory", device_get_nameunit(pf)); iov->rman.rm_descr = iov->rman_name; error = rman_init(&iov->rman); if (error != 0) return (error); iov->iov_flags |= IOV_RMAN_INITED; return (0); } static int pci_iov_alloc_bar_ea(struct pci_devinfo *dinfo, int bar) { struct pcicfg_iov *iov; rman_res_t start, end; struct resource *res; struct resource_list *rl; struct resource_list_entry *rle; rl = &dinfo->resources; iov = dinfo->cfg.iov; rle = resource_list_find(rl, SYS_RES_MEMORY, iov->iov_pos + PCIR_SRIOV_BAR(bar)); if (rle == NULL) rle = resource_list_find(rl, SYS_RES_IOPORT, iov->iov_pos + PCIR_SRIOV_BAR(bar)); if (rle == NULL) return (ENXIO); res = rle->res; iov->iov_bar[bar].res = res; iov->iov_bar[bar].bar_size = rman_get_size(res) / iov->iov_num_vfs; iov->iov_bar[bar].bar_shift = pci_mapsize(iov->iov_bar[bar].bar_size); start = rman_get_start(res); end = rman_get_end(res); return (rman_manage_region(&iov->rman, start, end)); } static int pci_iov_setup_bars(struct pci_devinfo *dinfo) { device_t dev; struct pcicfg_iov *iov; pci_addr_t bar_value, testval; int i, last_64, error; iov = dinfo->cfg.iov; dev = dinfo->cfg.dev; last_64 = 0; pci_add_resources_ea(device_get_parent(dev), dev, 1); 
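/*
 * Sizing note (hypothetical numbers): an SR-IOV BAR covers all VFs, so
 * a 1 MB BAR shared by 8 VFs gives bar_size = 128 KB per VF, and VF i
 * decodes at rman_get_start(res) + i * bar_size; see pci_iov_add_bars()
 * and pci_iov_alloc_bar_ea() above.
 */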
for (i = 0; i <= PCIR_MAX_BAR_0; i++) { /* First, try to use BARs allocated with EA */ error = pci_iov_alloc_bar_ea(dinfo, i); if (error == 0) continue; /* Allocate legacy-BAR only if EA is not enabled */ if (pci_ea_is_enabled(dev, iov->iov_pos + PCIR_SRIOV_BAR(i))) continue; /* * If a PCI BAR is a 64-bit wide BAR, then it spans two * consecutive registers. Therefore if the last BAR that * we looked at was a 64-bit BAR, we need to skip this * register as it's the second half of the last BAR. */ if (!last_64) { pci_read_bar(dev, iov->iov_pos + PCIR_SRIOV_BAR(i), &bar_value, &testval, &last_64); if (testval != 0) { error = pci_iov_alloc_bar(dinfo, i, pci_mapsize(testval)); if (error != 0) return (error); } } else last_64 = 0; } return (0); } static void pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config, uint16_t first_rid, uint16_t rid_stride) { char device_name[VF_MAX_NAME]; const nvlist_t *device, *driver_config, *iov_config; device_t bus, dev, vf; struct pcicfg_iov *iov; struct pci_devinfo *vfinfo; int i, error; uint16_t vid, did, next_rid; iov = dinfo->cfg.iov; dev = dinfo->cfg.dev; bus = device_get_parent(dev); next_rid = first_rid; vid = pci_get_vendor(dev); did = IOV_READ(dinfo, PCIR_SRIOV_VF_DID, 2); for (i = 0; i < iov->iov_num_vfs; i++, next_rid += rid_stride) { snprintf(device_name, sizeof(device_name), VF_PREFIX"%d", i); device = nvlist_get_nvlist(config, device_name); iov_config = nvlist_get_nvlist(device, IOV_CONFIG_NAME); driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME); vf = PCI_CREATE_IOV_CHILD(bus, dev, next_rid, vid, did); if (vf == NULL) break; /* * If we are creating passthrough devices then force the ppt * driver to attach to prevent a VF driver from claiming the * VFs. */ if (nvlist_get_bool(iov_config, "passthrough")) device_set_devclass_fixed(vf, "ppt"); vfinfo = device_get_ivars(vf); vfinfo->cfg.iov = iov; vfinfo->cfg.vf.index = i; pci_iov_add_bars(iov, vfinfo); error = PCI_IOV_ADD_VF(dev, i, driver_config); if (error != 0) { device_printf(dev, "Failed to add VF %d\n", i); device_delete_child(bus, vf); } } bus_generic_attach(bus); } static int pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg) { device_t bus, dev; struct pci_devinfo *dinfo; struct pcicfg_iov *iov; nvlist_t *config; int i, error; uint16_t rid_off, rid_stride; uint16_t first_rid, last_rid; uint16_t iov_ctl; uint16_t num_vfs, total_vfs; int iov_inited; bool ari_enabled; mtx_lock(&Giant); dinfo = cdev->si_drv1; iov = dinfo->cfg.iov; dev = dinfo->cfg.dev; bus = device_get_parent(dev); iov_inited = 0; config = NULL; if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) { mtx_unlock(&Giant); return (EBUSY); } iov->iov_flags |= IOV_BUSY; error = pci_iov_parse_config(iov, arg, &config); if (error != 0) goto out; num_vfs = pci_iov_config_get_num_vfs(config); total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2); if (num_vfs > total_vfs) { error = EINVAL; goto out; } error = pci_iov_config_page_size(dinfo); if (error != 0) goto out; error = pci_iov_set_ari(bus, &ari_enabled); if (error != 0) goto out; error = pci_iov_init(dev, num_vfs, config); if (error != 0) goto out; iov_inited = 1; IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2); rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2); rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2); first_rid = pci_get_rid(dev) + rid_off; last_rid = first_rid + (num_vfs - 1) * rid_stride; /* We don't yet support allocating extra bus numbers for VFs. 
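 *
 * E.g. (hypothetical RIDs): a PF at bus 8, slot 0, function 0 has RID
 * 0x0800; with VF offset 1 and stride 1, four VFs end at RID 0x0804,
 * still bus 8, while 300 VFs would end at RID 0x092c on bus 9 and the
 * check below fails with ENOSPC.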
*/ if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) { error = ENOSPC; goto out; } if (!ari_enabled && PCI_RID2SLOT(last_rid) != 0) { error = ENOSPC; goto out; } iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2); iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE); IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2); error = pci_iov_init_rman(dev, iov); if (error != 0) goto out; iov->iov_num_vfs = num_vfs; error = pci_iov_setup_bars(dinfo); if (error != 0) goto out; iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2); iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE; IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2); /* Per specification, we must wait 100ms before accessing VFs. */ pause("iov", roundup(hz, 10)); pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride); nvlist_destroy(config); iov->iov_flags &= ~IOV_BUSY; mtx_unlock(&Giant); return (0); out: if (iov_inited) PCI_IOV_UNINIT(dev); for (i = 0; i <= PCIR_MAX_BAR_0; i++) { if (iov->iov_bar[i].res != NULL) { - pci_release_resource(bus, dev, SYS_RES_MEMORY, - iov->iov_pos + PCIR_SRIOV_BAR(i), - iov->iov_bar[i].res); + pci_release_resource(bus, dev, iov->iov_bar[i].res); pci_delete_resource(bus, dev, SYS_RES_MEMORY, iov->iov_pos + PCIR_SRIOV_BAR(i)); iov->iov_bar[i].res = NULL; } } if (iov->iov_flags & IOV_RMAN_INITED) { rman_fini(&iov->rman); iov->iov_flags &= ~IOV_RMAN_INITED; } nvlist_destroy(config); iov->iov_num_vfs = 0; iov->iov_flags &= ~IOV_BUSY; mtx_unlock(&Giant); return (error); } void pci_iov_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { struct pcicfg_iov *iov; iov = dinfo->cfg.iov; IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, iov->iov_page_size, 4); IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, iov->iov_num_vfs, 2); IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov->iov_ctl, 2); } void pci_iov_cfg_save(device_t dev, struct pci_devinfo *dinfo) { struct pcicfg_iov *iov; iov = dinfo->cfg.iov; iov->iov_page_size = IOV_READ(dinfo, PCIR_SRIOV_PAGE_SIZE, 4); iov->iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2); } /* Return true if child is a VF of the given PF. 
*/ static int pci_iov_is_child_vf(struct pcicfg_iov *pf, device_t child) { struct pci_devinfo *vfinfo; vfinfo = device_get_ivars(child); if (!(vfinfo->cfg.flags & PCICFG_VF)) return (0); return (pf == vfinfo->cfg.iov); } static int pci_iov_delete_iov_children(struct pci_devinfo *dinfo) { device_t bus, dev, vf, *devlist; struct pcicfg_iov *iov; int i, error, devcount; uint32_t iov_ctl; mtx_assert(&Giant, MA_OWNED); iov = dinfo->cfg.iov; dev = dinfo->cfg.dev; bus = device_get_parent(dev); devlist = NULL; iov->iov_flags |= IOV_BUSY; error = device_get_children(bus, &devlist, &devcount); if (error != 0) goto out; for (i = 0; i < devcount; i++) { vf = devlist[i]; if (!pci_iov_is_child_vf(iov, vf)) continue; error = device_detach(vf); if (error != 0) { device_printf(dev, "Could not disable SR-IOV: failed to detach VF %s\n", device_get_nameunit(vf)); goto out; } } for (i = 0; i < devcount; i++) { vf = devlist[i]; if (pci_iov_is_child_vf(iov, vf)) device_delete_child(bus, vf); } PCI_IOV_UNINIT(dev); iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2); iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE); IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2); IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2); iov->iov_num_vfs = 0; for (i = 0; i <= PCIR_MAX_BAR_0; i++) { if (iov->iov_bar[i].res != NULL) { - pci_release_resource(bus, dev, SYS_RES_MEMORY, - iov->iov_pos + PCIR_SRIOV_BAR(i), - iov->iov_bar[i].res); + pci_release_resource(bus, dev, iov->iov_bar[i].res); pci_delete_resource(bus, dev, SYS_RES_MEMORY, iov->iov_pos + PCIR_SRIOV_BAR(i)); iov->iov_bar[i].res = NULL; } } if (iov->iov_flags & IOV_RMAN_INITED) { rman_fini(&iov->rman); iov->iov_flags &= ~IOV_RMAN_INITED; } error = 0; out: free(devlist, M_TEMP); iov->iov_flags &= ~IOV_BUSY; return (error); } static int pci_iov_delete(struct cdev *cdev) { struct pci_devinfo *dinfo; struct pcicfg_iov *iov; int error; mtx_lock(&Giant); dinfo = cdev->si_drv1; iov = dinfo->cfg.iov; if ((iov->iov_flags & IOV_BUSY) != 0) { error = EBUSY; goto out; } if (iov->iov_num_vfs == 0) { error = ECHILD; goto out; } error = pci_iov_delete_iov_children(dinfo); out: mtx_unlock(&Giant); return (error); } static int pci_iov_get_schema_ioctl(struct cdev *cdev, struct pci_iov_schema *output) { struct pci_devinfo *dinfo; void *packed; size_t output_len, size; int error; packed = NULL; mtx_lock(&Giant); dinfo = cdev->si_drv1; packed = nvlist_pack(dinfo->cfg.iov->iov_schema, &size); mtx_unlock(&Giant); if (packed == NULL) { error = ENOMEM; goto fail; } output_len = output->len; output->len = size; if (size <= output_len) { error = copyout(packed, output->schema, size); if (error != 0) goto fail; output->error = 0; } else /* * If we return an error then the ioctl code won't copyout * output back to userland, so we flag the error in the struct * instead. 
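 *
 * A userland consumer (hypothetical sketch; fields per struct
 * pci_iov_schema) therefore calls twice, first to learn the size:
 *
 *	arg.schema = NULL; arg.len = 0;
 *	ioctl(fd, IOV_GET_SCHEMA, &arg);   -> arg.error == EMSGSIZE,
 *	                                      arg.len == packed size
 *	arg.schema = malloc(arg.len);
 *	ioctl(fd, IOV_GET_SCHEMA, &arg);   -> arg.error == 0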
*/ output->error = EMSGSIZE; error = 0; fail: free(packed, M_NVLIST); return (error); } static int pci_iov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { switch (cmd) { case IOV_CONFIG: return (pci_iov_config(dev, (struct pci_iov_arg *)data)); case IOV_DELETE: return (pci_iov_delete(dev)); case IOV_GET_SCHEMA: return (pci_iov_get_schema_ioctl(dev, (struct pci_iov_schema *)data)); default: return (EINVAL); } } struct resource * pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; struct pcicfg_iov *iov; struct pci_map *map; struct resource *res; struct resource_list_entry *rle; rman_res_t bar_start, bar_end; pci_addr_t bar_length; int error; dinfo = device_get_ivars(child); iov = dinfo->cfg.iov; map = pci_find_bar(child, *rid); if (map == NULL) return (NULL); bar_length = 1 << map->pm_size; bar_start = map->pm_value; bar_end = bar_start + bar_length - 1; /* Make sure that the resource fits the constraints. */ if (bar_start >= end || bar_end <= bar_start || count != 1) return (NULL); /* Clamp the resource to the constraints if necessary. */ if (bar_start < start) bar_start = start; if (bar_end > end) bar_end = end; bar_length = bar_end - bar_start + 1; res = rman_reserve_resource(&iov->rman, bar_start, bar_end, bar_length, flags, child); if (res == NULL) return (NULL); rle = resource_list_add(&dinfo->resources, SYS_RES_MEMORY, *rid, bar_start, bar_end, 1); if (rle == NULL) { rman_release_resource(res); return (NULL); } rman_set_rid(res, *rid); rman_set_type(res, SYS_RES_MEMORY); if (flags & RF_ACTIVE) { error = bus_activate_resource(child, SYS_RES_MEMORY, *rid, res); if (error != 0) { resource_list_delete(&dinfo->resources, SYS_RES_MEMORY, *rid); rman_release_resource(res); return (NULL); } } rle->res = res; return (res); } int -pci_vf_release_mem_resource(device_t dev, device_t child, int rid, - struct resource *r) +pci_vf_release_mem_resource(device_t dev, device_t child, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; - int error; + int error, rid; dinfo = device_get_ivars(child); if (rman_get_flags(r) & RF_ACTIVE) { - error = bus_deactivate_resource(child, SYS_RES_MEMORY, rid, r); + error = bus_deactivate_resource(child, r); if (error != 0) return (error); } + rid = rman_get_rid(r); rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, rid); if (rle != NULL) { rle->res = NULL; resource_list_delete(&dinfo->resources, SYS_RES_MEMORY, rid); } return (rman_release_resource(r)); } diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c index cef58a1670d7..35062d67050e 100644 --- a/sys/dev/pci/pci_pci.c +++ b/sys/dev/pci/pci_pci.c @@ -1,3159 +1,3158 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * PCI:PCI bridge support. */ #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static int pcib_probe(device_t dev); static int pcib_suspend(device_t dev); static int pcib_resume(device_t dev); static bus_child_present_t pcib_child_present; static bus_alloc_resource_t pcib_alloc_resource; #ifdef NEW_PCIB static bus_adjust_resource_t pcib_adjust_resource; static bus_release_resource_t pcib_release_resource; static bus_activate_resource_t pcib_activate_resource; static bus_deactivate_resource_t pcib_deactivate_resource; static bus_map_resource_t pcib_map_resource; static bus_unmap_resource_t pcib_unmap_resource; #endif static int pcib_reset_child(device_t dev, device_t child, int flags); static int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate); static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width); static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width); static int pcib_ari_maxslots(device_t dev); static int pcib_ari_maxfuncs(device_t dev); static int pcib_try_enable_ari(device_t pcib, device_t dev); static int pcib_ari_enabled(device_t pcib); static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); #ifdef PCI_HP static void pcib_pcie_ab_timeout(void *arg, int pending); static void pcib_pcie_cc_timeout(void *arg, int pending); static void pcib_pcie_dll_timeout(void *arg, int pending); #endif static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature); static device_method_t pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcib_probe), DEVMETHOD(device_attach, pcib_attach), DEVMETHOD(device_detach, pcib_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pcib_suspend), DEVMETHOD(device_resume, pcib_resume), /* Bus interface */ DEVMETHOD(bus_child_present, pcib_child_present), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, pcib_adjust_resource), DEVMETHOD(bus_release_resource, pcib_release_resource), DEVMETHOD(bus_activate_resource, pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, pcib_deactivate_resource), DEVMETHOD(bus_map_resource, pcib_map_resource), DEVMETHOD(bus_unmap_resource, 
pcib_unmap_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), #endif DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_reset_child, pcib_reset_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_ari_maxslots), DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep), DEVMETHOD(pcib_get_id, pcib_ari_get_id), DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari), DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled), DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid), DEVMETHOD(pcib_request_feature, pcib_request_feature_default), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc)); EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS); #if defined(NEW_PCIB) || defined(PCI_HP) SYSCTL_DECL(_hw_pci); #endif #ifdef NEW_PCIB static int pci_clear_pcib; SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0, "Clear firmware-assigned resources for PCI-PCI bridge I/O windows."); /* * Get the corresponding window if this resource from a child device was * sub-allocated from one of our window resource managers. */ static struct pcib_window * pcib_get_resource_window(struct pcib_softc *sc, struct resource *r) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: if (rman_is_region_manager(r, &sc->io.rman)) return (&sc->io); break; case SYS_RES_MEMORY: /* Prefetchable resources may live in either memory rman. */ if (rman_get_flags(r) & RF_PREFETCHABLE && rman_is_region_manager(r, &sc->pmem.rman)) return (&sc->pmem); if (rman_is_region_manager(r, &sc->mem.rman)) return (&sc->mem); break; } return (NULL); } /* * Is a resource from a child device sub-allocated from one of our * resource managers? */ static int pcib_is_resource_managed(struct pcib_softc *sc, struct resource *r) { #ifdef PCI_RES_BUS if (rman_get_type(r) == PCI_RES_BUS) return (rman_is_region_manager(r, &sc->bus.rman)); #endif return (pcib_get_resource_window(sc, r) != NULL); } static int pcib_is_window_open(struct pcib_window *pw) { return (pw->valid && pw->base < pw->limit); } /* * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and * handle for the resource, we could pass RF_ACTIVE up to the PCI bus * when allocating the resource windows and rely on the PCI bus driver * to do this for us. 
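 *
 * Either way, every bus method below first asks whether the child's
 * resource was sub-allocated from one of our windows at all.  A
 * simplified sketch of how that test is consumed (the function name
 * is hypothetical; the real pcib_release_resource() further down
 * also deactivates an active resource first):
 */

static int
example_release(device_t dev, device_t child, struct resource *r)
{
	struct pcib_softc *sc = device_get_softc(dev);

	/* Managed resources go back to our rman; others climb the tree. */
	if (pcib_is_resource_managed(sc, r))
		return (rman_release_resource(r));
	return (bus_generic_release_resource(dev, child, r));
}

/*
 * Subtractive bridges depend on that split: requests outside the
 * windows are passed up the tree unchanged instead of being rejected.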
*/ static void pcib_activate_window(struct pcib_softc *sc, int type) { PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type); } static void pcib_write_windows(struct pcib_softc *sc, int mask) { device_t dev; uint32_t val; dev = sc->dev; if (sc->io.valid && mask & WIN_IO) { val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { pci_write_config(dev, PCIR_IOBASEH_1, sc->io.base >> 16, 2); pci_write_config(dev, PCIR_IOLIMITH_1, sc->io.limit >> 16, 2); } pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1); pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1); } if (mask & WIN_MEM) { pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2); } if (sc->pmem.valid && mask & WIN_PMEM) { val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { pci_write_config(dev, PCIR_PMBASEH_1, sc->pmem.base >> 32, 4); pci_write_config(dev, PCIR_PMLIMITH_1, sc->pmem.limit >> 32, 4); } pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2); } } /* * This is used to reject I/O port allocations that conflict with an * ISA alias range. */ static int pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end, rman_res_t count) { rman_res_t next_alias; if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE)) return (0); /* Only check fixed ranges for overlap. */ if (start + count - 1 != end) return (0); /* ISA aliases are only in the lower 64KB of I/O space. */ if (start >= 65536) return (0); /* Check for overlap with 0x000 - 0x0ff as a special case. */ if (start < 0x100) goto alias; /* * If the start address is an alias, the range is an alias. * Otherwise, compute the start of the next alias range and * check if it is before the end of the candidate range. */ if ((start & 0x300) != 0) goto alias; next_alias = (start & ~0x3fful) | 0x100; if (next_alias <= end) goto alias; return (0); alias: if (bootverbose) device_printf(sc->dev, "I/O range %#jx-%#jx overlaps with an ISA alias\n", start, end); return (1); } static void pcib_add_window_resources(struct pcib_window *w, struct resource **res, int count) { struct resource **newarray; int error, i; newarray = malloc(sizeof(struct resource *) * (w->count + count), M_DEVBUF, M_WAITOK); if (w->res != NULL) bcopy(w->res, newarray, sizeof(struct resource *) * w->count); bcopy(res, newarray + w->count, sizeof(struct resource *) * count); free(w->res, M_DEVBUF); w->res = newarray; w->count += count; for (i = 0; i < count; i++) { error = rman_manage_region(&w->rman, rman_get_start(res[i]), rman_get_end(res[i])); if (error) panic("Failed to add resource to rman"); } } typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg); static void pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb, void *arg) { rman_res_t next_end; /* * If start is within an ISA alias range, move up to the start * of the next non-alias range. As a special case, addresses * in the range 0x000 - 0x0ff should also be skipped since * those are used for various system I/O devices in ISA * systems. */ if (start <= 65535) { if (start < 0x100 || (start & 0x300) != 0) { start &= ~0x3ff; start += 0x400; } } /* ISA aliases are only in the lower 64KB of I/O space. 
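 *
 * Only offsets 0x000-0x0ff of each 0x400-byte block there decode
 * uniquely; any port with bit 8 or 9 set is an alias.  Spelled out
 * (illustrative only, not compiled):
 */
#if 0
	/* 0x170 and 0x3f8 are aliases; 0x1000 decodes uniquely. */
	is_alias = port < 65536 && (port & 0x300) != 0;
#endif
	/*
	 * The walk below therefore advances in 0x400 strides and emits
	 * only the unique 0x100-byte ranges.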
*/ while (start <= MIN(end, 65535)) { next_end = MIN(start | 0xff, end); cb(start, next_end, arg); start += 0x400; } if (start <= end) cb(start, end, arg); } static void count_ranges(rman_res_t start, rman_res_t end, void *arg) { int *countp; countp = arg; (*countp)++; } struct alloc_state { struct resource **res; struct pcib_softc *sc; int count, error; }; static void alloc_ranges(rman_res_t start, rman_res_t end, void *arg) { struct alloc_state *as; struct pcib_window *w; int rid; as = arg; if (as->error != 0) return; w = &as->sc->io; rid = w->reg; if (bootverbose) device_printf(as->sc->dev, "allocating non-ISA range %#jx-%#jx\n", start, end); as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT, &rid, start, end, end - start + 1, RF_ACTIVE | RF_UNMAPPED); if (as->res[as->count] == NULL) as->error = ENXIO; else as->count++; } static int pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end) { struct alloc_state as; int i, new_count; /* First, see how many ranges we need. */ new_count = 0; pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count); /* Second, allocate the ranges. */ as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF, M_WAITOK); as.sc = sc; as.count = 0; as.error = 0; pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as); if (as.error != 0) { for (i = 0; i < as.count; i++) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io.reg, as.res[i]); free(as.res, M_DEVBUF); return (as.error); } KASSERT(as.count == new_count, ("%s: count mismatch", __func__)); /* Third, add the ranges to the window. */ pcib_add_window_resources(&sc->io, as.res, as.count); free(as.res, M_DEVBUF); return (0); } static void pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type, int flags, pci_addr_t max_address) { struct resource *res; char buf[64]; int error, rid; if (max_address != (rman_res_t)max_address) max_address = ~0; w->rman.rm_start = 0; w->rman.rm_end = max_address; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s %s window", device_get_nameunit(sc->dev), w->name); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) panic("Failed to initialize %s %s rman", device_get_nameunit(sc->dev), w->name); if (!pcib_is_window_open(w)) return; if (w->base > max_address || w->limit > max_address) { device_printf(sc->dev, "initial %s window has too many bits, ignoring\n", w->name); return; } if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE) (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit); else { rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit, w->limit - w->base + 1, flags | RF_ACTIVE | RF_UNMAPPED); if (res != NULL) pcib_add_window_resources(w, &res, 1); } if (w->res == NULL) { device_printf(sc->dev, "failed to allocate initial %s window: %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); w->base = max_address; w->limit = 0; pcib_write_windows(sc, w->mask); return; } pcib_activate_window(sc, type); } /* * Initialize I/O windows. */ static void pcib_probe_windows(struct pcib_softc *sc) { pci_addr_t max; device_t dev; uint32_t val; dev = sc->dev; if (pci_clear_pcib) { pcib_bridge_init(dev); } /* Determine if the I/O port window is implemented. */ val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if (val == 0) { /* * If 'val' is zero, then only 16-bits of I/O space * are supported. 
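 *
 * Distinguishing "16-bit window" from "no window at all" takes a
 * write probe, since a hardwired-zero register never latches a
 * value.  The idiom in isolation (illustrative only, not compiled):
 */
#if 0
	pci_write_config(dev, reg, 0xff, 1);	/* all-ones probe */
	implemented = pci_read_config(dev, reg, 1) != 0;
	pci_write_config(dev, reg, 0, 1);	/* restore the register */
#endif
	/*
	 * Here 'reg' stands for PCIR_IOBASEL_1; a successful probe
	 * marks the I/O window valid.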
*/ pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) { sc->io.valid = 1; pci_write_config(dev, PCIR_IOBASEL_1, 0, 1); } } else sc->io.valid = 1; /* Read the existing I/O port window. */ if (sc->io.valid) { sc->io.reg = PCIR_IOBASEL_1; sc->io.step = 12; sc->io.mask = WIN_IO; sc->io.name = "I/O port"; if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { sc->io.base = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), val); sc->io.limit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffffffff; } else { sc->io.base = PCI_PPBIOBASE(0, val); sc->io.limit = PCI_PPBIOLIMIT(0, pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffff; } pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max); } /* Read the existing memory window. */ sc->mem.valid = 1; sc->mem.reg = PCIR_MEMBASE_1; sc->mem.step = 20; sc->mem.mask = WIN_MEM; sc->mem.name = "memory"; sc->mem.base = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->mem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff); /* Determine if the prefetchable memory window is implemented. */ val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if (val == 0) { /* * If 'val' is zero, then only 32-bits of memory space * are supported. */ pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) { sc->pmem.valid = 1; pci_write_config(dev, PCIR_PMBASEL_1, 0, 2); } } else sc->pmem.valid = 1; /* Read the existing prefetchable memory window. */ if (sc->pmem.valid) { sc->pmem.reg = PCIR_PMBASEL_1; sc->pmem.step = 20; sc->pmem.mask = WIN_PMEM; sc->pmem.name = "prefetch"; if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { sc->pmem.base = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), val); sc->pmem.limit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffffffffffff; } else { sc->pmem.base = PCI_PPBMEMBASE(0, val); sc->pmem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffff; } pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY, RF_PREFETCHABLE, max); } } static void pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type) { device_t dev; int error, i; if (!w->valid) return; dev = sc->dev; error = rman_fini(&w->rman); if (error) { device_printf(dev, "failed to release %s rman\n", w->name); return; } free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF); for (i = 0; i < w->count; i++) { error = bus_free_resource(dev, type, w->res[i]); if (error) device_printf(dev, "failed to release %s resource: %d\n", w->name, error); } free(w->res, M_DEVBUF); } static void pcib_free_windows(struct pcib_softc *sc) { pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY); pcib_release_window(sc, &sc->io, SYS_RES_IOPORT); } #ifdef PCI_RES_BUS /* * Allocate a suitable secondary bus for this bridge if needed and * initialize the resource manager for the secondary bus range. Note * that the minimum count is a desired value and this may allocate a * smaller range. 
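 *
 * The usual "ask big, settle for less" idiom, sketched with a
 * hypothetical helper:
 */

static struct resource *
example_alloc_desired(device_t dev, int type, int *rid, rman_res_t want,
    u_int flags)
{
	struct resource *res;

	/* Try the desired count first, then fall back to a single unit. */
	res = bus_alloc_resource_anywhere(dev, type, rid, want, flags);
	if (res == NULL)
		res = bus_alloc_resource_anywhere(dev, type, rid, 1, flags);
	return (res);
}

/* pcib_setup_secbus() below applies the same shape to PCI_RES_BUS. */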
*/ void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count) { char buf[64]; int error, rid, sec_reg; switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; bus->sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; bus->sub_reg = PCIR_SUBBUS_2; break; default: panic("not a PCI bridge"); } bus->sec = pci_read_config(dev, sec_reg, 1); bus->sub = pci_read_config(dev, bus->sub_reg, 1); bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) panic("Failed to initialize %s bus number rman", device_get_nameunit(dev)); /* * Allocate a bus range. This will return an existing bus range * if one exists, or a new bus range if one does not. */ rid = 0; bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, min_count, RF_ACTIVE); if (bus->res == NULL) { /* * Fall back to just allocating a range of a single bus * number. */ bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid, 1, RF_ACTIVE); } else if (rman_get_size(bus->res) < min_count) /* * Attempt to grow the existing range to satisfy the * minimum desired count. */ (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), rman_get_start(bus->res) + min_count - 1); /* * Add the initial resource to the rman. */ if (bus->res != NULL) { error = rman_manage_region(&bus->rman, rman_get_start(bus->res), rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sec = rman_get_start(bus->res); bus->sub = rman_get_end(bus->res); } } void pcib_free_secbus(device_t dev, struct pcib_secbus *bus) { int error; error = rman_fini(&bus->rman); if (error) { device_printf(dev, "failed to release bus number rman\n"); return; } free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF); error = bus_free_resource(dev, PCI_RES_BUS, bus->res); if (error) device_printf(dev, "failed to release bus numbers resource: %d\n", error); } static struct resource * pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; res = rman_reserve_resource(&bus->rman, start, end, count, flags, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(bus->dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); rman_set_type(res, PCI_RES_BUS); return (res); } /* * Attempt to grow the secondary bus range. This is much simpler than * for I/O windows as the range can only be grown by increasing * subbus. 
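 *
 * The start stays pinned and only the end moves, so growth reduces
 * to a single bus_adjust_resource() call; a minimal sketch
 * (hypothetical helper name):
 */

static int
example_grow_end(device_t dev, int type, struct resource *res,
    rman_res_t extra)
{
	/* Keep the existing start; push the end out by 'extra' units. */
	return (bus_adjust_resource(dev, type, res, rman_get_start(res),
	    rman_get_end(res) + extra));
}

/* On success the freshly grown tail must still be handed to the rman. */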
static int
pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
{
	rman_res_t old_end;
	int error;

	old_end = rman_get_end(bus->res);
	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
	    rman_get_start(bus->res), new_end);
	if (error)
		return (error);
	if (bootverbose)
		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
		    rman_get_start(bus->res), rman_get_end(bus->res));
	error = rman_manage_region(&bus->rman, old_end + 1,
	    rman_get_end(bus->res));
	if (error)
		panic("Failed to add resource to rman");
	bus->sub = rman_get_end(bus->res);
	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
	return (0);
}

struct resource *
pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	rman_res_t start_free, end_free, new_end;

	/*
	 * First, see if the request can be satisfied by the existing
	 * bus range.
	 */
	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
	if (res != NULL)
		return (res);

	/*
	 * Figure out a range to grow the bus range.  First, find the
	 * first bus number after the last allocated bus in the rman and
	 * enforce that as a minimum starting point for the range.
	 */
	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
	    end_free != bus->sub)
		start_free = bus->sub + 1;
	if (start_free < start)
		start_free = start;
	new_end = start_free + count - 1;

	/*
	 * See if this new range would satisfy the request if it
	 * succeeds.
	 */
	if (new_end > end)
		return (NULL);

	/* Finally, attempt to grow the existing resource. */
	if (bootverbose) {
		device_printf(bus->dev,
		    "attempting to grow bus range for %ju buses\n", count);
		printf("\tback candidate range: %ju-%ju\n", start_free,
		    new_end);
	}
	if (pcib_grow_subbus(bus, new_end) == 0)
		return (pcib_suballoc_bus(bus, child, rid, start, end,
		    count, flags));
	return (NULL);
}
#endif

#else

/*
 * Is the prefetch window open (i.e., can we allocate memory in it?)
 */
static int
pcib_is_prefetch_open(struct pcib_softc *sc)
{
	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
}

/*
 * Is the nonprefetch window open (i.e., can we allocate memory in it?)
 */
static int
pcib_is_nonprefetch_open(struct pcib_softc *sc)
{
	return (sc->membase > 0 && sc->membase < sc->memlimit);
}

/*
 * Is the I/O window open (i.e., can we allocate ports in it?)
 */
static int
pcib_is_io_open(struct pcib_softc *sc)
{
	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
}

/*
 * Get current I/O decode.
 */
static void
pcib_get_io_decode(struct pcib_softc *sc)
{
	device_t dev;
	uint32_t iolow;

	dev = sc->dev;
	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iobase = PCI_PPBIOBASE(
		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
	else
		sc->iobase = PCI_PPBIOBASE(0, iolow);

	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iolimit = PCI_PPBIOLIMIT(
		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
	else
		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
}

/*
 * Get current memory decode.
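 *
 * The bridge's 16-bit base/limit registers carry address bits 31:20,
 * so memory windows are 1MB-granular.  An illustrative decode of a
 * register pair (helper name hypothetical):
 */

static void
example_decode_mem_window(uint16_t base_reg, uint16_t limit_reg,
    pci_addr_t *base, pci_addr_t *limit)
{
	/* Bits 15:4 hold address bits 31:20; the low nibble is flags. */
	*base = (pci_addr_t)(base_reg & ~0xf) << 16;
	*limit = ((pci_addr_t)(limit_reg & ~0xf) << 16) | 0xfffff;
}

/*
 * E.g. a base register of 0xe010 and a limit register of 0xe020
 * decode to the window 0xe0100000-0xe02fffff, which is what the
 * PCI_PPBMEM* macros used below compute.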
*/ static void pcib_get_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemlow; dev = sc->dev; sc->membase = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->memlimit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmembase = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow); else sc->pmembase = PCI_PPBMEMBASE(0, pmemlow); pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmemlimit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow); else sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow); } /* * Restore previous I/O decode. */ static void pcib_set_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iohi; dev = sc->dev; iohi = sc->iobase >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2); pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1); iohi = sc->iolimit >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2); pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1); } /* * Restore previous memory decode. */ static void pcib_set_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemhi; dev = sc->dev; pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2); pmemhi = sc->pmembase >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2); pmemhi = sc->pmemlimit >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2); } #endif #ifdef PCI_HP /* * PCI-express HotPlug support. */ static int pci_enable_pcie_hp = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN, &pci_enable_pcie_hp, 0, "Enable support for native PCI-express HotPlug."); TASKQUEUE_DEFINE_THREAD(pci_hp); static void pcib_probe_hotplug(struct pcib_softc *sc) { device_t dev; uint32_t link_cap; uint16_t link_sta, slot_sta; if (!pci_enable_pcie_hp) return; dev = sc->dev; if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0) return; if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT)) return; sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4); if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0) return; link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4); if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0) return; /* * Some devices report that they have an MRL when they actually * do not. Since they always report that the MRL is open, child * devices would be ignored. Try to detect these devices and * ignore their claim of HotPlug support. * * If there is an open MRL but the Data Link Layer is active, * the MRL is not real. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) { link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 && (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) { return; } } /* * Now that we're sure we want to do hot plug, ask the * firmware, if any, if that's OK. */ if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) { if (bootverbose) device_printf(dev, "Unable to activate hot plug feature.\n"); return; } sc->flags |= PCIB_HOTPLUG; } /* * Send a HotPlug command to the slot control register. 
If this slot * uses command completion interrupts and a previous command is still * in progress, then the command is dropped. Once the previous * command completes or times out, pcib_pcie_hotplug_update() will be * invoked to post a new command based on the slot's state at that * time. */ static void pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask) { device_t dev; uint16_t ctl, new; dev = sc->dev; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) return; ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2); new = (ctl & ~mask) | val; if (new == ctl) return; if (bootverbose) device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new); pcie_write_config(dev, PCIER_SLOT_CTL, new, 2); if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) && (ctl & new) & PCIEM_SLOT_CTL_CCIE) { sc->flags |= PCIB_HOTPLUG_CMD_PENDING; if (!cold) taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, hz); } } static void pcib_pcie_hotplug_command_completed(struct pcib_softc *sc) { device_t dev; dev = sc->dev; if (bootverbose) device_printf(dev, "Command Completed\n"); if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING)) return; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; wakeup(sc); } /* * Returns true if a card is fully inserted from the user's * perspective. It may not yet be ready for access, but the driver * can now start enabling access if necessary. */ static bool pcib_hotplug_inserted(struct pcib_softc *sc) { /* Pretend the card isn't present if a detach is forced. */ if (sc->flags & PCIB_DETACHING) return (false); /* Card must be present in the slot. */ if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0) return (false); /* A power fault implicitly turns off power to the slot. */ if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) return (false); /* If the MRL is disengaged, the slot is powered off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP && (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0) return (false); return (true); } /* * Returns -1 if the card is fully inserted, powered, and ready for * access. Otherwise, returns 0. */ static int pcib_hotplug_present(struct pcib_softc *sc) { /* Card must be inserted. */ if (!pcib_hotplug_inserted(sc)) return (0); /* Require the Data Link Layer to be active. */ if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)) return (0); return (-1); } static int pci_enable_pcie_ei = 0; SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_ei, CTLFLAG_RWTUN, &pci_enable_pcie_ei, 0, "Enable support for PCI-express Electromechanical Interlock."); static void pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask, bool schedule_task) { bool card_inserted, ei_engaged; /* Clear DETACHING if Presence Detect has cleared. */ if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) == PCIEM_SLOT_STA_PDC) sc->flags &= ~PCIB_DETACHING; card_inserted = pcib_hotplug_inserted(sc); /* Turn the power indicator on if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) { mask |= PCIEM_SLOT_CTL_PIC; if (card_inserted) val |= PCIEM_SLOT_CTL_PI_ON; else if (sc->flags & PCIB_DETACH_PENDING) val |= PCIEM_SLOT_CTL_PI_BLINK; else val |= PCIEM_SLOT_CTL_PI_OFF; } /* Turn the power on via the Power Controller if a card is inserted. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) { mask |= PCIEM_SLOT_CTL_PCC; if (card_inserted) val |= PCIEM_SLOT_CTL_PC_ON; else val |= PCIEM_SLOT_CTL_PC_OFF; } /* * If a card is inserted, enable the Electromechanical * Interlock. 
If a card is not inserted (or we are in the * process of detaching), disable the Electromechanical * Interlock. */ if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) && pci_enable_pcie_ei) { mask |= PCIEM_SLOT_CTL_EIC; ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0; if (card_inserted != ei_engaged) val |= PCIEM_SLOT_CTL_EIC; } /* * Start a timer to see if the Data Link Layer times out. * Note that we only start the timer if Presence Detect or MRL Sensor * changed on this interrupt. Stop any scheduled timer if * the Data Link Layer is active. */ if (card_inserted && !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) && sc->pcie_slot_sta & (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) { if (cold) device_printf(sc->dev, "Data Link Layer inactive\n"); else taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, hz); } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, NULL); pcib_pcie_hotplug_command(sc, val, mask); /* * During attach the child "pci" device is added synchronously; * otherwise, the task is scheduled to manage the child * device. */ if (schedule_task && (pcib_hotplug_present(sc) != 0) != (sc->child != NULL)) taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task); } static void pcib_pcie_intr_hotplug(void *arg) { struct pcib_softc *sc; device_t dev; uint16_t old_slot_sta; sc = arg; dev = sc->dev; PCIB_HP_LOCK(sc); old_slot_sta = sc->pcie_slot_sta; sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear the events just reported. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); if (bootverbose) device_printf(dev, "HotPlug interrupt: %#x\n", sc->pcie_slot_sta); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) { if (sc->flags & PCIB_DETACH_PENDING) { device_printf(dev, "Attention Button Pressed: Detach Cancelled\n"); sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) { /* Only initiate detach sequence if device present. */ device_printf(dev, "Attention Button Pressed: Detaching in 5 seconds\n"); sc->flags |= PCIB_DETACH_PENDING; taskqueue_enqueue_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, 5 * hz); } } if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) device_printf(dev, "Power Fault Detected\n"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC) device_printf(dev, "MRL Sensor Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" : "closed"); if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) device_printf(dev, "Presence Detect Changed to %s\n", sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" : "empty"); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC) pcib_pcie_hotplug_command_completed(sc); if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) { sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (bootverbose) device_printf(dev, "Data Link Layer State Changed to %s\n", sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ? 
"active" : "inactive"); } pcib_pcie_hotplug_update(sc, 0, 0, true); PCIB_HP_UNLOCK(sc); } static void pcib_pcie_hotplug_task(void *context, int pending) { struct pcib_softc *sc; device_t dev; sc = context; PCIB_HP_LOCK(sc); dev = sc->dev; if (pcib_hotplug_present(sc) != 0) { if (sc->child == NULL) { sc->child = device_add_child(dev, "pci", -1); bus_generic_attach(dev); } } else { if (sc->child != NULL) { if (device_delete_child(dev, sc->child) == 0) sc->child = NULL; } } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_ab_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; PCIB_HP_LOCK(sc); if (sc->flags & PCIB_DETACH_PENDING) { sc->flags |= PCIB_DETACHING; sc->flags &= ~PCIB_DETACH_PENDING; pcib_pcie_hotplug_update(sc, 0, 0, true); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_cc_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); if (!(sta & PCIEM_SLOT_STA_CC)) { device_printf(dev, "HotPlug Command Timed Out\n"); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } else { device_printf(dev, "Missed HotPlug interrupt waiting for Command Completion\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static void pcib_pcie_dll_timeout(void *arg, int pending) { struct pcib_softc *sc = arg; device_t dev = sc->dev; uint16_t sta; PCIB_HP_LOCK(sc); sta = pcie_read_config(dev, PCIER_LINK_STA, 2); if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) { device_printf(dev, "Timed out waiting for Data Link Layer Active\n"); sc->flags |= PCIB_DETACHING; pcib_pcie_hotplug_update(sc, 0, 0, true); } else if (sta != sc->pcie_link_sta) { device_printf(dev, "Missed HotPlug interrupt waiting for DLL Active\n"); pcib_pcie_intr_hotplug(sc); } PCIB_HP_UNLOCK(sc); } static int pcib_alloc_pcie_irq(struct pcib_softc *sc) { device_t dev; int count, error, mem_rid, rid; rid = -1; dev = sc->dev; /* * For simplicity, only use MSI-X if there is a single message. * To support a device with multiple messages we would have to * use remap intr if the MSI number is not 0. 
*/ count = pci_msix_count(dev); if (count == 1) { mem_rid = pci_msix_table_bar(dev); sc->pcie_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, RF_ACTIVE); if (sc->pcie_mem == NULL) { device_printf(dev, "Failed to allocate BAR for MSI-X table\n"); } else { error = pci_alloc_msix(dev, &count); if (error == 0) rid = 1; } } if (rid < 0 && pci_msi_count(dev) > 0) { count = 1; error = pci_alloc_msi(dev, &count); if (error == 0) rid = 1; } if (rid < 0) rid = 0; sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (sc->pcie_irq == NULL) { device_printf(dev, "Failed to allocate interrupt for PCI-e events\n"); if (rid > 0) pci_release_msi(dev); return (ENXIO); } error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE, NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand); if (error) { device_printf(dev, "Failed to setup PCI-e interrupt handler\n"); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq); if (rid > 0) pci_release_msi(dev); return (error); } return (0); } static int pcib_release_pcie_irq(struct pcib_softc *sc) { device_t dev; int error; dev = sc->dev; error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand); if (error) return (error); error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq); if (error) return (error); error = pci_release_msi(dev); if (error) return (error); if (sc->pcie_mem != NULL) error = bus_free_resource(dev, SYS_RES_MEMORY, sc->pcie_mem); return (error); } static void pcib_setup_hotplug(struct pcib_softc *sc) { device_t dev; uint16_t mask, val; dev = sc->dev; TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0, pcib_pcie_ab_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0, pcib_pcie_cc_timeout, sc); TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0, pcib_pcie_dll_timeout, sc); sc->pcie_hp_lock = bus_topo_mtx(); /* Allocate IRQ. */ if (pcib_alloc_pcie_irq(sc) != 0) return; sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); /* Clear any events previously pending. */ pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); /* Enable HotPlug events. */ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB) val |= PCIEM_SLOT_CTL_ABPE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) val |= PCIEM_SLOT_CTL_PFDE; if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) val |= PCIEM_SLOT_CTL_MRLSCE; if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) val |= PCIEM_SLOT_CTL_CCIE; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); } static int pcib_detach_hotplug(struct pcib_softc *sc) { uint16_t mask, val; int error; /* Disable the card in the slot and force it to detach. */ if (sc->flags & PCIB_DETACH_PENDING) { sc->flags &= ~PCIB_DETACH_PENDING; taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, NULL); } sc->flags |= PCIB_DETACHING; if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) { taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL); tsleep(sc, 0, "hpcmd", hz); sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; } /* Disable HotPlug events. 
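 *
 * pcib_pcie_hotplug_command() applies the usual (val, mask)
 * read-modify-write to the slot control register, i.e.
 * (illustrative only, not compiled):
 */
#if 0
	ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
	ctl = (ctl & ~mask) | val;
	pcie_write_config(dev, PCIER_SLOT_CTL, ctl, 2);
#endif
	/*
	 * Here every event-enable bit goes into 'mask' while 'val'
	 * stays clear, so all interrupt sources are switched off.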
*/ mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; val = 0; /* Turn the attention indicator off. */ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { mask |= PCIEM_SLOT_CTL_AIC; val |= PCIEM_SLOT_CTL_AI_OFF; } pcib_pcie_hotplug_update(sc, val, mask, false); error = pcib_release_pcie_irq(sc); if (error) return (error); taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task); taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task); return (0); } #endif /* * Get current bridge configuration. */ static void pcib_cfg_save(struct pcib_softc *sc) { #ifndef NEW_PCIB device_t dev; uint16_t command; dev = sc->dev; command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_get_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_get_mem_decode(sc); #endif } /* * Restore previous bridge configuration. */ static void pcib_cfg_restore(struct pcib_softc *sc) { #ifndef NEW_PCIB uint16_t command; #endif #ifdef NEW_PCIB pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); #else command = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_set_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_set_mem_decode(sc); #endif } /* * Generic device interface */ static int pcib_probe(device_t dev) { if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { device_set_desc(dev, "PCI-PCI bridge"); return(-10000); } return(ENXIO); } void pcib_attach_common(device_t dev) { struct pcib_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int comma; sc = device_get_softc(dev); sc->dev = dev; /* * Get current bridge configuration. */ sc->domain = pci_get_domain(dev); #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); #endif sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); pcib_cfg_save(sc); /* * The primary bus register should always be the bus of the * parent. */ sc->pribus = pci_get_bus(dev); pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); /* * Setup sysctl reporting nodes */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); /* * Quirk handling. */ switch (pci_get_devid(dev)) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) case 0x12258086: /* Intel 82454KX/GX (Orion) */ { uint8_t supbus; supbus = pci_read_config(dev, 0x41, 1); if (supbus != 0xff) { sc->bus.sec = supbus + 1; sc->bus.sub = supbus + 1; } break; } #endif /* * The i82380FB mobile docking controller is a PCI-PCI bridge, * and it is a subtractive bridge. However, the ProgIf is wrong * so the normal setting of PCIB_SUBTRACTIVE bit doesn't * happen. There are also Toshiba and Cavium ThunderX bridges * that behave this way. 
*/ case 0xa002177d: /* Cavium ThunderX */ case 0x124b8086: /* Intel 82380FB Mobile */ case 0x060513d7: /* Toshiba ???? */ sc->flags |= PCIB_SUBTRACTIVE; break; #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* Compaq R3000 BIOS sets wrong subordinate bus number. */ case 0x00dd10de: { char *cp; if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sc->bus.sub < 0xa) { pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); } break; } #endif } if (pci_msi_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSI; if (pci_msix_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSIX; /* * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. * This means they act as if they were subtractively decoding * bridges and pass all transactions. Mark them and real ProgIf 1 * parts as subtractive. */ if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) sc->flags |= PCIB_SUBTRACTIVE; #ifdef PCI_HP pcib_probe_hotplug(sc); #endif #ifdef NEW_PCIB #ifdef PCI_RES_BUS pcib_setup_secbus(dev, &sc->bus, 1); #endif pcib_probe_windows(sc); #endif #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) pcib_setup_hotplug(sc); #endif if (bootverbose) { device_printf(dev, " domain %d\n", sc->domain); device_printf(dev, " secondary bus %d\n", sc->bus.sec); device_printf(dev, " subordinate bus %d\n", sc->bus.sub); #ifdef NEW_PCIB if (pcib_is_window_open(&sc->io)) device_printf(dev, " I/O decode 0x%jx-0x%jx\n", (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); if (pcib_is_window_open(&sc->mem)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); if (pcib_is_window_open(&sc->pmem)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); #else if (pcib_is_io_open(sc)) device_printf(dev, " I/O decode 0x%x-0x%x\n", sc->iobase, sc->iolimit); if (pcib_is_nonprefetch_open(sc)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); if (pcib_is_prefetch_open(sc)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); #endif if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || sc->flags & PCIB_SUBTRACTIVE) { device_printf(dev, " special decode "); comma = 0; if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { printf("ISA"); comma = 1; } if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { printf("%sVGA", comma ? ", " : ""); comma = 1; } if (sc->flags & PCIB_SUBTRACTIVE) printf("%ssubtractive", comma ? ", " : ""); printf("\n"); } } /* * Always enable busmastering on bridges so that transactions * initiated on the secondary bus are passed through to the * primary bus. 
*/ pci_enable_busmaster(dev); } #ifdef PCI_HP static int pcib_present(struct pcib_softc *sc) { if (sc->flags & PCIB_HOTPLUG) return (pcib_hotplug_present(sc) != 0); return (1); } #endif int pcib_attach_child(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->bus.sec == 0) { /* no secondary bus; we should have fixed this */ return(0); } #ifdef PCI_HP if (!pcib_present(sc)) { /* An empty HotPlug slot, so don't add a PCI bus yet. */ return (0); } #endif sc->child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } int pcib_attach(device_t dev) { pcib_attach_common(dev); return (pcib_attach_child(dev)); } int pcib_detach(device_t dev) { #if defined(PCI_HP) || defined(NEW_PCIB) struct pcib_softc *sc; #endif int error; #if defined(PCI_HP) || defined(NEW_PCIB) sc = device_get_softc(dev); #endif error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_HP if (sc->flags & PCIB_HOTPLUG) { error = pcib_detach_hotplug(sc); if (error) return (error); } #endif error = device_delete_children(dev); if (error) return (error); #ifdef NEW_PCIB pcib_free_windows(sc); #ifdef PCI_RES_BUS pcib_free_secbus(dev, &sc->bus); #endif #endif return (0); } int pcib_suspend(device_t dev) { pcib_cfg_save(device_get_softc(dev)); return (bus_generic_suspend(dev)); } int pcib_resume(device_t dev) { pcib_cfg_restore(device_get_softc(dev)); /* * Restore the Command register only after restoring the windows. * The bridge should not be claiming random windows. */ pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2); return (bus_generic_resume(dev)); } void pcib_bridge_init(device_t dev) { pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); } int pcib_child_present(device_t dev, device_t child) { #ifdef PCI_HP struct pcib_softc *sc = device_get_softc(dev); int retval; retval = bus_child_present(dev); if (retval != 0 && sc->flags & PCIB_HOTPLUG) retval = pcib_hotplug_present(sc); return (retval); #else return (bus_child_present(dev)); #endif } int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return(0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return(0); } return(ENOENT); } int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return(EINVAL); case PCIB_IVAR_BUS: return(EINVAL); } return(ENOENT); } #ifdef NEW_PCIB /* * Attempt to allocate a resource from the existing resources assigned * to a window. 
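 *
 * The heart of such a sub-allocation is a single rman call against
 * the window's region manager; a minimal sketch (helper name
 * hypothetical):
 */

static struct resource *
example_suballoc(struct rman *rm, device_t child, rman_res_t start,
    rman_res_t end, rman_res_t count, u_int flags)
{
	/* Reserve only; activation is a separate, later step. */
	return (rman_reserve_resource(rm, start, end, count,
	    flags & ~RF_ACTIVE, child));
}

/*
 * Masking off RF_ACTIVE defers activation so the real method below
 * can call bus_activate_resource() itself and unwind cleanly if
 * that fails.
 */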
*/ static struct resource * pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (!pcib_is_window_open(w)) return (NULL); res = rman_reserve_resource(&w->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(sc->dev, "allocated %s range (%#jx-%#jx) for rid %x of %s\n", w->name, rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); rman_set_type(res, type); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } /* Allocate a fresh resource range for an unconfigured window. */ static int pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; rman_res_t base, limit, wmask; int rid; /* * If this is an I/O window on a bridge with ISA enable set * and the start address is below 64k, then try to allocate an * initial window of 0x1000 bytes long starting at address * 0xf000 and walking down. Note that if the original request * was larger than the non-aliased range size of 0x100 our * caller would have raised the start address up to 64k * already. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && start < 65536) { for (base = 0xf000; (long)base >= 0; base -= 0x1000) { limit = base + 0xfff; /* * Skip ranges that wouldn't work for the * original request. Note that the actual * window that overlaps are the non-alias * ranges within [base, limit], so this isn't * quite a simple comparison. */ if (start + count > limit - 0x400) continue; if (base == 0) { /* * The first open region for the window at * 0 is 0x400-0x4ff. */ if (end - count + 1 < 0x400) continue; } else { if (end - count + 1 < base) continue; } if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { w->base = base; w->limit = limit; return (0); } } return (ENOSPC); } wmask = ((rman_res_t)1 << w->step) - 1; if (RF_ALIGNMENT(flags) < w->step) { flags &= ~RF_ALIGNMENT_MASK; flags |= RF_ALIGNMENT_LOG2(w->step); } start &= ~wmask; end |= wmask; count = roundup2(count, (rman_res_t)1 << w->step); rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, flags | RF_ACTIVE | RF_UNMAPPED); if (res == NULL) return (ENOSPC); pcib_add_window_resources(w, &res, 1); pcib_activate_window(sc, type); w->base = rman_get_start(res); w->limit = rman_get_end(res); return (0); } /* Try to expand an existing window to the requested base and limit. */ static int pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t base, rman_res_t limit) { struct resource *res; int error, i, force_64k_base; KASSERT(base <= w->base && limit >= w->limit, ("attempting to shrink window")); /* * XXX: pcib_grow_window() doesn't try to do this anyway and * the error handling for all the edge cases would be tedious. */ KASSERT(limit == w->limit || base == w->base, ("attempting to grow both ends of a window")); /* * Yet more special handling for requests to expand an I/O * window behind an ISA-enabled bridge. Since I/O windows * have to grow in 0x1000 increments and the end of the 0xffff * range is an alias, growing a window below 64k will always * result in allocating new resources and never adjusting an * existing resource. 
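 *
 * In every other case the window edges simply snap to the window's
 * 2^step granularity (step is 12 for I/O ports and 20 for memory),
 * as in (illustrative only, not compiled):
 */
#if 0
	wmask = ((rman_res_t)1 << w->step) - 1;
	base = start & ~wmask;		/* round the base down */
	limit = end | wmask;		/* round the limit up */
#endif
	/*
	 * The ISA special case below instead allocates fresh non-alias
	 * ranges via pcib_alloc_nonisa_ranges().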
*/ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && (limit <= 65535 || (base <= 65535 && base != w->base))) { KASSERT(limit == w->limit || limit <= 65535, ("attempting to grow both ends across 64k ISA alias")); if (base != w->base) error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1); else error = pcib_alloc_nonisa_ranges(sc, w->limit + 1, limit); if (error == 0) { w->base = base; w->limit = limit; } return (error); } /* * Find the existing resource to adjust. Usually there is only one, * but for an ISA-enabled bridge we might be growing the I/O window * above 64k and need to find the existing resource that maps all * of the area above 64k. */ for (i = 0; i < w->count; i++) { if (rman_get_end(w->res[i]) == w->limit) break; } KASSERT(i != w->count, ("did not find existing resource")); res = w->res[i]; /* * Usually the resource we found should match the window's * existing range. The one exception is the ISA-enabled case * mentioned above in which case the resource should start at * 64k. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && w->base <= 65535) { KASSERT(rman_get_start(res) == 65536, ("existing resource mismatch")); force_64k_base = 1; } else { KASSERT(w->base == rman_get_start(res), ("existing resource mismatch")); force_64k_base = 0; } error = bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : base, limit); if (error) return (error); /* Add the newly allocated region to the resource manager. */ if (w->base != base) { error = rman_manage_region(&w->rman, base, w->base - 1); w->base = base; } else { error = rman_manage_region(&w->rman, w->limit + 1, limit); w->limit = limit; } if (error) { if (bootverbose) device_printf(sc->dev, "failed to expand %s resource manager\n", w->name); (void)bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : w->base, w->limit); } return (error); } /* * Attempt to grow a window to make room for a given resource request. */ static int pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t align, start_free, end_free, front, back, wmask; int error; /* * Clamp the desired resource range to the maximum address * this window supports. Reject impossible requests. * * For I/O port requests behind a bridge with the ISA enable * bit set, force large allocations to start above 64k. */ if (!w->valid) return (EINVAL); if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 && start < 65536) start = 65536; if (end > w->rman.rm_end) end = w->rman.rm_end; if (start + count - 1 > end || start + count < start) return (EINVAL); wmask = ((rman_res_t)1 << w->step) - 1; /* * If there is no resource at all, just try to allocate enough * aligned space for this resource. */ if (w->res == NULL) { error = pcib_alloc_new_window(sc, w, type, start, end, count, flags); if (error) { if (bootverbose) device_printf(sc->dev, "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n", w->name, start, end, count); return (error); } if (bootverbose) device_printf(sc->dev, "allocated initial %s window of %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); goto updatewin; } /* * See if growing the window would help. Compute the minimum * amount of address space needed on both the front and back * ends of the existing window to satisfy the allocation. * * For each end, build a candidate region adjusting for the * required alignment, etc. 
If there is a free region at the * edge of the window, grow from the inner edge of the free * region. Otherwise grow from the window boundary. * * Growing an I/O window below 64k for a bridge with the ISA * enable bit doesn't require any special magic as the step * size of an I/O window (1k) always includes multiple * non-alias ranges when it is grown in either direction. * * XXX: Special case: if w->res is completely empty and the * request size is larger than w->res, we should find the * optimal aligned buffer containing w->res and allocate that. */ if (bootverbose) device_printf(sc->dev, "attempting to grow %s window for (%#jx-%#jx,%#jx)\n", w->name, start, end, count); align = (rman_res_t)1 << RF_ALIGNMENT(flags); if (start < w->base) { if (rman_first_free_region(&w->rman, &start_free, &end_free) != 0 || start_free != w->base) end_free = w->base; if (end_free > end) end_free = end + 1; /* Move end_free down until it is properly aligned. */ end_free &= ~(align - 1); end_free--; front = end_free - (count - 1); /* * The resource would now be allocated at (front, * end_free). Ensure that fits in the (start, end) * bounds. end_free is checked above. If 'front' is * ok, ensure it is properly aligned for this window. * Also check for underflow. */ if (front >= start && front <= end_free) { if (bootverbose) printf("\tfront candidate range: %#jx-%#jx\n", front, end_free); front &= ~wmask; front = w->base - front; } else front = 0; } else front = 0; if (end > w->limit) { if (rman_last_free_region(&w->rman, &start_free, &end_free) != 0 || end_free != w->limit) start_free = w->limit + 1; if (start_free < start) start_free = start; /* Move start_free up until it is properly aligned. */ start_free = roundup2(start_free, align); back = start_free + count - 1; /* * The resource would now be allocated at (start_free, * back). Ensure that fits in the (start, end) * bounds. start_free is checked above. If 'back' is * ok, ensure it is properly aligned for this window. * Also check for overflow. */ if (back <= end && start_free <= back) { if (bootverbose) printf("\tback candidate range: %#jx-%#jx\n", start_free, back); back |= wmask; back -= w->limit; } else back = 0; } else back = 0; /* * Try to allocate the smallest needed region first. * If that fails, fall back to the other region. */ error = ENOSPC; while (front != 0 || back != 0) { if (front != 0 && (front <= back || back == 0)) { error = pcib_expand_window(sc, w, type, w->base - front, w->limit); if (error == 0) break; front = 0; } else { error = pcib_expand_window(sc, w, type, w->base, w->limit + back); if (error == 0) break; back = 0; } } if (error) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); updatewin: /* Write the new window. */ KASSERT((w->base & wmask) == 0, ("start address is not aligned")); KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); pcib_write_windows(sc, w->mask); return (0); } /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc; struct resource *r; sc = device_get_softc(dev); /* * VGA resources are decoded iff the VGA enable bit is set in * the bridge control register. VGA resources do not fall into * the resource windows and are passed up to the parent. 
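 *
 * "VGA resources" means the legacy ranges: I/O ports 0x3b0-0x3bb
 * and 0x3c0-0x3df plus memory 0xa0000-0xbffff, roughly (illustrative
 * only, not compiled; the real helpers may differ in detail):
 */
#if 0
	vga_io = (start >= 0x3b0 && end <= 0x3bb) ||
	    (start >= 0x3c0 && end <= 0x3df);
	vga_mem = (start >= 0xa0000 && end <= 0xbffff);
#endif
	/*
	 * The pci_is_vga_ioport_range() and pci_is_vga_memory_range()
	 * helpers used below encapsulate those checks.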
*/ if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); else return (NULL); } switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: if (pcib_is_isa_range(sc, start, end, count)) return (NULL); r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (pcib_grow_window(sc, &sc->io, type, start, end, count, flags) == 0) r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); break; case SYS_RES_MEMORY: /* * For prefetchable resources, prefer the prefetchable * memory window, but fall back to the regular memory * window if that fails. Try both windows before * attempting to grow a window in case the firmware * has used a range in the regular memory window to * map a prefetchable BAR. */ if (flags & RF_PREFETCHABLE) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (flags & RF_PREFETCHABLE) { if (pcib_grow_window(sc, &sc->pmem, type, start, end, count, flags) == 0) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } } if (pcib_grow_window(sc, &sc->mem, type, start, end, count, flags & ~RF_PREFETCHABLE) == 0) r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); break; default: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } /* * If attempts to suballocate from the window fail but this is a * subtractive bridge, pass the request up the tree. */ if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (r); } static int pcib_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct pcib_softc *sc; struct pcib_window *w; rman_res_t wmask; int error, type; sc = device_get_softc(bus); type = rman_get_type(r); /* * If the resource wasn't sub-allocated from one of our region * managers then just pass the request up. */ if (!pcib_is_resource_managed(sc, r)) return (bus_generic_adjust_resource(bus, child, r, start, end)); #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) { /* * If our bus range isn't big enough to grow the sub-allocation * then we need to grow our bus range. Any request that would * require us to decrease the start of our own bus range is * invalid, we can only extend the end; ignore such requests * and let rman_adjust_resource fail below. */ if (start >= sc->bus.sec && end > sc->bus.sub) { error = pcib_grow_subbus(&sc->bus, end); if (error != 0) return (error); } } else #endif { /* * Resource is managed and not a secondary bus number, must * be from one of our windows. */ w = pcib_get_resource_window(sc, r); KASSERT(w != NULL, ("%s: no window for resource (%#jx-%#jx) type %d", __func__, rman_get_start(r), rman_get_end(r), type)); /* * If our window isn't big enough to grow the sub-allocation * then we need to expand the window. 
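 *
 * The new edges are the old ones widened just far enough to cover
 * the granularity-rounded request, i.e. (illustrative only, not
 * compiled):
 */
#if 0
	new_base = MIN(start & ~wmask, w->base);	/* never shrink */
	new_limit = MAX(end | wmask, w->limit);
#endif
	/*
	 * For a 1MB-step memory window at 0xe0100000-0xe02fffff, a
	 * request ending at 0xe0340000 moves only the limit, out to
	 * 0xe03fffff.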
*/ if (start < w->base || end > w->limit) { wmask = ((rman_res_t)1 << w->step) - 1; error = pcib_expand_window(sc, w, type, MIN(start & ~wmask, w->base), MAX(end | wmask, w->limit)); if (error != 0) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); pcib_write_windows(sc, w->mask); } } return (rman_adjust_resource(r, start, end)); } static int -pcib_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +pcib_release_resource(device_t dev, device_t child, struct resource *r) { struct pcib_softc *sc; int error; sc = device_get_softc(dev); if (pcib_is_resource_managed(sc, r)) { if (rman_get_flags(r) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, rid, r); + error = bus_deactivate_resource(child, r); if (error) return (error); } return (rman_release_resource(r)); } - return (bus_generic_release_resource(dev, child, type, rid, r)); + return (bus_generic_release_resource(dev, child, r)); } static int pcib_activate_resource(device_t dev, device_t child, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; int error, type; if (!pcib_is_resource_managed(sc, r)) return (bus_generic_activate_resource(dev, child, r)); error = rman_activate_resource(r); if (error != 0) return (error); type = rman_get_type(r); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); } static int pcib_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map map; int error, type; if (!pcib_is_resource_managed(sc, r)) return (bus_generic_deactivate_resource(dev, child, r)); error = rman_deactivate_resource(r); if (error != 0) return (error); type = rman_get_type(r); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, r, &map); } return (0); } static struct resource * pcib_find_parent_resource(struct pcib_window *w, struct resource *r) { for (int i = 0; i < w->count; i++) { if (rman_get_start(w->res[i]) <= rman_get_start(r) && rman_get_end(w->res[i]) >= rman_get_end(r)) return (w->res[i]); } return (NULL); } static int pcib_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct resource_map_request args; struct pcib_window *w; struct resource *pres; rman_res_t length, start; int error; w = pcib_get_resource_window(sc, r); if (w == NULL) return (bus_generic_map_resource(dev, child, r, argsp, map)); /* Resources must be active to be mapped. 
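 *
 * Once active, the child's range is mapped at an offset within the
 * parent window resource found by pcib_find_parent_resource(); e.g., a
 * child resource starting at 0xc0081000 carved from a parent resource
 * spanning 0xc0000000-0xc00fffff is mapped with args.offset = 0x81000.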
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = pcib_find_parent_resource(w, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, pres, &args, map)); } static int pcib_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct pcib_softc *sc = device_get_softc(dev); struct pcib_window *w; w = pcib_get_resource_window(sc, r); if (w != NULL) { r = pcib_find_parent_resource(w, r); if (r == NULL) return (ENOENT); } return (bus_generic_unmap_resource(dev, child, r, map)); } #else /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. */ static struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pcib_softc *sc = device_get_softc(dev); const char *name, *suffix; int ok; /* * Fail the allocation for this range if it's not supported. */ name = device_get_nameunit(child); if (name == NULL) { name = ""; suffix = ""; } else suffix = " "; switch (type) { case SYS_RES_IOPORT: ok = 0; if (!pcib_is_io_open(sc)) break; ok = (start >= sc->iobase && end <= sc->iolimit); /* * Make sure we allow access to VGA I/O addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_ioport_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { if (start < sc->iobase) start = sc->iobase; if (end > sc->iolimit) end = sc->iolimit; if (start < end) ok = 1; } } else { ok = 1; #if 0 /* * If we overlap with the subtractive range, then * pick the upper range to use. */ if (start < sc->iolimit && end > sc->iobase) start = sc->iolimit + 1; #endif } if (end < start) { device_printf(dev, "ioport: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok) { device_printf(dev, "%s%srequested unsupported I/O " "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", name, suffix, start, end, sc->iobase, sc->iolimit); return (NULL); } if (bootverbose) device_printf(dev, "%s%srequested I/O range 0x%jx-0x%jx: in range\n", name, suffix, start, end); break; case SYS_RES_MEMORY: ok = 0; if (pcib_is_nonprefetch_open(sc)) ok = ok || (start >= sc->membase && end <= sc->memlimit); if (pcib_is_prefetch_open(sc)) ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); /* * Make sure we allow access to VGA memory addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_memory_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 
1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { ok = 1; if (flags & RF_PREFETCHABLE) { if (pcib_is_prefetch_open(sc)) { if (start < sc->pmembase) start = sc->pmembase; if (end > sc->pmemlimit) end = sc->pmemlimit; } else { ok = 0; } } else { /* non-prefetchable */ if (pcib_is_nonprefetch_open(sc)) { if (start < sc->membase) start = sc->membase; if (end > sc->memlimit) end = sc->memlimit; } else { ok = 0; } } } } else if (!ok) { ok = 1; /* subtractive bridge: always ok */ #if 0 if (pcib_is_nonprefetch_open(sc)) { if (start < sc->memlimit && end > sc->membase) start = sc->memlimit + 1; } if (pcib_is_prefetch_open(sc)) { if (start < sc->pmemlimit && end > sc->pmembase) start = sc->pmemlimit + 1; } #endif } if (end < start) { device_printf(dev, "memory: end (%jx) < start (%jx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok && bootverbose) device_printf(dev, "%s%srequested unsupported memory range %#jx-%#jx " "(decoding %#jx-%#jx, %#jx-%#jx)\n", name, suffix, start, end, (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); if (!ok) return (NULL); if (bootverbose) device_printf(dev,"%s%srequested memory range " "0x%jx-0x%jx: good\n", name, suffix, start, end); break; default: break; } /* * Bridge is OK decoding this resource, so pass it up. */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif /* * If ARI is enabled on this downstream port, translate the function number * to the non-ARI slot/function. The downstream port will convert it back in * hardware. If ARI is not enabled slot and func are not modified. */ static __inline void pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) { struct pcib_softc *sc; int ari_func; sc = device_get_softc(pcib); ari_func = *func; if (sc->flags & PCIB_ENABLE_ARI) { KASSERT(*slot == 0, ("Non-zero slot number with ARI enabled!")); *slot = PCIE_ARI_SLOT(ari_func); *func = PCIE_ARI_FUNC(ari_func); } } static void pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) { uint32_t ctl2; ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); ctl2 |= PCIEM_CTL2_ARI; pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); sc->flags |= PCIB_ENABLE_ARI; } /* * PCIB interface. */ int pcib_maxslots(device_t dev) { #if !defined(__amd64__) && !defined(__i386__) uint32_t pcie_pos; uint16_t val; /* * If this is a PCIe rootport or downstream switch port, there's only * one slot permitted. */ if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) { val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2); val &= PCIEM_FLAGS_TYPE; if (val == PCIEM_TYPE_ROOT_PORT || val == PCIEM_TYPE_DOWNSTREAM_PORT) return (0); } #endif return (PCI_SLOTMAX); } static int pcib_ari_maxslots(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_SLOTMAX); else return (pcib_maxslots(dev)); } static int pcib_ari_maxfuncs(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_FUNCMAX); else return (PCI_FUNCMAX); } static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func) { struct pcib_softc *sc; sc = device_get_softc(pcib); *bus = PCI_RID2BUS(rid); if (sc->flags & PCIB_ENABLE_ARI) { *slot = PCIE_ARI_RID2SLOT(rid); *func = PCIE_ARI_RID2FUNC(rid); } else { *slot = PCI_RID2SLOT(rid); *func = PCI_RID2FUNC(rid); } } /* * Since we are a child of a PCI bus, its parent must support the pcib interface. 
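 *
 * (pcib_read_config() and pcib_write_config() below apply
 * pcib_xlate_ari() first: with ARI enabled, a child known to pci(4) as
 * slot 0, function 0x45 is converted back to the traditional slot 8,
 * function 5 encoding that the downstream port re-expands in hardware.)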
*/ static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) { switch (width) { case 2: return (0xffff); case 1: return (0xff); default: return (0xffffffff); } } #endif pcib_xlate_ari(dev, b, &s, &f); return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width)); } static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { #ifdef PCI_HP struct pcib_softc *sc; sc = device_get_softc(dev); if (!pcib_present(sc)) return; #endif pcib_xlate_ari(dev, b, &s, &f); PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width); } /* * Route an interrupt across a PCI bridge. */ int pcib_route_interrupt(device_t pcib, device_t dev, int pin) { device_t bus; int parent_intpin; int intnum; /* * * The PCI standard defines a swizzle of the child-side device/intpin to * the parent-side intpin as follows. * * device = device on child bus * child_intpin = intpin on child bus slot (0-3) * parent_intpin = intpin on parent bus slot (0-3) * * parent_intpin = (device + child_intpin) % 4 */ parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; /* * Our parent is a PCI bus. Its parent must export the pcib interface * which includes the ability to route interrupts. */ bus = device_get_parent(pcib); intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { device_printf(pcib, "slot %d INT%c is routed to irq %d\n", pci_get_slot(dev), 'A' + pin - 1, intnum); } return(intnum); } /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSI) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ int pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to release an MSI-X message up to the parent bridge. */ int pcib_release_msix(device_t pcib, device_t dev, int irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to map MSI/MSI-X message up to parent bridge. */ int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); pci_ht_map_msi(pcib, *addr); return (0); } /* Pass request for device power state up to parent bridge. 
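 *
 * (A platform bus, e.g. ACPI, may use this to report the D-state a
 * device must enter before system sleep, such as PCI_POWERSTATE_D3;
 * the default here simply forwards the query to the parent.)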
*/ int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t bus; bus = device_get_parent(pcib); return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); } static int pcib_ari_enabled(device_t pcib) { struct pcib_softc *sc; sc = device_get_softc(pcib); return ((sc->flags & PCIB_ENABLE_ARI) != 0); } static int pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id) { struct pcib_softc *sc; device_t bus_dev; uint8_t bus, slot, func; if (type != PCI_ID_RID) { bus_dev = device_get_parent(pcib); return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id)); } sc = device_get_softc(pcib); if (sc->flags & PCIB_ENABLE_ARI) { bus = pci_get_bus(dev); func = pci_get_function(dev); *id = (PCI_ARI_RID(bus, func)); } else { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); *id = (PCI_RID(bus, slot, func)); } return (0); } /* * Check that the downstream port (pcib) and the endpoint device (dev) both * support ARI. If so, enable it and return 0, otherwise return an error. */ static int pcib_try_enable_ari(device_t pcib, device_t dev) { struct pcib_softc *sc; int error; uint32_t cap2; int ari_cap_off; uint32_t ari_ver; uint32_t pcie_pos; sc = device_get_softc(pcib); /* * ARI is controlled in a register in the PCIe capability structure. * If the downstream port does not have the PCIe capability structure * then it does not support ARI. */ error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); if (error != 0) return (ENODEV); /* Check that the PCIe port advertises ARI support. */ cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); if (!(cap2 & PCIEM_CAP2_ARI)) return (ENODEV); /* * Check that the endpoint device advertises ARI support via the ARI * extended capability structure. */ error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); if (error != 0) return (ENODEV); /* * Finally, check that the endpoint device supports the same version * of ARI that we do. */ ari_ver = pci_read_config(dev, ari_cap_off, 4); if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { if (bootverbose) device_printf(pcib, "Unsupported version of ARI (%d) detected\n", PCI_EXTCAP_VER(ari_ver)); return (ENXIO); } pcib_enable_ari(sc, pcie_pos); return (0); } int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature) { /* * No host firmware we have to negotiate with, so we allow * every valid feature requested. */ switch (feature) { case PCI_FEATURE_AER: case PCI_FEATURE_HP: break; default: return (EINVAL); } return (0); } int pcib_request_feature(device_t dev, enum pci_feature feature) { /* * Invoke PCIB_REQUEST_FEATURE of this bridge first in case * the firmware overrides the method of PCI-PCI bridges. */ return (PCIB_REQUEST_FEATURE(dev, dev, feature)); } /* * Pass the request to use this PCI feature up the tree. Either there's a * firmware like ACPI that's using this feature that will approve (or deny) the * request to take it over, or the platform has no such firmware, in which case * the request will be approved. If the request is approved, the OS is expected * to make use of the feature or render it harmless. */ static int pcib_request_feature_default(device_t pcib, device_t dev, enum pci_feature feature) { device_t bus; /* * Our parent is necessarily a pci bus. Its parent will either be * another pci bridge (which passes it up) or a host bridge that can * approve or reject the request. 
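 *
 * For example, a PCI_FEATURE_HP request made by the hot-plug code of a
 * deeply nested bridge climbs pcib -> pci -> pcib ... until an ACPI
 * host bridge negotiates it with firmware (via _OSC), or, absent such
 * firmware, pcib_request_feature_allow() above grants it.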
*/ bus = device_get_parent(pcib); return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature)); } static int pcib_reset_child(device_t dev, device_t child, int flags) { struct pci_devinfo *pdinfo; int error; error = 0; if (dev == NULL || device_get_parent(child) != dev) goto out; error = ENXIO; if (device_get_devclass(child) != devclass_find("pci")) goto out; pdinfo = device_get_ivars(dev); if (pdinfo->cfg.pcie.pcie_location != 0 && (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT || pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) { error = bus_helper_reset_prepare(child, flags); if (error == 0) { error = pcie_link_reset(dev, pdinfo->cfg.pcie.pcie_location); /* XXXKIB call _post even if error != 0 ? */ bus_helper_reset_post(child, flags); } } out: return (error); } diff --git a/sys/dev/pci/pci_private.h b/sys/dev/pci/pci_private.h index 6bec699bff2b..6645488d4929 100644 --- a/sys/dev/pci/pci_private.h +++ b/sys/dev/pci/pci_private.h @@ -1,187 +1,187 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef _PCI_PRIVATE_H_ #define _PCI_PRIVATE_H_ /* * Export definitions of the pci bus so that we can more easily share * it with "subclass" buses. 
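 *
 * (For instance, a bus driver such as cardbus(4) can subclass pci by
 * naming pci_driver as its base class in DEFINE_CLASS_1(), inheriting
 * the methods declared below.)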
*/ DECLARE_CLASS(pci_driver); struct pci_softc { bus_dma_tag_t sc_dma_tag; #ifdef PCI_RES_BUS struct resource *sc_bus; #endif }; extern int pci_do_power_resume; extern int pci_do_power_suspend; void pci_add_children(device_t dev, int domain, int busno); void pci_add_child(device_t bus, struct pci_devinfo *dinfo); device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask); void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov); struct pci_devinfo *pci_alloc_devinfo_method(device_t dev); int pci_attach(device_t dev); int pci_attach_common(device_t dev); int pci_detach(device_t dev); int pci_rescan_method(device_t dev); void pci_driver_added(device_t dev, driver_t *driver); int pci_ea_is_enabled(device_t dev, int rid); int pci_print_child(device_t dev, device_t child); void pci_probe_nomatch(device_t dev, device_t child); int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr); int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr); int pci_set_powerstate_method(device_t dev, device_t child, int state); int pci_get_powerstate_method(device_t dev, device_t child); uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width); void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width); int pci_enable_busmaster_method(device_t dev, device_t child); int pci_disable_busmaster_method(device_t dev, device_t child); int pci_enable_io_method(device_t dev, device_t child, int space); int pci_disable_io_method(device_t dev, device_t child, int space); int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_alloc_msi_method(device_t dev, device_t child, int *count); int pci_alloc_msix_method(device_t dev, device_t child, int *count); void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data); void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data); void pci_disable_msi_method(device_t dev, device_t child); int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors); int pci_release_msi_method(device_t dev, device_t child); int pci_msi_count_method(device_t dev, device_t child); int pci_msix_count_method(device_t dev, device_t child); int pci_msix_pba_bar_method(device_t dev, device_t child); int pci_msix_table_bar_method(device_t dev, device_t child); struct resource *pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t 
start, rman_res_t end, rman_res_t count, u_int flags); -int pci_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +int pci_release_resource(device_t dev, device_t child, + struct resource *r); int pci_activate_resource(device_t dev, device_t child, struct resource *r); int pci_deactivate_resource(device_t dev, device_t child, struct resource *r); void pci_delete_resource(device_t dev, device_t child, int type, int rid); struct resource_list *pci_get_resource_list (device_t dev, device_t child); struct pci_devinfo *pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f); void pci_print_verbose(struct pci_devinfo *dinfo); int pci_freecfg(struct pci_devinfo *dinfo); void pci_child_deleted(device_t dev, device_t child); void pci_child_detached(device_t dev, device_t child); int pci_child_location_method(device_t cbdev, device_t child, struct sbuf *sb); int pci_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb); int pci_get_device_path_method(device_t dev, device_t child, const char *locator, struct sbuf *sb); int pci_assign_interrupt_method(device_t dev, device_t child); int pci_resume(device_t dev); int pci_resume_child(device_t dev, device_t child); int pci_suspend_child(device_t dev, device_t child); bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev); void pci_child_added_method(device_t dev, device_t child); /** Restore the config register state. The state must be previously * saved with pci_cfg_save. However, the pci bus driver takes care of * that. This function will also return the device to PCI_POWERSTATE_D0 * if it is currently in a lower power mode. */ void pci_cfg_restore(device_t, struct pci_devinfo *); /** Save the config register state. Optionally set the power state to D3 * if the third argument is non-zero. */ void pci_cfg_save(device_t, struct pci_devinfo *, int); int pci_mapsize(uint64_t testval); void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64); struct pci_map *pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size); struct resource *pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags); struct resource *pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags); int pci_iov_attach_method(device_t bus, device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema, const char *name); int pci_iov_detach_method(device_t bus, device_t dev); device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); struct resource *pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_vf_release_mem_resource(device_t dev, device_t child, - int rid, struct resource *r); + struct resource *r); #endif /* _PCI_PRIVATE_H_ */ diff --git a/sys/dev/pci/pci_subr.c b/sys/dev/pci/pci_subr.c index 19081a33f159..f916e7a8da87 100644 --- a/sys/dev/pci/pci_subr.c +++ b/sys/dev/pci/pci_subr.c @@ -1,417 +1,417 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Support APIs for Host to PCI bridge drivers and drivers that * provide PCI domains. */ #include #include #include #include #include #include #include #include /* * Try to read the bus number of a host-PCI bridge using appropriate config * registers. */ int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum) { uint32_t id; id = read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == 0xffffffff) return (0); switch (id) { case 0x12258086: /* Intel 824?? */ /* XXX This is a guess */ /* *busnum = read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x84c48086: /* Intel 82454KX/GX (Orion) */ *busnum = read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual buses are attached to the PXB's). We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. */ return (0); case 0x84cb8086: switch (slot) { case 0x12: /* Intel 82454NX PXB#0, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd0, 1); break; case 0x13: /* Intel 82454NX PXB#0, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd1, 1) + 1; break; case 0x14: /* Intel 82454NX PXB#1, Bus#A */ *busnum = read_config(0, bus, 0x10, func, 0xd3, 1); break; case 0x15: /* Intel 82454NX PXB#1, Bus#B */ *busnum = read_config(0, bus, 0x10, func, 0xd4, 1) + 1; break; } break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: case 0x00061166: case 0x00081166: case 0x00091166: case 0x00101166: case 0x00111166: case 0x00171166: case 0x01011166: case 0x010f1014: case 0x01101166: case 0x02011166: case 0x02251166: case 0x03021014: *busnum = read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: *busnum = read_config(0, bus, slot, func, 0xc8, 1); break; default: /* Don't know how to read bus number. */ return 0; } return 1; } #ifdef NEW_PCIB /* * Return a pointer to a pretty name for a PCI device. If the device * has a driver attached, the device's name is used, otherwise a name * is generated from the device's PCI address. 
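 *
 * For example, a child with a driver attached reports as "em0", while
 * a driverless one reports as "pci0:3:0:0"
 * (domain:bus:slot:function).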
*/ const char * pcib_child_name(device_t child) { static char buf[64]; if (device_get_nameunit(child) != NULL) return (device_get_nameunit(child)); snprintf(buf, sizeof(buf), "pci%d:%d:%d:%d", pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (buf); } /* * Some Host-PCI bridge drivers know which resource ranges they can * decode and should only allocate subranges to child PCI devices. * This API provides a way to manage this. The bridge driver should * initialize this structure during attach and call * pcib_host_res_decodes() on each resource range it decodes. It can * then use pcib_host_res_alloc() and pcib_host_res_adjust() as helper * routines for BUS_ALLOC_RESOURCE() and BUS_ADJUST_RESOURCE(). This * API assumes that resources for any decoded ranges can be safely * allocated from the parent via bus_generic_alloc_resource(). */ int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr) { hr->hr_pcib = pcib; resource_list_init(&hr->hr_rl); return (0); } int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr) { resource_list_free(&hr->hr_rl); return (0); } int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags) { struct resource_list_entry *rle; int rid; if (bootverbose) device_printf(hr->hr_pcib, "decoding %d %srange %#jx-%#jx\n", type, flags & RF_PREFETCHABLE ? "prefetchable ": "", start, end); rid = resource_list_add_next(&hr->hr_rl, type, start, end, end - start + 1); if (flags & RF_PREFETCHABLE) { KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, rid); rle->flags = RLE_PREFETCH; } return (0); } struct resource * pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle; struct resource *r; rman_res_t new_start, new_end; if (flags & RF_PREFETCHABLE) KASSERT(type == SYS_RES_MEMORY, ("only memory is prefetchable")); rle = resource_list_find(&hr->hr_rl, type, 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ return (bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, start, end, count, flags)); } restart: /* Try to allocate from each decoded range. */ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->type != type) continue; if (((flags & RF_PREFETCHABLE) != 0) != ((rle->flags & RLE_PREFETCH) != 0)) continue; new_start = ummax(start, rle->start); new_end = ummin(end, rle->end); if (new_start > new_end || new_start + count - 1 > new_end || new_start + count < new_start) continue; r = bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid, new_start, new_end, count, flags); if (r != NULL) { if (bootverbose) device_printf(hr->hr_pcib, "allocated type %d (%#jx-%#jx) for rid %x of %s\n", type, rman_get_start(r), rman_get_end(r), *rid, pcib_child_name(dev)); return (r); } } /* * If we failed to find a prefetch range for a memory * resource, try again without prefetch. 
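 *
 * For example, if the host bridge only decodes a non-prefetchable
 * range 0x80000000-0xbfffffff, an RF_PREFETCHABLE request finds no
 * RLE_PREFETCH entry on the first pass but is satisfied from that
 * range on the retry below.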
*/ if (flags & RF_PREFETCHABLE) { flags &= ~RF_PREFETCHABLE; rle = resource_list_find(&hr->hr_rl, type, 0); goto restart; } return (NULL); } int pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { struct resource_list_entry *rle; rle = resource_list_find(&hr->hr_rl, rman_get_type(r), 0); if (rle == NULL) { /* * No decoding ranges for this resource type, just pass * the request up to the parent. */ return (bus_generic_adjust_resource(hr->hr_pcib, dev, r, start, end)); } /* Only allow adjustments that stay within a decoded range. */ for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) { if (rle->start <= start && rle->end >= end) return (bus_generic_adjust_resource(hr->hr_pcib, dev, r, start, end)); } return (ERANGE); } #ifdef PCI_RES_BUS struct pci_domain { int pd_domain; struct rman pd_bus_rman; TAILQ_ENTRY(pci_domain) pd_link; }; static TAILQ_HEAD(, pci_domain) domains = TAILQ_HEAD_INITIALIZER(domains); /* * Each PCI domain maintains its own resource manager for PCI bus * numbers in that domain. Domain objects are created on first use. * Host to PCI bridge drivers and PCI-PCI bridge drivers should * allocate their bus ranges from their domain. */ static struct pci_domain * pci_find_domain(int domain) { struct pci_domain *d; char buf[64]; int error; TAILQ_FOREACH(d, &domains, pd_link) { if (d->pd_domain == domain) return (d); } snprintf(buf, sizeof(buf), "PCI domain %d bus numbers", domain); d = malloc(sizeof(*d) + strlen(buf) + 1, M_DEVBUF, M_WAITOK | M_ZERO); d->pd_domain = domain; d->pd_bus_rman.rm_start = 0; d->pd_bus_rman.rm_end = PCI_BUSMAX; d->pd_bus_rman.rm_type = RMAN_ARRAY; strcpy((char *)(d + 1), buf); d->pd_bus_rman.rm_descr = (char *)(d + 1); error = rman_init(&d->pd_bus_rman); if (error == 0) error = rman_manage_region(&d->pd_bus_rman, 0, PCI_BUSMAX); if (error) panic("Failed to initialize PCI domain %d rman", domain); TAILQ_INSERT_TAIL(&domains, d, pd_link); return (d); } struct resource * pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_domain *d; struct resource *res; if (domain < 0 || domain > PCI_DOMAINMAX) return (NULL); d = pci_find_domain(domain); res = rman_reserve_resource(&d->pd_bus_rman, start, end, count, flags, dev); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_type(res, PCI_RES_BUS); return (res); } int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_adjust_resource(r, start, end)); } int -pci_domain_release_bus(int domain, device_t dev, int rid, struct resource *r) +pci_domain_release_bus(int domain, device_t dev, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_release_resource(r)); } int pci_domain_activate_bus(int domain, device_t dev, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); 
#endif return (rman_activate_resource(r)); } int pci_domain_deactivate_bus(int domain, device_t dev, struct resource *r) { #ifdef INVARIANTS struct pci_domain *d; #endif if (domain < 0 || domain > PCI_DOMAINMAX) return (EINVAL); #ifdef INVARIANTS d = pci_find_domain(domain); KASSERT(rman_is_region_manager(r, &d->pd_bus_rman), ("bad resource")); #endif return (rman_deactivate_resource(r)); } #endif /* PCI_RES_BUS */ #endif /* NEW_PCIB */ diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h index c95321586ac2..65b3ed31cf94 100644 --- a/sys/dev/pci/pcib_private.h +++ b/sys/dev/pci/pcib_private.h @@ -1,199 +1,199 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef __PCIB_PRIVATE_H__ #define __PCIB_PRIVATE_H__ #include #ifdef NEW_PCIB /* * Data structure and routines that Host to PCI bridge drivers can use * to restrict allocations for child devices to ranges decoded by the * bridge. */ struct pcib_host_resources { device_t hr_pcib; struct resource_list hr_rl; }; int pcib_host_res_init(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_free(device_t pcib, struct pcib_host_resources *hr); int pcib_host_res_decodes(struct pcib_host_resources *hr, int type, rman_res_t start, rman_res_t end, u_int flags); struct resource *pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev, struct resource *r, rman_res_t start, rman_res_t end); #endif /* * Export portions of generic PCI:PCI bridge support so that it can be * used by subclasses. 
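 *
 * A usage sketch for the pci_domain_*_bus() helpers declared below,
 * with hypothetical host-bridge code reserving the full bus range of
 * domain 0:
 *
 *	int rid = 0;
 *	struct resource *busres;
 *
 *	busres = pci_domain_alloc_bus(0, dev, &rid, 0, PCI_BUSMAX,
 *	    PCI_BUSMAX + 1, 0);
 *	...
 *	pci_domain_release_bus(0, dev, busres);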
*/ DECLARE_CLASS(pcib_driver); #ifdef NEW_PCIB #define WIN_IO 0x1 #define WIN_MEM 0x2 #define WIN_PMEM 0x4 struct pcib_window { pci_addr_t base; /* base address */ pci_addr_t limit; /* topmost address */ struct rman rman; struct resource **res; int count; /* size of 'res' array */ int reg; /* resource id from parent */ int valid; int mask; /* WIN_* bitmask of this window */ int step; /* log_2 of window granularity */ const char *name; }; #endif struct pcib_secbus { u_int sec; u_int sub; #if defined(NEW_PCIB) && defined(PCI_RES_BUS) device_t dev; struct rman rman; struct resource *res; const char *name; int sub_reg; #endif }; /* * Bridge-specific data. */ struct pcib_softc { device_t dev; device_t child; uint32_t flags; /* flags */ #define PCIB_SUBTRACTIVE 0x1 #define PCIB_DISABLE_MSI 0x2 #define PCIB_DISABLE_MSIX 0x4 #define PCIB_ENABLE_ARI 0x8 #define PCIB_HOTPLUG 0x10 #define PCIB_HOTPLUG_CMD_PENDING 0x20 #define PCIB_DETACH_PENDING 0x40 #define PCIB_DETACHING 0x80 u_int domain; /* domain number */ u_int pribus; /* primary bus number */ struct pcib_secbus bus; /* secondary bus numbers */ #ifdef NEW_PCIB struct pcib_window io; /* I/O port window */ struct pcib_window mem; /* memory window */ struct pcib_window pmem; /* prefetchable memory window */ #else pci_addr_t pmembase; /* base address of prefetchable memory */ pci_addr_t pmemlimit; /* topmost address of prefetchable memory */ pci_addr_t membase; /* base address of memory window */ pci_addr_t memlimit; /* topmost address of memory window */ uint32_t iobase; /* base address of port window */ uint32_t iolimit; /* topmost address of port window */ #endif uint16_t bridgectl; /* bridge control register */ uint16_t pcie_link_sta; uint16_t pcie_slot_sta; uint32_t pcie_slot_cap; struct resource *pcie_mem; struct resource *pcie_irq; void *pcie_ihand; struct task pcie_hp_task; struct timeout_task pcie_ab_task; struct timeout_task pcie_cc_task; struct timeout_task pcie_dll_task; struct mtx *pcie_hp_lock; }; #define PCIB_HP_LOCK(sc) mtx_lock((sc)->pcie_hp_lock) #define PCIB_HP_UNLOCK(sc) mtx_unlock((sc)->pcie_hp_lock) #define PCIB_HP_LOCK_ASSERT(sc) mtx_assert((sc)->pcie_hp_lock, MA_OWNED) #define PCIB_SUPPORTED_ARI_VER 1 typedef uint32_t pci_read_config_fn(int d, int b, int s, int f, int reg, int width); int host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func, uint8_t *busnum); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct resource *pci_domain_alloc_bus(int domain, device_t dev, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_domain_adjust_bus(int domain, device_t dev, struct resource *r, rman_res_t start, rman_res_t end); -int pci_domain_release_bus(int domain, device_t dev, int rid, +int pci_domain_release_bus(int domain, device_t dev, struct resource *r); int pci_domain_activate_bus(int domain, device_t dev, struct resource *r); int pci_domain_deactivate_bus(int domain, device_t dev, struct resource *r); struct resource *pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); void pcib_free_secbus(device_t dev, struct pcib_secbus *bus); void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count); #endif int pcib_attach(device_t dev); int pcib_attach_child(device_t dev); void pcib_attach_common(device_t dev); void pcib_bridge_init(device_t dev); #ifdef NEW_PCIB const char *pcib_child_name(device_t child); #endif int pcib_detach(device_t dev); int pcib_read_ivar(device_t dev, 
device_t child, int which, uintptr_t *result); int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pcib_maxslots(device_t dev); int pcib_maxfuncs(device_t dev); int pcib_route_interrupt(device_t pcib, device_t dev, int pin); int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs); int pcib_alloc_msix(device_t pcib, device_t dev, int *irq); int pcib_release_msix(device_t pcib, device_t dev, int irq); int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); int pcib_get_id(device_t pcib, device_t dev, enum pci_id_type type, uintptr_t *id); void pcib_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); int pcib_request_feature(device_t dev, enum pci_feature feature); int pcib_request_feature_allow(device_t pcib, device_t dev, enum pci_feature feature); #endif diff --git a/sys/dev/pci/vga_pci.c b/sys/dev/pci/vga_pci.c index 57fec1780e3c..16ee93bc8d99 100644 --- a/sys/dev/pci/vga_pci.c +++ b/sys/dev/pci/vga_pci.c @@ -1,788 +1,784 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2005 John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Simple driver for PCI VGA display devices. Drivers such as agp(4) and * drm(4) should attach as children of this device. * * XXX: The vgapci name is a hack until we somehow merge the isa vga driver * in or rename it. */ #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #endif #include #include #include /* To re-POST the card. */ struct vga_resource { struct resource *vr_res; int vr_refs; }; struct vga_pci_softc { device_t vga_msi_child; /* Child driver using MSI. 
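 *
 * Only one child (typically the drmn child added at attach) may own
 * MSI/MSI-X at a time; vga_pci_alloc_msi() and vga_pci_alloc_msix()
 * below return EBUSY for any second claimant until the owner calls
 * vga_pci_release_msi().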
*/ struct vga_resource vga_bars[PCIR_MAX_BAR_0 + 1]; struct vga_resource vga_bios; }; SYSCTL_DECL(_hw_pci); static struct vga_resource *lookup_res(struct vga_pci_softc *sc, int rid); static struct resource *vga_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -static int vga_pci_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *r); +static int vga_pci_release_resource(device_t dev, device_t child, + struct resource *r); int vga_pci_default_unit = -1; SYSCTL_INT(_hw_pci, OID_AUTO, default_vgapci_unit, CTLFLAG_RDTUN, &vga_pci_default_unit, -1, "Default VGA-compatible display"); int vga_pci_is_boot_display(device_t dev) { int unit; device_t pcib; uint16_t config; /* Check that the given device is a video card */ if ((pci_get_class(dev) != PCIC_DISPLAY && (pci_get_class(dev) != PCIC_OLD || pci_get_subclass(dev) != PCIS_OLD_VGA))) return (0); unit = device_get_unit(dev); if (vga_pci_default_unit >= 0) { /* * The boot display device was determined by a previous * call to this function, or the user forced it using * the hw.pci.default_vgapci_unit tunable. */ return (vga_pci_default_unit == unit); } /* * The primary video card used as a boot display must have the * "I/O" and "Memory Address Space Decoding" bits set in its * Command register. * * Furthermore, if the card is attached to a bridge, instead of * the root PCI bus, the bridge must have the "VGA Enable" bit * set in its Control register. */ pcib = device_get_parent(device_get_parent(dev)); if (device_get_devclass(device_get_parent(pcib)) == devclass_find("pci")) { /* * The parent bridge is a PCI-to-PCI bridge: check the * value of the "VGA Enable" bit. */ config = pci_read_config(pcib, PCIR_BRIDGECTL_1, 2); if ((config & PCIB_BCR_VGA_ENABLE) == 0) return (0); } config = pci_read_config(dev, PCIR_COMMAND, 2); if ((config & (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) == 0) return (0); /* * Disable interrupts until a chipset driver is loaded for * this PCI device. Else unhandled display adapter interrupts * might freeze the CPU. */ pci_write_config(dev, PCIR_COMMAND, config | PCIM_CMD_INTxDIS, 2); /* This video card is the boot display: record its unit number. */ vga_pci_default_unit = unit; device_set_flags(dev, 1); return (1); } static void vga_pci_reset(device_t dev) { int ps; /* * FLR is unsupported on GPUs so attempt a power-management reset by cycling * the device in/out of D3 state. * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); } void * vga_pci_map_bios(device_t dev, size_t *size) { struct vga_resource *vr; struct resource *res; device_t pcib; uint32_t rom_addr; uint16_t config; volatile unsigned char *bios; int i, rid, found; #if defined(__amd64__) || defined(__i386__) if (vga_pci_is_boot_display(dev)) { /* * On x86, the System BIOS copy the default display * device's Video BIOS at a fixed location in system * memory (0xC0000, 128 kBytes long) at boot time. * * We use this copy for the default boot device, because * the original ROM may not be valid after boot. 
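 *
 * (The matching vga_pci_unmap_bios() recognizes this case and undoes
 * the mapping with pmap_unmapdev() instead of releasing a PCI
 * resource.)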
*/ *size = VGA_PCI_BIOS_SHADOW_SIZE; return (pmap_mapbios(VGA_PCI_BIOS_SHADOW_ADDR, *size)); } #endif pcib = device_get_parent(device_get_parent(dev)); if (device_get_devclass(device_get_parent(pcib)) == devclass_find("pci")) { /* * The parent bridge is a PCI-to-PCI bridge: check the * value of the "VGA Enable" bit. */ config = pci_read_config(pcib, PCIR_BRIDGECTL_1, 2); if ((config & PCIB_BCR_VGA_ENABLE) == 0) { config |= PCIB_BCR_VGA_ENABLE; pci_write_config(pcib, PCIR_BRIDGECTL_1, config, 2); } } switch(pci_read_config(dev, PCIR_HDRTYPE, 1)) { case PCIM_HDRTYPE_BRIDGE: rid = PCIR_BIOS_1; break; case PCIM_HDRTYPE_CARDBUS: rid = 0; break; default: rid = PCIR_BIOS; break; } if (rid == 0) return (NULL); res = vga_pci_alloc_resource(dev, NULL, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (res == NULL) { device_printf(dev, "vga_pci_alloc_resource failed\n"); return (NULL); } bios = rman_get_virtual(res); *size = rman_get_size(res); for (found = i = 0; i < hz; i++) { found = (bios[0] == 0x55 && bios[1] == 0xaa); if (found) break; pause("vgabios", 1); } if (found) return (__DEVOLATILE(void *, bios)); if (bootverbose) device_printf(dev, "initial ROM mapping failed -- resetting\n"); /* * Enable ROM decode */ vga_pci_reset(dev); rom_addr = pci_read_config(dev, rid, 4); rom_addr &= 0x7ff; rom_addr |= rman_get_start(res) | 0x1; pci_write_config(dev, rid, rom_addr, 4); vr = lookup_res(device_get_softc(dev), rid); - vga_pci_release_resource(dev, NULL, SYS_RES_MEMORY, rid, - vr->vr_res); + vga_pci_release_resource(dev, NULL, vr->vr_res); /* * re-allocate */ res = vga_pci_alloc_resource(dev, NULL, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (res == NULL) { device_printf(dev, "vga_pci_alloc_resource failed\n"); return (NULL); } bios = rman_get_virtual(res); *size = rman_get_size(res); for (found = i = 0; i < 3*hz; i++) { found = (bios[0] == 0x55 && bios[1] == 0xaa); if (found) break; pause("vgabios", 1); } if (found) return (__DEVOLATILE(void *, bios)); device_printf(dev, "ROM mapping failed\n"); vr = lookup_res(device_get_softc(dev), rid); - vga_pci_release_resource(dev, NULL, SYS_RES_MEMORY, rid, - vr->vr_res); + vga_pci_release_resource(dev, NULL, vr->vr_res); return (NULL); } void vga_pci_unmap_bios(device_t dev, void *bios) { struct vga_resource *vr; int rid; if (bios == NULL) { return; } #if defined(__amd64__) || defined(__i386__) if (vga_pci_is_boot_display(dev)) { /* We mapped the BIOS shadow copy located at 0xC0000. */ pmap_unmapdev(bios, VGA_PCI_BIOS_SHADOW_SIZE); return; } #endif switch(pci_read_config(dev, PCIR_HDRTYPE, 1)) { case PCIM_HDRTYPE_BRIDGE: rid = PCIR_BIOS_1; break; case PCIM_HDRTYPE_CARDBUS: rid = 0; break; default: rid = PCIR_BIOS; break; } if (rid == 0) return; /* * Look up the PCIR_BIOS resource in our softc. It should match * the address we returned previously. 
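 *
 * (The probe loops in vga_pci_map_bios() above poll for the 0x55,0xaa
 * expansion-ROM header signature, and the low bit OR'ed into the ROM
 * BAR there is the ROM address-decode enable.)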
*/ vr = lookup_res(device_get_softc(dev), rid); KASSERT(vr->vr_res != NULL, ("vga_pci_unmap_bios: bios not mapped")); KASSERT(rman_get_virtual(vr->vr_res) == bios, ("vga_pci_unmap_bios: mismatch")); - vga_pci_release_resource(dev, NULL, SYS_RES_MEMORY, rid, - vr->vr_res); + vga_pci_release_resource(dev, NULL, vr->vr_res); } int vga_pci_repost(device_t dev) { #if defined(__amd64__) || defined(__i386__) x86regs_t regs; if (!vga_pci_is_boot_display(dev)) return (EINVAL); if (x86bios_get_orm(VGA_PCI_BIOS_SHADOW_ADDR) == NULL) return (ENOTSUP); x86bios_init_regs(®s); regs.R_AH = pci_get_bus(dev); regs.R_AL = (pci_get_slot(dev) << 3) | (pci_get_function(dev) & 0x07); regs.R_DL = 0x80; device_printf(dev, "REPOSTing\n"); x86bios_call(®s, X86BIOS_PHYSTOSEG(VGA_PCI_BIOS_SHADOW_ADDR + 3), X86BIOS_PHYSTOOFF(VGA_PCI_BIOS_SHADOW_ADDR + 3)); x86bios_get_intr(0x10); return (0); #else return (ENOTSUP); #endif } static int vga_pci_probe(device_t dev) { switch (pci_get_class(dev)) { case PCIC_DISPLAY: break; case PCIC_OLD: if (pci_get_subclass(dev) != PCIS_OLD_VGA) return (ENXIO); break; default: return (ENXIO); } /* Probe default display. */ vga_pci_is_boot_display(dev); device_set_desc(dev, "VGA-compatible display"); return (BUS_PROBE_GENERIC); } static int vga_pci_attach(device_t dev) { bus_generic_probe(dev); /* Always create a drmn child for now to make it easier on drm. */ device_add_child(dev, "drmn", -1); bus_generic_attach(dev); if (vga_pci_is_boot_display(dev)) device_printf(dev, "Boot video device\n"); return (0); } static int vga_pci_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vga_pci_detach(device_t dev) { int error; error = bus_generic_detach(dev); if (error == 0) error = device_delete_children(dev); return (error); } static int vga_pci_resume(device_t dev) { return (bus_generic_resume(dev)); } /* Bus interface. */ static int vga_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } static int vga_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static int vga_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { return (BUS_SETUP_INTR(device_get_parent(dev), dev, irq, flags, filter, intr, arg, cookiep)); } static int vga_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { return (BUS_TEARDOWN_INTR(device_get_parent(dev), dev, irq, cookie)); } static struct vga_resource * lookup_res(struct vga_pci_softc *sc, int rid) { int bar; if (rid == PCIR_BIOS) return (&sc->vga_bios); bar = PCI_RID2BAR(rid); if (bar >= 0 && bar <= PCIR_MAX_BAR_0) return (&sc->vga_bars[bar]); return (NULL); } static struct resource * vga_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct vga_resource *vr; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we cache the resource so that we only allocate it * from the PCI bus once. 
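 *
 * Repeat allocations of a cached BAR return the same struct resource
 * with vr_refs incremented; vga_pci_release_resource() below recovers
 * the type and rid from the resource itself via rman_get_type() and
 * rman_get_rid(), and only releases the underlying PCI resource when
 * the last reference goes away.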
*/ vr = lookup_res(device_get_softc(dev), *rid); if (vr == NULL) return (NULL); if (vr->vr_res == NULL) vr->vr_res = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (vr->vr_res != NULL) vr->vr_refs++; return (vr->vr_res); } return (bus_alloc_resource(dev, type, rid, start, end, count, flags)); } static int -vga_pci_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vga_pci_release_resource(device_t dev, device_t child, struct resource *r) { struct vga_resource *vr; int error; - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we release the resource from the PCI bus * when the last child reference goes away. */ - vr = lookup_res(device_get_softc(dev), rid); + vr = lookup_res(device_get_softc(dev), rman_get_rid(r)); if (vr == NULL) return (EINVAL); if (vr->vr_res == NULL) return (EINVAL); KASSERT(vr->vr_res == r, ("vga_pci resource mismatch")); if (vr->vr_refs > 1) { vr->vr_refs--; return (0); } KASSERT(vr->vr_refs > 0, ("vga_pci resource reference count underflow")); - error = bus_release_resource(dev, type, rid, r); + error = bus_release_resource(dev, r); if (error == 0) { vr->vr_res = NULL; vr->vr_refs = 0; } return (error); } - return (bus_release_resource(dev, type, rid, r)); + return (bus_release_resource(dev, r)); } /* PCI interface. */ static uint32_t vga_pci_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } static void vga_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } static int vga_pci_enable_busmaster(device_t dev, device_t child) { return (pci_enable_busmaster(dev)); } static int vga_pci_disable_busmaster(device_t dev, device_t child) { return (pci_disable_busmaster(dev)); } static int vga_pci_enable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_enable_io\n", device_get_nameunit(child)); return (pci_enable_io(dev, space)); } static int vga_pci_disable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_disable_io\n", device_get_nameunit(child)); return (pci_disable_io(dev, space)); } static int vga_pci_get_vpd_ident(device_t dev, device_t child, const char **identptr) { return (pci_get_vpd_ident(dev, identptr)); } static int vga_pci_get_vpd_readonly(device_t dev, device_t child, const char *kw, const char **vptr) { return (pci_get_vpd_readonly(dev, kw, vptr)); } static int vga_pci_set_powerstate(device_t dev, device_t child, int state) { device_printf(dev, "child %s requested pci_set_powerstate\n", device_get_nameunit(child)); return (pci_set_powerstate(dev, state)); } static int vga_pci_get_powerstate(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_get_powerstate\n", device_get_nameunit(child)); return (pci_get_powerstate(dev)); } static int vga_pci_assign_interrupt(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_assign_interrupt\n", device_get_nameunit(child)); return (PCI_ASSIGN_INTERRUPT(device_get_parent(dev), dev)); } static int vga_pci_find_cap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_cap(dev, capability, capreg)); } static int vga_pci_find_next_cap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_cap(dev, capability, start, capreg)); } static int vga_pci_find_extcap(device_t dev, device_t child, int capability, int 
*capreg) { return (pci_find_extcap(dev, capability, capreg)); } static int vga_pci_find_next_extcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_extcap(dev, capability, start, capreg)); } static int vga_pci_find_htcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_htcap(dev, capability, capreg)); } static int vga_pci_find_next_htcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_htcap(dev, capability, start, capreg)); } static int vga_pci_alloc_msi(device_t dev, device_t child, int *count) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != NULL) return (EBUSY); error = pci_alloc_msi(dev, count); if (error == 0) sc->vga_msi_child = child; return (error); } static int vga_pci_alloc_msix(device_t dev, device_t child, int *count) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != NULL) return (EBUSY); error = pci_alloc_msix(dev, count); if (error == 0) sc->vga_msi_child = child; return (error); } static int vga_pci_remap_msix(device_t dev, device_t child, int count, const u_int *vectors) { struct vga_pci_softc *sc; sc = device_get_softc(dev); if (sc->vga_msi_child != child) return (ENXIO); return (pci_remap_msix(dev, count, vectors)); } static int vga_pci_release_msi(device_t dev, device_t child) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != child) return (ENXIO); error = pci_release_msi(dev); if (error == 0) sc->vga_msi_child = NULL; return (error); } static int vga_pci_msi_count(device_t dev, device_t child) { return (pci_msi_count(dev)); } static int vga_pci_msix_count(device_t dev, device_t child) { return (pci_msix_count(dev)); } static bus_dma_tag_t vga_pci_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t vga_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vga_pci_probe), DEVMETHOD(device_attach, vga_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, vga_pci_suspend), DEVMETHOD(device_detach, vga_pci_detach), DEVMETHOD(device_resume, vga_pci_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, vga_pci_read_ivar), DEVMETHOD(bus_write_ivar, vga_pci_write_ivar), DEVMETHOD(bus_setup_intr, vga_pci_setup_intr), DEVMETHOD(bus_teardown_intr, vga_pci_teardown_intr), DEVMETHOD(bus_alloc_resource, vga_pci_alloc_resource), DEVMETHOD(bus_release_resource, vga_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_dma_tag, vga_pci_get_dma_tag), /* PCI interface */ DEVMETHOD(pci_read_config, vga_pci_read_config), DEVMETHOD(pci_write_config, vga_pci_write_config), DEVMETHOD(pci_enable_busmaster, vga_pci_enable_busmaster), DEVMETHOD(pci_disable_busmaster, vga_pci_disable_busmaster), DEVMETHOD(pci_enable_io, vga_pci_enable_io), DEVMETHOD(pci_disable_io, vga_pci_disable_io), DEVMETHOD(pci_get_vpd_ident, vga_pci_get_vpd_ident), DEVMETHOD(pci_get_vpd_readonly, vga_pci_get_vpd_readonly), DEVMETHOD(pci_get_powerstate, vga_pci_get_powerstate), DEVMETHOD(pci_set_powerstate, vga_pci_set_powerstate), DEVMETHOD(pci_assign_interrupt, vga_pci_assign_interrupt), DEVMETHOD(pci_find_cap, vga_pci_find_cap), DEVMETHOD(pci_find_next_cap, vga_pci_find_next_cap), DEVMETHOD(pci_find_extcap, vga_pci_find_extcap), DEVMETHOD(pci_find_next_extcap, 
vga_pci_find_next_extcap), DEVMETHOD(pci_find_htcap, vga_pci_find_htcap), DEVMETHOD(pci_find_next_htcap, vga_pci_find_next_htcap), DEVMETHOD(pci_alloc_msi, vga_pci_alloc_msi), DEVMETHOD(pci_alloc_msix, vga_pci_alloc_msix), DEVMETHOD(pci_remap_msix, vga_pci_remap_msix), DEVMETHOD(pci_release_msi, vga_pci_release_msi), DEVMETHOD(pci_msi_count, vga_pci_msi_count), DEVMETHOD(pci_msix_count, vga_pci_msix_count), { 0, 0 } }; static driver_t vga_pci_driver = { "vgapci", vga_pci_methods, sizeof(struct vga_pci_softc), }; DRIVER_MODULE(vgapci, pci, vga_pci_driver, 0, 0); MODULE_DEPEND(vgapci, x86bios, 1, 1, 1); diff --git a/sys/dev/ppc/ppc.c b/sys/dev/ppc/ppc.c index b8a80dca76ec..e5ef392c4063 100644 --- a/sys/dev/ppc/ppc.c +++ b/sys/dev/ppc/ppc.c @@ -1,1998 +1,1988 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997-2000 Nicolas Souchu * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ppc.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #include #include #include #endif #include #include #include #include #include "ppbus_if.h" static void ppcintr(void *arg); #define IO_LPTSIZE_EXTENDED 8 /* "Extended" LPT controllers */ #define IO_LPTSIZE_NORMAL 4 /* "Normal" LPT controllers */ #define LOG_PPC(function, ppc, string) \ if (bootverbose) printf("%s: %s\n", function, string) #define DEVTOSOFTC(dev) ((struct ppc_data *)device_get_softc(dev)) /* * We use critical enter/exit for the simple config locking needed to * detect the devices. We just want to make sure that both of our writes * happen without someone else also writing to those config registers. Since * we just do this at startup, Giant keeps multiple threads from executing, * and critical_enter() then is all that's needed to keep us from being preempted * during the critical sequences with the hardware. * * Note: this doesn't prevent multiple threads from putting the chips into * config mode, but since we only do that to detect the type at startup the * extra overhead isn't needed since Giant protects us from multiple entry * and no other code changes these registers. 
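 * In outline, every probe below brackets its magic write pair like this
 * (csr and key stand in for the chip-specific index port and ID code):
 *
 *	PPC_CONFIG_LOCK(ppc);		critical_enter()
 *	outb(csr, key);			the two writes must reach the
 *	outb(csr, key);			chip back-to-back, unpreempted
 *	PPC_CONFIG_UNLOCK(ppc);		critical_exit()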
*/ #define PPC_CONFIG_LOCK(ppc) critical_enter() #define PPC_CONFIG_UNLOCK(ppc) critical_exit() const char ppc_driver_name[] = "ppc"; static char *ppc_models[] = { "SMC-like", "SMC FDC37C665GT", "SMC FDC37C666GT", "PC87332", "PC87306", "82091AA", "Generic", "W83877F", "W83877AF", "Winbond", "PC87334", "SMC FDC37C935", "PC87303", 0 }; /* list of available modes */ static char *ppc_avms[] = { "COMPATIBLE", "NIBBLE-only", "PS2-only", "PS2/NIBBLE", "EPP-only", "EPP/NIBBLE", "EPP/PS2", "EPP/PS2/NIBBLE", "ECP-only", "ECP/NIBBLE", "ECP/PS2", "ECP/PS2/NIBBLE", "ECP/EPP", "ECP/EPP/NIBBLE", "ECP/EPP/PS2", "ECP/EPP/PS2/NIBBLE", 0 }; /* list of current executing modes * Note that few modes do not actually exist. */ static char *ppc_modes[] = { "COMPATIBLE", "NIBBLE", "PS/2", "PS/2", "EPP", "EPP", "EPP", "EPP", "ECP", "ECP", "ECP+PS2", "ECP+PS2", "ECP+EPP", "ECP+EPP", "ECP+EPP", "ECP+EPP", 0 }; static char *ppc_epp_protocol[] = { " (EPP 1.9)", " (EPP 1.7)", 0 }; #ifdef __i386__ /* * BIOS printer list - used by BIOS probe. */ #define BIOS_PPC_PORTS 0x408 #define BIOS_PORTS ((short *)BIOS_PADDRTOVADDR(BIOS_PPC_PORTS)) #define BIOS_MAX_PPC 4 #endif /* * ppc_ecp_sync() XXX */ int ppc_ecp_sync(device_t dev) { int i, r; struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); if (!(ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_dtm & PPB_ECP)) return 0; r = r_ecr(ppc); if ((r & 0xe0) != PPC_ECR_EPP) return 0; for (i = 0; i < 100; i++) { r = r_ecr(ppc); if (r & 0x1) return 0; DELAY(100); } device_printf(dev, "ECP sync failed as data still present in FIFO.\n"); return 0; } /* * ppc_detect_fifo() * * Detect parallel port FIFO */ static int ppc_detect_fifo(struct ppc_data *ppc) { char ecr_sav; char ctr_sav, ctr; short i; /* save registers */ ecr_sav = r_ecr(ppc); ctr_sav = r_ctr(ppc); /* enter ECP configuration mode, no interrupt, no DMA */ w_ecr(ppc, 0xf4); /* read PWord size - transfers in FIFO mode must be PWord aligned */ ppc->ppc_pword = (r_cnfgA(ppc) & PPC_PWORD_MASK); /* XXX 16 and 32 bits implementations not supported */ if (ppc->ppc_pword != PPC_PWORD_8) { LOG_PPC(__func__, ppc, "PWord not supported"); goto error; } w_ecr(ppc, 0x34); /* byte mode, no interrupt, no DMA */ ctr = r_ctr(ppc); w_ctr(ppc, ctr | PCD); /* set direction to 1 */ /* enter ECP test mode, no interrupt, no DMA */ w_ecr(ppc, 0xd4); /* flush the FIFO */ for (i=0; i<1024; i++) { if (r_ecr(ppc) & PPC_FIFO_EMPTY) break; r_fifo(ppc); } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't flush FIFO"); goto error; } /* enable interrupts, no DMA */ w_ecr(ppc, 0xd0); /* determine readIntrThreshold * fill the FIFO until serviceIntr is set */ for (i=0; i<1024; i++) { w_fifo(ppc, (char)i); if (!ppc->ppc_rthr && (r_ecr(ppc) & PPC_SERVICE_INTR)) { /* readThreshold reached */ ppc->ppc_rthr = i+1; } if (r_ecr(ppc) & PPC_FIFO_FULL) { ppc->ppc_fifo = i+1; break; } } if (i >= 1024) { LOG_PPC(__func__, ppc, "can't fill FIFO"); goto error; } w_ecr(ppc, 0xd4); /* test mode, no interrupt, no DMA */ w_ctr(ppc, ctr & ~PCD); /* set direction to 0 */ w_ecr(ppc, 0xd0); /* enable interrupts */ /* determine writeIntrThreshold * empty the FIFO until serviceIntr is set */ for (i=ppc->ppc_fifo; i>0; i--) { if (r_fifo(ppc) != (char)(ppc->ppc_fifo-i)) { LOG_PPC(__func__, ppc, "invalid data in FIFO"); goto error; } if (r_ecr(ppc) & PPC_SERVICE_INTR) { /* writeIntrThreshold reached */ ppc->ppc_wthr = ppc->ppc_fifo - i+1; } /* if FIFO empty before the last byte, error */ if (i>1 && (r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "data lost in FIFO"); goto error; } } /* 
FIFO must be empty after the last byte */ if (!(r_ecr(ppc) & PPC_FIFO_EMPTY)) { LOG_PPC(__func__, ppc, "can't empty the FIFO"); goto error; } w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (0); error: w_ctr(ppc, ctr_sav); w_ecr(ppc, ecr_sav); return (EINVAL); } static int ppc_detect_port(struct ppc_data *ppc) { w_ctr(ppc, 0x0c); /* To avoid missing PS2 ports */ w_dtr(ppc, 0xaa); if (r_dtr(ppc) != 0xaa) return (0); return (1); } /* * EPP timeout, according to the PC87332 manual * Semantics of clearing EPP timeout bit. * PC87332 - reading SPP_STR does it... * SMC - write 1 to EPP timeout bit XXX * Others - (?) write 0 to EPP timeout bit */ static void ppc_reset_epp_timeout(struct ppc_data *ppc) { char r; r = r_str(ppc); w_str(ppc, r | 0x1); w_str(ppc, r & 0xfe); return; } static int ppc_check_epp_timeout(struct ppc_data *ppc) { ppc_reset_epp_timeout(ppc); return (!(r_str(ppc) & TIMEOUT)); } /* * Configure current operating mode */ static int ppc_generic_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) return (EINVAL); else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } /* * The ppc driver is free to choose options like FIFO or DMA * if ECP mode is available. * * The 'RAW' option allows the upper drivers to force the ppc mode * even with FIFO, DMA available. */ static int ppc_smclike_setmode(struct ppc_data *ppc, int mode) { u_char ecr = 0; /* check if mode is available */ if (mode && !(ppc->ppc_avm & mode)) return (EINVAL); /* if ECP mode, configure ecr register */ if ((ppc->ppc_avm & PPB_ECP) || (ppc->ppc_dtm & PPB_ECP)) { /* return to byte mode (keeping direction bit), * no interrupt, no DMA to be able to change to * ECP or EPP mode */ w_ecr(ppc, PPC_ECR_RESET); ecr = PPC_DISABLE_INTR; if (mode & PPB_EPP) /* select EPP mode */ ecr |= PPC_ECR_EPP; else if (mode & PPB_ECP) /* select ECP mode */ ecr |= PPC_ECR_ECP; else if (mode & PPB_PS2) /* select PS2 mode with ECP */ ecr |= PPC_ECR_PS2; else /* select COMPATIBLE/NIBBLE mode */ ecr |= PPC_ECR_STD; w_ecr(ppc, ecr); } ppc->ppc_mode = mode; return (0); } #ifdef PPC_PROBE_CHIPSET /* * ppc_pc873xx_detect * * Probe for a Natsemi PC873xx-family part. * * References in this function are to the National Semiconductor * PC87332 datasheet TL/C/11930, May 1995 revision. 
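 * A compressed sketch of the sequence used below (idport iterates over
 * pc873xx_basetab):
 *
 *	(void)inb(idport);		four dummy reads run the
 *	...				power-on ID cycle (2.2,1.)
 *	outb(idport, PC873_SID);	select the SID register
 *	val = inb(idport + 1);		high bits identify the part,
 *					e.g. 0x1x = PC87332, 0x7x = PC87306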
*/ static int pc873xx_basetab[] = {0x0398, 0x026e, 0x015c, 0x002e, 0}; static int pc873xx_porttab[] = {0x0378, 0x03bc, 0x0278, 0}; static int pc873xx_irqtab[] = {5, 7, 5, 0}; static int pc873xx_regstab[] = { PC873_FER, PC873_FAR, PC873_PTR, PC873_FCR, PC873_PCR, PC873_PMC, PC873_TUP, PC873_SID, PC873_PNP0, PC873_PNP1, PC873_LPTBA, -1 }; static char *pc873xx_rnametab[] = { "FER", "FAR", "PTR", "FCR", "PCR", "PMC", "TUP", "SID", "PNP0", "PNP1", "LPTBA", NULL }; static int ppc_pc873xx_detect(struct ppc_data *ppc, int chipset_mode) /* XXX mode never forced */ { static int index = 0; int idport, irq; int ptr, pcr, val, i; while ((idport = pc873xx_basetab[index++])) { /* XXX should check first to see if this location is already claimed */ /* * Pull the 873xx through the power-on ID cycle (2.2,1.). * We can't use this to locate the chip as it may already have * been used by the BIOS. */ (void)inb(idport); (void)inb(idport); (void)inb(idport); (void)inb(idport); /* * Read the SID byte. Possible values are : * * 01010xxx PC87334 * 0001xxxx PC87332 * 01110xxx PC87306 * 00110xxx PC87303 */ outb(idport, PC873_SID); val = inb(idport + 1); if ((val & 0xf0) == 0x10) { ppc->ppc_model = NS_PC87332; } else if ((val & 0xf8) == 0x70) { ppc->ppc_model = NS_PC87306; } else if ((val & 0xf8) == 0x50) { ppc->ppc_model = NS_PC87334; } else if ((val & 0xf8) == 0x40) { /* Should be 0x30 by the documentation, but probing yielded 0x40... */ ppc->ppc_model = NS_PC87303; } else { if (bootverbose && (val != 0xff)) printf("PC873xx probe at 0x%x got unknown ID 0x%x\n", idport, val); continue ; /* not recognised */ } /* print registers */ if (bootverbose) { printf("PC873xx"); for (i=0; pc873xx_regstab[i] != -1; i++) { outb(idport, pc873xx_regstab[i]); printf(" %s=0x%x", pc873xx_rnametab[i], inb(idport + 1) & 0xff); } printf("\n"); } /* * We think we have one. Is it enabled and where we want it to be? */ outb(idport, PC873_FER); val = inb(idport + 1); if (!(val & PC873_PPENABLE)) { if (bootverbose) printf("PC873xx parallel port disabled\n"); continue; } outb(idport, PC873_FAR); val = inb(idport + 1); /* XXX we should create a driver instance for every port found */ if (pc873xx_porttab[val & 0x3] != ppc->ppc_base) { /* First try to change the port address to that requested... */ switch (ppc->ppc_base) { case 0x378: val &= 0xfc; break; case 0x3bc: val &= 0xfd; break; case 0x278: val &= 0xfe; break; default: val &= 0xfd; break; } outb(idport, PC873_FAR); outb(idport + 1, val); outb(idport + 1, val); /* Check for success by reading back the value we supposedly wrote and comparing...*/ outb(idport, PC873_FAR); val = inb(idport + 1) & 0x3; /* If we fail, report the failure... */ if (pc873xx_porttab[val] != ppc->ppc_base) { if (bootverbose) printf("PC873xx at 0x%x not for driver at port 0x%x\n", pc873xx_porttab[val], ppc->ppc_base); } continue; } outb(idport, PC873_PTR); ptr = inb(idport + 1); /* get irq settings */ if (ppc->ppc_base == 0x378) irq = (ptr & PC873_LPTBIRQ7) ? 
7 : 5; else irq = pc873xx_irqtab[val]; if (bootverbose) printf("PC873xx irq %d at 0x%x\n", irq, ppc->ppc_base); /* * Check if irq settings are correct */ if (irq != ppc->ppc_irq) { /* * If the chipset is not locked and base address is 0x378, * we have another chance */ if (ppc->ppc_base == 0x378 && !(ptr & PC873_CFGLOCK)) { if (ppc->ppc_irq == 7) { outb(idport + 1, (ptr | PC873_LPTBIRQ7)); outb(idport + 1, (ptr | PC873_LPTBIRQ7)); } else { outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); outb(idport + 1, (ptr & ~PC873_LPTBIRQ7)); } if (bootverbose) printf("PC873xx irq set to %d\n", ppc->ppc_irq); } else { if (bootverbose) printf("PC873xx sorry, can't change irq setting\n"); } } else { if (bootverbose) printf("PC873xx irq settings are correct\n"); } outb(idport, PC873_PCR); pcr = inb(idport + 1); if ((ptr & PC873_CFGLOCK) || !chipset_mode) { if (bootverbose) printf("PC873xx %s", (ptr & PC873_CFGLOCK)?"locked":"unlocked"); ppc->ppc_avm |= PPB_NIBBLE; if (bootverbose) printf(", NIBBLE"); if (pcr & PC873_EPPEN) { ppc->ppc_avm |= PPB_EPP; if (bootverbose) printf(", EPP"); if (pcr & PC873_EPP19) ppc->ppc_epp = EPP_1_9; else ppc->ppc_epp = EPP_1_7; if ((ppc->ppc_model == NS_PC87332) && bootverbose) { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EPPRDIR) printf(", Regular mode"); else printf(", Automatic mode"); } } else if (pcr & PC873_ECPEN) { ppc->ppc_avm |= PPB_ECP; if (bootverbose) printf(", ECP"); if (pcr & PC873_ECPCLK) { /* XXX */ ppc->ppc_avm |= PPB_PS2; if (bootverbose) printf(", PS/2"); } } else { outb(idport, PC873_PTR); ptr = inb(idport + 1); if (ptr & PC873_EXTENDED) { ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(", SPP"); } } } else { if (bootverbose) printf("PC873xx unlocked"); if (chipset_mode & PPB_ECP) { if ((chipset_mode & PPB_EPP) && bootverbose) printf(", ECP+EPP not supported"); pcr &= ~PC873_EPPEN; pcr |= (PC873_ECPEN | PC873_ECPCLK); /* XXX */ outb(idport + 1, pcr); outb(idport + 1, pcr); if (bootverbose) printf(", ECP"); } else if (chipset_mode & PPB_EPP) { pcr &= ~(PC873_ECPEN | PC873_ECPCLK); pcr |= (PC873_EPPEN | PC873_EPP19); outb(idport + 1, pcr); outb(idport + 1, pcr); ppc->ppc_epp = EPP_1_9; /* XXX */ if (bootverbose) printf(", EPP1.9"); /* enable automatic direction turnover */ if (ppc->ppc_model == NS_PC87332) { outb(idport, PC873_PTR); ptr = inb(idport + 1); ptr &= ~PC873_EPPRDIR; outb(idport + 1, ptr); outb(idport + 1, ptr); if (bootverbose) printf(", Automatic mode"); } } else { pcr &= ~(PC873_ECPEN | PC873_ECPCLK | PC873_EPPEN); outb(idport + 1, pcr); outb(idport + 1, pcr); /* configure extended bit in PTR */ outb(idport, PC873_PTR); ptr = inb(idport + 1); if (chipset_mode & PPB_PS2) { ptr |= PC873_EXTENDED; if (bootverbose) printf(", PS/2"); } else { /* default to NIBBLE mode */ ptr &= ~PC873_EXTENDED; if (bootverbose) printf(", NIBBLE"); } outb(idport + 1, ptr); outb(idport + 1, ptr); } ppc->ppc_avm = chipset_mode; } if (bootverbose) printf("\n"); ppc->ppc_type = PPC_TYPE_GENERIC; ppc_generic_setmode(ppc, chipset_mode); return(chipset_mode); } return(-1); } /* * ppc_smc37c66xgt_detect * * SMC FDC37C66xGT configuration. */ static int ppc_smc37c66xgt_detect(struct ppc_data *ppc, int chipset_mode) { int i; u_char r; int type = -1; int csr = SMC66x_CSR; /* initial value is 0x3F0 */ int port_address[] = { -1 /* disabled */ , 0x3bc, 0x378, 0x278 }; #define cio csr+1 /* config IO port is either 0x3F1 or 0x371 */ /* * Detection: enter configuration mode and read CRD register. 
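 * Concretely (a sketch of the code below): after the keyed enable,
 * config register 0xd is selected through CSR and read back through
 * CIO, and the returned ID byte settles the type:
 *
 *	outb(csr, 0xd);			select the CRD register
 *	id = inb(cio);			0x65 = 665GT, 0x66 = 666GT
 *
 * with one retry at the alternate, hard-strapped CSR address 0x370 for
 * the 666GT.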
*/ PPC_CONFIG_LOCK(ppc); outb(csr, SMC665_iCODE); outb(csr, SMC665_iCODE); PPC_CONFIG_UNLOCK(ppc); outb(csr, 0xd); if (inb(cio) == 0x65) { type = SMC_37C665GT; goto config; } for (i = 0; i < 2; i++) { PPC_CONFIG_LOCK(ppc); outb(csr, SMC666_iCODE); outb(csr, SMC666_iCODE); PPC_CONFIG_UNLOCK(ppc); outb(csr, 0xd); if (inb(cio) == 0x66) { type = SMC_37C666GT; break; } /* Another chance, CSR may be hard-configured to be at 0x370 */ csr = SMC666_CSR; } config: /* * If chipset not found, do not continue. */ if (type == -1) { outb(csr, 0xaa); /* end config mode */ return (-1); } /* select CR1 */ outb(csr, 0x1); /* read the port's address: bits 0 and 1 of CR1 */ r = inb(cio) & SMC_CR1_ADDR; if (port_address[(int)r] != ppc->ppc_base) { outb(csr, 0xaa); /* end config mode */ return (-1); } ppc->ppc_model = type; /* * CR1 and CR4 registers bits 3 and 0/1 for mode configuration * If SPP mode is detected, try to set ECP+EPP mode */ if (bootverbose) { outb(csr, 0x1); device_printf(ppc->ppc_dev, "SMC registers CR1=0x%x", inb(cio) & 0xff); outb(csr, 0x4); printf(" CR4=0x%x", inb(cio) & 0xff); } /* select CR1 */ outb(csr, 0x1); if (!chipset_mode) { /* autodetect mode */ /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) { ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" configuration hardwired, supposing " \ "ECP+EPP SPP"); } else if ((inb(cio) & SMC_CR1_MODE) == 0) { /* already in extended parallel port mode, read CR4 */ outb(csr, 0x4); r = (inb(cio) & SMC_CR4_EMODE); switch (r) { case SMC_SPP: ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); break; case SMC_EPPSPP: ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) printf(" EPP SPP"); break; case SMC_ECP: ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP SPP"); break; case SMC_ECPEPP: ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; if (bootverbose) printf(" ECP+EPP SPP"); break; } } else { /* not an extended port mode */ ppc->ppc_avm |= PPB_SPP; if (bootverbose) printf(" SPP"); } } else { /* mode forced */ ppc->ppc_avm = chipset_mode; /* 666GT is ~certainly~ hardwired to an extended ECP+EPP mode */ if (type == SMC_37C666GT) goto end_detect; r = inb(cio); if ((chipset_mode & (PPB_ECP | PPB_EPP)) == 0) { /* do not use ECP when the mode is not forced to */ outb(cio, r | SMC_CR1_MODE); if (bootverbose) printf(" SPP"); } else { /* an extended mode is selected */ outb(cio, r & ~SMC_CR1_MODE); /* read CR4 register and reset mode field */ outb(csr, 0x4); r = inb(cio) & ~SMC_CR4_EMODE; if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { outb(cio, r | SMC_ECPEPP); if (bootverbose) printf(" ECP+EPP"); } else { outb(cio, r | SMC_ECP); if (bootverbose) printf(" ECP"); } } else { /* PPB_EPP is set */ outb(cio, r | SMC_EPPSPP); if (bootverbose) printf(" EPP SPP"); } } ppc->ppc_avm = chipset_mode; } /* set FIFO threshold to 16 */ if (ppc->ppc_avm & PPB_ECP) { /* select CRA */ outb(csr, 0xa); outb(cio, 16); } end_detect: if (bootverbose) printf ("\n"); if (ppc->ppc_avm & PPB_EPP) { /* select CR4 */ outb(csr, 0x4); r = inb(cio); /* * Set the EPP protocol... 
* Low=EPP 1.9 (1284 standard) and High=EPP 1.7 */ if (ppc->ppc_epp == EPP_1_9) outb(cio, (r & ~SMC_CR4_EPPTYPE)); else outb(cio, (r | SMC_CR4_EPPTYPE)); } outb(csr, 0xaa); /* end config mode */ ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * SMC FDC37C935 configuration * Found on many Alpha machines */ static int ppc_smc37c935_detect(struct ppc_data *ppc, int chipset_mode) { int type = -1; PPC_CONFIG_LOCK(ppc); outb(SMC935_CFG, 0x55); /* enter config mode */ outb(SMC935_CFG, 0x55); PPC_CONFIG_UNLOCK(ppc); outb(SMC935_IND, SMC935_ID); /* check device id */ if (inb(SMC935_DAT) == 0x2) type = SMC_37C935; if (type == -1) { outb(SMC935_CFG, 0xaa); /* exit config mode */ return (-1); } ppc->ppc_model = type; outb(SMC935_IND, SMC935_LOGDEV); /* select parallel port, */ outb(SMC935_DAT, 3); /* which is logical device 3 */ /* set io port base */ outb(SMC935_IND, SMC935_PORTHI); outb(SMC935_DAT, (u_char)((ppc->ppc_base & 0xff00) >> 8)); outb(SMC935_IND, SMC935_PORTLO); outb(SMC935_DAT, (u_char)(ppc->ppc_base & 0xff)); if (!chipset_mode) ppc->ppc_avm = PPB_COMPATIBLE; /* default mode */ else { ppc->ppc_avm = chipset_mode; outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_CENT); /* start in compatible mode */ /* SPP + EPP or just plain SPP */ if (chipset_mode & (PPB_SPP)) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP19SPP); } if (ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_EPP17SPP); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_SPP); } } /* ECP + EPP or just plain ECP */ if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { if (ppc->ppc_epp == EPP_1_9) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP19); } if (ppc->ppc_epp == EPP_1_7) { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECPEPP17); } } else { outb(SMC935_IND, SMC935_PPMODE); outb(SMC935_DAT, SMC935_ECP); } } } outb(SMC935_CFG, 0xaa); /* exit config mode */ ppc->ppc_type = PPC_TYPE_SMCLIKE; ppc_smclike_setmode(ppc, chipset_mode); return (chipset_mode); } /* * Winbond W83877F stuff * * EFER: extended function enable register * EFIR: extended function index register * EFDR: extended function data register */ #define efir ((efer == 0x250) ? 0x251 : 0x3f0) #define efdr ((efer == 0x250) ? 
0x252 : 0x3f1) static int w83877f_efers[] = { 0x250, 0x3f0, 0x3f0, 0x250 }; static int w83877f_keys[] = { 0x89, 0x86, 0x87, 0x88 }; static int w83877f_keyiter[] = { 1, 2, 2, 1 }; static int w83877f_hefs[] = { WINB_HEFERE, WINB_HEFRAS, WINB_HEFERE | WINB_HEFRAS, 0 }; static int ppc_w83877f_detect(struct ppc_data *ppc, int chipset_mode) { int i, j, efer; unsigned char r, hefere, hefras; for (i = 0; i < 4; i ++) { /* first try to enable configuration registers */ efer = w83877f_efers[i]; /* write the key to the EFER */ for (j = 0; j < w83877f_keyiter[i]; j ++) outb (efer, w83877f_keys[i]); /* then check HEFERE and HEFRAS bits */ outb (efir, 0x0c); hefere = inb(efdr) & WINB_HEFERE; outb (efir, 0x16); hefras = inb(efdr) & WINB_HEFRAS; /* * HEFRAS HEFERE * 0 1 write 89h to 250h (power-on default) * 1 0 write 86h twice to 3f0h * 1 1 write 87h twice to 3f0h * 0 0 write 88h to 250h */ if ((hefere | hefras) == w83877f_hefs[i]) goto found; } return (-1); /* failed */ found: /* check base port address - read from CR23 */ outb(efir, 0x23); if (ppc->ppc_base != inb(efdr) * 4) /* 4 bytes boundaries */ return (-1); /* read CHIP ID from CR9/bits0-3 */ outb(efir, 0x9); switch (inb(efdr) & WINB_CHIPID) { case WINB_W83877F_ID: ppc->ppc_model = WINB_W83877F; break; case WINB_W83877AF_ID: ppc->ppc_model = WINB_W83877AF; break; default: ppc->ppc_model = WINB_UNKNOWN; } if (bootverbose) { /* dump of registers */ device_printf(ppc->ppc_dev, "0x%x - ", w83877f_keys[i]); for (i = 0; i <= 0xd; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } for (i = 0x10; i <= 0x17; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } outb(efir, 0x1e); printf("0x%x ", inb(efdr)); for (i = 0x20; i <= 0x29; i ++) { outb(efir, i); printf("0x%x ", inb(efdr)); } printf("\n"); } ppc->ppc_type = PPC_TYPE_GENERIC; if (!chipset_mode) { /* autodetect mode */ /* select CR0 */ outb(efir, 0x0); r = inb(efdr) & (WINB_PRTMODS0 | WINB_PRTMODS1); /* select CR9 */ outb(efir, 0x9); r |= (inb(efdr) & WINB_PRTMODS2); switch (r) { case WINB_W83757: if (bootverbose) device_printf(ppc->ppc_dev, "W83757 compatible mode\n"); return (-1); /* generic or SMC-like */ case WINB_EXTFDC: case WINB_EXTADP: case WINB_EXT2FDD: case WINB_JOYSTICK: if (bootverbose) device_printf(ppc->ppc_dev, "not in parallel port mode\n"); return (-1); case (WINB_PARALLEL | WINB_EPP_SPP): ppc->ppc_avm |= PPB_EPP | PPB_SPP; if (bootverbose) device_printf(ppc->ppc_dev, "EPP SPP\n"); break; case (WINB_PARALLEL | WINB_ECP): ppc->ppc_avm |= PPB_ECP | PPB_SPP; if (bootverbose) device_printf(ppc->ppc_dev, "ECP SPP\n"); break; case (WINB_PARALLEL | WINB_ECP_EPP): ppc->ppc_avm |= PPB_ECP | PPB_EPP | PPB_SPP; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) device_printf(ppc->ppc_dev, "ECP+EPP SPP\n"); break; default: printf("%s: unknown case (0x%x)!\n", __func__, r); } } else { /* mode forced */ /* select CR9 and set PRTMODS2 bit */ outb(efir, 0x9); outb(efdr, inb(efdr) & ~WINB_PRTMODS2); /* select CR0 and reset PRTMODSx bits */ outb(efir, 0x0); outb(efdr, inb(efdr) & ~(WINB_PRTMODS0 | WINB_PRTMODS1)); if (chipset_mode & PPB_ECP) { if (chipset_mode & PPB_EPP) { outb(efdr, inb(efdr) | WINB_ECP_EPP); if (bootverbose) device_printf(ppc->ppc_dev, "ECP+EPP\n"); ppc->ppc_type = PPC_TYPE_SMCLIKE; } else { outb(efdr, inb(efdr) | WINB_ECP); if (bootverbose) device_printf(ppc->ppc_dev, "ECP\n"); } } else { /* select EPP_SPP otherwise */ outb(efdr, inb(efdr) | WINB_EPP_SPP); if (bootverbose) device_printf(ppc->ppc_dev, "EPP SPP\n"); } ppc->ppc_avm = chipset_mode; } /* exit configuration mode */ 
outb(efer, 0xaa); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } #endif /* * ppc_generic_detect */ static int ppc_generic_detect(struct ppc_data *ppc, int chipset_mode) { /* default to generic */ ppc->ppc_type = PPC_TYPE_GENERIC; if (bootverbose) device_printf(ppc->ppc_dev, "SPP"); /* first, check for ECP */ w_ecr(ppc, PPC_ECR_PS2); if ((r_ecr(ppc) & 0xe0) == PPC_ECR_PS2) { ppc->ppc_dtm |= PPB_ECP | PPB_SPP; if (bootverbose) printf(" ECP "); /* search for SMC style ECP+EPP mode */ w_ecr(ppc, PPC_ECR_EPP); } /* try to reset EPP timeout bit */ if (ppc_check_epp_timeout(ppc)) { ppc->ppc_dtm |= PPB_EPP; if (ppc->ppc_dtm & PPB_ECP) { /* SMC like chipset found */ ppc->ppc_model = SMC_LIKE; ppc->ppc_type = PPC_TYPE_SMCLIKE; if (bootverbose) printf(" ECP+EPP"); } else { if (bootverbose) printf(" EPP"); } } else { /* restore to standard mode */ w_ecr(ppc, PPC_ECR_STD); } /* XXX try to detect NIBBLE and PS2 modes */ ppc->ppc_dtm |= PPB_NIBBLE; if (chipset_mode) ppc->ppc_avm = chipset_mode; else ppc->ppc_avm = ppc->ppc_dtm; if (bootverbose) printf("\n"); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: ppc_smclike_setmode(ppc, chipset_mode); break; default: ppc_generic_setmode(ppc, chipset_mode); break; } return (chipset_mode); } /* * ppc_detect() * * mode is the mode suggested at boot */ static int ppc_detect(struct ppc_data *ppc, int chipset_mode) { #ifdef PPC_PROBE_CHIPSET int i, mode; /* list of supported chipsets */ int (*chipset_detect[])(struct ppc_data *, int) = { ppc_pc873xx_detect, ppc_smc37c66xgt_detect, ppc_w83877f_detect, ppc_smc37c935_detect, ppc_generic_detect, NULL }; #endif /* if can't find the port and mode not forced return error */ if (!ppc_detect_port(ppc) && chipset_mode == 0) return (EIO); /* failed, port not present */ /* assume centronics compatible mode is supported */ ppc->ppc_avm = PPB_COMPATIBLE; #ifdef PPC_PROBE_CHIPSET /* we have to differenciate available chipset modes, * chipset running modes and IEEE-1284 operating modes * * after detection, the port must support running in compatible mode */ if (ppc->ppc_flags & 0x40) { if (bootverbose) printf("ppc: chipset forced to generic\n"); #endif ppc->ppc_mode = ppc_generic_detect(ppc, chipset_mode); #ifdef PPC_PROBE_CHIPSET } else { for (i=0; chipset_detect[i] != NULL; i++) { if ((mode = chipset_detect[i](ppc, chipset_mode)) != -1) { ppc->ppc_mode = mode; break; } } } #endif /* configure/detect ECP FIFO */ if ((ppc->ppc_avm & PPB_ECP) && !(ppc->ppc_flags & 0x80)) ppc_detect_fifo(ppc); return (0); } /* * ppc_exec_microseq() * * Execute a microsequence. * Microsequence mechanism is supposed to handle fast I/O operations. 
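 * A minimal (hypothetical) sequence built from the opcodes handled
 * below; it waits 5 us and then returns control to the ppb layer. The
 * ppc mutex must already be held, as the function asserts:
 *
 *	static struct ppb_microseq msq[] = {
 *		{ MS_OP_DELAY, { { 5 } } },	arg[0].i = delay in us
 *		{ MS_OP_RET, { { 0 } } }	back to ppb level
 *	};
 *	struct ppb_microseq *ip = msq;
 *	error = ppc_exec_microseq(dev, &ip);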
*/ int ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq) { struct ppc_data *ppc = DEVTOSOFTC(dev); struct ppb_microseq *mi; char cc, *p; int i, iter, len; int error; int reg; char mask; int accum = 0; char *ptr = NULL; struct ppb_microseq *stack = NULL; /* microsequence registers are equivalent to PC-like port registers */ #define r_reg(reg,ppc) (bus_read_1((ppc)->res_ioport, reg)) #define w_reg(reg, ppc, byte) (bus_write_1((ppc)->res_ioport, reg, byte)) #define INCR_PC (mi ++) /* increment program counter */ PPC_ASSERT_LOCKED(ppc); mi = *p_msq; for (;;) { switch (mi->opcode) { case MS_OP_RSET: cc = r_reg(mi->arg[0].i, ppc); cc &= (char)mi->arg[2].i; /* clear mask */ cc |= (char)mi->arg[1].i; /* assert mask */ w_reg(mi->arg[0].i, ppc, cc); INCR_PC; break; case MS_OP_RASSERT_P: reg = mi->arg[1].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) w_reg(reg, ppc, *ptr++); ppc->ppc_accum = accum; } else for (i = 0; i < len; i++) w_reg(reg, ppc, *ptr++); ppc->ppc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH_P: reg = mi->arg[1].i; mask = (char)mi->arg[2].i; ptr = ppc->ppc_ptr; if ((len = mi->arg[0].i) == MS_ACCUM) { accum = ppc->ppc_accum; for (; accum; accum--) *ptr++ = r_reg(reg, ppc) & mask; ppc->ppc_accum = accum; } else for (i = 0; i < len; i++) *ptr++ = r_reg(reg, ppc) & mask; ppc->ppc_ptr = ptr; INCR_PC; break; case MS_OP_RFETCH: *((char *) mi->arg[2].p) = r_reg(mi->arg[0].i, ppc) & (char)mi->arg[1].i; INCR_PC; break; case MS_OP_RASSERT: case MS_OP_DELAY: /* let's suppose the next instr. is the same */ prefetch: for (;mi->opcode == MS_OP_RASSERT; INCR_PC) w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i); if (mi->opcode == MS_OP_DELAY) { DELAY(mi->arg[0].i); INCR_PC; goto prefetch; } break; case MS_OP_ADELAY: if (mi->arg[0].i) { PPC_UNLOCK(ppc); pause("ppbdelay", mi->arg[0].i * (hz/1000)); PPC_LOCK(ppc); } INCR_PC; break; case MS_OP_TRIG: reg = mi->arg[0].i; iter = mi->arg[1].i; p = (char *)mi->arg[2].p; /* XXX delay limited to 255 us */ for (i = 0; i < iter; i++) { w_reg(reg, ppc, *p++); DELAY((unsigned char)*p++); } INCR_PC; break; case MS_OP_SET: ppc->ppc_accum = mi->arg[0].i; INCR_PC; break; case MS_OP_DBRA: if (--ppc->ppc_accum > 0) mi += mi->arg[0].i; INCR_PC; break; case MS_OP_BRSET: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == (char)mi->arg[0].i) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRCLEAR: cc = r_str(ppc); if ((cc & (char)mi->arg[0].i) == 0) mi += mi->arg[1].i; INCR_PC; break; case MS_OP_BRSTAT: cc = r_str(ppc); if ((cc & ((char)mi->arg[0].i | (char)mi->arg[1].i)) == (char)mi->arg[0].i) mi += mi->arg[2].i; INCR_PC; break; case MS_OP_C_CALL: /* * If the C call returns !0 then end the microseq.
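 * (A conforming hook, sketched with hypothetical names:
 *
 *	static int
 *	foo_poll(void *p, char *ptr)	p is arg[1].p, ptr is ppc_ptr
 *	{
 *		return (0);		nonzero ends the microseq and is
 *	}				returned to the caller as error
 *
 * installed as arg[0].f and handed arg[1].p as its first parameter.)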
* The current state of ptr is passed to the C function */ if ((error = mi->arg[0].f(mi->arg[1].p, ppc->ppc_ptr))) return (error); INCR_PC; break; case MS_OP_PTR: ppc->ppc_ptr = (char *)mi->arg[0].p; INCR_PC; break; case MS_OP_CALL: if (stack) panic("%s: too much calls", __func__); if (mi->arg[0].p) { /* store the state of the actual * microsequence */ stack = mi; /* jump to the new microsequence */ mi = (struct ppb_microseq *)mi->arg[0].p; } else INCR_PC; break; case MS_OP_SUBRET: /* retrieve microseq and pc state before the call */ mi = stack; /* reset the stack */ stack = NULL; /* XXX return code */ INCR_PC; break; case MS_OP_PUT: case MS_OP_GET: case MS_OP_RET: /* can't return to ppb level during the execution * of a submicrosequence */ if (stack) panic("%s: can't return to ppb level", __func__); /* update pc for ppb level of execution */ *p_msq = mi; /* return to ppb level of execution */ return (0); default: panic("%s: unknown microsequence opcode 0x%x", __func__, mi->opcode); } } /* unreached */ } static void ppcintr(void *arg) { struct ppc_data *ppc = arg; u_char ctr, ecr, str; /* * If we have any child interrupt handlers registered, let * them handle this interrupt. * * XXX: If DMA is in progress should we just complete that w/o * doing this? */ PPC_LOCK(ppc); if (ppc->ppc_intr_hook != NULL && ppc->ppc_intr_hook(ppc->ppc_intr_arg) == 0) { PPC_UNLOCK(ppc); return; } str = r_str(ppc); ctr = r_ctr(ppc); ecr = r_ecr(ppc); #if defined(PPC_DEBUG) && PPC_DEBUG > 1 printf("![%x/%x/%x]", ctr, ecr, str); #endif /* don't use ecp mode with IRQENABLE set */ if (ctr & IRQENABLE) { PPC_UNLOCK(ppc); return; } /* interrupts are generated by nFault signal * only in ECP mode */ if ((str & nFAULT) && (ppc->ppc_mode & PPB_ECP)) { /* check if ppc driver has programmed the * nFault interrupt */ if (ppc->ppc_irqstat & PPC_IRQ_nFAULT) { w_ecr(ppc, ecr | PPC_nFAULT_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_nFAULT; } else { /* shall be handled by underlying layers XXX */ PPC_UNLOCK(ppc); return; } } if (ppc->ppc_irqstat & PPC_IRQ_DMA) { /* disable interrupts (should be done by hardware though) */ w_ecr(ppc, ecr | PPC_SERVICE_INTR); ppc->ppc_irqstat &= ~PPC_IRQ_DMA; ecr = r_ecr(ppc); /* check if DMA completed */ if ((ppc->ppc_avm & PPB_ECP) && (ecr & PPC_ENABLE_DMA)) { #ifdef PPC_DEBUG printf("a"); #endif /* stop DMA */ w_ecr(ppc, ecr & ~PPC_ENABLE_DMA); ecr = r_ecr(ppc); if (ppc->ppc_dmastat == PPC_DMA_STARTED) { #ifdef PPC_DEBUG printf("d"); #endif ppc->ppc_dmadone(ppc); ppc->ppc_dmastat = PPC_DMA_COMPLETE; /* wakeup the waiting process */ wakeup(ppc); } } } else if (ppc->ppc_irqstat & PPC_IRQ_FIFO) { /* classic interrupt I/O */ ppc->ppc_irqstat &= ~PPC_IRQ_FIFO; } PPC_UNLOCK(ppc); return; } int ppc_read(device_t dev, char *buf, int len, int mode) { return (EINVAL); } int ppc_write(device_t dev, char *buf, int len, int how) { return (EINVAL); } int ppc_reset_epp(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); ppc_reset_epp_timeout(ppc); return 0; } int ppc_setmode(device_t dev, int mode) { struct ppc_data *ppc = DEVTOSOFTC(dev); PPC_ASSERT_LOCKED(ppc); switch (ppc->ppc_type) { case PPC_TYPE_SMCLIKE: return (ppc_smclike_setmode(ppc, mode)); break; case PPC_TYPE_GENERIC: default: return (ppc_generic_setmode(ppc, mode)); break; } /* not reached */ return (ENXIO); } int ppc_probe(device_t dev, int rid) { struct ppc_data *ppc; #ifdef __i386__ static short next_bios_ppc = 0; int error; rman_res_t port; #endif /* * Allocate the ppc_data structure. 
*/ ppc = DEVTOSOFTC(dev); bzero(ppc, sizeof(struct ppc_data)); ppc->rid_ioport = rid; #ifdef __i386__ /* retrieve ISA parameters */ error = bus_get_resource(dev, SYS_RES_IOPORT, rid, &port, NULL); /* * If port not specified, use bios list. */ if (error) { if ((next_bios_ppc < BIOS_MAX_PPC) && (*(BIOS_PORTS + next_bios_ppc) != 0)) { port = *(BIOS_PORTS + next_bios_ppc++); if (bootverbose) device_printf(dev, "parallel port found at 0x%jx\n", port); } else { device_printf(dev, "parallel port not found.\n"); return (ENXIO); } bus_set_resource(dev, SYS_RES_IOPORT, rid, port, IO_LPTSIZE_EXTENDED); } #endif /* IO port is mandatory */ /* Try "extended" IO port range...*/ ppc->res_ioport = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ppc->rid_ioport, IO_LPTSIZE_EXTENDED, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using extended I/O port range\n"); } else { /* Failed? If so, then try the "normal" IO port range... */ ppc->res_ioport = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ppc->rid_ioport, IO_LPTSIZE_NORMAL, RF_ACTIVE); if (ppc->res_ioport != 0) { if (bootverbose) device_printf(dev, "using normal I/O port range\n"); } else { if (bootverbose) device_printf(dev, "cannot reserve I/O port range\n"); goto error; } } ppc->ppc_base = rman_get_start(ppc->res_ioport); ppc->ppc_flags = device_get_flags(dev); if (!(ppc->ppc_flags & 0x20)) { ppc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ppc->rid_irq, RF_SHAREABLE); ppc->res_drq = bus_alloc_resource_any(dev, SYS_RES_DRQ, &ppc->rid_drq, RF_ACTIVE); } if (ppc->res_irq) ppc->ppc_irq = rman_get_start(ppc->res_irq); if (ppc->res_drq) ppc->ppc_dmachan = rman_get_start(ppc->res_drq); ppc->ppc_dev = dev; ppc->ppc_model = GENERIC; ppc->ppc_mode = PPB_COMPATIBLE; ppc->ppc_epp = (ppc->ppc_flags & 0x10) >> 4; ppc->ppc_type = PPC_TYPE_GENERIC; /* * Try to detect the chipset and its mode. */ if (ppc_detect(ppc, ppc->ppc_flags & 0xf)) goto error; return (0); error: if (ppc->res_irq != 0) { bus_release_resource(dev, SYS_RES_IRQ, ppc->rid_irq, ppc->res_irq); } if (ppc->res_ioport != 0) { bus_release_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); } if (ppc->res_drq != 0) { bus_release_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); } return (ENXIO); } int ppc_attach(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); int error; mtx_init(&ppc->ppc_lock, device_get_nameunit(dev), "ppc", MTX_DEF); device_printf(dev, "%s chipset (%s) in %s mode%s\n", ppc_models[ppc->ppc_model], ppc_avms[ppc->ppc_avm], ppc_modes[ppc->ppc_mode], (PPB_IS_EPP(ppc->ppc_mode)) ? ppc_epp_protocol[ppc->ppc_epp] : ""); if (ppc->ppc_fifo) device_printf(dev, "FIFO with %d/%d/%d bytes threshold\n", ppc->ppc_fifo, ppc->ppc_wthr, ppc->ppc_rthr); if (ppc->res_irq) { /* default to the tty mask for registration */ /* XXX */ error = bus_setup_intr(dev, ppc->res_irq, INTR_TYPE_TTY | INTR_MPSAFE, NULL, ppcintr, ppc, &ppc->intr_cookie); if (error) { device_printf(dev, "failed to register interrupt handler: %d\n", error); mtx_destroy(&ppc->ppc_lock); return (error); } } /* add ppbus as a child of this isa to parallel bridge */ ppc->ppbus = device_add_child(dev, "ppbus", -1); /* * Probe the ppbus and attach devices found. 
*/ device_probe_and_attach(ppc->ppbus); return (0); } int ppc_detach(device_t dev) { struct ppc_data *ppc = DEVTOSOFTC(dev); if (ppc->res_irq == 0) { return (ENXIO); } /* detach & delete all children */ device_delete_children(dev); if (ppc->res_irq != 0) { bus_teardown_intr(dev, ppc->res_irq, ppc->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, ppc->rid_irq, ppc->res_irq); } if (ppc->res_ioport != 0) { bus_release_resource(dev, SYS_RES_IOPORT, ppc->rid_ioport, ppc->res_ioport); } if (ppc->res_drq != 0) { bus_release_resource(dev, SYS_RES_DRQ, ppc->rid_drq, ppc->res_drq); } mtx_destroy(&ppc->ppc_lock); return (0); } u_char ppc_io(device_t ppcdev, int iop, u_char *addr, int cnt, u_char byte) { struct ppc_data *ppc = DEVTOSOFTC(ppcdev); PPC_ASSERT_LOCKED(ppc); switch (iop) { case PPB_OUTSB_EPP: bus_write_multi_1(ppc->res_ioport, PPC_EPP_DATA, addr, cnt); break; case PPB_OUTSW_EPP: bus_write_multi_2(ppc->res_ioport, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_OUTSL_EPP: bus_write_multi_4(ppc->res_ioport, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_INSB_EPP: bus_read_multi_1(ppc->res_ioport, PPC_EPP_DATA, addr, cnt); break; case PPB_INSW_EPP: bus_read_multi_2(ppc->res_ioport, PPC_EPP_DATA, (u_int16_t *)addr, cnt); break; case PPB_INSL_EPP: bus_read_multi_4(ppc->res_ioport, PPC_EPP_DATA, (u_int32_t *)addr, cnt); break; case PPB_RDTR: return (r_dtr(ppc)); case PPB_RSTR: return (r_str(ppc)); case PPB_RCTR: return (r_ctr(ppc)); case PPB_REPP_A: return (r_epp_A(ppc)); case PPB_REPP_D: return (r_epp_D(ppc)); case PPB_RECR: return (r_ecr(ppc)); case PPB_RFIFO: return (r_fifo(ppc)); case PPB_WDTR: w_dtr(ppc, byte); break; case PPB_WSTR: w_str(ppc, byte); break; case PPB_WCTR: w_ctr(ppc, byte); break; case PPB_WEPP_A: w_epp_A(ppc, byte); break; case PPB_WEPP_D: w_epp_D(ppc, byte); break; case PPB_WECR: w_ecr(ppc, byte); break; case PPB_WFIFO: w_fifo(ppc, byte); break; default: panic("%s: unknown I/O operation", __func__); break; } return (0); /* not significative */ } int ppc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val) { struct ppc_data *ppc = (struct ppc_data *)device_get_softc(bus); switch (index) { case PPC_IVAR_EPP_PROTO: PPC_ASSERT_LOCKED(ppc); *val = (u_long)ppc->ppc_epp; break; case PPC_IVAR_LOCK: *val = (uintptr_t)&ppc->ppc_lock; break; default: return (ENOENT); } return (0); } int ppc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { struct ppc_data *ppc = (struct ppc_data *)device_get_softc(bus); switch (index) { case PPC_IVAR_INTR_HANDLER: PPC_ASSERT_LOCKED(ppc); if (dev != ppc->ppbus) return (EINVAL); if (val == 0) { ppc->ppc_intr_hook = NULL; break; } if (ppc->ppc_intr_hook != NULL) return (EBUSY); ppc->ppc_intr_hook = (void *)val; ppc->ppc_intr_arg = device_get_softc(dev); break; default: return (ENOENT); } return (0); } /* * We allow child devices to allocate an IRQ resource at rid 0 for their * interrupt handlers. 
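 * Under the reduced method signature the round trip from a ppbus child
 * becomes (sketch; the child variable is hypothetical):
 *
 *	rid = 0;
 *	irq = bus_alloc_resource_any(child, SYS_RES_IRQ, &rid, RF_SHAREABLE);
 *	...
 *	bus_release_resource(child, irq);
 *
 * and the release below succeeds only if the pointer matches the cached
 * ppc->res_irq; no type or rid is needed.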
*/ struct resource * ppc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ppc_data *ppc = DEVTOSOFTC(bus); switch (type) { case SYS_RES_IRQ: if (*rid == 0) return (ppc->res_irq); break; } return (NULL); } int -ppc_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +ppc_release_resource(device_t bus, device_t child, struct resource *r) { -#ifdef INVARIANTS struct ppc_data *ppc = DEVTOSOFTC(bus); -#endif - switch (type) { - case SYS_RES_IRQ: - if (rid == 0) { - KASSERT(r == ppc->res_irq, - ("ppc child IRQ resource mismatch")); - return (0); - } - break; - } + if (r == ppc->res_irq) + return (0); return (EINVAL); } MODULE_DEPEND(ppc, ppbus, 1, 1, 1); diff --git a/sys/dev/ppc/ppcvar.h b/sys/dev/ppc/ppcvar.h index 39643ea03513..3697c8c35eef 100644 --- a/sys/dev/ppc/ppcvar.h +++ b/sys/dev/ppc/ppcvar.h @@ -1,51 +1,50 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997-2000 Nicolas Souchu * Copyright (c) 2001 Alcove - Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ int ppc_probe(device_t dev, int rid); int ppc_attach(device_t dev); int ppc_detach(device_t dev); int ppc_read_ivar(device_t bus, device_t dev, int index, uintptr_t *val); int ppc_write_ivar(device_t bus, device_t dev, int index, uintptr_t val); int ppc_read(device_t, char *, int, int); int ppc_write(device_t, char *, int, int); u_char ppc_io(device_t, int, u_char *, int, u_char); int ppc_exec_microseq(device_t, struct ppb_microseq **); struct resource *ppc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -int ppc_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r); +int ppc_release_resource(device_t bus, device_t child, struct resource *r); int ppc_reset_epp(device_t); int ppc_ecp_sync(device_t); int ppc_setmode(device_t, int); extern const char ppc_driver_name[]; diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c index ecf199fd6020..a37016c80226 100644 --- a/sys/dev/puc/puc.c +++ b/sys/dev/puc/puc.c @@ -1,765 +1,759 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PUC_ISRCCNT 5 struct puc_port { struct puc_bar *p_bar; struct resource *p_rres; struct resource *p_ires; device_t p_dev; int p_nr; int p_type; int p_rclk; bool p_hasintr:1; serdev_intr_t *p_ihsrc[PUC_ISRCCNT]; void *p_iharg; int p_ipend; }; const char puc_driver_name[] = "puc"; static MALLOC_DEFINE(M_PUC, "PUC", "PUC driver"); SYSCTL_NODE(_hw, OID_AUTO, puc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "puc(4) driver configuration"); struct puc_bar * puc_get_bar(struct puc_softc *sc, int rid) { struct puc_bar *bar; struct rman *rm; rman_res_t end, start; int error, i; /* Find the BAR entry with the given RID. */ i = 0; while (i < PUC_PCI_BARS && sc->sc_bar[i].b_rid != rid) i++; if (i < PUC_PCI_BARS) return (&sc->sc_bar[i]); /* Not found. If we're looking for an unused entry, return NULL. */ if (rid == -1) return (NULL); /* Get an unused entry for us to fill. */ bar = puc_get_bar(sc, -1); if (bar == NULL) return (NULL); bar->b_rid = rid; bar->b_type = SYS_RES_IOPORT; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = rid; bar->b_type = SYS_RES_MEMORY; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = -1; return (NULL); } } /* Update our managed space. */ rm = (bar->b_type == SYS_RES_IOPORT) ? &sc->sc_ioport : &sc->sc_iomem; start = rman_get_start(bar->b_res); end = rman_get_end(bar->b_res); error = rman_manage_region(rm, start, end); if (error) { bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); bar->b_res = NULL; bar->b_rid = -1; bar = NULL; } return (bar); } static int puc_intr(void *arg) { struct puc_port *port; struct puc_softc *sc = arg; u_long ds, dev, devs; int i, idx, ipend, isrc, nints; uint8_t ilr; nints = 0; while (1) { /* * Obtain the set of devices with pending interrupts. 
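 * sc_serdevs has one bit per serdev-capable port (bit p_nr - 1, set in
 * puc_bus_setup_intr below), so the filter loops until the whole mask
 * reads quiet. In outline, with a hypothetical helper:
 *
 *	while ((devs = pending_ports(sc)) != 0)
 *		for each bit idx set in devs:
 *			ipend = SERDEV_IPEND(sc->sc_port[idx].p_dev);
 *			dispatch the matching p_ihsrc[] filters
 *	return (nints > 0 ? FILTER_HANDLED : FILTER_STRAY);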
*/ devs = sc->sc_serdevs; if (sc->sc_ilr == PUC_ILR_DIGI) { idx = 0; while (devs & (0xfful << idx)) { ilr = ~bus_read_1(sc->sc_port[idx].p_rres, 7); devs &= ~0ul ^ ((u_long)ilr << idx); idx += 8; } } else if (sc->sc_ilr == PUC_ILR_QUATECH) { /* * Don't trust the value if it's the same as the option * register. It may mean that the ILR is not active and * we're reading the option register instead. This may * lead to false positives on 8-port boards. */ ilr = bus_read_1(sc->sc_port[0].p_rres, 7); if (ilr != (sc->sc_cfg_data & 0xff)) devs &= (u_long)ilr; } if (devs == 0UL) break; /* * Obtain the set of interrupt sources from those devices * that have pending interrupts. */ ipend = 0; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; port->p_ipend = SERDEV_IPEND(port->p_dev); ipend |= port->p_ipend; } if (ipend == 0) break; i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < PUC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < PUC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; if (!(port->p_ipend & isrc)) continue; if (port->p_ihsrc[i] != NULL) (*port->p_ihsrc[i])(port->p_iharg); nints++; } } } return ((nints > 0) ? FILTER_HANDLED : FILTER_STRAY); } int puc_bfe_attach(device_t dev) { char buffer[64]; struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; struct rman *rm; intptr_t res; bus_addr_t ofs, start; bus_size_t size; bus_space_handle_t bsh; bus_space_tag_t bst; int error, idx; sc = device_get_softc(dev); for (idx = 0; idx < PUC_PCI_BARS; idx++) sc->sc_bar[idx].b_rid = -1; do { sc->sc_ioport.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_ioport); if (!error) { sc->sc_iomem.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_iomem); if (!error) { sc->sc_irq.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_irq); if (!error) break; rman_fini(&sc->sc_iomem); } rman_fini(&sc->sc_ioport); } return (error); } while (0); snprintf(buffer, sizeof(buffer), "%s I/O port mapping", device_get_nameunit(dev)); sc->sc_ioport.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s I/O memory mapping", device_get_nameunit(dev)); sc->sc_iomem.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s port numbers", device_get_nameunit(dev)); sc->sc_irq.rm_descr = strdup(buffer, M_PUC); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); KASSERT(error == 0, ("%s %d", __func__, __LINE__)); sc->sc_nports = (int)res; sc->sc_port = malloc(sc->sc_nports * sizeof(struct puc_port), M_PUC, M_WAITOK|M_ZERO); error = rman_manage_region(&sc->sc_irq, 1, sc->sc_nports); if (error) goto fail; error = puc_config(sc, PUC_CFG_SETUP, 0, &res); if (error) goto fail; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; port->p_nr = idx + 1; error = puc_config(sc, PUC_CFG_GET_TYPE, idx, &res); if (error) goto fail; port->p_type = res; error = puc_config(sc, PUC_CFG_GET_RID, idx, &res); if (error) goto fail; bar = puc_get_bar(sc, res); if (bar == NULL) { error = ENXIO; goto fail; } port->p_bar = bar; start = rman_get_start(bar->b_res); error = puc_config(sc, PUC_CFG_GET_OFS, idx, &res); if (error) goto fail; ofs = res; error = puc_config(sc, PUC_CFG_GET_LEN, idx, &res); if (error) goto fail; size = res; rm = (bar->b_type == SYS_RES_IOPORT) ? 
&sc->sc_ioport: &sc->sc_iomem; port->p_rres = rman_reserve_resource(rm, start + ofs, start + ofs + size - 1, size, 0, NULL); if (port->p_rres != NULL) { bsh = rman_get_bushandle(bar->b_res); bst = rman_get_bustag(bar->b_res); bus_space_subregion(bst, bsh, ofs, size, &bsh); rman_set_bushandle(port->p_rres, bsh); rman_set_bustag(port->p_rres, bst); } port->p_ires = rman_reserve_resource(&sc->sc_irq, port->p_nr, port->p_nr, 1, 0, NULL); if (port->p_ires == NULL) { error = ENXIO; goto fail; } error = puc_config(sc, PUC_CFG_GET_CLOCK, idx, &res); if (error) goto fail; port->p_rclk = res; port->p_dev = device_add_child(dev, NULL, -1); if (port->p_dev != NULL) device_set_ivars(port->p_dev, (void *)port); } error = puc_config(sc, PUC_CFG_GET_ILR, 0, &res); if (error) goto fail; sc->sc_ilr = res; if (bootverbose && sc->sc_ilr != 0) device_printf(dev, "using interrupt latch register\n"); sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE|RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, puc_intr, NULL, sc, &sc->sc_icookie); if (error) error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)puc_intr, sc, &sc->sc_icookie); else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) { /* XXX no interrupt resource. Force polled mode. */ sc->sc_polled = 1; } /* Probe and attach our children. */ for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; error = device_probe_and_attach(port->p_dev); if (error) { device_delete_child(dev, port->p_dev); port->p_dev = NULL; } } /* * If there are no serdev devices, then our interrupt handler * will do nothing. Tear it down. */ if (sc->sc_serdevs == 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); return (0); fail: for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev != NULL) device_delete_child(dev, port->p_dev); if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (error); } int puc_bfe_detach(device_t dev) { struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; int error, idx; sc = device_get_softc(dev); /* Detach our children. 
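 * Teardown runs in reverse of attach: ports and their rman reservations
 * go first, then the controller-level interrupt and BARs, i.e. below:
 *
 *	device_delete_child(dev, port->p_dev);		per port
 *	bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie);
 *	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires);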
*/ error = 0; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; if (device_delete_child(dev, port->p_dev) == 0) { if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } else error = ENXIO; } if (error) return (error); if (sc->sc_serdevs != 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (0); } int puc_bfe_probe(device_t dev, const struct puc_cfg *cfg) { struct puc_softc *sc; intptr_t res; int error; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_cfg = cfg; /* We don't attach to single-port serial cards. */ if (cfg->ports == PUC_PORT_1S || cfg->ports == PUC_PORT_1P) return (EDOOFUS); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); if (error) return (error); error = puc_config(sc, PUC_CFG_GET_DESC, 0, &res); if (error) return (error); if (res != 0) device_set_desc(dev, (const char *)res); return (BUS_PROBE_DEFAULT); } struct resource * puc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct puc_port *port; struct resource *res; device_t assigned, originator; int error; /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (NULL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid == NULL || *rid != 0) return (NULL); /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (NULL); if (res == NULL) return (NULL); assigned = rman_get_device(res); if (assigned == NULL) /* Not allocated */ rman_set_device(res, originator); else if (assigned != originator) return (NULL); if (flags & RF_ACTIVE) { error = rman_activate_resource(res); if (error) { if (assigned == NULL) rman_set_device(res, NULL); return (NULL); } } return (res); } int -puc_bus_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +puc_bus_release_resource(device_t dev, device_t child, struct resource *res) { struct puc_port *port; device_t originator; /* Get our immediate child. 
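The caller may be a grandchild (for instance a serial driver attached to one of our ports), so walk up the tree to the device that is directly our child; its ivars carry the per-port state against which the resource is validated.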
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); - if (rid != 0 || res == NULL) + if (res == NULL) return (EINVAL); - if (type == port->p_bar->b_type) { - if (res != port->p_rres) - return (EINVAL); - } else if (type == SYS_RES_IRQ) { - if (res != port->p_ires) - return (EINVAL); + if (res == port->p_ires) { if (port->p_hasintr) return (EBUSY); - } else + } else if (res != port->p_rres) return (EINVAL); if (rman_get_device(res) != originator) return (ENXIO); if (rman_get_flags(res) & RF_ACTIVE) rman_deactivate_resource(res); rman_set_device(res, NULL); return (0); } int puc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct puc_port *port; struct resource *res; rman_res_t start; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (ENXIO); if (rid != 0 || res == NULL) return (ENXIO); start = rman_get_start(res); if (startp != NULL) *startp = start; if (countp != NULL) *countp = rman_get_end(res) - start + 1; return (0); } int puc_bus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i, isrc, serdev; sc = device_get_softc(dev); /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (cookiep == NULL || res != port->p_ires) return (EINVAL); /* We demand that serdev devices use filter_only interrupts. */ if (port->p_type == PUC_TYPE_SERIAL && ihand != NULL) return (ENXIO); if (rman_get_device(port->p_ires) != originator) return (ENXIO); /* * Have non-serdev ports handled by the bus implementation. It * supports multiple handlers for a single interrupt as it is, * so we wouldn't add value if we did it ourselves. */ serdev = 0; if (port->p_type == PUC_TYPE_SERIAL) { i = 0, isrc = SER_INT_OVERRUN; while (i < PUC_ISRCCNT) { port->p_ihsrc[i] = SERDEV_IHAND(originator, isrc); if (port->p_ihsrc[i] != NULL) serdev = 1; i++, isrc <<= 1; } } if (!serdev) return (BUS_SETUP_INTR(device_get_parent(dev), originator, sc->sc_ires, flags, filt, ihand, arg, cookiep)); sc->sc_serdevs |= 1UL << (port->p_nr - 1); port->p_hasintr = 1; port->p_iharg = arg; *cookiep = port; return (0); } int puc_bus_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i; sc = device_get_softc(dev); /* Get our immediate child. 
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (res != port->p_ires) return (EINVAL); if (rman_get_device(port->p_ires) != originator) return (ENXIO); if (!port->p_hasintr) return (BUS_TEARDOWN_INTR(device_get_parent(dev), originator, sc->sc_ires, cookie)); if (cookie != port) return (EINVAL); port->p_hasintr = 0; port->p_iharg = NULL; for (i = 0; i < PUC_ISRCCNT; i++) port->p_ihsrc[i] = NULL; return (0); } int puc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct puc_port *port; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (result == NULL) return (EINVAL); switch(index) { case PUC_IVAR_CLOCK: *result = port->p_rclk; break; case PUC_IVAR_TYPE: *result = port->p_type; break; default: return (ENOENT); } return (0); } int puc_bus_print_child(device_t dev, device_t child) { struct puc_port *port; int retval; port = device_get_ivars(child); retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at port %d", port->p_nr); retval += bus_print_child_footer(dev, child); return (retval); } int puc_bus_child_location(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); sbuf_printf(sb, "port=%d", port->p_nr); return (0); } int puc_bus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); sbuf_printf(sb, "type=%d", port->p_type); return (0); } diff --git a/sys/dev/puc/puc_bfe.h b/sys/dev/puc/puc_bfe.h index 9592edbd59f9..43fff4d79930 100644 --- a/sys/dev/puc/puc_bfe.h +++ b/sys/dev/puc/puc_bfe.h @@ -1,99 +1,99 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _DEV_PUC_BFE_H_ #define _DEV_PUC_BFE_H_ #define PUC_PCI_BARS 6 struct puc_cfg; struct puc_port; extern const struct puc_cfg puc_pci_devices[]; extern const char puc_driver_name[]; struct puc_bar { struct resource *b_res; int b_rid; int b_type; }; struct puc_softc { device_t sc_dev; const struct puc_cfg *sc_cfg; intptr_t sc_cfg_data; struct puc_bar sc_bar[PUC_PCI_BARS]; struct rman sc_ioport; struct rman sc_iomem; struct rman sc_irq; struct resource *sc_ires; void *sc_icookie; int sc_irid; int sc_nports; struct puc_port *sc_port; bool sc_fastintr:1; bool sc_leaving:1; bool sc_polled:1; bool sc_msi:1; int sc_ilr; /* * Bitmask of ports that use the serdev I/F. This allows for * 32 ports on ILP32 machines and 64 ports on LP64 machines. */ u_long sc_serdevs; }; struct puc_bar *puc_get_bar(struct puc_softc *sc, int rid); int puc_bfe_attach(device_t); int puc_bfe_detach(device_t); int puc_bfe_probe(device_t, const struct puc_cfg *); int puc_bus_child_location(device_t, device_t, struct sbuf *sb); int puc_bus_child_pnpinfo(device_t, device_t, struct sbuf *sb); struct resource *puc_bus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); int puc_bus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); int puc_bus_print_child(device_t, device_t); int puc_bus_read_ivar(device_t, device_t, int, uintptr_t *); -int puc_bus_release_resource(device_t, device_t, int, int, struct resource *); +int puc_bus_release_resource(device_t, device_t, struct resource *); int puc_bus_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); int puc_bus_teardown_intr(device_t, device_t, struct resource *, void *); SYSCTL_DECL(_hw_puc); #endif /* _DEV_PUC_BFE_H_ */ diff --git a/sys/dev/quicc/quicc_bfe.h b/sys/dev/quicc/quicc_bfe.h index 4c36e9305ee6..e81e52d891aa 100644 --- a/sys/dev/quicc/quicc_bfe.h +++ b/sys/dev/quicc/quicc_bfe.h @@ -1,72 +1,72 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2006 by Juniper Networks. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _DEV_QUICC_BFE_H_ #define _DEV_QUICC_BFE_H_ struct quicc_device; struct quicc_softc { device_t sc_dev; struct resource *sc_rres; /* Register resource. */ int sc_rrid; int sc_rtype; /* SYS_RES_{IOPORT|MEMORY}. */ struct resource *sc_ires; /* Interrupt resource. */ void *sc_icookie; int sc_irid; struct rman sc_rman; struct quicc_device *sc_device; u_int sc_clock; bool sc_fastintr:1; bool sc_polled:1; }; extern char quicc_driver_name[]; int quicc_bfe_attach(device_t); int quicc_bfe_detach(device_t); int quicc_bfe_probe(device_t, u_int); struct resource *quicc_bus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); int quicc_bus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); int quicc_bus_read_ivar(device_t, device_t, int, uintptr_t *); -int quicc_bus_release_resource(device_t, device_t, int, int, struct resource *); +int quicc_bus_release_resource(device_t, device_t, struct resource *); int quicc_bus_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, void (*)(void *), void *, void **); int quicc_bus_teardown_intr(device_t, device_t, struct resource *, void *); #endif /* _DEV_QUICC_BFE_H_ */ diff --git a/sys/dev/quicc/quicc_core.c b/sys/dev/quicc/quicc_core.c index fe8c4c6c99ff..134481b1c0a1 100644 --- a/sys/dev/quicc/quicc_core.c +++ b/sys/dev/quicc/quicc_core.c @@ -1,399 +1,399 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2006 by Juniper Networks. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define quicc_read2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, o) #define quicc_read4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, o) #define quicc_write2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, o, v) #define quicc_write4(r, o, v) \ bus_space_write_4((r)->r_bustag, (r)->r_bushandle, o, v) char quicc_driver_name[] = "quicc"; static MALLOC_DEFINE(M_QUICC, "QUICC", "QUICC driver"); struct quicc_device { struct rman *qd_rman; struct resource_list qd_rlist; device_t qd_dev; int qd_devtype; driver_filter_t *qd_ih; void *qd_ih_arg; }; static int quicc_bfe_intr(void *arg) { struct quicc_device *qd; struct quicc_softc *sc = arg; uint32_t sipnr; sipnr = quicc_read4(sc->sc_rres, QUICC_REG_SIPNR_L); if (sipnr & 0x00f00000) qd = sc->sc_device; else qd = NULL; if (qd == NULL || qd->qd_ih == NULL) { device_printf(sc->sc_dev, "Stray interrupt %08x\n", sipnr); return (FILTER_STRAY); } return ((*qd->qd_ih)(qd->qd_ih_arg)); } int quicc_bfe_attach(device_t dev) { struct quicc_device *qd; struct quicc_softc *sc; struct resource_list_entry *rle; const char *sep; rman_res_t size, start; int error; sc = device_get_softc(dev); /* * Re-allocate. We expect that the softc contains the information * collected by quicc_bfe_probe() intact. */ sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); start = rman_get_start(sc->sc_rres); size = rman_get_size(sc->sc_rres); sc->sc_rman.rm_start = start; sc->sc_rman.rm_end = start + size - 1; sc->sc_rman.rm_type = RMAN_ARRAY; sc->sc_rman.rm_descr = "QUICC resources"; error = rman_init(&sc->sc_rman); if (!error) error = rman_manage_region(&sc->sc_rman, start, start + size - 1); if (error) { bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (error); } /* * Allocate interrupt resource. */ sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, quicc_bfe_intr, NULL, sc, &sc->sc_icookie); if (error) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) sc->sc_polled = 1; if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode", sep); sep = ", "; } printf("\n"); } sc->sc_device = qd = malloc(sizeof(struct quicc_device), M_QUICC, M_WAITOK | M_ZERO); qd->qd_devtype = QUICC_DEVTYPE_SCC; qd->qd_rman = &sc->sc_rman; resource_list_init(&qd->qd_rlist); resource_list_add(&qd->qd_rlist, sc->sc_rtype, 0, start, start + size - 1, size); resource_list_add(&qd->qd_rlist, SYS_RES_IRQ, 0, 0xf00, 0xf00, 1); rle = resource_list_find(&qd->qd_rlist, SYS_RES_IRQ, 0); rle->res = sc->sc_ires; qd->qd_dev = device_add_child(dev, NULL, -1); device_set_ivars(qd->qd_dev, (void *)qd); error = device_probe_and_attach(qd->qd_dev); /* Enable all SCC interrupts. */ quicc_write4(sc->sc_rres, QUICC_REG_SIMR_L, 0x00f00000); /* Clear all pending interrupts. 
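Writing all-ones to both SIPNR halves presumably acknowledges whatever was latched while the controller was quiescent, so the first interrupt taken after attach reflects fresh events only.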
*/ quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_H, ~0); quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_L, ~0); return (error); } int quicc_bfe_detach(device_t dev) { struct quicc_softc *sc; sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (0); } int quicc_bfe_probe(device_t dev, u_int clock) { struct quicc_softc *sc; uint16_t rev; sc = device_get_softc(dev); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, "Quad integrated communications controller"); sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_MEMORY; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_IOPORT; sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } sc->sc_clock = clock; /* * Check that the microcode revision is 0x00e8, as documented * in the MPC8555E PowerQUICC III Integrated Processor Family * Reference Manual. */ rev = quicc_read2(sc->sc_rres, QUICC_PRAM_REV_NUM); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((rev == 0x00e8) ? BUS_PROBE_DEFAULT : ENXIO); } struct resource * quicc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (NULL); /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, *rid); if (rle == NULL) return (NULL); if (rle->res == NULL) { rle->res = rman_reserve_resource(qd->qd_rman, rle->start, rle->start + rle->count - 1, rle->count, flags, child); if (rle->res != NULL) { rman_set_bustag(rle->res, &bs_be_tag); rman_set_bushandle(rle->res, rle->start); } } return (rle->res); } int quicc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, rid); if (rle == NULL) return (EINVAL); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } int quicc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct quicc_device *qd; struct quicc_softc *sc; uint32_t sccr; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); qd = device_get_ivars(child); switch (index) { case QUICC_IVAR_CLOCK: *result = sc->sc_clock; break; case QUICC_IVAR_BRGCLK: sccr = quicc_read4(sc->sc_rres, QUICC_REG_SCCR) & 3; *result = sc->sc_clock / ((1 << (sccr + 1)) << sccr); break; case QUICC_IVAR_DEVTYPE: *result = qd->qd_devtype; break; default: return (EINVAL); } return (0); } int -quicc_bus_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +quicc_bus_release_resource(device_t dev, device_t child, struct resource *res) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); - rle = resource_list_find(&qd->qd_rlist, type, rid); + rle = resource_list_find(&qd->qd_rlist, rman_get_type(res), + rman_get_rid(res)); 
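/* With the reduced method signature the type and rid are no longer passed in by the caller; rman_get_type() and rman_get_rid() recover them from the resource itself in the lookup above, so a type/rid pair that disagrees with the resource can no longer be supplied. */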
return ((rle == NULL) ? EINVAL : 0); } int quicc_bus_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct quicc_device *qd; struct quicc_softc *sc; if (device_get_parent(child) != dev) return (EINVAL); /* Interrupt handlers must be FAST or MPSAFE. */ if (filt == NULL && !(flags & INTR_MPSAFE)) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_polled) return (ENXIO); if (sc->sc_fastintr && filt == NULL) { sc->sc_fastintr = 0; bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } qd = device_get_ivars(child); qd->qd_ih = (filt != NULL) ? filt : (driver_filter_t *)ihand; qd->qd_ih_arg = arg; *cookiep = ihand; return (0); } int quicc_bus_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct quicc_device *qd; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); if (qd->qd_ih != cookie) return (EINVAL); qd->qd_ih = NULL; qd->qd_ih_arg = NULL; return (0); } diff --git a/sys/dev/scc/scc_bfe.h b/sys/dev/scc/scc_bfe.h index 63d8fb23319e..b743d0655cab 100644 --- a/sys/dev/scc/scc_bfe.h +++ b/sys/dev/scc/scc_bfe.h @@ -1,153 +1,153 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004-2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _DEV_SCC_BFE_H_ #define _DEV_SCC_BFE_H_ #include /* * Bus access structure. This structure holds the minimum information needed * to access the SCC. The rclk field, although not important to actually * access the SCC, is important for baudrate programming, delay loops and * other timing related computations. */ struct scc_bas { bus_space_tag_t bst; bus_space_handle_t bsh; u_int range; u_int rclk; u_int regshft; }; #define scc_regofs(bas, reg) ((reg) << (bas)->regshft) #define scc_getreg(bas, reg) \ bus_space_read_1((bas)->bst, (bas)->bsh, scc_regofs(bas, reg)) #define scc_setreg(bas, reg, value) \ bus_space_write_1((bas)->bst, (bas)->bsh, scc_regofs(bas, reg), value) #define scc_barrier(bas) \ bus_space_barrier((bas)->bst, (bas)->bsh, 0, (bas)->range, \ BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE) /* * SCC mode (child) and channel control structures. 
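* Each channel can expose up to SCC_NMODES personalities; one scc_mode per personality records its child device, probe/attach state and interrupt plumbing, while scc_chan owns the shared register window and IRQ.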
*/ #define SCC_NMODES 3 #define SCC_ISRCCNT 5 struct scc_chan; struct scc_mode { struct scc_chan *m_chan; device_t m_dev; u_int m_mode; bool m_attached:1; bool m_fastintr:1; bool m_hasintr:1; bool m_probed:1; bool m_sysdev:1; driver_filter_t *ih; serdev_intr_t *ih_src[SCC_ISRCCNT]; void *ih_arg; }; struct scc_chan { struct resource ch_rres; struct resource_list ch_rlist; struct resource *ch_ires; /* Interrupt resource. */ void *ch_icookie; int ch_irid; struct scc_mode ch_mode[SCC_NMODES]; u_int ch_nr; bool ch_enabled:1; bool ch_sysdev:1; uint32_t ch_ipend; uint32_t ch_hwsig; }; /* * SCC class & instance (=softc) */ struct scc_class { KOBJ_CLASS_FIELDS; u_int cl_channels; /* Number of independent channels. */ u_int cl_class; /* SCC bus class ID. */ u_int cl_modes; /* Supported modes (bitset). */ int cl_range; }; extern struct scc_class scc_quicc_class; extern struct scc_class scc_z8530_escc_class; extern struct scc_class scc_z8530_legacy_class; struct scc_softc { KOBJ_FIELDS; struct scc_class *sc_class; struct scc_bas sc_bas; device_t sc_dev; struct mtx sc_hwmtx; /* Spinlock protecting hardware. */ struct resource *sc_rres; /* Register resource. */ int sc_rrid; int sc_rtype; /* SYS_RES_{IOPORT|MEMORY}. */ struct scc_chan *sc_chan; bool sc_fastintr:1; bool sc_leaving:1; bool sc_polled:1; uint32_t sc_hwsig; /* Signal state. Used by HW driver. */ }; extern const char scc_driver_name[]; int scc_bfe_attach(device_t dev, u_int ipc); int scc_bfe_detach(device_t dev); int scc_bfe_probe(device_t dev, u_int regshft, u_int rclk, u_int rid); struct resource *scc_bus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); int scc_bus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); int scc_bus_read_ivar(device_t, device_t, int, uintptr_t *); -int scc_bus_release_resource(device_t, device_t, int, int, struct resource *); +int scc_bus_release_resource(device_t, device_t, struct resource *); int scc_bus_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, void (*)(void *), void *, void **); int scc_bus_teardown_intr(device_t, device_t, struct resource *, void *); #endif /* _DEV_SCC_BFE_H_ */ diff --git a/sys/dev/scc/scc_core.c b/sys/dev/scc/scc_core.c index dda2b505a605..376cc25d11b4 100644 --- a/sys/dev/scc/scc_core.c +++ b/sys/dev/scc/scc_core.c @@ -1,584 +1,584 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004-2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "scc_if.h" const char scc_driver_name[] = "scc"; static MALLOC_DEFINE(M_SCC, "SCC", "SCC driver"); static int scc_bfe_intr(void *arg) { struct scc_softc *sc = arg; struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; int c, i, ipend, isrc; cl = sc->sc_class; while (!sc->sc_leaving && (ipend = SCC_IPEND(sc)) != 0) { i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < SCC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < SCC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (!(ch->ch_ipend & isrc)) continue; m = &ch->ch_mode[0]; if (m->ih_src[i] == NULL) continue; if ((*m->ih_src[i])(m->ih_arg)) ch->ch_ipend &= ~isrc; } } for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (!ch->ch_ipend) continue; m = &ch->ch_mode[0]; if (m->ih != NULL) (*m->ih)(m->ih_arg); else SCC_ICLEAR(sc, ch); } return (FILTER_HANDLED); } return (FILTER_STRAY); } int scc_bfe_attach(device_t dev, u_int ipc) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc, *sc0; const char *sep; bus_space_handle_t bh; rman_res_t base, size, start, sz; int c, error, mode, sysdev; /* * The sc_class field defines the type of SCC we're going to work * with and thus the size of the softc. Replace the generic softc * with one that matches the SCC now that we're certain we handle * the device. */ sc0 = device_get_softc(dev); cl = sc0->sc_class; if (cl->size > sizeof(*sc)) { sc = malloc(cl->size, M_SCC, M_WAITOK|M_ZERO); bcopy(sc0, sc, sizeof(*sc)); device_set_softc(dev, sc); } else sc = sc0; size = abs(cl->cl_range) << sc->sc_bas.regshft; mtx_init(&sc->sc_hwmtx, "scc_hwmtx", NULL, MTX_SPIN); /* * Re-allocate. We expect that the softc contains the information * collected by scc_bfe_probe() intact. */ sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* * Allocate interrupt resources. There may be a different interrupt * per channel. We allocate them all... */ sc->sc_chan = malloc(sizeof(struct scc_chan) * cl->cl_channels, M_SCC, M_WAITOK | M_ZERO); for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; /* * XXX temporary hack. If we have more than 1 interrupt * per channel, allocate the first for the channel. At * this time only the macio bus front-end has more than * 1 interrupt per channel and we don't use the 2nd and * 3rd, because we don't support DMA yet. */ ch->ch_irid = c * ipc; ch->ch_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ch->ch_irid, RF_ACTIVE | RF_SHAREABLE); if (ipc == 0) break; } /* * Create the control structures for our children. Probe devices * and query them to see if we can reset the hardware. 
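* A channel that hosts a system device (a console, for instance) must keep running across attach, so the hardware reset is skipped whenever any probed child reports itself as a system device.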
*/ sysdev = 0; base = rman_get_start(sc->sc_rres); sz = (size != 0) ? size : rman_get_size(sc->sc_rres); start = base + ((cl->cl_range < 0) ? size * (cl->cl_channels - 1) : 0); for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; resource_list_init(&ch->ch_rlist); ch->ch_nr = c + 1; if (!SCC_ENABLED(sc, ch)) goto next; ch->ch_enabled = 1; resource_list_add(&ch->ch_rlist, sc->sc_rtype, 0, start, start + sz - 1, sz); rle = resource_list_find(&ch->ch_rlist, sc->sc_rtype, 0); rle->res = &ch->ch_rres; bus_space_subregion(rman_get_bustag(sc->sc_rres), rman_get_bushandle(sc->sc_rres), start - base, sz, &bh); rman_set_bushandle(rle->res, bh); rman_set_bustag(rle->res, rman_get_bustag(sc->sc_rres)); resource_list_add(&ch->ch_rlist, SYS_RES_IRQ, 0, c, c, 1); rle = resource_list_find(&ch->ch_rlist, SYS_RES_IRQ, 0); rle->res = (ch->ch_ires != NULL) ? ch->ch_ires : sc->sc_chan[0].ch_ires; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; m->m_chan = ch; m->m_mode = 1U << mode; if ((cl->cl_modes & m->m_mode) == 0 || ch->ch_sysdev) continue; m->m_dev = device_add_child(dev, NULL, -1); device_set_ivars(m->m_dev, (void *)m); error = device_probe_child(dev, m->m_dev); if (!error) { m->m_probed = 1; m->m_sysdev = SERDEV_SYSDEV(m->m_dev) ? 1 : 0; ch->ch_sysdev |= m->m_sysdev; } } next: start += (cl->cl_range < 0) ? -size : size; sysdev |= ch->ch_sysdev; } /* * Have the hardware driver initialize the hardware. Tell it * whether or not a hardware reset should be performed. */ if (bootverbose) { device_printf(dev, "%sresetting hardware\n", (sysdev) ? "not " : ""); } error = SCC_ATTACH(sc, !sysdev); if (error) goto fail; /* * Setup our interrupt handler. Make it FAST under the assumption * that our children's are fast as well. We make it MPSAFE as soon * as a child sets up a MPSAFE interrupt handler. * Of course, if we can't setup a fast handler, we make it MPSAFE * right away. */ for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; error = bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY, scc_bfe_intr, NULL, sc, &ch->ch_icookie); if (error) { error = bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)scc_bfe_intr, sc, &ch->ch_icookie); } else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); ch->ch_ires = NULL; } } sc->sc_polled = 1; for (c = 0; c < cl->cl_channels; c++) { if (sc->sc_chan[0].ch_ires != NULL) sc->sc_polled = 0; } /* * Attach all child devices that were probed successfully. 
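* Only modes marked m_probed by the earlier pass are attached here; an attach failure simply leaves that mode unattached instead of failing the whole controller.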
*/ for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; if (!m->m_probed) continue; error = device_attach(m->m_dev); if (error) continue; m->m_attached = 1; } } if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode", sep); sep = ", "; } printf("\n"); } return (0); fail: for (c = 0; c < cl->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (error); } int scc_bfe_detach(device_t dev) { struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc; int chan, error, mode; sc = device_get_softc(dev); cl = sc->sc_class; /* Detach our children. */ error = 0; for (chan = 0; chan < cl->cl_channels; chan++) { ch = &sc->sc_chan[chan]; for (mode = 0; mode < SCC_NMODES; mode++) { m = &ch->ch_mode[mode]; if (!m->m_attached) continue; if (device_detach(m->m_dev) != 0) error = ENXIO; else m->m_attached = 0; } } if (error) return (error); for (chan = 0; chan < cl->cl_channels; chan++) { ch = &sc->sc_chan[chan]; if (ch->ch_ires == NULL) continue; bus_teardown_intr(dev, ch->ch_ires, ch->ch_icookie); bus_release_resource(dev, SYS_RES_IRQ, ch->ch_irid, ch->ch_ires); } bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); free(sc->sc_chan, M_SCC); mtx_destroy(&sc->sc_hwmtx); return (0); } int scc_bfe_probe(device_t dev, u_int regshft, u_int rclk, u_int rid) { struct scc_softc *sc; struct scc_class *cl; u_long size, sz; int error; /* * Initialize the instance. Note that the instance (=softc) does * not necessarily match the hardware specific softc. We can't do * anything about it now, because we may not attach to the device. * Hardware drivers cannot use any of the class specific fields * while probing. */ sc = device_get_softc(dev); cl = sc->sc_class; kobj_init((kobj_t)sc, (kobj_class_t)cl); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, cl->name); size = abs(cl->cl_range) << regshft; /* * Allocate the register resource. We assume that all SCCs have a * single register window in either I/O port space or memory mapped * I/O space. Any SCC that needs multiple windows will consequently * not be supported by this driver as-is. */ sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_MEMORY; sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = rid; sc->sc_rtype = SYS_RES_IOPORT; sc->sc_rres = bus_alloc_resource_anywhere(dev, sc->sc_rtype, &sc->sc_rrid, cl->cl_channels * size, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } /* * Fill in the bus access structure and call the hardware specific * probe method. */ sz = (size != 0) ? size : rman_get_size(sc->sc_rres); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); sc->sc_bas.range = sz; sc->sc_bas.rclk = rclk; sc->sc_bas.regshft = regshft; error = SCC_PROBE(sc); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((error == 0) ? 
BUS_PROBE_DEFAULT : error); } struct resource * scc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (NULL); /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); m = device_get_ivars(child); ch = m->m_chan; rle = resource_list_find(&ch->ch_rlist, type, 0); if (rle == NULL) return (NULL); *rid = 0; return (rle->res); } int scc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); ch = m->m_chan; rle = resource_list_find(&ch->ch_rlist, type, rid); if (rle == NULL) return (EINVAL); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } int scc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct scc_chan *ch; struct scc_class *cl; struct scc_mode *m; struct scc_softc *sc; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); cl = sc->sc_class; m = device_get_ivars(child); ch = m->m_chan; switch (index) { case SCC_IVAR_CHANNEL: *result = ch->ch_nr; break; case SCC_IVAR_CLASS: *result = cl->cl_class; break; case SCC_IVAR_CLOCK: *result = sc->sc_bas.rclk; break; case SCC_IVAR_MODE: *result = m->m_mode; break; case SCC_IVAR_REGSHFT: *result = sc->sc_bas.regshft; break; case SCC_IVAR_HWMTX: *result = (uintptr_t)&sc->sc_hwmtx; break; default: return (EINVAL); } return (0); } int -scc_bus_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +scc_bus_release_resource(device_t dev, device_t child, struct resource *res) { struct resource_list_entry *rle; struct scc_chan *ch; struct scc_mode *m; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); ch = m->m_chan; - rle = resource_list_find(&ch->ch_rlist, type, rid); + rle = resource_list_find(&ch->ch_rlist, rman_get_type(res), + rman_get_rid(res)); return ((rle == NULL) ? EINVAL : 0); } int scc_bus_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct scc_chan *ch; struct scc_mode *m; struct scc_softc *sc; int c, i, isrc; if (device_get_parent(child) != dev) return (EINVAL); /* Interrupt handlers must be FAST or MPSAFE. */ if (filt == NULL && !(flags & INTR_MPSAFE)) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_polled) return (ENXIO); if (sc->sc_fastintr && filt == NULL) { sc->sc_fastintr = 0; for (c = 0; c < sc->sc_class->cl_channels; c++) { ch = &sc->sc_chan[c]; if (ch->ch_ires == NULL) continue; bus_teardown_intr(dev, ch->ch_ires, ch->ch_icookie); bus_setup_intr(dev, ch->ch_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)scc_bfe_intr, sc, &ch->ch_icookie); } } m = device_get_ivars(child); m->m_hasintr = 1; m->m_fastintr = (filt != NULL) ? 1 : 0; m->ih = (filt != NULL) ? 
filt : (driver_filter_t *)ihand; m->ih_arg = arg; i = 0, isrc = SER_INT_OVERRUN; while (i < SCC_ISRCCNT) { m->ih_src[i] = SERDEV_IHAND(child, isrc); if (m->ih_src[i] != NULL) m->ih = NULL; i++, isrc <<= 1; } return (0); } int scc_bus_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct scc_mode *m; int i; if (device_get_parent(child) != dev) return (EINVAL); m = device_get_ivars(child); if (!m->m_hasintr) return (EINVAL); m->m_hasintr = 0; m->m_fastintr = 0; m->ih = NULL; m->ih_arg = NULL; for (i = 0; i < SCC_ISRCCNT; i++) m->ih_src[i] = NULL; return (0); } diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c index e6e8bfa3515b..54ef7ff440aa 100644 --- a/sys/dev/siis/siis.c +++ b/sys/dev/siis/siis.c @@ -1,1987 +1,1986 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "siis.h" #include #include #include #include #include /* local prototypes */ static int siis_setup_interrupt(device_t dev); static void siis_intr(void *data); static int siis_suspend(device_t dev); static int siis_resume(device_t dev); static int siis_ch_init(device_t dev); static int siis_ch_deinit(device_t dev); static int siis_ch_suspend(device_t dev); static int siis_ch_resume(device_t dev); static void siis_ch_intr_locked(void *data); static void siis_ch_intr(void *data); static void siis_ch_led(void *priv, int onoff); static void siis_begin_transaction(device_t dev, union ccb *ccb); static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void siis_execute_transaction(struct siis_slot *slot); static void siis_timeout(void *arg); static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et); static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag); static void siis_dmainit(device_t dev); static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void siis_dmafini(device_t dev); static void siis_slotsalloc(device_t dev); static void siis_slotsfree(device_t dev); static void siis_reset(device_t dev); static void siis_portinit(device_t dev); static int siis_wait_ready(device_t dev, int t); static int siis_sata_connect(struct siis_channel *ch); static void siis_issue_recovery(device_t dev); static void siis_process_read_log(device_t dev, union ccb *ccb); static void siis_process_request_sense(device_t dev, union ccb *ccb); static void siisaction(struct cam_sim *sim, union ccb *ccb); static void siispoll(struct cam_sim *sim); static MALLOC_DEFINE(M_SIIS, "SIIS driver", "SIIS driver data buffers"); static struct { uint32_t id; const char *name; int ports; int quirks; #define SIIS_Q_SNTF 1 #define SIIS_Q_NOMSI 2 } siis_ids[] = { {0x31241095, "SiI3124", 4, 0}, {0x31248086, "SiI3124", 4, 0}, {0x31321095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02421095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02441095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x31311095, "SiI3131", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x35311095, "SiI3531", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0, NULL, 0, 0} }; #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static int siis_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) { snprintf(buf, sizeof(buf), "%s SATA controller", siis_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int siis_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); uint32_t devid = pci_get_devid(dev); device_t child; int error, i, unit; ctlr->dev = dev; for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) break; } ctlr->quirks = siis_ids[i].quirks; /* Global memory */ ctlr->r_grid = PCIR_BAR(0); if (!(ctlr->r_gmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_grid, RF_ACTIVE))) return (ENXIO); ctlr->gctl = ATA_INL(ctlr->r_gmem, SIIS_GCTL); /* Channels memory */ ctlr->r_rid = PCIR_BAR(2); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return 
(ENXIO); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); /* Reset controller */ siis_resume(dev); /* Number of HW channels */ ctlr->channels = siis_ids[i].ports; /* Setup interrupts. */ if (siis_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "siisch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int siis_detach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (0); } static int siis_suspend(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return 0; } static int siis_resume(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Set PCIe max read request size to at least 1024 bytes */ if (pci_get_max_read_req(dev) < 1024) pci_set_max_read_req(dev, 1024); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); DELAY(10000); /* Get controller out of reset state and enable port interrupts. */ ctlr->gctl &= ~(SIIS_GCTL_GRESET | SIIS_GCTL_I2C_IE); ctlr->gctl |= 0x0000000f; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return (bus_generic_resume(dev)); } static int siis_setup_interrupt(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); int msi = ctlr->quirks & SIIS_Q_NOMSI ? 0 : 1; /* Process hints. */ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 
1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, siis_intr, ctlr, &ctlr->irq.handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } return (0); } /* * Common case interrupt handler. */ static void siis_intr(void *data) { struct siis_controller *ctlr = (struct siis_controller *)data; u_int32_t is; void *arg; int unit; is = ATA_INL(ctlr->r_gmem, SIIS_IS); for (unit = 0; unit < ctlr->channels; unit++) { if ((is & SIIS_IS_PORT(unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* Acknowledge interrupt, if MSI enabled. */ if (ctlr->irq.r_irq_rid) { ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl | SIIS_GCTL_MSIACK); } } static struct resource * siis_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct siis_controller *ctlr = device_get_softc(dev); int unit = ((struct siis_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = unit << 13; rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + 0x2000, 0x2000, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 0x2000, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int -siis_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +siis_release_resource(device_t dev, device_t child, struct resource *r) { - switch (type) { + switch (rman_get_type(r)) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: - if (rid != ATA_IRQ_RID) + if (rman_get_rid(r) != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int siis_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("siis.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int siis_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int siis_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int siis_child_location(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t siis_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t siis_methods[] = { 
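/* Note: bus_release_resource maps to the three-argument siis_release_resource above, which derives the type and rid from the resource via rman_get_type() and rman_get_rid(). */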
DEVMETHOD(device_probe, siis_probe), DEVMETHOD(device_attach, siis_attach), DEVMETHOD(device_detach, siis_detach), DEVMETHOD(device_suspend, siis_suspend), DEVMETHOD(device_resume, siis_resume), DEVMETHOD(bus_print_child, siis_print_child), DEVMETHOD(bus_alloc_resource, siis_alloc_resource), DEVMETHOD(bus_release_resource, siis_release_resource), DEVMETHOD(bus_setup_intr, siis_setup_intr), DEVMETHOD(bus_teardown_intr,siis_teardown_intr), DEVMETHOD(bus_child_location, siis_child_location), DEVMETHOD(bus_get_dma_tag, siis_get_dma_tag), { 0, 0 } }; static driver_t siis_driver = { "siis", siis_methods, sizeof(struct siis_controller) }; DRIVER_MODULE(siis, pci, siis_driver, 0, 0); MODULE_VERSION(siis, 1); MODULE_DEPEND(siis, cam, 1, 1, 1); static int siis_ch_probe(device_t dev) { device_set_desc_copy(dev, "SIIS channel"); return (BUS_PROBE_DEFAULT); } static int siis_ch_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(device_get_parent(dev)); struct siis_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->quirks = ctlr->quirks; ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = SIIS_MAX_SLOTS; ch->curr[i] = ch->user[i]; if (ch->pm_level) ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ; ch->user[i].caps |= CTS_SATA_CAPS_H_AN; } mtx_init(&ch->mtx, "SIIS channel lock", NULL, MTX_DEF); rid = ch->unit; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); siis_dmainit(dev); siis_slotsalloc(dev); siis_ch_init(dev); mtx_lock(&ch->mtx); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, siis_ch_intr_locked, dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. 
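The queue is sized to SIIS_MAX_SLOTS so CAM can keep every command slot on the channel in flight at once.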
*/ devq = cam_simq_alloc(SIIS_MAX_SLOTS); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(siisaction, siispoll, "siisch", ch, device_get_unit(dev), &ch->mtx, 2, SIIS_MAX_SLOTS, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } mtx_unlock(&ch->mtx); ch->led = led_create(siis_ch_led, dev, device_get_nameunit(dev)); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int siis_ch_detach(device_t dev) { struct siis_channel *ch = device_get_softc(dev); led_destroy(ch->led); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); siis_ch_deinit(dev); siis_slotsfree(dev); siis_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int siis_ch_init(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); return (0); } static int siis_ch_deinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Put port into reset state. 
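 * As an aside, the attach error path and siis_ch_detach() above still
 * release their resources with the legacy four-argument
 * bus_release_resource().  Under the two-argument form introduced by
 * this change, which derives the type and rid from the resource itself,
 * the same cleanup could be written as the following sketch:
 *
 *	bus_release_resource(dev, ch->r_irq);
 *	bus_release_resource(dev, ch->r_mem);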
*/ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); return (0); } static int siis_ch_suspend(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "siissusp", hz/100); siis_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int siis_ch_resume(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_init(dev); siis_reset(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } static device_method_t siisch_methods[] = { DEVMETHOD(device_probe, siis_ch_probe), DEVMETHOD(device_attach, siis_ch_attach), DEVMETHOD(device_detach, siis_ch_detach), DEVMETHOD(device_suspend, siis_ch_suspend), DEVMETHOD(device_resume, siis_ch_resume), { 0, 0 } }; static driver_t siisch_driver = { "siisch", siisch_methods, sizeof(struct siis_channel) }; DRIVER_MODULE(siisch, siis, siisch_driver, 0, 0); static void siis_ch_led(void *priv, int onoff) { device_t dev; struct siis_channel *ch; dev = (device_t)priv; ch = device_get_softc(dev); if (onoff == 0) ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_LED_ON); else ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_LED_ON); } struct siis_dc_cb_args { bus_addr_t maddr; int error; }; static void siis_dmainit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct siis_dc_cb_args dcba; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_WORK_SIZE, 1, SIIS_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, SIIS_WORK_SIZE, siis_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* Data area. 
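 * The work area above follows the usual busdma pattern: create a tag,
 * allocate DMA-able memory, then bus_dmamap_load() it with a callback
 * (siis_dmasetupc_cb()) that captures the bus address.  The data tag
 * created below permits up to SIIS_SG_ENTRIES segments per transfer and
 * serializes deferred callbacks through the channel mutex via
 * busdma_lock_mutex.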
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_SG_ENTRIES * PAGE_SIZE, SIIS_SG_ENTRIES, 0xFFFFFFFF, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); siis_dmafini(dev); } static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_dc_cb_args *dcba = (struct siis_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void siis_dmafini(device_t dev) { struct siis_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work_map = NULL; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void siis_slotsalloc(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; slot->dev = dev; slot->slot = i; slot->state = SIIS_SLOT_EMPTY; slot->prb_offset = SIIS_PRB_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void siis_slotsfree(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static void siis_notify_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct cam_path *dpath; u_int32_t status; int i; if (ch->quirks & SIIS_Q_SNTF) { status = ATA_INL(ch->r_mem, SIIS_P_SNTF); ATA_OUTL(ch->r_mem, SIIS_P_SNTF, status); } else { /* * Without SNTF we have no idea which device sent notification. * If PMP is connected, assume it, else - device. */ status = (ch->pm_present) ? 
0x8000 : 0x0001; } if (bootverbose) device_printf(dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void siis_phy_check_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* If we have a connection event, deal with it */ if (ch->pm_level == 0) { u_int32_t status = ATA_INL(ch->r_mem, SIIS_P_SSTS); union ccb *ccb; if (bootverbose) { if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) { device_printf(dev, "CONNECT requested\n"); } else device_printf(dev, "DISCONNECT requested\n"); } siis_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } } static void siis_ch_intr_locked(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_intr(data); mtx_unlock(&ch->mtx); } static void siis_ch_intr(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); uint32_t istatus, sstatus, ctx, estatus, ok; enum siis_err_type et; int i, ccs, port, tslots; mtx_assert(&ch->mtx, MA_OWNED); /* Read command statuses. */ sstatus = ATA_INL(ch->r_mem, SIIS_P_SS); ok = ch->rslots & ~sstatus; /* Complete all successful commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if ((ok >> i) & 1) siis_end_transaction(&ch->slot[i], SIIS_ERR_NONE); } /* Do we have any other events? */ if ((sstatus & SIIS_P_SS_ATTN) == 0) return; /* Read and clear interrupt statuses. */ istatus = ATA_INL(ch->r_mem, SIIS_P_IS) & (0xFFFF & ~SIIS_P_IX_COMMCOMP); ATA_OUTL(ch->r_mem, SIIS_P_IS, istatus); /* Process PHY events */ if (istatus & SIIS_P_IX_PHYRDYCHG) siis_phy_check_events(dev); /* Process NOTIFY events */ if (istatus & SIIS_P_IX_SDBN) siis_notify_events(dev); /* Process command errors */ if (istatus & SIIS_P_IX_COMMERR) { estatus = ATA_INL(ch->r_mem, SIIS_P_CMDERR); ctx = ATA_INL(ch->r_mem, SIIS_P_CTX); ccs = (ctx & SIIS_P_CTX_SLOT) >> SIIS_P_CTX_SLOT_SHIFT; port = (ctx & SIIS_P_CTX_PMP) >> SIIS_P_CTX_PMP_SHIFT; //device_printf(dev, "%s ERROR ss %08x is %08x rs %08x es %d act %d port %d serr %08x\n", // __func__, sstatus, istatus, ch->rslots, estatus, ccs, port, // ATA_INL(ch->r_mem, SIIS_P_SERR)); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } if (estatus == SIIS_P_CMDERR_DEV || estatus == SIIS_P_CMDERR_SDB || estatus == SIIS_P_CMDERR_DATAFIS) { tslots = ch->numtslots[port]; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; if (ch->slot[i].ccb->ccb_h.target_id != port) continue; if (tslots == 0) { /* Untagged operation. */ if (i == ccs) et = SIIS_ERR_TFE; else et = SIIS_ERR_INNOCENT; } else { /* Tagged operation. 
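 * A tagged (NCQ) error does not identify the failing command directly:
 * every outstanding tagged request is completed as SIIS_ERR_NCQ and put
 * on hold, and siis_process_read_log() later picks out the real victim
 * using READ LOG EXT.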
*/ et = SIIS_ERR_NCQ; } siis_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_RESUME); } else { if (estatus == SIIS_P_CMDERR_SENDFIS || estatus == SIIS_P_CMDERR_INCSTATE || estatus == SIIS_P_CMDERR_PPE || estatus == SIIS_P_CMDERR_SERVICE) { et = SIIS_ERR_SATA; } else et = SIIS_ERR_INVALID; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; siis_end_transaction(&ch->slot[i], et); } } } } /* Must be called with channel locked. */ static int siis_check_collision(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0x7fffffff >> (31 - ch->curr[ccb->ccb_h.target_id].tags))) == 0) return (1); } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void siis_begin_transaction(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); struct siis_slot *slot; int tag, tags; mtx_assert(&ch->mtx, MA_OWNED); /* Choose empty slot. */ tags = SIIS_MAX_SLOTS; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; tag = fls((~ch->oslots) & (0x7fffffff >> (31 - tags))) - 1; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Update channel stats. */ ch->oslots |= (1 << slot->slot); ch->numrslots++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]++; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; /* If request moves data, setup and load SG list */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = SIIS_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, siis_dmasetprd, slot, 0); } else siis_execute_transaction(slot); } /* Locked by busdma engine. 
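 * (In siis_begin_transaction() above, the mask 0x7fffffff >> (31 - tags)
 * covers slots 0..tags-1, and fls() on the complement of ch->oslots
 * selects the highest-numbered free slot; e.g. with tags = 31 and
 * oslots = 0x00000007, slot 30 is chosen.)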
*/ static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_slot *slot = arg; struct siis_channel *ch = device_get_softc(slot->dev); struct siis_cmd *ctp; struct siis_dma_prd *prd; int i; mtx_assert(&ch->mtx, MA_OWNED); if (error) { device_printf(slot->dev, "DMA load error\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } KASSERT(nsegs <= SIIS_SG_ENTRIES, ("too many DMA segment entries\n")); slot->dma.nsegs = nsegs; if (nsegs != 0) { /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); /* Fill S/G table */ if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) prd = &ctp->u.ata.prd[0]; else prd = &ctp->u.atapi.prd[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32(segs[i].ds_len); prd[i].control = 0; } prd[nsegs - 1].control = htole32(SIIS_PRD_TRM); bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } siis_execute_transaction(slot); } /* Must be called with channel locked. */ static void siis_execute_transaction(struct siis_slot *slot) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); struct siis_cmd *ctp; union ccb *ccb = slot->ccb; u_int64_t prb_bus; mtx_assert(&ch->mtx, MA_OWNED); /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); ctp->control = 0; ctp->protocol_override = 0; ctp->transfer_count = 0; /* Special handling for Soft Reset command. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { if (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) { ctp->control |= htole16(SIIS_PRB_SOFT_RESET); } else { ctp->control |= htole16(SIIS_PRB_PROTOCOL_OVERRIDE); if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_NCQ); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_READ); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_WRITE); } } } else if (ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ctp->control |= htole16(SIIS_PRB_PACKET_READ); else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ctp->control |= htole16(SIIS_PRB_PACKET_WRITE); } /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { /* Kick controller into sane state */ siis_portinit(dev); } /* Setup the FIS for this request */ if (!siis_setup_fis(dev, ctp, ccb, slot->slot)) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREWRITE); /* Issue command to the controller. */ slot->state = SIIS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); prb_bus = ch->dma.work_bus + slot->prb_offset; ATA_OUTL(ch->r_mem, SIIS_P_CACTL(slot->slot), prb_bus); ATA_OUTL(ch->r_mem, SIIS_P_CACTH(slot->slot), prb_bus >> 32); /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, siis_timeout, slot, 0); return; } /* Must be called with channel locked. 
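 * Each PRD entry built in siis_dmasetprd() above carries a 64-bit bus
 * address (dba), a byte count (dbc) and a control word; SIIS_PRD_TRM in
 * the control word of the final entry terminates the scatter/gather
 * list.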
*/ static void siis_process_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } /* Handle the rest of commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; siis_end_transaction(&ch->slot[i], SIIS_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void siis_rearm_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < SIIS_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout, 0, siis_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void siis_timeout(void *arg) { struct siis_slot *slot = arg; device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; mtx_assert(&ch->mtx, MA_OWNED); /* Check for stale timeout. */ if (slot->state < SIIS_SLOT_RUNNING) return; /* Handle soft-reset timeouts without doing hard-reset. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { xpt_freeze_simq(ch->sim, ch->numrslots); siis_end_transaction(slot, SIIS_ERR_TFE); return; } device_printf(dev, "Timeout on slot %d\n", slot->slot); device_printf(dev, "%s is %08x ss %08x rs %08x es %08x sts %08x serr %08x\n", __func__, ATA_INL(ch->r_mem, SIIS_P_IS), ATA_INL(ch->r_mem, SIIS_P_SS), ch->rslots, ATA_INL(ch->r_mem, SIIS_P_CMDERR), ATA_INL(ch->r_mem, SIIS_P_STS), ATA_INL(ch->r_mem, SIIS_P_SERR)); if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) siis_process_timeout(dev); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } /* Must be called with channel locked. */ static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; int lastto; mtx_assert(&ch->mtx, MA_OWNED); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTWRITE); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. 
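 * The per-slot LRAM window SIIS_P_LRAM_SLOT(slot) holds the received
 * transfer count at offset 4 and a shadow of the completed command's
 * task file in the bytes starting at offset 8, which is what the reads
 * below decode.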
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == SIIS_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { int offs = SIIS_P_LRAM_SLOT(slot->slot) + 8; res->status = ATA_INB(ch->r_mem, offs + 2); res->error = ATA_INB(ch->r_mem, offs + 3); res->lba_low = ATA_INB(ch->r_mem, offs + 4); res->lba_mid = ATA_INB(ch->r_mem, offs + 5); res->lba_high = ATA_INB(ch->r_mem, offs + 6); res->device = ATA_INB(ch->r_mem, offs + 7); res->lba_low_exp = ATA_INB(ch->r_mem, offs + 8); res->lba_mid_exp = ATA_INB(ch->r_mem, offs + 9); res->lba_high_exp = ATA_INB(ch->r_mem, offs + 10); res->sector_count = ATA_INB(ch->r_mem, offs + 12); res->sector_count_exp = ATA_INB(ch->r_mem, offs + 13); } else bzero(res, sizeof(*res)); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->ataio.resid = ccb->ataio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->csio.resid = ccb->csio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } /* Set proper result status. */ if (et != SIIS_ERR_NONE || ch->recovery) { ch->eslots |= (1 << slot->slot); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } /* In case of error, freeze device for proper recovery. */ if (et != SIIS_ERR_NONE && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case SIIS_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case SIIS_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case SIIS_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case SIIS_ERR_TFE: case SIIS_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case SIIS_ERR_SATA: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case SIIS_ERR_TIMEOUT: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = SIIS_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != SIIS_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { siis_process_read_log(dev, ccb); /* If it was our REQUEST SENSE command - process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { siis_process_request_sense(dev, ccb); /* If it was NCQ or ATAPI command error, put result on hold. 
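 * Held requests stay in ch->hold[] (counted by numhslots) until a
 * recovery command fills in their details: READ LOG EXT for NCQ errors,
 * REQUEST SENSE for ATAPI; see siis_issue_recovery().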
*/ } else if (et == SIIS_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else xpt_done(ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there were timeouts or fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { siis_reset(dev); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) siis_portinit(dev); /* if there commands on hold, we can do recovery. */ if (!ch->recoverycmd && ch->numhslots) siis_issue_recovery(dev); } /* If all the reset of commands are in timeout - abort them. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != SIIS_ERR_TIMEOUT) siis_rearm_timeout(dev); /* Unfreeze frozen command. */ if (ch->frozen && !siis_check_collision(dev, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; siis_begin_transaction(dev, fccb); xpt_release_simq(ch->sim, TRUE); } } static void siis_issue_recovery(device_t dev) { struct siis_channel *ch = device_get_softc(dev); union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i]) break; } if (i == SIIS_MAX_SLOTS) return; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } siis_reset(dev); return; } xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path, ch->hold[i]->ccb_h.pinfo.priority); if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_SIIS, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. 
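 * (The READ LOG branch above fetches one 512-byte sector of log page
 * 10h, the NCQ command error log; siis_process_read_log() then matches
 * the failing tag in bits 4:0 of byte 0 against the held requests.)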
*/ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } ch->recoverycmd = 1; siis_begin_transaction(dev, ccb); } static void siis_process_read_log(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_SIIS); xpt_free_ccb(ccb); } static void siis_process_request_sense(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); } static void siis_portinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; ch->eslots = 0; ch->recovery = 0; ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_RESUME); for (i = 0; i < 16; i++) { ATA_OUTL(ch->r_mem, SIIS_P_PMPSTS(i), 0), ATA_OUTL(ch->r_mem, SIIS_P_PMPQACT(i), 0); } ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_INIT); siis_wait_ready(dev, 1000); } static int siis_devreset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_DEV_RESET); while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_DEV_RESET) != 0) { DELAY(100); if (timeout++ > 1000) { device_printf(dev, "device reset stuck " "(timeout 100ms) status = %08x\n", val); return (EBUSY); } } return (0); } static int siis_wait_ready(device_t dev, int t) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_READY) == 0) { DELAY(1000); if (timeout++ > t) { device_printf(dev, "port is not ready (timeout %dms) " "status = %08x\n", t, val); return (EBUSY); } } return (0); } static void siis_reset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i, retry = 0, sata_rev; uint32_t val; xpt_freeze_simq(ch->sim, 1); if (bootverbose) 
device_printf(dev, "SIIS reset...\n"); if (!ch->recoverycmd && !ch->recovery) xpt_freeze_simq(ch->sim, ch->numrslots); /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } /* Requeue all running commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; /* XXX; Commands in loading state. */ siis_end_transaction(&ch->slot[i], SIIS_ERR_INNOCENT); } /* Finish all held commands as-is. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->recovery = 0; ch->toslots = 0; ch->fatalerr = 0; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IECLR, 0x0000FFFF); /* Set speed limit. */ sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, SIIS_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); retry: siis_devreset(dev); /* Reset and reconnect PHY, */ if (!siis_sata_connect(ch)) { ch->devices = 0; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: phy reset found no device\n"); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); return; } /* Wait for port ready status. */ if (siis_wait_ready(dev, 1000)) { device_printf(dev, "port ready timeout\n"); if (!retry) { device_printf(dev, "trying full port reset ...\n"); /* Get port to the reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); DELAY(10000); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); siis_wait_ready(dev, 5000); retry = 1; goto retry; } } ch->devices = 1; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IS, 0xFFFFFFFF); ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: devices=%08x\n", ch->devices); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); } static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag) { struct siis_channel *ch = device_get_softc(dev); u_int8_t *fis = &ctp->fis[0]; bzero(fis, 24); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bzero(ctp->u.atapi.ccb, 16); bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->u.atapi.ccb, ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; fis[12] = ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] &= 0x07; fis[12] |= tag << 3; } fis[13] = ccb->ataio.cmd.sector_count_exp; if (ccb->ataio.ata_flags & ATA_FLAG_ICC) fis[14] = ccb->ataio.icc; fis[15] = ATA_A_4BIT; if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } } else { /* Soft reset. */ } return (20); } static int siis_sata_connect(struct siis_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, SIIS_P_SERR, 0xffffffff); return (1); } static int siis_check_ids(device_t dev, union ccb *ccb) { if (ccb->ccb_h.target_id > 15) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } return (0); } static void siisaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct siis_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("siisaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct siis_channel *)cam_sim_softc(sim); dev = ch->dev; mtx_assert(&ch->mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (siis_check_ids(dev, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (siis_check_collision(dev, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. 
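 * The frozen request is retried from siis_end_transaction() as soon as
 * siis_check_collision() stops objecting, and the simq is released
 * again at that point.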
*/ xpt_freeze_simq(ch->sim, 1); return; } siis_begin_transaction(dev, ccb); return; case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(SIIS_MAX_SLOTS, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) { ch->pm_present = cts->xport_specific.sata.pm_present; if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; uint32_t status; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ch->quirks & SIIS_Q_SNTF) == 0) cts->xport_specific.sata.caps &= ~CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus 
*/ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ siis_reset(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_ATA_EXT; cpi->hba_eng_cnt = 0; cpi->max_target = 15; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "SIIS", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = maxphys; cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void siispoll(struct cam_sim *sim) { struct siis_channel *ch = (struct siis_channel *)cam_sim_softc(sim); siis_ch_intr(ch->dev); } diff --git a/sys/dev/sound/pci/csa.c b/sys/dev/sound/pci/csa.c index 7bb7967b74e7..7191f0cc4bf9 100644 --- a/sys/dev/sound/pci/csa.c +++ b/sys/dev/sound/pci/csa.c @@ -1,1108 +1,1106 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Seigo Tanimura * All rights reserved. * * Portions of this source are based on cwcealdr.cpp and dhwiface.cpp in * cwcealdr1.zip, the sample sources by Crystal Semiconductor. * Copyright (c) 1996-1998 Crystal Semiconductor Corp. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include /* This is the pci device id. 
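 * Each ID packs the 16-bit PCI device ID in the upper word and the
 * vendor ID (0x1013, Cirrus Logic) in the lower word, the same layout
 * pci_get_devid() returns and csa_findcard() matches against below.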
*/ #define CS4610_PCI_ID 0x60011013 #define CS4614_PCI_ID 0x60031013 #define CS4615_PCI_ID 0x60041013 /* Here is the parameter structure per a device. */ struct csa_softc { device_t dev; /* device */ csa_res res; /* resources */ device_t pcm; /* pcm device */ driver_intr_t* pcmintr; /* pcm intr */ void *pcmintr_arg; /* pcm intr arg */ device_t midi; /* midi device */ driver_intr_t* midiintr; /* midi intr */ void *midiintr_arg; /* midi intr arg */ void *ih; /* cookie */ struct csa_card *card; struct csa_bridgeinfo binfo; /* The state of this bridge. */ }; typedef struct csa_softc *sc_p; static int csa_probe(device_t dev); static int csa_attach(device_t dev); static struct resource *csa_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); -static int csa_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r); +static int csa_release_resource(device_t bus, device_t child, struct resource *r); static int csa_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); static int csa_teardown_intr(device_t bus, device_t child, struct resource *irq, void *cookie); static driver_intr_t csa_intr; static int csa_initialize(sc_p scp); static int csa_downloadimage(csa_res *resp); static int csa_transferimage(csa_res *resp, u_int32_t *src, u_long dest, u_long len); static void amp_none(void) { } static void amp_voyetra(void) { } static int clkrun_hack(int run) { #ifdef __i386__ devclass_t pci_devclass; device_t *pci_devices, *pci_children, *busp, *childp; int pci_count = 0, pci_childcount = 0; int i, j, port; u_int16_t control; bus_space_tag_t btag; if ((pci_devclass = devclass_find("pci")) == NULL) { return ENXIO; } devclass_get_devices(pci_devclass, &pci_devices, &pci_count); for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) { pci_childcount = 0; if (device_get_children(*busp, &pci_children, &pci_childcount)) continue; for (j = 0, childp = pci_children; j < pci_childcount; j++, childp++) { if (pci_get_vendor(*childp) == 0x8086 && pci_get_device(*childp) == 0x7113) { port = (pci_read_config(*childp, 0x41, 1) << 8) + 0x10; /* XXX */ btag = X86_BUS_SPACE_IO; control = bus_space_read_2(btag, 0x0, port); control &= ~0x2000; control |= run? 
0 : 0x2000; bus_space_write_2(btag, 0x0, port, control); free(pci_devices, M_TEMP); free(pci_children, M_TEMP); return 0; } } free(pci_children, M_TEMP); } free(pci_devices, M_TEMP); return ENXIO; #else return 0; #endif } static struct csa_card cards_4610[] = { {0, 0, "Unknown/invalid SSID (CS4610)", NULL, NULL, NULL, 0}, }; static struct csa_card cards_4614[] = { {0x1489, 0x7001, "Genius Soundmaker 128 value", amp_none, NULL, NULL, 0}, {0x5053, 0x3357, "Turtle Beach Santa Cruz", amp_voyetra, NULL, NULL, 1}, {0x1071, 0x6003, "Mitac MI6020/21", amp_voyetra, NULL, NULL, 0}, {0x14AF, 0x0050, "Hercules Game Theatre XP", NULL, NULL, NULL, 0}, {0x1681, 0x0050, "Hercules Game Theatre XP", NULL, NULL, NULL, 0}, {0x1014, 0x0132, "Thinkpad 570", amp_none, NULL, NULL, 0}, {0x1014, 0x0153, "Thinkpad 600X/A20/T20", amp_none, NULL, clkrun_hack, 0}, {0x1014, 0x1010, "Thinkpad 600E (unsupported)", NULL, NULL, NULL, 0}, {0x153b, 0x1136, "Terratec SiXPack 5.1+", NULL, NULL, NULL, 0}, {0, 0, "Unknown/invalid SSID (CS4614)", NULL, NULL, NULL, 0}, }; static struct csa_card cards_4615[] = { {0, 0, "Unknown/invalid SSID (CS4615)", NULL, NULL, NULL, 0}, }; static struct csa_card nocard = {0, 0, "unknown", NULL, NULL, NULL, 0}; struct card_type { u_int32_t devid; char *name; struct csa_card *cards; }; static struct card_type cards[] = { {CS4610_PCI_ID, "CS4610/CS4611", cards_4610}, {CS4614_PCI_ID, "CS4280/CS4614/CS4622/CS4624/CS4630", cards_4614}, {CS4615_PCI_ID, "CS4615", cards_4615}, {0, NULL, NULL}, }; static struct card_type * csa_findcard(device_t dev) { int i; i = 0; while (cards[i].devid != 0) { if (pci_get_devid(dev) == cards[i].devid) return &cards[i]; i++; } return NULL; } struct csa_card * csa_findsubcard(device_t dev) { int i; struct card_type *card; struct csa_card *subcard; card = csa_findcard(dev); if (card == NULL) return &nocard; subcard = card->cards; i = 0; while (subcard[i].subvendor != 0) { if (pci_get_subvendor(dev) == subcard[i].subvendor && pci_get_subdevice(dev) == subcard[i].subdevice) { return &subcard[i]; } i++; } return &subcard[i]; } static int csa_probe(device_t dev) { struct card_type *card; card = csa_findcard(dev); if (card) { device_set_desc(dev, card->name); return BUS_PROBE_DEFAULT; } return ENXIO; } static int csa_attach(device_t dev) { sc_p scp; csa_res *resp; struct sndcard_func *func; int error = ENXIO; scp = device_get_softc(dev); /* Fill in the softc. */ bzero(scp, sizeof(*scp)); scp->dev = dev; pci_enable_busmaster(dev); /* Allocate the resources. */ resp = &scp->res; scp->card = csa_findsubcard(dev); scp->binfo.card = scp->card; printf("csa: card is %s\n", scp->card->name); resp->io_rid = PCIR_BAR(0); resp->io = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &resp->io_rid, RF_ACTIVE); if (resp->io == NULL) return (ENXIO); resp->mem_rid = PCIR_BAR(1); resp->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &resp->mem_rid, RF_ACTIVE); if (resp->mem == NULL) goto err_io; resp->irq_rid = 0; resp->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &resp->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (resp->irq == NULL) goto err_mem; /* Enable interrupt. */ if (snd_setup_intr(dev, resp->irq, 0, csa_intr, scp, &scp->ih)) goto err_intr; #if 0 if ((csa_readio(resp, BA0_HISR) & HISR_INTENA) == 0) csa_writeio(resp, BA0_HICR, HICR_IEV | HICR_CHGM); #endif /* Initialize the chip. */ if (csa_initialize(scp)) goto err_teardown; /* Reset the Processor. */ csa_resetdsp(resp); /* Download the Processor Image to the processor. */ if (csa_downloadimage(resp)) goto err_teardown; /* Attach the children. 
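 * As an aside, the error-exit ladder at the end of this function and
 * csa_detach() below still use the legacy four-argument
 * bus_release_resource().  With the two-argument form introduced by
 * this change the same cleanup could be written as the following
 * sketch:
 *
 *	bus_release_resource(dev, resp->irq);
 *	bus_release_resource(dev, resp->mem);
 *	bus_release_resource(dev, resp->io);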
*/ /* PCM Audio */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto err_teardown; } func->varinfo = &scp->binfo; func->func = SCF_PCM; scp->pcm = device_add_child(dev, "pcm", -1); device_set_ivars(scp->pcm, func); /* Midi Interface */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto err_teardown; } func->varinfo = &scp->binfo; func->func = SCF_MIDI; scp->midi = device_add_child(dev, "midi", -1); device_set_ivars(scp->midi, func); bus_generic_attach(dev); return (0); err_teardown: bus_teardown_intr(dev, resp->irq, scp->ih); err_intr: bus_release_resource(dev, SYS_RES_IRQ, resp->irq_rid, resp->irq); err_mem: bus_release_resource(dev, SYS_RES_MEMORY, resp->mem_rid, resp->mem); err_io: bus_release_resource(dev, SYS_RES_MEMORY, resp->io_rid, resp->io); return (error); } static int csa_detach(device_t dev) { csa_res *resp; sc_p scp; struct sndcard_func *func; int err; scp = device_get_softc(dev); resp = &scp->res; if (scp->midi != NULL) { func = device_get_ivars(scp->midi); err = device_delete_child(dev, scp->midi); if (err != 0) return err; if (func != NULL) free(func, M_DEVBUF); scp->midi = NULL; } if (scp->pcm != NULL) { func = device_get_ivars(scp->pcm); err = device_delete_child(dev, scp->pcm); if (err != 0) return err; if (func != NULL) free(func, M_DEVBUF); scp->pcm = NULL; } bus_teardown_intr(dev, resp->irq, scp->ih); bus_release_resource(dev, SYS_RES_IRQ, resp->irq_rid, resp->irq); bus_release_resource(dev, SYS_RES_MEMORY, resp->mem_rid, resp->mem); bus_release_resource(dev, SYS_RES_MEMORY, resp->io_rid, resp->io); return bus_generic_detach(dev); } static int csa_resume(device_t dev) { csa_res *resp; sc_p scp; scp = device_get_softc(dev); resp = &scp->res; /* Initialize the chip. */ if (csa_initialize(scp)) return (ENXIO); /* Reset the Processor. */ csa_resetdsp(resp); /* Download the Processor Image to the processor. */ if (csa_downloadimage(resp)) return (ENXIO); return (bus_generic_resume(dev)); } static struct resource * csa_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { sc_p scp; csa_res *resp; struct resource *res; scp = device_get_softc(bus); resp = &scp->res; switch (type) { case SYS_RES_IRQ: if (*rid != 0) return (NULL); res = resp->irq; break; case SYS_RES_MEMORY: switch (*rid) { case PCIR_BAR(0): res = resp->io; break; case PCIR_BAR(1): res = resp->mem; break; default: return (NULL); } break; default: return (NULL); } return res; } static int -csa_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +csa_release_resource(device_t bus, device_t child, struct resource *r) { return (0); } /* * The following three functions deal with interrupt handling. * An interrupt is primarily handled by the bridge driver. * The bridge driver then determines the child devices to pass * the interrupt. Certain information of the device can be read * only once(eg the value of HISR). The bridge driver is responsible * to pass such the information to the children. 
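 * Concretely, csa_setup_intr() below records a child's handler in
 * scp->pcmintr or scp->midiintr according to the child's function code,
 * and csa_intr() reads HISR once, publishes the value to the children
 * through scp->binfo.hisr (reachable via their ivars), and then invokes
 * whichever handlers the HISR bits select.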
*/ static int csa_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { sc_p scp; csa_res *resp; struct sndcard_func *func; if (filter != NULL) { printf("ata-csa.c: we cannot use a filter here\n"); return (EINVAL); } scp = device_get_softc(bus); resp = &scp->res; /* * Look at the function code of the child to determine * the appropriate handler for it. */ func = device_get_ivars(child); if (func == NULL || irq != resp->irq) return (EINVAL); switch (func->func) { case SCF_PCM: scp->pcmintr = intr; scp->pcmintr_arg = arg; break; case SCF_MIDI: scp->midiintr = intr; scp->midiintr_arg = arg; break; default: return (EINVAL); } *cookiep = scp; if ((csa_readio(resp, BA0_HISR) & HISR_INTENA) == 0) csa_writeio(resp, BA0_HICR, HICR_IEV | HICR_CHGM); return (0); } static int csa_teardown_intr(device_t bus, device_t child, struct resource *irq, void *cookie) { sc_p scp; csa_res *resp; struct sndcard_func *func; scp = device_get_softc(bus); resp = &scp->res; /* * Look at the function code of the child to determine * the appropriate handler for it. */ func = device_get_ivars(child); if (func == NULL || irq != resp->irq || cookie != scp) return (EINVAL); switch (func->func) { case SCF_PCM: scp->pcmintr = NULL; scp->pcmintr_arg = NULL; break; case SCF_MIDI: scp->midiintr = NULL; scp->midiintr_arg = NULL; break; default: return (EINVAL); } return (0); } /* The interrupt handler */ static void csa_intr(void *arg) { sc_p scp = arg; csa_res *resp; u_int32_t hisr; resp = &scp->res; /* Is this interrupt for us? */ hisr = csa_readio(resp, BA0_HISR); if ((hisr & 0x7fffffff) == 0) { /* Throw an eoi. */ csa_writeio(resp, BA0_HICR, HICR_IEV | HICR_CHGM); return; } /* * Pass the value of HISR via struct csa_bridgeinfo. * The children get access through their ivars. */ scp->binfo.hisr = hisr; /* Invoke the handlers of the children. */ if ((hisr & (HISR_VC0 | HISR_VC1)) != 0 && scp->pcmintr != NULL) { scp->pcmintr(scp->pcmintr_arg); hisr &= ~(HISR_VC0 | HISR_VC1); } if ((hisr & HISR_MIDI) != 0 && scp->midiintr != NULL) { scp->midiintr(scp->midiintr_arg); hisr &= ~HISR_MIDI; } /* Throw an eoi. */ csa_writeio(resp, BA0_HICR, HICR_IEV | HICR_CHGM); } static int csa_initialize(sc_p scp) { int i; u_int32_t acsts, acisv; csa_res *resp; resp = &scp->res; /* * First, blast the clock control register to zero so that the PLL starts * out in a known state, and blast the master serial port control register * to zero so that the serial ports also start out in a known state. */ csa_writeio(resp, BA0_CLKCR1, 0); csa_writeio(resp, BA0_SERMC1, 0); /* * If we are in AC97 mode, then we must set the part to a host controlled * AC-link. Otherwise, we won't be able to bring up the link. */ #if 1 csa_writeio(resp, BA0_SERACC, SERACC_HSP | SERACC_CODEC_TYPE_1_03); /* 1.03 codec */ #else csa_writeio(resp, BA0_SERACC, SERACC_HSP | SERACC_CODEC_TYPE_2_0); /* 2.0 codec */ #endif /* 1 */ /* * Drive the ARST# pin low for a minimum of 1uS (as defined in the AC97 * spec) and then drive it high. This is done for non AC97 modes since * there might be logic external to the CS461x that uses the ARST# line * for a reset. */ csa_writeio(resp, BA0_ACCTL, 1); DELAY(50); csa_writeio(resp, BA0_ACCTL, 0); DELAY(50); csa_writeio(resp, BA0_ACCTL, ACCTL_RSTN); /* * The first thing we do here is to enable sync generation. As soon * as we start receiving bit clock, we'll start producing the SYNC * signal. 
*/ csa_writeio(resp, BA0_ACCTL, ACCTL_ESYN | ACCTL_RSTN); /* * Now wait for a short while to allow the AC97 part to start * generating bit clock (so we don't try to start the PLL without an * input clock). */ DELAY(50000); /* * Set the serial port timing configuration, so that * the clock control circuit gets its clock from the correct place. */ csa_writeio(resp, BA0_SERMC1, SERMC1_PTC_AC97); DELAY(700000); /* * Write the selected clock control setup to the hardware. Do not turn on * SWCE yet (if requested), so that the devices clocked by the output of * PLL are not clocked until the PLL is stable. */ csa_writeio(resp, BA0_PLLCC, PLLCC_LPF_1050_2780_KHZ | PLLCC_CDR_73_104_MHZ); csa_writeio(resp, BA0_PLLM, 0x3a); csa_writeio(resp, BA0_CLKCR2, CLKCR2_PDIVS_8); /* * Power up the PLL. */ csa_writeio(resp, BA0_CLKCR1, CLKCR1_PLLP); /* * Wait until the PLL has stabilized. */ DELAY(5000); /* * Turn on clocking of the core so that we can setup the serial ports. */ csa_writeio(resp, BA0_CLKCR1, csa_readio(resp, BA0_CLKCR1) | CLKCR1_SWCE); /* * Fill the serial port FIFOs with silence. */ csa_clearserialfifos(resp); /* * Set the serial port FIFO pointer to the first sample in the FIFO. */ #ifdef notdef csa_writeio(resp, BA0_SERBSP, 0); #endif /* notdef */ /* * Write the serial port configuration to the part. The master * enable bit is not set until all other values have been written. */ csa_writeio(resp, BA0_SERC1, SERC1_SO1F_AC97 | SERC1_SO1EN); csa_writeio(resp, BA0_SERC2, SERC2_SI1F_AC97 | SERC1_SO1EN); csa_writeio(resp, BA0_SERMC1, SERMC1_PTC_AC97 | SERMC1_MSPE); /* * Wait for the codec ready signal from the AC97 codec. */ acsts = 0; for (i = 0 ; i < 1000 ; i++) { /* * First, lets wait a short while to let things settle out a bit, * and to prevent retrying the read too quickly. */ DELAY(125); /* * Read the AC97 status register to see if we've seen a CODEC READY * signal from the AC97 codec. */ acsts = csa_readio(resp, BA0_ACSTS); if ((acsts & ACSTS_CRDY) != 0) break; } /* * Make sure we sampled CODEC READY. */ if ((acsts & ACSTS_CRDY) == 0) return (ENXIO); /* * Assert the vaid frame signal so that we can start sending commands * to the AC97 codec. */ csa_writeio(resp, BA0_ACCTL, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); /* * Wait until we've sampled input slots 3 and 4 as valid, meaning that * the codec is pumping ADC data across the AC-link. */ acisv = 0; for (i = 0 ; i < 2000 ; i++) { /* * First, lets wait a short while to let things settle out a bit, * and to prevent retrying the read too quickly. */ #ifdef notdef DELAY(10000000L); /* clw */ #else DELAY(1000); #endif /* notdef */ /* * Read the input slot valid register and see if input slots 3 and * 4 are valid yet. */ acisv = csa_readio(resp, BA0_ACISV); if ((acisv & (ACISV_ISV3 | ACISV_ISV4)) == (ACISV_ISV3 | ACISV_ISV4)) break; } /* * Make sure we sampled valid input slots 3 and 4. If not, then return * an error. */ if ((acisv & (ACISV_ISV3 | ACISV_ISV4)) != (ACISV_ISV3 | ACISV_ISV4)) return (ENXIO); /* * Now, assert valid frame and the slot 3 and 4 valid bits. This will * commense the transfer of digital audio data to the AC97 codec. */ csa_writeio(resp, BA0_ACOSV, ACOSV_SLV3 | ACOSV_SLV4); /* * Power down the DAC and ADC. We will power them up (if) when we need * them. */ #ifdef notdef csa_writeio(resp, BA0_AC97_POWERDOWN, 0x300); #endif /* notdef */ /* * Turn off the Processor by turning off the software clock enable flag in * the clock control register. 
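 * (The two polling loops above bound the AC97 bring-up: up to
 * 1000 * 125us = 125ms waiting for CODEC READY, and up to
 * 2000 * 1ms = 2s waiting for input slots 3 and 4 to become valid;
 * either timeout fails initialization with ENXIO.)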
*/ #ifdef notdef clkcr1 = csa_readio(resp, BA0_CLKCR1) & ~CLKCR1_SWCE; csa_writeio(resp, BA0_CLKCR1, clkcr1); #endif /* notdef */ /* * Enable interrupts on the part. */ #if 0 csa_writeio(resp, BA0_HICR, HICR_IEV | HICR_CHGM); #endif /* notdef */ return (0); } void csa_clearserialfifos(csa_res *resp) { int i, j, pwr; u_int8_t clkcr1, serbst; /* * See if the devices are powered down. If so, we must power them up first * or they will not respond. */ pwr = 1; clkcr1 = csa_readio(resp, BA0_CLKCR1); if ((clkcr1 & CLKCR1_SWCE) == 0) { csa_writeio(resp, BA0_CLKCR1, clkcr1 | CLKCR1_SWCE); pwr = 0; } /* * We want to clear out the serial port FIFOs so we don't end up playing * whatever random garbage happens to be in them. We fill the sample FIFOs * with zero (silence). */ csa_writeio(resp, BA0_SERBWP, 0); /* Fill all 256 sample FIFO locations. */ serbst = 0; for (i = 0 ; i < 256 ; i++) { /* Make sure the previous FIFO write operation has completed. */ for (j = 0 ; j < 5 ; j++) { DELAY(100); serbst = csa_readio(resp, BA0_SERBST); if ((serbst & SERBST_WBSY) == 0) break; } if ((serbst & SERBST_WBSY) != 0) { if (!pwr) csa_writeio(resp, BA0_CLKCR1, clkcr1); } /* Write the serial port FIFO index. */ csa_writeio(resp, BA0_SERBAD, i); /* Tell the serial port to load the new value into the FIFO location. */ csa_writeio(resp, BA0_SERBCM, SERBCM_WRC); } /* * Now, if we powered up the devices, then power them back down again. * This is kinda ugly, but should never happen. */ if (!pwr) csa_writeio(resp, BA0_CLKCR1, clkcr1); } void csa_resetdsp(csa_res *resp) { int i; /* * Write the reset bit of the SP control register. */ csa_writemem(resp, BA1_SPCR, SPCR_RSTSP); /* * Write the control register. */ csa_writemem(resp, BA1_SPCR, SPCR_DRQEN); /* * Clear the trap registers. */ for (i = 0 ; i < 8 ; i++) { csa_writemem(resp, BA1_DREG, DREG_REGID_TRAP_SELECT + i); csa_writemem(resp, BA1_TWPR, 0xffff); } csa_writemem(resp, BA1_DREG, 0); /* * Set the frame timer to reflect the number of cycles per frame. */ csa_writemem(resp, BA1_FRMT, 0xadf); } static int csa_downloadimage(csa_res *resp) { int ret; u_long ul, offset; for (ul = 0, offset = 0 ; ul < INKY_MEMORY_COUNT ; ul++) { /* * DMA this block from host memory to the appropriate * memory on the CSDevice. */ ret = csa_transferimage(resp, cs461x_firmware.BA1Array + offset, cs461x_firmware.MemoryStat[ul].ulDestAddr, cs461x_firmware.MemoryStat[ul].ulSourceSize); if (ret) return (ret); offset += cs461x_firmware.MemoryStat[ul].ulSourceSize >> 2; } return (0); } static int csa_transferimage(csa_res *resp, u_int32_t *src, u_long dest, u_long len) { u_long ul; /* * We do not allow DMAs from host memory to host memory (although the DMA * can do it) and we do not allow DMAs which are not a multiple of 4 bytes * in size (because that DMA can not do that). Return an error if either * of these conditions exist. */ if ((len & 0x3) != 0) return (EINVAL); /* Check the destination address that it is a multiple of 4 */ if ((dest & 0x3) != 0) return (EINVAL); /* Write the buffer out. */ for (ul = 0 ; ul < len ; ul += 4) csa_writemem(resp, dest + ul, src[ul >> 2]); return (0); } int csa_readcodec(csa_res *resp, u_long offset, u_int32_t *data) { int i; u_int32_t acctl, acsts; /* * Make sure that there is not data sitting around from a previous * uncompleted access. ACSDA = Status Data Register = 47Ch */ csa_readio(resp, BA0_ACSDA); /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the read. 
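
csa_transferimage above rejects any transfer whose length or destination address is not 32-bit aligned, because the copy proceeds one 32-bit word at a time. A standalone sketch of the same checks and word loop, using plain memory in place of csa_writemem (copy_words is a hypothetical name):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Copy 'len' bytes as 32-bit words; both len and dst must be 4-aligned. */
static int
copy_words(uint32_t *dst, const uint32_t *src, unsigned long len)
{
        unsigned long i;

        if ((len & 0x3) != 0)
                return (EINVAL);
        if (((uintptr_t)dst & 0x3) != 0)
                return (EINVAL);
        for (i = 0; i < len; i += 4)
                dst[i >> 2] = src[i >> 2];
        return (0);
}

int
main(void)
{
        uint32_t src[4] = { 1, 2, 3, 4 }, dst[4];

        printf("%d\n", copy_words(dst, src, sizeof(src)));      /* prints 0 */
        return (0);
}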
* ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * set CRW - Read command * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ /* * Get the actual AC97 register from the offset */ csa_writeio(resp, BA0_ACCAD, offset - BA0_AC97_RESET); csa_writeio(resp, BA0_ACCDA, 0); csa_writeio(resp, BA0_ACCTL, ACCTL_DCV | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); /* * Wait for the read to occur. */ acctl = 0; for (i = 0 ; i < 10 ; i++) { /* * First, we want to wait for a short time. */ DELAY(25); /* * Now, check to see if the read has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 17h */ acctl = csa_readio(resp, BA0_ACCTL); if ((acctl & ACCTL_DCV) == 0) break; } /* * Make sure the read completed. */ if ((acctl & ACCTL_DCV) != 0) return (EAGAIN); /* * Wait for the valid status bit to go active. */ acsts = 0; for (i = 0 ; i < 10 ; i++) { /* * Read the AC97 status register. * ACSTS = Status Register = 464h */ acsts = csa_readio(resp, BA0_ACSTS); /* * See if we have valid status. * VSTS - Valid Status */ if ((acsts & ACSTS_VSTS) != 0) break; /* * Wait for a short while. */ DELAY(25); } /* * Make sure we got valid status. */ if ((acsts & ACSTS_VSTS) == 0) return (EAGAIN); /* * Read the data returned from the AC97 register. * ACSDA = Status Data Register = 474h */ *data = csa_readio(resp, BA0_ACSDA); return (0); } int csa_writecodec(csa_res *resp, u_long offset, u_int32_t data) { int i; u_int32_t acctl; /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the write. * ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ /* * Get the actual AC97 register from the offset */ csa_writeio(resp, BA0_ACCAD, offset - BA0_AC97_RESET); csa_writeio(resp, BA0_ACCDA, data); csa_writeio(resp, BA0_ACCTL, ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); /* * Wait for the write to occur. */ acctl = 0; for (i = 0 ; i < 10 ; i++) { /* * First, we want to wait for a short time. */ DELAY(25); /* * Now, check to see if the read has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 17h */ acctl = csa_readio(resp, BA0_ACCTL); if ((acctl & ACCTL_DCV) == 0) break; } /* * Make sure the write completed. 
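
For illustration, how a caller might drive these helpers: AC97 register 0x02 is the master-volume register and bit 15 its mute bit per the AC97 specification, and the offset convention (BA0_AC97_RESET + AC97 register) matches the subtraction the helpers perform. This is a hypothetical usage sketch, not code from the driver:

/*
 * Hypothetical: unmute AC97 master volume (AC97 register 0x02) through
 * the codec helpers.  Offsets at or above BA0_AC97_RESET are routed to
 * the codec; the helpers subtract BA0_AC97_RESET to get the register.
 */
static int
unmute_master(csa_res *resp)
{
        u_int32_t val;
        int error;

        error = csa_readcodec(resp, BA0_AC97_RESET + 0x02, &val);
        if (error)
                return (error);
        return (csa_writecodec(resp, BA0_AC97_RESET + 0x02,
            val & ~0x8000));    /* clear the AC97 mute bit */
}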
*/ if ((acctl & ACCTL_DCV) != 0) return (EAGAIN); return (0); } u_int32_t csa_readio(csa_res *resp, u_long offset) { u_int32_t ul; if (offset < BA0_AC97_RESET) return bus_space_read_4(rman_get_bustag(resp->io), rman_get_bushandle(resp->io), offset) & 0xffffffff; else { if (csa_readcodec(resp, offset, &ul)) ul = 0; return (ul); } } void csa_writeio(csa_res *resp, u_long offset, u_int32_t data) { if (offset < BA0_AC97_RESET) bus_space_write_4(rman_get_bustag(resp->io), rman_get_bushandle(resp->io), offset, data); else csa_writecodec(resp, offset, data); } u_int32_t csa_readmem(csa_res *resp, u_long offset) { return bus_space_read_4(rman_get_bustag(resp->mem), rman_get_bushandle(resp->mem), offset); } void csa_writemem(csa_res *resp, u_long offset, u_int32_t data) { bus_space_write_4(rman_get_bustag(resp->mem), rman_get_bushandle(resp->mem), offset, data); } static device_method_t csa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, csa_probe), DEVMETHOD(device_attach, csa_attach), DEVMETHOD(device_detach, csa_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, csa_resume), /* Bus interface */ DEVMETHOD(bus_alloc_resource, csa_alloc_resource), DEVMETHOD(bus_release_resource, csa_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, csa_setup_intr), DEVMETHOD(bus_teardown_intr, csa_teardown_intr), DEVMETHOD_END }; static driver_t csa_driver = { "csa", csa_methods, sizeof(struct csa_softc), }; /* * csa can be attached to a pci bus. */ DRIVER_MODULE(snd_csa, pci, csa_driver, 0, 0); MODULE_DEPEND(snd_csa, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_csa, 1); diff --git a/sys/dev/sound/pci/fm801.c b/sys/dev/sound/pci/fm801.c index fbde14f37afe..cbc74249c04d 100644 --- a/sys/dev/sound/pci/fm801.c +++ b/sys/dev/sound/pci/fm801.c @@ -1,763 +1,762 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Dmitry Dicky diwil@dataart.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS `AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
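
The csa_readio/csa_writeio pair above folds two address spaces into one accessor: offsets below BA0_AC97_RESET go straight to BA0 register space, anything at or above is forwarded to the AC97 codec. A sketch of that dispatch shape under assumed names (REG_SPLIT, hw_read, codec_read are all stand-ins):

#include <stdint.h>

#define REG_SPLIT 0x1000        /* stands in for BA0_AC97_RESET */

extern uint32_t hw_read(unsigned long off);             /* direct register */
extern int codec_read(unsigned long off, uint32_t *val); /* indirect, may fail */

/* One accessor, two backends, selected by offset alone. */
static uint32_t
unified_read(unsigned long off)
{
        uint32_t v;

        if (off < REG_SPLIT)
                return (hw_read(off));
        if (codec_read(off, &v) != 0)
                v = 0;          /* csa_readio also returns 0 on codec errors */
        return (v);
}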
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #define PCI_VENDOR_FORTEMEDIA 0x1319 #define PCI_DEVICE_FORTEMEDIA1 0x08011319 /* Audio controller */ #define PCI_DEVICE_FORTEMEDIA2 0x08021319 /* Joystick controller */ #define FM_PCM_VOLUME 0x00 #define FM_FM_VOLUME 0x02 #define FM_I2S_VOLUME 0x04 #define FM_RECORD_SOURCE 0x06 #define FM_PLAY_CTL 0x08 #define FM_PLAY_RATE_MASK 0x0f00 #define FM_PLAY_BUF1_LAST 0x0001 #define FM_PLAY_BUF2_LAST 0x0002 #define FM_PLAY_START 0x0020 #define FM_PLAY_PAUSE 0x0040 #define FM_PLAY_STOPNOW 0x0080 #define FM_PLAY_16BIT 0x4000 #define FM_PLAY_STEREO 0x8000 #define FM_PLAY_DMALEN 0x0a #define FM_PLAY_DMABUF1 0x0c #define FM_PLAY_DMABUF2 0x10 #define FM_REC_CTL 0x14 #define FM_REC_RATE_MASK 0x0f00 #define FM_REC_BUF1_LAST 0x0001 #define FM_REC_BUF2_LAST 0x0002 #define FM_REC_START 0x0020 #define FM_REC_PAUSE 0x0040 #define FM_REC_STOPNOW 0x0080 #define FM_REC_16BIT 0x4000 #define FM_REC_STEREO 0x8000 #define FM_REC_DMALEN 0x16 #define FM_REC_DMABUF1 0x18 #define FM_REC_DMABUF2 0x1c #define FM_CODEC_CTL 0x22 #define FM_VOLUME 0x26 #define FM_VOLUME_MUTE 0x8000 #define FM_CODEC_CMD 0x2a #define FM_CODEC_CMD_READ 0x0080 #define FM_CODEC_CMD_VALID 0x0100 #define FM_CODEC_CMD_BUSY 0x0200 #define FM_CODEC_DATA 0x2c #define FM_IO_CTL 0x52 #define FM_CARD_CTL 0x54 #define FM_INTMASK 0x56 #define FM_INTMASK_PLAY 0x0001 #define FM_INTMASK_REC 0x0002 #define FM_INTMASK_VOL 0x0040 #define FM_INTMASK_MPU 0x0080 #define FM_INTSTATUS 0x5a #define FM_INTSTATUS_PLAY 0x0100 #define FM_INTSTATUS_REC 0x0200 #define FM_INTSTATUS_VOL 0x4000 #define FM_INTSTATUS_MPU 0x8000 #define FM801_DEFAULT_BUFSZ 4096 /* Other values do not work!!! */ /* debug purposes */ #define DPRINT if(0) printf /* static int fm801ch_setup(struct pcm_channel *c); */ static u_int32_t fmts[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps fm801ch_caps = { 5500, 48000, fmts, 0 }; struct fm801_info; struct fm801_chinfo { struct fm801_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t spd, dir, fmt; /* speed, direction, format */ u_int32_t shift; }; struct fm801_info { int type; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; device_t dev; int num; u_int32_t unit; struct resource *reg, *irq; int regtype, regid, irqid; void *ih; u_int32_t play_flip, play_nextblk, play_start, play_blksize, play_fmt, play_shift, play_size; u_int32_t rec_flip, rec_nextblk, rec_start, rec_blksize, rec_fmt, rec_shift, rec_size; unsigned int bufsz; struct fm801_chinfo pch, rch; device_t radio; }; /* Bus Read / Write routines */ static u_int32_t fm801_rd(struct fm801_info *fm801, int regno, int size) { switch(size) { case 1: return (bus_space_read_1(fm801->st, fm801->sh, regno)); case 2: return (bus_space_read_2(fm801->st, fm801->sh, regno)); case 4: return (bus_space_read_4(fm801->st, fm801->sh, regno)); default: return 0xffffffff; } } static void fm801_wr(struct fm801_info *fm801, int regno, u_int32_t data, int size) { switch(size) { case 1: bus_space_write_1(fm801->st, fm801->sh, regno, data); break; case 2: bus_space_write_2(fm801->st, fm801->sh, regno, data); break; case 4: bus_space_write_4(fm801->st, fm801->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* * ac97 codec routines */ #define TIMO 50 static int fm801_rdcd(kobj_t obj, void *devinfo, int regno) { struct 
fm801_info *fm801 = (struct fm801_info *)devinfo; int i; for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: codec busy\n"); return 0; } fm801_wr(fm801,FM_CODEC_CMD, regno|FM_CODEC_CMD_READ,2); for (i = 0; i < TIMO && !(fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_VALID); i++) { DELAY(10000); DPRINT("fm801 rdcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: write codec invalid\n"); return 0; } return fm801_rd(fm801,FM_CODEC_DATA,2); } static int fm801_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; int i; DPRINT("fm801_wrcd reg 0x%x val 0x%x\n",regno, data); /* if(regno == AC97_REG_RECSEL) return; */ /* Poll until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } fm801_wr(fm801,FM_CODEC_DATA,data, 2); fm801_wr(fm801,FM_CODEC_CMD, regno,2); /* wait until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 wrcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } DPRINT("fm801 wrcd release reg 0x%x val 0x%x\n",regno, data); return 0; } static kobj_method_t fm801_ac97_methods[] = { KOBJMETHOD(ac97_read, fm801_rdcd), KOBJMETHOD(ac97_write, fm801_wrcd), DEVMETHOD_END }; AC97_DECLARE(fm801_ac97); /* -------------------------------------------------------------------- */ /* * The interrupt handler */ static void fm801_intr(void *p) { struct fm801_info *fm801 = (struct fm801_info *)p; u_int32_t intsrc = fm801_rd(fm801, FM_INTSTATUS, 2); DPRINT("\nfm801_intr intsrc 0x%x ", intsrc); if(intsrc & FM_INTSTATUS_PLAY) { fm801->play_flip++; if(fm801->play_flip & 1) { fm801_wr(fm801, FM_PLAY_DMABUF1, fm801->play_start,4); } else fm801_wr(fm801, FM_PLAY_DMABUF2, fm801->play_nextblk,4); chn_intr(fm801->pch.channel); } if(intsrc & FM_INTSTATUS_REC) { fm801->rec_flip++; if(fm801->rec_flip & 1) { fm801_wr(fm801, FM_REC_DMABUF1, fm801->rec_start,4); } else fm801_wr(fm801, FM_REC_DMABUF2, fm801->rec_nextblk,4); chn_intr(fm801->rch.channel); } if ( intsrc & FM_INTSTATUS_MPU ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_MPU,2); } if ( intsrc & FM_INTSTATUS_VOL ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_VOL,2); } DPRINT("fm801_intr clear\n\n"); fm801_wr(fm801, FM_INTSTATUS, intsrc & (FM_INTSTATUS_PLAY | FM_INTSTATUS_REC), 2); } /* -------------------------------------------------------------------- */ /* channel interface */ static void * fm801ch_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; struct fm801_chinfo *ch = (dir == PCMDIR_PLAY)? &fm801->pch : &fm801->rch; DPRINT("fm801ch_init, direction = %d\n", dir); ch->parent = fm801; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, fm801->parent_dmat, 0, fm801->bufsz) != 0) return NULL; return (void *)ch; } static int fm801ch_setformat(kobj_t obj, void *data, u_int32_t format) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; DPRINT("fm801ch_setformat 0x%x : %s, %s, %s, %s\n", format, (AFMT_CHANNEL(format) > 1)?"stereo":"mono", (format & AFMT_16BIT) ? 
"16bit":"8bit", (format & AFMT_SIGNED)? "signed":"unsigned", (format & AFMT_BIGENDIAN)?"bigendiah":"littleendian" ); if(ch->dir == PCMDIR_PLAY) { fm801->play_fmt = (AFMT_CHANNEL(format) > 1)? FM_PLAY_STEREO : 0; fm801->play_fmt |= (format & AFMT_16BIT) ? FM_PLAY_16BIT : 0; return 0; } if(ch->dir == PCMDIR_REC ) { fm801->rec_fmt = (AFMT_CHANNEL(format) > 1)? FM_REC_STEREO:0; fm801->rec_fmt |= (format & AFMT_16BIT) ? FM_PLAY_16BIT : 0; return 0; } return 0; } struct { u_int32_t limit; u_int32_t rate; } fm801_rates[11] = { { 6600, 5500 }, { 8750, 8000 }, { 10250, 9600 }, { 13200, 11025 }, { 17500, 16000 }, { 20500, 19200 }, { 26500, 22050 }, { 35000, 32000 }, { 41000, 38400 }, { 46000, 44100 }, { 48000, 48000 }, /* anything above -> 48000 */ }; static u_int32_t fm801ch_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; int i; for (i = 0; i < 10 && fm801_rates[i].limit <= speed; i++) ; if(ch->dir == PCMDIR_PLAY) { fm801->pch.spd = fm801_rates[i].rate; fm801->play_shift = (i<<8); fm801->play_shift &= FM_PLAY_RATE_MASK; } if(ch->dir == PCMDIR_REC ) { fm801->rch.spd = fm801_rates[i].rate; fm801->rec_shift = (i<<8); fm801->rec_shift &= FM_REC_RATE_MASK; } ch->spd = fm801_rates[i].rate; return fm801_rates[i].rate; } static u_int32_t fm801ch_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; /* * Don't mind for play_flip, set the blocksize to the * desired values in any case - otherwise sound playback * breaks here. */ if(ch->dir == PCMDIR_PLAY) fm801->play_blksize = blocksize; if(ch->dir == PCMDIR_REC) fm801->rec_blksize = blocksize; DPRINT("fm801ch_setblocksize %d (dir %d)\n",blocksize, ch->dir); return blocksize; } static int fm801ch_trigger(kobj_t obj, void *data, int go) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; u_int32_t baseaddr = sndbuf_getbufaddr(ch->buffer); u_int32_t k1; DPRINT("fm801ch_trigger go %d , ", go); if (!PCMTRIG_COMMON(go)) return 0; if (ch->dir == PCMDIR_PLAY) { if (go == PCMTRIG_START) { fm801->play_start = baseaddr; fm801->play_nextblk = fm801->play_start + fm801->play_blksize; fm801->play_flip = 0; fm801_wr(fm801, FM_PLAY_DMALEN, fm801->play_blksize - 1, 2); fm801_wr(fm801, FM_PLAY_DMABUF1,fm801->play_start,4); fm801_wr(fm801, FM_PLAY_DMABUF2,fm801->play_nextblk,4); fm801_wr(fm801, FM_PLAY_CTL, FM_PLAY_START | FM_PLAY_STOPNOW | fm801->play_fmt | fm801->play_shift, 2 ); } else { fm801->play_flip = 0; k1 = fm801_rd(fm801, FM_PLAY_CTL,2); fm801_wr(fm801, FM_PLAY_CTL, (k1 & ~(FM_PLAY_STOPNOW | FM_PLAY_START)) | FM_PLAY_BUF1_LAST | FM_PLAY_BUF2_LAST, 2 ); } } else if(ch->dir == PCMDIR_REC) { if (go == PCMTRIG_START) { fm801->rec_start = baseaddr; fm801->rec_nextblk = fm801->rec_start + fm801->rec_blksize; fm801->rec_flip = 0; fm801_wr(fm801, FM_REC_DMALEN, fm801->rec_blksize - 1, 2); fm801_wr(fm801, FM_REC_DMABUF1,fm801->rec_start,4); fm801_wr(fm801, FM_REC_DMABUF2,fm801->rec_nextblk,4); fm801_wr(fm801, FM_REC_CTL, FM_REC_START | FM_REC_STOPNOW | fm801->rec_fmt | fm801->rec_shift, 2 ); } else { fm801->rec_flip = 0; k1 = fm801_rd(fm801, FM_REC_CTL,2); fm801_wr(fm801, FM_REC_CTL, (k1 & ~(FM_REC_STOPNOW | FM_REC_START)) | FM_REC_BUF1_LAST | FM_REC_BUF2_LAST, 2); } } return 0; } /* Almost ALSA copy */ static u_int32_t fm801ch_getptr(kobj_t obj, void *data) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; u_int32_t result = 0; if (ch->dir == PCMDIR_PLAY) { result = fm801_rd(fm801, 
(fm801->play_flip&1) ? FM_PLAY_DMABUF2:FM_PLAY_DMABUF1, 4) - fm801->play_start; } if (ch->dir == PCMDIR_REC) { result = fm801_rd(fm801, (fm801->rec_flip&1) ? FM_REC_DMABUF2:FM_REC_DMABUF1, 4) - fm801->rec_start; } return result; } static struct pcmchan_caps * fm801ch_getcaps(kobj_t obj, void *data) { return &fm801ch_caps; } static kobj_method_t fm801ch_methods[] = { KOBJMETHOD(channel_init, fm801ch_init), KOBJMETHOD(channel_setformat, fm801ch_setformat), KOBJMETHOD(channel_setspeed, fm801ch_setspeed), KOBJMETHOD(channel_setblocksize, fm801ch_setblocksize), KOBJMETHOD(channel_trigger, fm801ch_trigger), KOBJMETHOD(channel_getptr, fm801ch_getptr), KOBJMETHOD(channel_getcaps, fm801ch_getcaps), DEVMETHOD_END }; CHANNEL_DECLARE(fm801ch); /* -------------------------------------------------------------------- */ /* * Init routine is taken from an original NetBSD driver */ static int fm801_init(struct fm801_info *fm801) { u_int32_t k1; /* reset codec */ fm801_wr(fm801, FM_CODEC_CTL, 0x0020,2); DELAY(100000); fm801_wr(fm801, FM_CODEC_CTL, 0x0000,2); DELAY(100000); fm801_wr(fm801, FM_PCM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_FM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_I2S_VOLUME, 0x0808,2); fm801_wr(fm801, 0x40,0x107f,2); /* enable legacy audio */ fm801_wr((void *)fm801, FM_RECORD_SOURCE, 0x0000,2); /* Unmask playback, record and mpu interrupts, mask the rest */ k1 = fm801_rd((void *)fm801, FM_INTMASK,2); fm801_wr(fm801, FM_INTMASK, (k1 & ~(FM_INTMASK_PLAY | FM_INTMASK_REC | FM_INTMASK_MPU)) | FM_INTMASK_VOL,2); fm801_wr(fm801, FM_INTSTATUS, FM_INTSTATUS_PLAY | FM_INTSTATUS_REC | FM_INTSTATUS_MPU | FM_INTSTATUS_VOL,2); DPRINT("FM801 init Ok\n"); return 0; } static int fm801_pci_attach(device_t dev) { struct ac97_info *codec = NULL; struct fm801_info *fm801; int i; int mapped = 0; char status[SND_STATUSLEN]; fm801 = malloc(sizeof(*fm801), M_DEVBUF, M_WAITOK | M_ZERO); fm801->type = pci_get_devid(dev); pci_enable_busmaster(dev); for (i = 0; (mapped == 0) && (i < PCI_MAXMAPS_0); i++) { fm801->regid = PCIR_BAR(i); fm801->regtype = SYS_RES_MEMORY; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); if(!fm801->reg) { fm801->regtype = SYS_RES_IOPORT; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); } if(fm801->reg) { fm801->st = rman_get_bustag(fm801->reg); fm801->sh = rman_get_bushandle(fm801->reg); mapped++; } } if (mapped == 0) { device_printf(dev, "unable to map register space\n"); goto oops; } fm801->bufsz = pcm_getbuffersize(dev, 4096, FM801_DEFAULT_BUFSZ, 65536); fm801_init(fm801); codec = AC97_CREATE(dev, fm801, fm801_ac97); if (codec == NULL) goto oops; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto oops; fm801->irqid = 0; fm801->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &fm801->irqid, RF_ACTIVE | RF_SHAREABLE); if (!fm801->irq || snd_setup_intr(dev, fm801->irq, 0, fm801_intr, fm801, &fm801->ih)) { device_printf(dev, "unable to map interrupt\n"); goto oops; } if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/fm801->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &fm801->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto oops; } snprintf(status, SND_STATUSLEN, "%s 0x%jx irq %jd on %s", (fm801->regtype == SYS_RES_IOPORT)? 
"port" : "mem", rman_get_start(fm801->reg), rman_get_start(fm801->irq), device_get_nameunit(device_get_parent(dev))); #define FM801_MAXPLAYCH 1 if (pcm_register(dev, fm801, FM801_MAXPLAYCH, 1)) goto oops; pcm_addchan(dev, PCMDIR_PLAY, &fm801ch_class, fm801); pcm_addchan(dev, PCMDIR_REC, &fm801ch_class, fm801); pcm_setstatus(dev, status); fm801->radio = device_add_child(dev, "radio", -1); bus_generic_attach(dev); return 0; oops: if (codec) ac97_destroy(codec); if (fm801->reg) bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); if (fm801->ih) bus_teardown_intr(dev, fm801->irq, fm801->ih); if (fm801->irq) bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); if (fm801->parent_dmat) bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return ENXIO; } static int fm801_pci_detach(device_t dev) { int r; struct fm801_info *fm801; DPRINT("Forte Media FM801 detach\n"); fm801 = pcm_getdevinfo(dev); r = bus_generic_detach(dev); if (r) return r; if (fm801->radio != NULL) { r = device_delete_child(dev, fm801->radio); if (r) return r; fm801->radio = NULL; } r = pcm_unregister(dev); if (r) return r; bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); bus_teardown_intr(dev, fm801->irq, fm801->ih); bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return 0; } static int fm801_pci_probe( device_t dev ) { int id; if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA1 ) { device_set_desc(dev, "Forte Media FM801 Audio Controller"); return BUS_PROBE_DEFAULT; } /* if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA2 ) { device_set_desc(dev, "Forte Media FM801 Joystick (Not Supported)"); return ENXIO; } */ return ENXIO; } static struct resource * fm801_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fm801_info *fm801; fm801 = pcm_getdevinfo(bus); if (type == SYS_RES_IOPORT && *rid == PCIR_BAR(0)) return (fm801->reg); return (NULL); } static int -fm801_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +fm801_release_resource(device_t bus, device_t child, struct resource *r) { return (0); } static device_method_t fm801_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fm801_pci_probe), DEVMETHOD(device_attach, fm801_pci_attach), DEVMETHOD(device_detach, fm801_pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_alloc_resource, fm801_alloc_resource), DEVMETHOD(bus_release_resource, fm801_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD_END }; static driver_t fm801_driver = { "pcm", fm801_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_fm801, pci, fm801_driver, 0, 0); MODULE_DEPEND(snd_fm801, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_fm801, 1); diff --git a/sys/dev/vmd/vmd.c b/sys/dev/vmd/vmd.c index 904e615258a6..b52787fc45d3 100644 --- a/sys/dev/vmd/vmd.c +++ b/sys/dev/vmd/vmd.c @@ -1,755 +1,753 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alexander Motin * Copyright 2019 Cisco Systems, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" struct vmd_type { u_int16_t vmd_vid; u_int16_t vmd_did; char *vmd_name; int flags; #define BUS_RESTRICT 1 #define VECTOR_OFFSET 2 #define CAN_BYPASS_MSI 4 }; #define VMD_CAP 0x40 #define VMD_BUS_RESTRICT 0x1 #define VMD_CONFIG 0x44 #define VMD_BYPASS_MSI 0x2 #define VMD_BUS_START(x) ((x >> 8) & 0x3) #define VMD_LOCK 0x70 SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Intel Volume Management Device tuning parameters"); /* * By default all VMD devices remap children MSI/MSI-X interrupts into their * own. It creates additional isolation, but also complicates things due to * sharing, etc. Fortunately some VMD devices can bypass the remapping. */ static int vmd_bypass_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, bypass_msi, CTLFLAG_RWTUN, &vmd_bypass_msi, 0, "Bypass MSI remapping on capable hardware"); /* * All MSIs within a group share address, so VMD can't distinguish them. * It makes no sense to use more than one per device, only if required by * some specific device drivers. */ static int vmd_max_msi = 1; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0, "Maximum number of MSI vectors per device"); /* * MSI-X can use different addresses, but we have limited number of MSI-X * we can route to, so use conservative default to try to avoid sharing. 
*/ static int vmd_max_msix = 3; SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0, "Maximum number of MSI-X vectors per device"); static struct vmd_type vmd_devs[] = { { 0x8086, 0x201d, "Intel Volume Management Device", 0 }, { 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT | CAN_BYPASS_MSI }, { 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x7d0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xa77f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0x8086, 0xad0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET }, { 0, 0, NULL, 0 } }; static int vmd_probe(device_t dev) { struct vmd_type *t; uint16_t vid, did; vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) { device_set_desc(dev, t->vmd_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void vmd_free(struct vmd_softc *sc) { struct vmd_irq *vi; struct vmd_irq_user *u; int i; if (sc->psc.bus.rman.rm_end != 0) rman_fini(&sc->psc.bus.rman); if (sc->psc.mem.rman.rm_end != 0) rman_fini(&sc->psc.mem.rman); while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) { LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); } if (sc->vmd_irq != NULL) { for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; if (vi->vi_res == NULL) continue; bus_teardown_intr(sc->psc.dev, vi->vi_res, vi->vi_handle); bus_release_resource(sc->psc.dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); } } free(sc->vmd_irq, M_DEVBUF); sc->vmd_irq = NULL; pci_release_msi(sc->psc.dev); for (i = 0; i < VMD_MAX_BAR; i++) { if (sc->vmd_regs_res[i] != NULL) bus_release_resource(sc->psc.dev, SYS_RES_MEMORY, sc->vmd_regs_rid[i], sc->vmd_regs_res[i]); } } /* Hidden PCI Roots are hidden in BAR(0). */ static uint32_t vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return (0xffffffff); offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_read_4(sc->vmd_regs_res[0], offset)); case 2: return (bus_read_2(sc->vmd_regs_res[0], offset)); case 1: return (bus_read_1(sc->vmd_regs_res[0], offset)); default: __assert_unreachable(); return (0xffffffff); } } static void vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { struct vmd_softc *sc; bus_addr_t offset; sc = device_get_softc(dev); if (b < sc->vmd_bus_start || b > sc->vmd_bus_end) return; offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg; switch (width) { case 4: return (bus_write_4(sc->vmd_regs_res[0], offset, val)); case 2: return (bus_write_2(sc->vmd_regs_res[0], offset, val)); case 1: return (bus_write_1(sc->vmd_regs_res[0], offset, val)); default: __assert_unreachable(); } } static void vmd_set_msi_bypass(device_t dev, bool enable) { uint16_t val; val = pci_read_config(dev, VMD_CONFIG, 2); if (enable) val |= VMD_BYPASS_MSI; else val &= ~VMD_BYPASS_MSI; pci_write_config(dev, VMD_CONFIG, val, 2); } static int vmd_intr(void *arg) { /* * We have nothing to do here, but we have to register some interrupt * handler to make PCI code setup and enable the MSI-X vector. 
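
vmd_read_config above computes a flat offset into BAR0 from bus/slot/function/register, ECAM-style: 1 MB per bus, 32 KB per slot, 4 KB per function. A standalone check of that arithmetic (cfg_offset is a hypothetical name):

#include <stdio.h>

/* Mirror vmd_read_config's offset: (b - start)<<20 + s<<15 + f<<12 + reg. */
static unsigned long
cfg_offset(unsigned b, unsigned bus_start, unsigned s, unsigned f,
    unsigned reg)
{
        return (((unsigned long)(b - bus_start) << 20) + (s << 15) +
            (f << 12) + reg);
}

int
main(void)
{
        /* Bus 2 (window starts at bus 0), slot 1, function 3, register 0x10. */
        printf("%#lx\n", cfg_offset(2, 0, 1, 3, 0x10));
        /* 0x200000 + 0x8000 + 0x3000 + 0x10 = 0x20b010 */
        return (0);
}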
*/ return (FILTER_STRAY); } static int vmd_attach(device_t dev) { struct vmd_softc *sc; struct pcib_secbus *bus; struct pcib_window *w; struct vmd_type *t; struct vmd_irq *vi; uint16_t vid, did; uint32_t bar; int i, j, error; char buf[64]; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->psc.dev = dev; sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev); pci_enable_busmaster(dev); for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) { sc->vmd_regs_rid[i] = PCIR_BAR(j); bar = pci_read_config(dev, PCIR_BAR(0), 4); if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) j++; if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate resources\n"); goto fail; } } vid = pci_get_vendor(dev); did = pci_get_device(dev); for (t = vmd_devs; t->vmd_name != NULL; t++) { if (vid == t->vmd_vid && did == t->vmd_did) break; } sc->vmd_bus_start = 0; if ((t->flags & BUS_RESTRICT) && (pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) { switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) { case 0: sc->vmd_bus_start = 0; break; case 1: sc->vmd_bus_start = 128; break; case 2: sc->vmd_bus_start = 224; break; default: device_printf(dev, "Unknown bus offset\n"); goto fail; } } sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start + (rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1); bus = &sc->psc.bus; bus->sec = sc->vmd_bus_start; bus->sub = sc->vmd_bus_end; bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) { device_printf(dev, "Failed to initialize bus rman\n"); bus->rman.rm_end = 0; goto fail; } error = rman_manage_region(&bus->rman, sc->vmd_bus_start, sc->vmd_bus_end); if (error) { device_printf(dev, "Failed to add resource to bus rman\n"); goto fail; } w = &sc->psc.mem; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev)); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) { device_printf(dev, "Failed to initialize memory rman\n"); w->rman.rm_end = 0; goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[1]), rman_get_end(sc->vmd_regs_res[1])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } error = rman_manage_region(&w->rman, rman_get_start(sc->vmd_regs_res[2]) + 0x2000, rman_get_end(sc->vmd_regs_res[2])); if (error) { device_printf(dev, "Failed to add resource to memory rman\n"); goto fail; } LIST_INIT(&sc->vmd_users); sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 
1 : 0; sc->vmd_msix_count = pci_msix_count(dev); if (vmd_bypass_msi && (t->flags & CAN_BYPASS_MSI)) { sc->vmd_msix_count = 0; vmd_set_msi_bypass(dev, true); } else if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) { sc->vmd_irq = malloc(sizeof(struct vmd_irq) * sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->vmd_msix_count; i++) { vi = &sc->vmd_irq[i]; vi->vi_rid = i + 1; vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &vi->vi_rid, RF_ACTIVE | RF_SHAREABLE); if (vi->vi_res == NULL) { device_printf(dev, "Failed to allocate irq\n"); goto fail; } vi->vi_irq = rman_get_start(vi->vi_res); if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC | INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) { device_printf(dev, "Can't set up interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, vi->vi_rid, vi->vi_res); vi->vi_res = NULL; goto fail; } } vmd_set_msi_bypass(dev, false); } sc->vmd_dma_tag = bus_get_dma_tag(dev); sc->psc.child = device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); fail: vmd_free(sc); return (ENXIO); } static int vmd_detach(device_t dev) { struct vmd_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); error = device_delete_children(dev); if (error) return (error); if (sc->vmd_msix_count == 0) vmd_set_msi_bypass(dev, false); vmd_free(sc); return (0); } static bus_dma_tag_t vmd_get_dma_tag(device_t dev, device_t child) { struct vmd_softc *sc = device_get_softc(dev); return (sc->vmd_dma_tag); } static struct rman * vmd_get_rman(device_t dev, int type, u_int flags) { struct vmd_softc *sc = device_get_softc(dev); switch (type) { case SYS_RES_MEMORY: return (&sc->psc.mem.rman); case PCI_RES_BUS: return (&sc->psc.bus.rman); default: /* VMD hardware does not support I/O ports. */ return (NULL); } } static struct resource * vmd_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (type == SYS_RES_IRQ) { /* VMD hardware does not support legacy interrupts. 
*/ if (*rid == 0) return (NULL); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags | RF_SHAREABLE)); } res = bus_generic_rman_alloc_resource(dev, child, type, rid, start, end, count, flags); if (bootverbose && res != NULL) { switch (type) { case SYS_RES_MEMORY: device_printf(dev, "allocated memory range (%#jx-%#jx) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; case PCI_RES_BUS: device_printf(dev, "allocated bus range (%ju-%ju) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); break; } } return (res); } static int vmd_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (rman_get_type(r) == SYS_RES_IRQ) { return (bus_generic_adjust_resource(dev, child, r, start, end)); } return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); } static int -vmd_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +vmd_release_resource(device_t dev, device_t child, struct resource *r) { - if (type == SYS_RES_IRQ) { - return (bus_generic_release_resource(dev, child, type, rid, - r)); + if (rman_get_type(r) == SYS_RES_IRQ) { + return (bus_generic_release_resource(dev, child, r)); } - return (bus_generic_rman_release_resource(dev, child, type, rid, r)); + return (bus_generic_rman_release_resource(dev, child, r)); } static int vmd_activate_resource(device_t dev, device_t child, struct resource *r) { if (rman_get_type(r) == SYS_RES_IRQ) { return (bus_generic_activate_resource(dev, child, r)); } return (bus_generic_rman_activate_resource(dev, child, r)); } static int vmd_deactivate_resource(device_t dev, device_t child, struct resource *r) { if (rman_get_type(r) == SYS_RES_IRQ) { return (bus_generic_deactivate_resource(dev, child, r)); } return (bus_generic_rman_deactivate_resource(dev, child, r)); } static struct resource * vmd_find_parent_resource(struct vmd_softc *sc, struct resource *r) { for (int i = 1; i < 3; i++) { if (rman_get_start(sc->vmd_regs_res[i]) <= rman_get_start(r) && rman_get_end(sc->vmd_regs_res[i]) >= rman_get_end(r)) return (sc->vmd_regs_res[i]); } return (NULL); } static int vmd_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); struct resource_map_request args; struct resource *pres; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); pres = vmd_find_parent_resource(sc, r); if (pres == NULL) return (ENOENT); args.offset = start - rman_get_start(pres); args.length = length; return (bus_generic_map_resource(dev, child, pres, &args, map)); } static int vmd_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { struct vmd_softc *sc = device_get_softc(dev); r = vmd_find_parent_resource(sc, r); if (r == NULL) return (ENOENT); return (bus_generic_unmap_resource(dev, child, r, map)); } static int vmd_route_interrupt(device_t dev, device_t child, int pin) { /* VMD hardware does not support legacy interrupts. 
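
vmd_map_resource above rebases a child's mapping onto the VMD BAR that actually backs it: find the parent resource whose range contains the child's, then map at (child start - parent start). A standalone sketch of the containment test and offset math; struct range and contains() are illustrative, not kernel types:

#include <stdio.h>

struct range { unsigned long start, end; };

/* Does 'parent' fully contain 'child', as vmd_find_parent_resource checks? */
static int
contains(const struct range *parent, const struct range *child)
{
        return (parent->start <= child->start && parent->end >= child->end);
}

int
main(void)
{
        struct range bar = { 0xe0000000UL, 0xefffffffUL };
        struct range win = { 0xe0100000UL, 0xe010ffffUL };

        if (contains(&bar, &win))
                /* Offset of the child mapping within the parent BAR. */
                printf("offset %#lx\n", win.start - bar.start);  /* 0x100000 */
        return (0);
}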
*/ return (PCI_INVALID_IRQ); } static int vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSI(device_get_parent(device_get_parent(dev)), child, count, maxcount, irqs)); } if (count > vmd_max_msi) return (ENOSPC); LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) return (EBUSY); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers += count; for (i = 0; i < count; i++) irqs[i] = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msi(device_t dev, device_t child, int count, int *irqs) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)), child, count, irqs)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) { sc->vmd_irq[u->viu_vector].vi_nusers -= count; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_alloc_msix(device_t dev, device_t child, int *irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; int i, ibest = 0, best = INT_MAX; if (sc->vmd_msix_count == 0) { return (PCIB_ALLOC_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } i = 0; LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child) i++; } if (i >= vmd_max_msix) return (ENOSPC); for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (best > sc->vmd_irq[i].vi_nusers) { best = sc->vmd_irq[i].vi_nusers; ibest = i; } } u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO); u->viu_child = child; u->viu_vector = ibest; LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link); sc->vmd_irq[ibest].vi_nusers++; *irq = sc->vmd_irq[ibest].vi_irq; return (0); } static int vmd_release_msix(device_t dev, device_t child, int irq) { struct vmd_softc *sc = device_get_softc(dev); struct vmd_irq_user *u; if (sc->vmd_msix_count == 0) { return (PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)), child, irq)); } LIST_FOREACH(u, &sc->vmd_users, viu_link) { if (u->viu_child == child && sc->vmd_irq[u->viu_vector].vi_irq == irq) { sc->vmd_irq[u->viu_vector].vi_nusers--; LIST_REMOVE(u, viu_link); free(u, M_DEVBUF); return (0); } } return (EINVAL); } static int vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct vmd_softc *sc = device_get_softc(dev); int i; if (sc->vmd_msix_count == 0) { return (PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)), child, irq, addr, data)); } for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) { if (sc->vmd_irq[i].vi_irq == irq) break; } if (i >= sc->vmd_msix_count) return (EINVAL); *addr = MSI_INTEL_ADDR_BASE | (i << 12); *data = 0; return (0); } static device_method_t vmd_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vmd_probe), DEVMETHOD(device_attach, vmd_attach), DEVMETHOD(device_detach, vmd_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_get_dma_tag, vmd_get_dma_tag), DEVMETHOD(bus_get_rman, 
vmd_get_rman), DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, vmd_alloc_resource), DEVMETHOD(bus_adjust_resource, vmd_adjust_resource), DEVMETHOD(bus_release_resource, vmd_release_resource), DEVMETHOD(bus_activate_resource, vmd_activate_resource), DEVMETHOD(bus_deactivate_resource, vmd_deactivate_resource), DEVMETHOD(bus_map_resource, vmd_map_resource), DEVMETHOD(bus_unmap_resource, vmd_unmap_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, vmd_read_config), DEVMETHOD(pcib_write_config, vmd_write_config), DEVMETHOD(pcib_route_interrupt, vmd_route_interrupt), DEVMETHOD(pcib_alloc_msi, vmd_alloc_msi), DEVMETHOD(pcib_release_msi, vmd_release_msi), DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix), DEVMETHOD(pcib_release_msix, vmd_release_msix), DEVMETHOD(pcib_map_msi, vmd_map_msi), DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc)); DRIVER_MODULE(vmd, pci, vmd_pci_driver, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd, vmd_devs, nitems(vmd_devs) - 1); diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c index b6f99a0922e5..22c473108e2f 100644 --- a/sys/isa/isa_common.c +++ b/sys/isa/isa_common.c @@ -1,1136 +1,1134 @@ /*- * SPDX-License-Identifier: BSD-2-Clause AND MIT * * Copyright (c) 1999 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Modifications for Intel architecture by Garrett A. Wollman. * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. 
makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Parts of the ISA bus implementation common to all architectures. */ #include #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int isa_print_child(device_t bus, device_t dev); static MALLOC_DEFINE(M_ISADEV, "isadev", "ISA device"); static int isa_running; /* * At 'probe' time, we add all the devices which we know about to the * bus. The generic attach routine will probe and attach them if they * are alive. */ static int isa_probe(device_t dev) { device_set_desc(dev, "ISA bus"); isa_init(dev); /* Allow machdep code to initialise */ return (0); } extern device_t isa_bus_device; static int isa_attach(device_t dev) { /* * Arrange for isa_probe_children(dev) to be called later. XXX */ isa_bus_device = dev; return (0); } /* * Find a working set of memory regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_memory(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NMEM]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NMEM; i++) { bus_delete_resource(child, SYS_RES_MEMORY, i); res[i] = NULL; } success = 1; result->ic_nmem = config->ic_nmem; for (i = 0; i < config->ic_nmem; i++) { uint32_t start, end, size, align; size = config->ic_mem[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_mem[i].ir_start = 0; result->ic_mem[i].ir_end = 0; result->ic_mem[i].ir_size = 0; result->ic_mem[i].ir_align = 0; continue; } for (start = config->ic_mem[i].ir_start, end = config->ic_mem[i].ir_end, align = config->ic_mem[i].ir_align; start + size - 1 <= end && start + size > start; start += MAX(align, 1)) { bus_set_resource(child, SYS_RES_MEMORY, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_MEMORY, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_mem[i].ir_start = start; result->ic_mem[i].ir_end = start + size - 1; result->ic_mem[i].ir_size = size; result->ic_mem[i].ir_align = align; break; } } /* * If we didn't find a place for memory range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NMEM; i++) { if (res[i]) bus_release_resource(child, SYS_RES_MEMORY, i, res[i]); } return (success); } /* * Find a working set of port regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. 
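
isa_find_memory above probes candidate placements rather than computing one: for each aligned start in the allowed window it publishes the range with bus_set_resource(), attempts a real allocation, and keeps the first start that sticks (releasing everything before returning). The scan's loop bounds, including the overflow guard, as a standalone sketch (try_alloc and find_start are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for "did bus_alloc_resource_any() succeed at this start?". */
static int try_alloc(uint32_t start) { return (start >= 0xd0000); }

/*
 * Walk candidate starts exactly as isa_find_memory does: step by the
 * alignment, stop once the block no longer fits or 'start' would wrap.
 */
static int
find_start(uint32_t lo, uint32_t hi, uint32_t size, uint32_t align,
    uint32_t *out)
{
        uint32_t start;

        for (start = lo; start + size - 1 <= hi && start + size > start;
            start += (align > 1 ? align : 1)) {
                if (try_alloc(start)) {
                        *out = start;
                        return (1);
                }
        }
        return (0);
}

int
main(void)
{
        uint32_t start;

        if (find_start(0xc0000, 0xdffff, 0x4000, 0x4000, &start))
                printf("placed at %#x\n", start);       /* 0xd0000 */
        return (0);
}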
*/ static int isa_find_port(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NPORT]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NPORT; i++) { bus_delete_resource(child, SYS_RES_IOPORT, i); res[i] = NULL; } success = 1; result->ic_nport = config->ic_nport; for (i = 0; i < config->ic_nport; i++) { uint32_t start, end, size, align; size = config->ic_port[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_port[i].ir_start = 0; result->ic_port[i].ir_end = 0; result->ic_port[i].ir_size = 0; result->ic_port[i].ir_align = 0; continue; } for (start = config->ic_port[i].ir_start, end = config->ic_port[i].ir_end, align = config->ic_port[i].ir_align; start + size - 1 <= end; start += align) { bus_set_resource(child, SYS_RES_IOPORT, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_IOPORT, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_port[i].ir_start = start; result->ic_port[i].ir_end = start + size - 1; result->ic_port[i].ir_size = size; result->ic_port[i].ir_align = align; break; } } /* * If we didn't find a place for port range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NPORT; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IOPORT, i, res[i]); } return success; } /* * Return the index of the first bit in the mask (or -1 if mask is empty. */ static int find_first_bit(uint32_t mask) { return (ffs(mask) - 1); } /* * Return the index of the next bit in the mask, or -1 if there are no more. */ static int find_next_bit(uint32_t mask, int bit) { return (find_first_bit(mask & (-2 << bit))); } /* * Find a working set of irqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * irqs was found. */ static int isa_find_irq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NIRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NIRQ; i++) { bus_delete_resource(child, SYS_RES_IRQ, i); res[i] = NULL; } success = 1; result->ic_nirq = config->ic_nirq; for (i = 0; i < config->ic_nirq; i++) { uint32_t mask = config->ic_irqmask[i]; int irq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_irqmask[i] = 0; continue; } for (irq = find_first_bit(mask); irq != -1; irq = find_next_bit(mask, irq)) { bus_set_resource(child, SYS_RES_IRQ, i, irq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_IRQ, &i, 0 /* !RF_ACTIVE */ ); if (res[i]) { result->ic_irqmask[i] = (1 << irq); break; } } /* * If we didn't find a place for irq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NIRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IRQ, i, res[i]); } return (success); } /* * Find a working set of drqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * drqs was found. */ static int isa_find_drq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NDRQ]; /* * First clear out any existing resource definitions. 
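
find_next_bit above clears bit positions 0..bit with mask & (-2 << bit), since -2 is ...11110 and shifting it left by 'bit' zeroes exactly those low positions, then reuses ffs() via find_first_bit. Iterating an IRQ mask with the same pair, as a standalone check:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static int find_first_bit(unsigned int mask) { return (ffs(mask) - 1); }

static int
find_next_bit(unsigned int mask, int bit)
{
        /* -2 << bit has zeros in positions 0..bit, ones above. */
        return (find_first_bit(mask & (-2 << bit)));
}

int
main(void)
{
        unsigned int irqmask = (1 << 3) | (1 << 5) | (1 << 10);
        int irq;

        /* Prints 3, 5, 10: the order isa_find_irq tries candidates in. */
        for (irq = find_first_bit(irqmask); irq != -1;
            irq = find_next_bit(irqmask, irq))
                printf("candidate irq %d\n", irq);
        return (0);
}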
*/ for (i = 0; i < ISA_NDRQ; i++) { bus_delete_resource(child, SYS_RES_DRQ, i); res[i] = NULL; } success = 1; result->ic_ndrq = config->ic_ndrq; for (i = 0; i < config->ic_ndrq; i++) { uint32_t mask = config->ic_drqmask[i]; int drq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_drqmask[i] = 0; continue; } for (drq = find_first_bit(mask); drq != -1; drq = find_next_bit(mask, drq)) { bus_set_resource(child, SYS_RES_DRQ, i, drq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_DRQ, &i, 0 /* !RF_ACTIVE */); if (res[i]) { result->ic_drqmask[i] = (1 << drq); break; } } /* * If we didn't find a place for drq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NDRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_DRQ, i, res[i]); } return (success); } /* * Attempt to find a working set of resources for a device. Return * non-zero if a working configuration is found. */ static int isa_assign_resources(device_t child) { struct isa_device *idev = DEVTOISA(child); struct isa_config_entry *ice; struct isa_config *cfg; const char *reason; reason = "Empty ISA id_configs"; cfg = malloc(sizeof(struct isa_config), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) return(0); TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { reason = "memory"; if (!isa_find_memory(child, &ice->ice_config, cfg)) continue; reason = "port"; if (!isa_find_port(child, &ice->ice_config, cfg)) continue; reason = "irq"; if (!isa_find_irq(child, &ice->ice_config, cfg)) continue; reason = "drq"; if (!isa_find_drq(child, &ice->ice_config, cfg)) continue; /* * A working configuration was found enable the device * with this configuration. */ reason = "no callback"; if (idev->id_config_cb) { idev->id_config_cb(idev->id_config_arg, cfg, 1); free(cfg, M_TEMP); return (1); } } /* * Disable the device. */ bus_print_child_header(device_get_parent(child), child); printf(" can't assign resources (%s)\n", reason); if (bootverbose) isa_print_child(device_get_parent(child), child); bzero(cfg, sizeof (*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); device_disable(child); free(cfg, M_TEMP); return (0); } /* * Claim any unallocated resources to keep other devices from using * them. */ static void isa_claim_resources(device_t dev, device_t child) { struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; int rid; STAILQ_FOREACH(rle, rl, link) { if (!rle->res) { rid = rle->rid; resource_list_alloc(rl, dev, child, rle->type, &rid, 0, ~0, 1, 0); } } } /* * Called after other devices have initialised to probe for isa devices. */ void isa_probe_children(device_t dev) { struct isa_device *idev; device_t *children, child; struct isa_config *cfg; int nchildren, i, err; /* * Create all the non-hinted children by calling drivers' * identify methods. */ bus_generic_probe(dev); if (device_get_children(dev, &children, &nchildren)) return; /* * First disable all pnp devices so that they don't get * matched by legacy probes. */ if (bootverbose) printf("isa_probe_children: disabling PnP devices\n"); cfg = malloc(sizeof(*cfg), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) { free(children, M_TEMP); return; } for (i = 0; i < nchildren; i++) { idev = DEVTOISA(children[i]); bzero(cfg, sizeof(*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); } free(cfg, M_TEMP); /* * Next, probe all the PnP BIOS devices so they can subsume any * hints. 
*/ for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (idev->id_order > ISA_ORDER_PNPBIOS) continue; if (!TAILQ_EMPTY(&idev->id_configs) && !isa_assign_resources(child)) continue; if (device_probe_and_attach(child) == 0) isa_claim_resources(dev, child); } free(children, M_TEMP); /* * Next, enumerate hinted devices and probe all non-pnp devices so * that they claim their resources first. */ bus_enumerate_hinted_children(dev); if (device_get_children(dev, &children, &nchildren)) return; if (bootverbose) printf("isa_probe_children: probing non-PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || !TAILQ_EMPTY(&idev->id_configs)) continue; err = device_probe_and_attach(child); if (err == 0 && idev->id_vendorid == 0 && strcmp(kern_ident, "GENERIC") == 0 && device_is_attached(child)) device_printf(child, "non-PNP ISA device will be removed from GENERIC in FreeBSD 15.\n"); } /* * Finally assign resources to pnp devices and probe them. */ if (bootverbose) printf("isa_probe_children: probing PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || TAILQ_EMPTY(&idev->id_configs)) continue; if (isa_assign_resources(child)) { device_probe_and_attach(child); isa_claim_resources(dev, child); } } free(children, M_TEMP); isa_running = 1; } /* * Add a new child with default ivars. */ static device_t isa_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct isa_device *idev; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); idev = malloc(sizeof(struct isa_device), M_ISADEV, M_NOWAIT | M_ZERO); if (!idev) return (0); resource_list_init(&idev->id_resources); TAILQ_INIT(&idev->id_configs); idev->id_order = order; device_set_ivars(child, idev); return (child); } static int isa_print_all_resources(device_t dev) { struct isa_device *idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; int retval = 0; if (STAILQ_FIRST(rl) || device_get_flags(dev)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); if (idev->id_vendorid) retval += printf(" pnpid %s", pnp_eisaformat(idev->id_vendorid)); return (retval); } static int isa_print_child(device_t bus, device_t dev) { int retval = 0; retval += bus_print_child_header(bus, dev); retval += isa_print_all_resources(dev); retval += bus_print_child_footer(bus, dev); return (retval); } static void isa_probe_nomatch(device_t dev, device_t child) { if (bootverbose) { bus_print_child_header(dev, child); printf(" failed to probe"); isa_print_all_resources(child); bus_print_child_footer(dev, child); } return; } static int isa_read_ivar(device_t bus, device_t dev, int index, uintptr_t * result) { struct isa_device* idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; switch (index) { case ISA_IVAR_PORT_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORT_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->start; else *result = -1; break; case 
ISA_IVAR_PORTSIZE_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_PORTSIZE_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MADDR_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MADDR_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MEMSIZE_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MEMSIZE_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_IRQ_0: rle = resource_list_find(rl, SYS_RES_IRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_IRQ_1: rle = resource_list_find(rl, SYS_RES_IRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_0: rle = resource_list_find(rl, SYS_RES_DRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_1: rle = resource_list_find(rl, SYS_RES_DRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_VENDORID: *result = idev->id_vendorid; break; case ISA_IVAR_SERIAL: *result = idev->id_serial; break; case ISA_IVAR_LOGICALID: *result = idev->id_logicalid; break; case ISA_IVAR_COMPATID: *result = idev->id_compatid; break; case ISA_IVAR_CONFIGATTR: *result = idev->id_config_attr; break; case ISA_IVAR_PNP_CSN: *result = idev->id_pnp_csn; break; case ISA_IVAR_PNP_LDN: *result = idev->id_pnp_ldn; break; case ISA_IVAR_PNPBIOS_HANDLE: *result = idev->id_pnpbios_handle; break; default: return (ENOENT); } return (0); } static int isa_write_ivar(device_t bus, device_t dev, int index, uintptr_t value) { struct isa_device* idev = DEVTOISA(dev); switch (index) { case ISA_IVAR_PORT_0: case ISA_IVAR_PORT_1: case ISA_IVAR_PORTSIZE_0: case ISA_IVAR_PORTSIZE_1: case ISA_IVAR_MADDR_0: case ISA_IVAR_MADDR_1: case ISA_IVAR_MEMSIZE_0: case ISA_IVAR_MEMSIZE_1: case ISA_IVAR_IRQ_0: case ISA_IVAR_IRQ_1: case ISA_IVAR_DRQ_0: case ISA_IVAR_DRQ_1: return (EINVAL); case ISA_IVAR_VENDORID: idev->id_vendorid = value; break; case ISA_IVAR_SERIAL: idev->id_serial = value; break; case ISA_IVAR_LOGICALID: idev->id_logicalid = value; break; case ISA_IVAR_COMPATID: idev->id_compatid = value; break; case ISA_IVAR_CONFIGATTR: idev->id_config_attr = value; break; default: return (ENOENT); } return (0); } /* * Free any resources which the driver missed or which we were holding for * it (see isa_probe_children). */ static void isa_child_detached(device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } static void isa_driver_added(device_t dev, driver_t *driver) { device_t *children; int nchildren, i; /* * Don't do anything if drivers are dynamically * added during autoconfiguration (cf. ymf724), * since that would end up calling identify * twice. 
*/ if (!isa_running) return; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { device_t child = children[i]; struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; if (device_get_state(child) != DS_NOTPRESENT) continue; if (!device_is_enabled(child)) continue; /* * Free resources which we were holding on behalf of * the device. */ STAILQ_FOREACH(rle, &idev->id_resources, link) { if (rle->res) resource_list_release(rl, dev, child, - rle->type, - rle->rid, rle->res); } if (TAILQ_FIRST(&idev->id_configs)) if (!isa_assign_resources(child)) continue; device_probe_and_attach(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } free(children, M_TEMP); } static int isa_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= ISA_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= ISA_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= ISA_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= ISA_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); return (0); } static struct resource_list * isa_get_resource_list (device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (!rl) return (NULL); return (rl); } static int isa_add_config(device_t dev, device_t child, int priority, struct isa_config *config) { struct isa_device* idev = DEVTOISA(child); struct isa_config_entry *newice, *ice; newice = malloc(sizeof *ice, M_DEVBUF, M_NOWAIT); if (!newice) return (ENOMEM); newice->ice_priority = priority; newice->ice_config = *config; TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { if (ice->ice_priority > priority) break; } if (ice) TAILQ_INSERT_BEFORE(ice, newice, ice_link); else TAILQ_INSERT_TAIL(&idev->id_configs, newice, ice_link); return (0); } static void isa_set_config_callback(device_t dev, device_t child, isa_config_cb *fn, void *arg) { struct isa_device* idev = DEVTOISA(child); idev->id_config_cb = fn; idev->id_config_arg = arg; } static int isa_pnp_probe(device_t dev, device_t child, struct isa_pnp_id *ids) { struct isa_device* idev = DEVTOISA(child); if (!idev->id_vendorid) return (ENOENT); while (ids && ids->ip_id) { /* * Really ought to support >1 compat id per device. 
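 *
 * (For reference, a driver's probe method usually reaches this code
 * through the ISA_PNP_PROBE() convenience macro with a table of IDs;
 * a sketch with a hypothetical ID:
 *	static struct isa_pnp_id foo_ids[] = {
 *		{ 0x12345678, "Foo PnP device" },
 *		{ 0 }
 *	};
 *	return (ISA_PNP_PROBE(device_get_parent(dev), dev, foo_ids));
 * where each ip_id is an EISA-format ID as decoded by
 * pnp_eisaformat() below.)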
*/ if (idev->id_logicalid == ids->ip_id || idev->id_compatid == ids->ip_id) { if (ids->ip_desc) device_set_desc(child, ids->ip_desc); return (0); } ids++; } return (ENXIO); } static int isa_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) sbuf_printf(sb, "pnpid=%s", pnp_eisaformat(idev->id_vendorid)); return (0); } static int isa_child_location(device_t bus, device_t child, struct sbuf *sb) { #if 0 /* id_pnphandle isn't there yet */ struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) sbuf_printf(sb, "pnphandle=%d", idev->id_pnphandle); #endif return (0); } static device_method_t isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isa_probe), DEVMETHOD(device_attach, isa_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, isa_add_child), DEVMETHOD(bus_print_child, isa_print_child), DEVMETHOD(bus_probe_nomatch, isa_probe_nomatch), DEVMETHOD(bus_read_ivar, isa_read_ivar), DEVMETHOD(bus_write_ivar, isa_write_ivar), DEVMETHOD(bus_child_detached, isa_child_detached), DEVMETHOD(bus_driver_added, isa_driver_added), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_resource_list,isa_get_resource_list), DEVMETHOD(bus_alloc_resource, isa_alloc_resource), DEVMETHOD(bus_release_resource, isa_release_resource), DEVMETHOD(bus_set_resource, isa_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_child_pnpinfo, isa_child_pnpinfo), DEVMETHOD(bus_child_location, isa_child_location), DEVMETHOD(bus_hinted_child, isa_hinted_child), DEVMETHOD(bus_hint_device_unit, isa_hint_device_unit), /* ISA interface */ DEVMETHOD(isa_add_config, isa_add_config), DEVMETHOD(isa_set_config_callback, isa_set_config_callback), DEVMETHOD(isa_pnp_probe, isa_pnp_probe), { 0, 0 } }; DEFINE_CLASS_0(isa, isa_driver, isa_methods, 0); /* * ISA can be attached to a PCI-ISA bridge, or other locations on some * platforms. */ DRIVER_MODULE(isa, isab, isa_driver, 0, 0); DRIVER_MODULE(isa, eisab, isa_driver, 0, 0); MODULE_VERSION(isa, 1); /* * Code common to ISA bridges. */ int isab_attach(device_t dev) { device_t child; child = device_add_child(dev, "isa", 0); if (child != NULL) return (bus_generic_attach(dev)); return (ENXIO); } char * pnp_eisaformat(uint32_t id) { uint8_t *data; static char idbuf[8]; const char hextoascii[] = "0123456789abcdef"; id = htole32(id); data = (uint8_t *)&id; idbuf[0] = '@' + ((data[0] & 0x7c) >> 2); idbuf[1] = '@' + (((data[0] & 0x3) << 3) + ((data[1] & 0xe0) >> 5)); idbuf[2] = '@' + (data[1] & 0x1f); idbuf[3] = hextoascii[(data[2] >> 4)]; idbuf[4] = hextoascii[(data[2] & 0xf)]; idbuf[5] = hextoascii[(data[3] >> 4)]; idbuf[6] = hextoascii[(data[3] & 0xf)]; idbuf[7] = 0; return(idbuf); } diff --git a/sys/isa/isa_common.h b/sys/isa/isa_common.h index 9c235c68347e..767b132965dc 100644 --- a/sys/isa/isa_common.h +++ b/sys/isa/isa_common.h @@ -1,77 +1,77 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Doug Rabson * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Parts of the ISA bus implementation common to all architectures. * * Drivers must not depend on information in this file as it can change * without notice. */ /* * PNP configurations are kept in a tailq. */ TAILQ_HEAD(isa_config_list, isa_config_entry); struct isa_config_entry { TAILQ_ENTRY(isa_config_entry) ice_link; int ice_priority; struct isa_config ice_config; }; /* * The structure used to attach devices to the isa bus. */ struct isa_device { struct resource_list id_resources; uint32_t id_vendorid; /* pnp vendor id */ uint32_t id_serial; /* pnp serial */ uint32_t id_logicalid; /* pnp logical device id */ uint32_t id_compatid; /* pnp compat device id */ struct isa_config_list id_configs; /* pnp config alternatives */ isa_config_cb *id_config_cb; /* callback function */ void *id_config_arg; /* callback argument */ int id_config_attr; /* pnp config attributes */ int id_pnpbios_handle; /* pnp handle, if any */ int id_pnp_csn; /* pnp Card Number */ int id_pnp_ldn; /* pnp Logical device on card */ int id_order; }; #define DEVTOISA(dev) ((struct isa_device *) device_get_ivars(dev)) /* * These functions are architecture dependent. */ extern void isa_init(device_t dev); extern struct resource *isa_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); extern int isa_release_resource(device_t bus, device_t child, - int type, int rid, struct resource *r); + struct resource *r); extern driver_t isa_driver; diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index 26745be6eb34..e42dc6d31441 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -1,971 +1,967 @@ #- # Copyright (c) 1998-2004 Doug Rabson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # #include #include #include /** * @defgroup BUS bus - KObj methods for drivers of devices with children * @brief A set of methods required by device drivers that support * child devices. * @{ */ INTERFACE bus; # # Default implementations of some methods. # CODE { static struct resource * null_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (0); } static int null_remap_intr(device_t bus, device_t dev, u_int irq) { if (dev != NULL) return (BUS_REMAP_INTR(dev, NULL, irq)); return (ENXIO); } static device_t null_add_child(device_t bus, int order, const char *name, int unit) { panic("bus_add_child is not implemented"); } static int null_reset_post(device_t bus, device_t dev) { return (0); } static int null_reset_prepare(device_t bus, device_t dev) { return (0); } static struct rman * null_get_rman(device_t bus, int type, u_int flags) { return (NULL); } }; /** * @brief Print a description of a child device * * This is called from system code which prints out a description of a * device. It should describe the attachment that the child has with * the parent. For instance the TurboLaser bus prints which node the * device is attached to. See bus_generic_print_child() for more * information. * * @param _dev the device whose child is being printed * @param _child the child device to describe * * @returns the number of characters output. */ METHOD int print_child { device_t _dev; device_t _child; } DEFAULT bus_generic_print_child; /** * @brief Print a notification about an unprobed child device. * * Called for each child device that did not succeed in probing for a * driver. * * @param _dev the device whose child was being probed * @param _child the child device which failed to probe */ METHOD void probe_nomatch { device_t _dev; device_t _child; }; /** * @brief Read the value of a bus-specific attribute of a device * * This method, along with BUS_WRITE_IVAR() manages a bus-specific set * of instance variables of a child device. The intention is that * each different type of bus defines a set of appropriate instance * variables (such as ports and irqs for ISA bus etc.) * * This information could be given to the child device as a struct but * that makes it hard for a bus to add or remove variables without * forcing an edit and recompile for all drivers which may not be * possible for vendor supplied binary drivers. * * This method copies the value of an instance variable to the * location specified by @p *_result. 
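 *
 * Buses normally hide this method behind accessor macros; as a rough
 * sketch, the ISA bus's isa_get_irq(dev) accessor expands to
 * approximately:
 *	uintptr_t v;
 *	BUS_READ_IVAR(device_get_parent(dev), dev, ISA_IVAR_IRQ_0, &v);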
* * @param _dev the device whose child was being examined * @param _child the child device whose instance variable is * being read * @param _index the instance variable to read * @param _result a location to receive the instance variable * value * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev */ METHOD int read_ivar { device_t _dev; device_t _child; int _index; uintptr_t *_result; }; /** * @brief Write the value of a bus-specific attribute of a device * * This method sets the value of an instance variable to @p _value. * * @param _dev the device whose child was being updated * @param _child the child device whose instance variable is * being written * @param _index the instance variable to write * @param _value the value to write to that instance variable * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev * @retval EINVAL the instance variable was recognised but * contains a read-only value */ METHOD int write_ivar { device_t _dev; device_t _child; int _index; uintptr_t _value; }; /** * @brief Notify a bus that a child was deleted * * Called at the beginning of device_delete_child() to allow the parent * to teardown any bus-specific state for the child. * * @param _dev the device whose child is being deleted * @param _child the child device which is being deleted */ METHOD void child_deleted { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a child was detached * * Called after the child's DEVICE_DETACH() method to allow the parent * to reclaim any resources allocated on behalf of the child. * * @param _dev the device whose child changed state * @param _child the child device which changed state */ METHOD void child_detached { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a new driver was added * * Called when a new driver is added to the devclass which owns this * bus. The generic implementation of this method attempts to probe and * attach any un-matched children of the bus. * * @param _dev the device whose devclass had a new driver * added to it * @param _driver the new driver which was added */ METHOD void driver_added { device_t _dev; driver_t *_driver; } DEFAULT bus_generic_driver_added; /** * @brief Create a new child device * * For buses which use drivers supporting DEVICE_IDENTIFY() to * enumerate their devices, this method is used to create new * device instances. The new device will be added after the last * existing child with the same order. Implementations of bus_add_child * call device_add_child_ordered to add the child and often add * a suitable ivar to the device specific to that bus. * * @param _dev the bus device which will be the parent of the * new child device * @param _order a value which is used to partially sort the * children of @p _dev - devices created using * lower values of @p _order appear first in @p * _dev's list of children * @param _name devclass name for new device or @c NULL if not * specified * @param _unit unit number for new device or @c -1 if not * specified */ METHOD device_t add_child { device_t _dev; u_int _order; const char *_name; int _unit; } DEFAULT null_add_child; /** * @brief Rescan the bus * * This method is called by a parent bridge or devctl to trigger a bus * rescan. The rescan should delete devices no longer present and * enumerate devices that have newly arrived. 
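 *
 * (From userland a rescan is typically requested with devctl(8),
 * e.g. "devctl rescan pci0".)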
* * @param _dev the bus device */ METHOD int rescan { device_t _dev; } DEFAULT bus_null_rescan; /** * @brief Allocate a system resource * * This method is called by child devices of a bus to allocate resources. * The types are defined in <machine/resource.h>; the meaning of the * resource-ID field varies from bus to bus (but @p *rid == 0 is always * valid if the resource type is). If a resource was allocated and the * caller did not use the RF_ACTIVE flag to specify that it should be * activated immediately, the caller is responsible for calling * BUS_ACTIVATE_RESOURCE() when it actually uses the resource. * * @param _dev the parent device of @p _child * @param _child the device which is requesting an allocation * @param _type the type of resource to allocate * @param _rid a pointer to the resource identifier * @param _start hint at the start of the resource range - pass * @c 0 for any start address * @param _end hint at the end of the resource range - pass * @c ~0 for any end address * @param _count hint at the size of range required - pass @c 1 * for any size * @param _flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ METHOD struct resource * alloc_resource { device_t _dev; device_t _child; int _type; int *_rid; rman_res_t _start; rman_res_t _end; rman_res_t _count; u_int _flags; } DEFAULT null_alloc_resource; /** * @brief Activate a resource * * Activate a resource previously allocated with * BUS_ALLOC_RESOURCE(). This may enable decoding of this resource in a * device for instance. It will also establish a mapping for the resource * unless RF_UNMAPPED was set when allocating the resource. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource to activate */ METHOD int activate_resource { device_t _dev; device_t _child; struct resource *_r; }; /** * @brief Map a resource * * Allocate a mapping for a range of an active resource. The mapping * is described by a struct resource_map object. This may for instance * map a memory region into the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource to map * @param _args optional attributes of the mapping * @param _map the mapping */ METHOD int map_resource { device_t _dev; device_t _child; struct resource *_r; struct resource_map_request *_args; struct resource_map *_map; } DEFAULT bus_generic_map_resource; /** * @brief Unmap a resource * * Release a mapping previously allocated with * BUS_MAP_RESOURCE(). This may for instance unmap a memory region * from the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource * @param _map the mapping to release */ METHOD int unmap_resource { device_t _dev; device_t _child; struct resource *_r; struct resource_map *_map; } DEFAULT bus_generic_unmap_resource; /** * @brief Deactivate a resource * * Deactivate a resource previously allocated with * BUS_ALLOC_RESOURCE(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _r the resource to deactivate */ METHOD int deactivate_resource { device_t _dev; device_t _child; struct resource *_r; }; /** * @brief Adjust a resource * * Adjust the start and/or end of a resource allocated by * BUS_ALLOC_RESOURCE. 
At least part of the new address range must overlap * with the existing address range. If successful, the resource's range * will be adjusted to [start, end] on return. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _res the resource to adjust * @param _start the new starting address of the resource range * @param _end the new ending address of the resource range */ METHOD int adjust_resource { device_t _dev; device_t _child; struct resource *_res; rman_res_t _start; rman_res_t _end; }; /** * @brief translate a resource value * * Give a bus driver the opportunity to translate resource ranges. If * successful, the host's view of the resource starting at @p _start is * returned in @p _newstart, otherwise an error is returned. * * @param _dev the device associated with the resource * @param _type the type of resource * @param _start the starting address of the resource range * @param _newstart the new starting address of the resource range */ METHOD int translate_resource { device_t _dev; int _type; rman_res_t _start; rman_res_t *_newstart; } DEFAULT bus_generic_translate_resource; /** * @brief Release a resource * * Free a resource allocated by BUS_ALLOC_RESOURCE(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource - * @param _type the type of resource - * @param _rid the resource identifier * @param _res the resource to release */ METHOD int release_resource { device_t _dev; device_t _child; - int _type; - int _rid; struct resource *_res; }; /** * @brief Install an interrupt handler * * This method is used to associate an interrupt handler function with * an irq resource. When the interrupt triggers, the function @p _intr * will be called with the value of @p _arg as its single * argument. The value returned in @p *_cookiep is used to cancel the * interrupt handler - the caller should save this value to use in a * future call to BUS_TEARDOWN_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _flags a set of bits from enum intr_type specifying * the class of interrupt * @param _intr the function to call when the interrupt * triggers * @param _arg a value to use as the single argument in calls * to @p _intr * @param _cookiep a pointer to a location to receive a cookie * value that may be used to remove the interrupt * handler */ METHOD int setup_intr { device_t _dev; device_t _child; struct resource *_irq; int _flags; driver_filter_t *_filter; driver_intr_t *_intr; void *_arg; void **_cookiep; }; /** * @brief Uninstall an interrupt handler * * This method is used to disassociate an interrupt handler function * from an irq resource. The value of @p _cookie must be the value * returned from a previous call to BUS_SETUP_INTR(). 
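 *
 * A detach routine usually pairs this with releasing the interrupt
 * resource; a sketch (the softc members are hypothetical):
 *	bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
 *	bus_release_resource(dev, sc->irq_res);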
* * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered */ METHOD int teardown_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; }; /** * @brief Suspend an interrupt handler * * This method is used to mark a handler as suspended in the case * that the associated device is powered down and cannot be a source * for the (typically shared) interrupt. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int suspend_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_suspend_intr; /** * @brief Resume an interrupt handler * * This method is used to clear the suspended state of a handler when * the associated device is powered up and can be an interrupt source * again. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int resume_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_resume_intr; /** * @brief Define a resource which can be allocated with * BUS_ALLOC_RESOURCE(). * * This method is used by some buses (typically ISA) to allow a * driver to describe a resource range that it would like to * allocate. The resource identified by @p _type and @p _rid is defined * to start at @p _start and to include @p _count indices in its * range. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the start of the resource range * @param _count the size of the resource range */ METHOD int set_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t _start; rman_res_t _count; }; /** * @brief Describe a resource * * This method allows a driver to examine the range used for a given * resource without actually allocating it. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _startp the address of a location to receive the start * index of the resource range * @param _countp the address of a location to receive the size * of the resource range */ METHOD int get_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t *_startp; rman_res_t *_countp; }; /** * @brief Delete a resource. * * Use this to delete a resource (possibly one previously added with * BUS_SET_RESOURCE()). * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier */ METHOD void delete_resource { device_t _dev; device_t _child; int _type; int _rid; }; /** * @brief Return a struct resource_list. * * Used by drivers which use bus_generic_rl_alloc_resource() etc. to * implement their resource handling. It should return the resource * list of the given child device. 
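 *
 * (The ISA implementation in this change is typical: its
 * bus_get_resource_list method just returns
 * &DEVTOISA(child)->id_resources.)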
* * @param _dev the parent device of @p _child * @param _child the device which owns the resource list */ METHOD struct resource_list * get_resource_list { device_t _dev; device_t _child; } DEFAULT bus_generic_get_resource_list; /** * @brief Return a struct rman. * * Used by drivers which use bus_generic_rman_alloc_resource() etc. to * implement their resource handling. It should return the resource * manager used for the given resource type. * * @param _dev the bus device * @param _type the resource type * @param _flags resource flags (@c RF_XXX flags in * <sys/rman.h>) */ METHOD struct rman * get_rman { device_t _dev; int _type; u_int _flags; } DEFAULT null_get_rman; /** * @brief Is the hardware described by @p _child still attached to the * system? * * This method should return 0 if the device is not present. It * should return -1 if it is present. Any errors encountered in making * this determination should be returned as a normal errno value. Client * drivers are to * assume that the device is present, even if there is an error * determining if it is there. Buses are to try to avoid returning * errors, but newcard will return an error if the device fails to * implement this method. * * @param _dev the parent device of @p _child * @param _child the device which is being examined */ METHOD int child_present { device_t _dev; device_t _child; } DEFAULT bus_generic_child_present; /** * @brief Returns the pnp info for this device. * * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_pnpinfo { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_pnpinfo; /** * @brief Returns the location for this device. * * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined * @param _sb sbuf for results string */ METHOD int child_location { device_t _dev; device_t _child; struct sbuf *_sb; } DEFAULT bus_generic_child_location; /** * @brief Allow drivers to request that an interrupt be bound to a specific * CPU. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cpu the CPU to bind the interrupt to */ METHOD int bind_intr { device_t _dev; device_t _child; struct resource *_irq; int _cpu; } DEFAULT bus_generic_bind_intr; /** * @brief Allow (bus) drivers to specify the trigger mode and polarity * of the specified interrupt. 
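 *
 * For example (sketch), configuring IRQ 9 as level-triggered,
 * active-low:
 *	BUS_CONFIG_INTR(dev, 9, INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW);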
* * @param _dev the bus device * @param _irq the interrupt number to modify * @param _trig the trigger mode required * @param _pol the interrupt polarity required */ METHOD int config_intr { device_t _dev; int _irq; enum intr_trigger _trig; enum intr_polarity _pol; } DEFAULT bus_generic_config_intr; /** * @brief Allow drivers to associate a description with an active * interrupt handler. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered * @param _descr the description to associate with the interrupt */ METHOD int describe_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; const char *_descr; } DEFAULT bus_generic_describe_intr; /** * @brief Notify a (bus) driver about a child that the hints mechanism * believes it has discovered. * * The bus is responsible for then adding the child in the right order * and discovering other things about the child. The bus driver is * free to ignore this hint, to do special things, etc. It is all up * to the bus driver to interpret. * * This method is only called in response to the parent bus asking for * hinted devices to be enumerated. * * @param _dev the bus device * @param _dname the name of the device w/o unit numbers * @param _dunit the unit number of the device */ METHOD void hinted_child { device_t _dev; const char *_dname; int _dunit; }; /** * @brief Returns bus_dma_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_dma_tag_t get_dma_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_dma_tag; /** * @brief Returns bus_space_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_space_tag_t get_bus_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_bus_tag; /** * @brief Allow the bus to determine the unit number of a device. * * @param _dev the parent device of @p _child * @param _child the device whose unit is to be wired * @param _name the name of the device's new devclass * @param _unitp a pointer to the device's new unit value */ METHOD void hint_device_unit { device_t _dev; device_t _child; const char *_name; int *_unitp; }; /** * @brief Notify a bus that the bus pass level has been changed * * @param _dev the bus device */ METHOD void new_pass { device_t _dev; } DEFAULT bus_generic_new_pass; /** * @brief Notify a bus that specified child's IRQ should be remapped. * * @param _dev the bus device * @param _child the child device * @param _irq the irq number */ METHOD int remap_intr { device_t _dev; device_t _child; u_int _irq; } DEFAULT null_remap_intr; /** * @brief Suspend a given child * * @param _dev the parent device of @p _child * @param _child the device to suspend */ METHOD int suspend_child { device_t _dev; device_t _child; } DEFAULT bus_generic_suspend_child; /** * @brief Resume a given child * * @param _dev the parent device of @p _child * @param _child the device to resume */ METHOD int resume_child { device_t _dev; device_t _child; } DEFAULT bus_generic_resume_child; /** * @brief Get the VM domain handle for the given bus and child. 
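 *
 * Drivers normally use the bus_get_domain() wrapper; a sketch:
 *	int domain;
 *	if (bus_get_domain(dev, &domain) == 0)
 *		device_printf(dev, "NUMA domain %d\n", domain);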
* * @param _dev the bus device * @param _child the child device * @param _domain a pointer to the bus's domain handle identifier */ METHOD int get_domain { device_t _dev; device_t _child; int *_domain; } DEFAULT bus_generic_get_domain; /** * @brief Request a set of CPUs * * @param _dev the bus device * @param _child the child device * @param _op type of CPUs to request * @param _setsize the size of the set passed in _cpuset * @param _cpuset a pointer to a cpuset to receive the requested * set of CPUs */ METHOD int get_cpus { device_t _dev; device_t _child; enum cpu_sets _op; size_t _setsize; struct _cpuset *_cpuset; } DEFAULT bus_generic_get_cpus; /** * @brief Prepares the given child of the bus for reset * * Typically the bus detaches or suspends the children's drivers, and then * calls this method to save bus-specific information, for instance, * PCI config space, which is damaged by reset. * * The bus_helper_reset_prepare() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_prepare { device_t _dev; device_t _child; } DEFAULT null_reset_prepare; /** * @brief Restores the child operations after the reset * * The bus_helper_reset_post() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_post { device_t _dev; device_t _child; } DEFAULT null_reset_post; /** * @brief Performs reset of the child * * @param _dev the bus device * @param _child the child device * @param _flags DEVF_RESET_ flags */ METHOD int reset_child { device_t _dev; device_t _child; int _flags; }; /** * @brief Gets a child's specific property * * bus_get_property() can be used to access device * specific properties stored on the bus. If @p _propvalue * is NULL or @p _size is 0, then the method only returns the size * of the property. * * @param _dev the bus device * @param _child the child device * @param _propname property name * @param _propvalue property value destination * @param _size property value size * * @returns the size of the property if successful, otherwise -1 */ METHOD ssize_t get_property { device_t _dev; device_t _child; const char *_propname; void *_propvalue; size_t _size; device_property_type_t type; } DEFAULT bus_generic_get_property; /** * @brief Gets a child's full path to the device * * The get_device_path method retrieves a device's * full path using one of several * locators present in the system. * * @param _bus the bus device * @param _child the child device * @param _locator locator name * @param _sb buffer for the location string */ METHOD int get_device_path { device_t _bus; device_t _child; const char *_locator; struct sbuf *_sb; } DEFAULT bus_generic_get_device_path; diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 450fc2742dad..9df58cb987f6 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6106 +1,6097 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_bus.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); static bool disable_failed_devs = false; SYSCTL_BOOL(_hw_bus, OID_AUTO, disable_failed_devices, CTLFLAG_RWTUN, &disable_failed_devs, 0, "Do not retry attaching devices that return an error from DEVICE_ATTACH the first time"); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ u_int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(device_nomatch); EVENTHANDLER_LIST_DEFINE(dev_lookup); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJ<indent; iJ++) printf("  "); printf p ; } while (0) #else #define PDEBUG(a) #define DEVICENAME(d) #endif enum { DEVCLASS_SYSCTL_PARENT, }; static int devclass_sysctl_handler(SYSCTL_HANDLER_ARGS) { devclass_t dc = (devclass_t)arg1; const char *value; switch (arg2) { case DEVCLASS_SYSCTL_PARENT: value = dc->parent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); bus_topo_lock(); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: bus_child_location(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: bus_child_pnpinfo(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ? 
dev->parent->nameunit : ""); break; default: error = EINVAL; goto out; } error = sbuf_finish(&sb); out: bus_topo_unlock(); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); void bus_topo_assert(void) { GIANT_REQUIRED; } struct mtx * bus_topo_mtx(void) { return (&Giant); } void bus_topo_lock(void) { mtx_lock(bus_topo_mtx()); } void bus_topo_unlock(void) { mtx_unlock(bus_topo_mtx()); } /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. 
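 *
 * Pass levels are the BUS_PASS_* constants from sys/bus.h; a driver
 * opts into an early pass with EARLY_DRIVER_MODULE(), e.g. a
 * hypothetical "foo" driver on simplebus:
 *	EARLY_DRIVER_MODULE(foo, simplebus, foo_driver, 0, 0,
 *	    BUS_PASS_INTERRUPT);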
*/ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. 
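 *
 * Typical usage (sketch):
 *	devclass_t dc = devclass_find("pci");
 *	device_t dev = devclass_get_device(dc, 0);	/* pci0, or NULL */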
* * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } static void device_handle_nomatch(device_t dev) { BUS_PROBE_NOMATCH(dev->parent, dev); EVENTHANDLER_DIRECT_INVOKE(device_nomatch, dev); dev->flags |= DF_DONENOMATCH; } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; devclass_t child_dc; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. 
*/ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; child_dc = devclass_find_internal(driver->name, parentname, TRUE); if (dcp != NULL) *dcp = child_dc; dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { device_handle_nomatch(dev); } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param dc the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. 
*/ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesces a set of device drivers from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesce_driver() will first attempt to quiesce each * device. * * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0.
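 *
 * A minimal sketch of the resulting allocate/iterate/free pattern
 * (illustrative only):
 *
 *	device_t *devs;
 *	int count, i;
 *
 *	if (devclass_get_devices(dc, &devs, &count) == 0) {
 *		for (i = 0; i < count; i++)
 *			device_printf(devs[i], "in devclass\n");
 *		free(devs, M_TEMP);
 *	}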
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp.
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MAX(1, MINALLOCSIZE / sizeof(device_t))); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
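 *
 * For example (a sketch; the "foo" class name is hypothetical), a bus
 * that wants one fixed-class child probed ahead of its wildcard
 * children could use:
 *
 *	(void)device_add_child_ordered(dev, 0, "foo", -1);
 *	(void)device_add_child_ordered(dev, 10, NULL, -1);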
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater than or equal to the order * of any existing device. Add the child to the tail of the * list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unix error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* detach parent before deleting children, if any */ if ((error = device_detach(child)) != 0) return (error); /* remove children second */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.)
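 *
 * E.g. (sketch) to find the lowest-numbered "foo" child of @p dev:
 *
 *	device_t child;
 *
 *	child = device_find_child(dev, "foo", -1);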
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; /* We should preserve the devclass (or lack of) set by the bus. */ int hasclass = (child->devclass != NULL); bus_topo_assert(); dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. */ if (child->state == DS_ALIVE) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. 
*/ if (best && pri == 0) break; } if (best == NULL) return (ENXIO); /* * If we found a driver, change state and initialise the devclass. */ if (pri < 0) { /* Set the winning driver, devclass, and flags. */ result = device_set_driver(child, best->driver); if (result != 0) return (result); if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) { (void)device_set_driver(child, NULL); return (result); } } resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); /* * A bit bogus. Call the probe method again to make sure * that we have the right description. */ result = DEVICE_PROBE(child); if (result > 0) { if (!hasclass) (void)device_set_devclass(child, NULL); (void)device_set_driver(child, NULL); return (result); } } child->state = DS_ALIVE; bus_data_generation_update(); return (0); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling log() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_log(device_t dev, int pri, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); log(pri, "%.*s", (int) sbuf_len(&sb), sbuf_data(&sb)); retval = sbuf_len(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char *desc, bool allocated) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (allocated && desc) dev->flags |= DF_DESCMALLOCED; dev->desc = __DECONST(char *, desc); bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char *desc) { device_set_desc_internal(dev, desc, false); } /** * @brief Set the device's description * * A printf-like version of device_set_desc(). */ void device_set_descf(device_t dev, const char *fmt, ...) { va_list ap; char *buf = NULL; va_start(ap, fmt); vasprintf(&buf, M_BUS, fmt, ap); va_end(ap); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). 
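 *
 * A typical use (sketch; the buffer contents and the "rev" variable
 * are hypothetical):
 *
 *	char buf[64];
 *
 *	snprintf(buf, sizeof(buf), "Foo controller rev 0x%02x", rev);
 *	device_set_desc_copy(dev, buf);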
*/ void device_set_desc_copy(device_t dev, const char *desc) { char *buf; buf = strdup_flags(desc, M_BUS, M_NOWAIT); device_set_desc_internal(dev, buf, true); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { /* * Mark the device as busy, recursively up the tree if this busy count * goes 0->1. */ if (refcount_acquire(&dev->busy) == 0 && dev->parent != NULL) device_busy(dev->parent); } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { /* * Mark the device as unbusy, recursively if this is the last busy count. */ if (refcount_release(&dev->busy) && dev->parent != NULL) device_unbusy(dev->parent); } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type) { device_t bus = device_get_parent(dev); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_HANDLE: /* Size checks done in implementation.
*/ break; case DEVICE_PROP_UINT32: if (sz % 4 != 0) return (-1); break; case DEVICE_PROP_UINT64: if (sz % 8 != 0) return (-1); break; default: return (-1); } return (BUS_GET_PROPERTY(bus, dev, prop, val, sz, type)); } bool device_has_property(device_t dev, const char *prop) { return (device_get_property(dev, prop, NULL, 0, DEVICE_PROP_ANY) >= 0); } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return its status. * * This function is the core of the device autoconfiguration * system.
Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; bus_topo_assert(); if (dev->state >= DS_ALIVE) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { device_handle_nomatch(dev); } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; bus_topo_assert(); error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); CURVNET_SET_QUIET(vnet0); error = device_attach(dev); CURVNET_RESTORE(); return error; } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { device_disable(dev); if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); if (disable_failed_devs) { /* * When the user has asked to disable failed devices, we * directly disable the device, but leave it in the * attaching state. It will not try to probe/attach the * device further. This leaves the device numbering * intact for other similar devices in the system. It * can be removed from this state with devctl. */ device_disable(dev); } else { /* * Otherwise, when attach fails, tear down the state * around that so we can retry when, for example, new * drivers are loaded. 
*/ if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; } return (error); } dev->flags |= DF_ATTACHED_ONCE; /* * We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. * * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; bus_topo_assert(); PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method and returns the result of that call. * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->busy > 0) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number).
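 *
 * E.g. (sketch) to wire the device to unit 2:
 *
 *	if (device_set_unit(dev, 2) != 0)
 *		device_printf(dev, "unit 2 already in use\n");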
*/ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; if (unit == dev->unit) return (0); dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ /** * @brief Initialize a resource mapping request * * This is the internal implementation of the public API * resource_init_map_request. Callers may be using a different layout * of struct resource_map_request than the kernel, so callers pass in * the size of the structure they are using to identify the structure * layout. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Validate a resource mapping request * * Translate a device driver's mapping request (@p in) to a struct * resource_map_request using the current structure layout (@p out). * In addition, validate the offset and length from the mapping * request against the bounds of the resource @p r. If the offset or * length are invalid, fail with EINVAL. If the offset and length are * valid, the absolute starting address of the requested mapping is * returned in @p startp and the length of the requested mapping is * returned in @p lengthp. */ int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp) { rman_res_t end, length, start; /* * This assumes that any callers of this function are compiled * into the kernel and use the same version of the structure * as this file. */ MPASS(out->size == sizeof(struct resource_map_request)); if (in != NULL) bcopy(in, out, imin(in->size, out->size)); start = rman_get_start(r) + out->offset; if (out->length == 0) length = rman_get_size(r); else length = out->length; end = start + length - 1; if (start > rman_get_end(r) || start < rman_get_start(r)) return (EINVAL); if (end > rman_get_end(r) || end < start) return (EINVAL); *lengthp = length; *startp = start; return (0); } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. 
SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. 
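 *
 * Paired with resource_list_add(), a bus can manage an entry like
 * this (sketch; the range values are hypothetical):
 *
 *	resource_list_add(rl, SYS_RES_MEMORY, 0, 0xf0000000,
 *	    0xf0000fff, 0x1000);
 *	...
 *	resource_list_delete(rl, SYS_RES_MEMORY, 0);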
* * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the resource RLE_ALLOCATED is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). * * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. * * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation. 
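 *
 * A sketch of such a method (hypothetical "foo" bus whose child ivars
 * begin with a struct resource_list):
 *
 *	static struct resource *
 *	foo_alloc_resource(device_t bus, device_t child, int type,
 *	    int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
 *	    u_int flags)
 *	{
 *		struct resource_list *rl = device_get_ivars(child);
 *
 *		return (resource_list_alloc(rl, bus, child, type, rid,
 *		    start, end, count, flags));
 *	}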
* * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device which is requesting an allocation * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); } rle = resource_list_find(rl, type, *rid); if (!rle) return (NULL); /* no resource of that type/rid */ if (rle->res) { if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) return (NULL); if ((flags & RF_ACTIVE) && bus_activate_resource(child, type, *rid, rle->res) != 0) return (NULL); rle->flags |= RLE_ALLOCATED; return (rle->res); } device_printf(bus, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * Record the new range. */ if (rle->res) { rle->start = rman_get_start(rle->res); rle->end = rman_get_end(rle->res); rle->count = count; } return (rle->res); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE() * * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally * used with resource_list_alloc(). 
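 *
 * With the signature updated by this change, the matching release
 * method of the hypothetical "foo" bus above becomes (sketch):
 *
 *	static int
 *	foo_release_resource(device_t bus, device_t child,
 *	    struct resource *res)
 *	{
 *		struct resource_list *rl = device_get_ivars(child);
 *
 *		return (resource_list_release(rl, bus, child, res));
 *	}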
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release - * @param type the type of resource to release - * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, - int type, int rid, struct resource *res) + struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, - type, rid, res)); + res)); } - rle = resource_list_find(rl, type, rid); + rle = resource_list_find(rl, rman_get_type(res), rman_get_rid(res)); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, - rid, res); + error = bus_deactivate_resource(child, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } - error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, - type, rid, res); + error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to cleanup resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; - error = resource_list_release(rl, bus, child, type, - rman_get_rid(rle->res), rle->res); + error = resource_list_release(rl, bus, child, rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
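 *
 * E.g. (sketch; the type and rid values are hypothetical) to return a
 * reservation to the parent bus once no driver will allocate it again:
 *
 *	error = resource_list_unreserve(rl, bus, child,
 *	    SYS_RES_MEMORY, rid);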
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose reserved resource is being released * @param type the type of resource to release * @param rid the resource identifier * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); if (passthrough) panic( "resource_list_unreserve() should only be called for direct children"); rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_unreserve: can't find resource"); if (!(rle->flags & RLE_RESERVED)) return (EINVAL); if (rle->flags & RLE_ALLOCATED) return (EBUSY); rle->flags &= ~RLE_RESERVED; - return (resource_list_release(rl, bus, child, type, rid, rle->res)); + return (resource_list_release(rl, bus, child, rle->res)); }
/** * @brief Print a description of resources in a resource list * * Print all resources of a specified type, for use in BUS_PRINT_CHILD(). * The name is printed if at least one resource of the given type is available. * The format is used to print resource start and end. * * @param rl the resource list to print * @param name the name of @p type, e.g. @c "memory" * @param type the type of resource entry to print * @param format printf(9) format string to print resource * start and end values * * @returns the number of characters printed */ int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format) { struct resource_list_entry *rle; int printed, retval; printed = 0; retval = 0; /* Yes, this is kinda cheating */ STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { if (printed == 0) retval += printf(" %s ", name); else retval += printf(","); printed++; retval += printf(format, rle->start); if (rle->count > 1) { retval += printf("-"); retval += printf(format, rle->start + rle->count - 1); } } } return (retval); }
/** * @brief Releases all the resources in a list. * * @param rl The resource list to purge. * * @returns nothing */ void resource_list_purge(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) - bus_release_resource(rman_get_device(rle->res), - rle->type, rle->rid, rle->res); + bus_release_resource(rman_get_device(rle->res), + rle->res); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } }
device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit) { return (device_add_child_ordered(dev, order, name, unit)); }
/** * @brief Helper function for implementing DEVICE_PROBE() * * This function can be used to help implement the DEVICE_PROBE() for * a bus (i.e. a device which has other devices attached to it). It * calls the DEVICE_IDENTIFY() method of each driver in the device's * devclass. */ int bus_generic_probe(device_t dev) { devclass_t dc = dev->devclass; driverlink_t dl; TAILQ_FOREACH(dl, &dc->drivers, link) { /* * If this driver's pass is too high, then ignore it. * For most drivers in the default pass, this will * never be true. When this routine runs on an * early-pass bus, only the identify routines of * drivers eligible at the current pass are called. * Drivers for later * passes should have their identify routines called * on early-pass buses during BUS_NEW_PASS().
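/*
 * Example (illustrative sketch): a typical DEVICE_IDENTIFY() routine of
 * the kind bus_generic_probe() invokes.  It adds a child once if none
 * exists yet; the "foo" driver name is hypothetical.
 */
static void
foo_identify(driver_t *driver, device_t parent)
{
	if (device_find_child(parent, "foo", -1) == NULL)
		BUS_ADD_CHILD(parent, 0, "foo", -1);
}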
*/ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } return (0); } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } return (0); } /** * @brief Helper function for delaying attaching children * * Many buses can't run transactions on the bus which children need to probe and * attach until after interrupts and/or timers are running. This function * delays their attach until interrupts and timers are enabled. */ int bus_delayed_attach_children(device_t dev) { /* Probe and attach the bus children when interrupts are available */ config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev); return (0); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for * a bus. It calls device_detach() for each of the device's * children. */ int bus_generic_detach(device_t dev) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); /* * Detach children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; /* * Shut down children in the reverse order. * See bus_generic_suspend for details. */ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) { child->flags |= DF_SUSPENDED; } else { printf("DEVICE_SUSPEND(%s) failed: %d\n", device_get_nameunit(child), error); } return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child; /* * Suspend children in the reverse order. * For most buses all children are equal, so the order does not matter. * Other buses, such as acpi, carefully order their child devices to * express implicit dependencies between them. For such buses it is * safer to bring down devices in the reverse order. 
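/*
 * Example (illustrative sketch): a simple bus driver can pick up the
 * generic helpers above wholesale in its method table.  foo_probe and
 * foo_attach stand in for hypothetical driver-specific methods.
 */
static device_method_t foo_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		foo_probe),
	DEVMETHOD(device_attach,	foo_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),
	DEVMETHOD_END
};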
*/ TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error != 0) { child = TAILQ_NEXT(child, link); if (child != NULL) { TAILQ_FOREACH_FROM(child, &dev->children, link) BUS_RESUME_CHILD(dev, child); } return (error); } } return (0); }
/** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. */ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); }
/** * @brief Helper function for implementing BUS_RESET_POST * * Bus can use this function to implement common operations of * re-attaching or resuming the children after the bus itself was * reset, and after restoring bus-unique state of children. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_post(device_t dev, int flags) { device_t child; int error, error1; error = 0; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESET_POST(dev, child); error1 = (flags & DEVF_RESET_DETACH) != 0 ? device_probe_and_attach(child) : BUS_RESUME_CHILD(dev, child); if (error == 0 && error1 != 0) error = error1; } return (error); }
static void bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags) { child = TAILQ_NEXT(child, link); if (child == NULL) return; TAILQ_FOREACH_FROM(child, &dev->children, link) { BUS_RESET_POST(dev, child); if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } }
/** * @brief Helper function for implementing BUS_RESET_PREPARE * * Bus can use this function to implement common operations of * detaching or suspending the children before the bus itself is * reset, and then save bus-unique state of children that must * persist around reset. * * @param dev The bus * @param flags DEVF_RESET_* */ int bus_helper_reset_prepare(device_t dev, int flags) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) { if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { error = BUS_RESET_PREPARE(dev, child); if (error != 0) { if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } } if (error != 0) { bus_helper_reset_prepare_rollback(dev, child, flags); return (error); } } return (0); }
/** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); }
/** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the last part of the ascii representation of * @p child, which consists of the string @c " on " followed by the * name and unit of the @p dev.
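/*
 * Example (illustrative sketch): a BUS_RESET_CHILD() implementation
 * built on the two helpers above, in the spirit of the PCI bridge code.
 * foo_hw_reset() stands in for the hypothetical bus-specific reset of
 * the link to 'child'.
 */
static int
foo_reset_child(device_t bus, device_t child, int flags)
{
	int error;

	/* Detach or suspend everything below 'child' first. */
	error = bus_helper_reset_prepare(child, flags);
	if (error != 0)
		return (error);
	foo_hw_reset(bus, child);
	/* Re-attach or resume the children after the reset. */
	return (bus_helper_reset_post(child, flags));
}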
* * @returns the number of characters printed */ int bus_print_child_footer(device_t dev, device_t child) { return (printf(" on %s\n", device_get_nameunit(dev))); }
/** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints out the VM domain for the given device. * * @returns the number of characters printed */ int bus_print_child_domain(device_t dev, device_t child) { int domain; /* No domain? Don't print anything */ if (BUS_GET_DOMAIN(dev, child, &domain) != 0) return (0); return (printf(" numa-domain %d", domain)); }
/** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function simply calls bus_print_child_header() followed by * bus_print_child_footer(). * * @returns the number of characters printed */ int bus_generic_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); }
/** * @brief Stub function for implementing BUS_READ_IVAR(). * * @returns ENOENT */ int bus_generic_read_ivar(device_t dev, device_t child, int index, uintptr_t * result) { return (ENOENT); }
/** * @brief Stub function for implementing BUS_WRITE_IVAR(). * * @returns ENOENT */ int bus_generic_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); }
/** * @brief Helper function for implementing BUS_GET_PROPERTY(). * * This simply calls the BUS_GET_PROPERTY of the parent of dev, * until a non-default implementation is found. */ ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { if (device_get_parent(dev) != NULL) return (BUS_GET_PROPERTY(device_get_parent(dev), child, propname, propvalue, size, type)); return (-1); }
/** * @brief Stub function for implementing BUS_GET_RESOURCE_LIST(). * * @returns NULL */ struct resource_list * bus_generic_get_resource_list(device_t dev, device_t child) { return (NULL); }
/** * @brief Helper function for implementing BUS_DRIVER_ADDED(). * * This implementation of BUS_DRIVER_ADDED() simply calls the driver's * DEVICE_IDENTIFY() method to allow it to add new children to the bus * and then calls device_probe_and_attach() for each unattached child. */ void bus_generic_driver_added(device_t dev, driver_t *driver) { device_t child; DEVICE_IDENTIFY(driver, dev); TAILQ_FOREACH(child, &dev->children, link) { if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } }
/** * @brief Helper function for implementing BUS_NEW_PASS(). * * This implementation of BUS_NEW_PASS() first calls the identify * routines for any drivers that probe at the current pass. Then it * walks the list of devices for this bus. If a device is already * attached, then it calls BUS_NEW_PASS() on that device. If the * device is not already attached, it attempts to attach a driver to * it. */ void bus_generic_new_pass(device_t dev) { driverlink_t dl; devclass_t dc; device_t child; dc = dev->devclass; TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->pass == bus_current_pass) DEVICE_IDENTIFY(dl->driver, dev); } TAILQ_FOREACH(child, &dev->children, link) { if (child->state >= DS_ATTACHED) BUS_NEW_PASS(child); else if (child->state == DS_NOTPRESENT) device_probe_and_attach(child); } }
/** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev.
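/*
 * Example (illustrative sketch): a bus that exports instance variables
 * overrides BUS_READ_IVAR() with a switch like this instead of using
 * the stub above.  FOO_IVAR_SLOT and struct foo_devinfo (with a 'slot'
 * member) are hypothetical.
 */
#define	FOO_IVAR_SLOT	0

static int
foo_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct foo_devinfo *dinfo = device_get_ivars(child);

	switch (which) {
	case FOO_IVAR_SLOT:
		*result = dinfo->slot;
		return (0);
	}
	return (ENOENT);	/* same fallback the generic stub provides */
}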
*/ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); }
/** * @brief Helper function for implementing BUS_TEARDOWN_INTR(). * * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); }
/** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); }
/** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); }
/** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, r, start, end)); return (EINVAL); }
/** * @brief Helper function for implementing BUS_TRANSLATE_RESOURCE(). * * This simple implementation of BUS_TRANSLATE_RESOURCE() simply calls the * BUS_TRANSLATE_RESOURCE() method of the parent of @p dev. If there is no * parent, no translation happens. */ int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent) return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); *newstart = start; return (0); }
/** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); }
/** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev.
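/*
 * Example (illustrative sketch): a transparent bridge driver commonly
 * maps its whole bus interface onto the pass-through helpers defined
 * here, forwarding every request to the parent bus.
 */
static device_method_t foobridge_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_alloc_resource,		bus_generic_alloc_resource),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD_END
};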
*/ int -bus_generic_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +bus_generic_release_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) - return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, - r)); + return (BUS_RELEASE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_deactivate_resource(device_t dev, device_t child, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. */ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. 
*/ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. 
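/*
 * Example (illustrative sketch): a bus built on the rl helpers above
 * seeds each child's resource list, typically when the child is added.
 * bus_set_resource() routes to BUS_SET_RESOURCE() on the parent, which
 * here would be bus_generic_rl_set_resource().  The "foo" name and the
 * values (I/O port 0x300, IRQ 10) are arbitrary placeholders.
 */
static void
foo_add_child(device_t dev)
{
	device_t child;

	child = BUS_ADD_CHILD(dev, 0, "foo", -1);
	if (child == NULL)
		return;
	bus_set_resource(child, SYS_RES_IOPORT, 0, 0x300, 8);
	bus_set_resource(child, SYS_RES_IRQ, 0, 10, 1);
}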
*/ int -bus_generic_rl_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bus_generic_rl_release_resource(device_t dev, device_t child, + struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) - return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, - type, rid, r)); + return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); - return (resource_list_release(rl, dev, child, type, rid, r)); + return (resource_list_release(rl, dev, child, r)); }
/** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); }
/** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() allocates a * resource from a resource manager. It uses BUS_GET_RMAN() * to obtain the resource manager. */ struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *r; struct rman *rm; rm = BUS_GET_RMAN(dev, type, flags); if (rm == NULL) return (NULL); r = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (r == NULL) return (NULL); rman_set_rid(r, *rid); rman_set_type(r, type); if (flags & RF_ACTIVE) { - if (bus_activate_resource(child, type, *rid, r) != 0) { + if (bus_activate_resource(child, r) != 0) { rman_release_resource(r); return (NULL); } } return (r); }
/** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This implementation of BUS_ADJUST_RESOURCE() adjusts resources only * if they were allocated from the resource manager returned by * BUS_GET_RMAN(). */ int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (EINVAL); return (rman_adjust_resource(r, start, end)); }
/** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() releases resources * allocated by bus_generic_rman_alloc_resource.
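/*
 * Example (illustrative sketch): to use the bus_generic_rman_* helpers,
 * a bus supplies BUS_GET_RMAN() mapping a resource type to one of its
 * own resource managers, and then lists the generic helpers in its
 * method table.  The softc members are hypothetical.
 */
static struct rman *
foo_get_rman(device_t dev, int type, u_int flags)
{
	struct foo_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_MEMORY:
		return (&sc->mem_rman);
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	default:
		return (NULL);	/* other types are not managed here */
	}
}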
*/ int -bus_generic_rman_release_resource(device_t dev, device_t child, int type, - int rid, struct resource *r) +bus_generic_rman_release_resource(device_t dev, device_t child, + struct resource *r) { #ifdef INVARIANTS struct rman *rm; #endif int error; #ifdef INVARIANTS - rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); + rm = BUS_GET_RMAN(dev, rman_get_type(r), rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif if (rman_get_flags(r) & RF_ACTIVE) { - error = bus_deactivate_resource(child, type, rid, r); + error = bus_deactivate_resource(child, r); if (error != 0) return (error); } return (rman_release_resource(r)); }
/** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This implementation of BUS_ACTIVATE_RESOURCE() activates resources * allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_activate_resource(device_t dev, device_t child, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error, type; type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_activate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map); if (error != 0) { rman_deactivate_resource(r); return (error); } rman_set_mapping(r, &map); } return (0); }
/** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This implementation of BUS_DEACTIVATE_RESOURCE() deactivates * resources allocated by bus_generic_rman_alloc_resource. */ int bus_generic_rman_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct resource_map map; #ifdef INVARIANTS struct rman *rm; #endif int error, type; type = rman_get_type(r); #ifdef INVARIANTS rm = BUS_GET_RMAN(dev, type, rman_get_flags(r)); KASSERT(rman_is_region_manager(r, rm), ("%s: rman %p doesn't match for resource %p", __func__, rm, r)); #endif error = rman_deactivate_resource(r); if (error != 0) return (error); if ((rman_get_flags(r) & RF_UNMAPPED) == 0 && (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT)) { rman_get_mapping(r, &map); BUS_UNMAP_RESOURCE(dev, child, r, &map); } return (0); }
/** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); }
/** * @brief Helper function for implementing BUS_GET_DOMAIN(). * * This simple implementation of BUS_GET_DOMAIN() calls the * BUS_GET_DOMAIN() method of the parent of @p dev. If @p dev * does not have a parent, the function fails with ENOENT. */ int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); }
/** * @brief Helper function to implement normal BUS_GET_DEVICE_PATH() * * This function knows how to (a) pass the request up the tree if there's * a parent and (b) knows how to supply a FreeBSD locator.
* * @param bus bus in the walk up the tree * @param child leaf node to print information about * @param locator BUS_LOCATOR_xxx string for locator * @param sb Buffer to print information into */ int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { int rv = 0; device_t parent; /* * We don't recurse on ACPI since either we know the handle for the * device or we don't. And if we're in the generic routine, we don't * have an ACPI override. All other locators build up a path by having * their parents create a path and then adding the path element for this * node. That's why we recurse with parent, bus rather than the typical * parent, child: each spot in the tree is independent of what our child * will do with this path. */ parent = device_get_parent(bus); if (parent != NULL && strcmp(locator, BUS_LOCATOR_ACPI) != 0) { rv = BUS_GET_DEVICE_PATH(parent, bus, locator, sb); } if (strcmp(locator, BUS_LOCATOR_FREEBSD) == 0) { if (rv == 0) { sbuf_printf(sb, "/%s", device_get_nameunit(child)); } return (rv); } /* * Don't know what to do. So assume we do nothing. Not sure that's * the right thing, but keeps us from having a big list here. */ return (0); }
/** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENODEV); }
/* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); }
void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { - bus_release_resource( - dev, rs[i].type, rs[i].rid, res[i]); + bus_release_resource(dev, res[i]); res[i] = NULL; } }
/** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); }
/** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, r, start, end)); }
int bus_adjust_resource_old(device_t dev, int type __unused, struct resource *r, rman_res_t start, rman_res_t end) { return (bus_adjust_resource(dev, r, start, end)); }
/** * @brief Wrapper function for BUS_TRANSLATE_RESOURCE(). * * This function simply calls the BUS_TRANSLATE_RESOURCE() method of the * parent of @p dev.
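/*
 * Example (illustrative sketch): the usual way drivers consume
 * bus_alloc_resources()/bus_release_resources(), with a resource_spec
 * table terminated by a -1 type entry.  The "foo" softc is hypothetical.
 */
static struct resource_spec foo_res_spec[] = {
	{ SYS_RES_MEMORY,	0, RF_ACTIVE },	/* register window */
	{ SYS_RES_IRQ,		0, RF_ACTIVE },
	{ -1, 0, 0 }
};

struct foo_softc {
	struct resource	*res[2];	/* parallel to foo_res_spec */
};

static int
foo_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, foo_res_spec, sc->res) != 0)
		return (ENXIO);
	/*
	 * A matching detach routine would undo this with
	 * bus_release_resources(dev, foo_res_spec, sc->res).
	 */
	return (0);
}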
*/ int bus_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart) { if (dev->parent == NULL) return (EINVAL); return (BUS_TRANSLATE_RESOURCE(dev->parent, type, start, newstart)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_activate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, r)); } int bus_activate_resource_old(device_t dev, int type, int rid, struct resource *r) { return (bus_activate_resource(dev, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). * * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_deactivate_resource(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, r)); } int bus_deactivate_resource_old(device_t dev, int type, int rid, struct resource *r) { return (bus_deactivate_resource(dev, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, r, args, map)); } int bus_map_resource_old(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { return (bus_map_resource(dev, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, r, map)); } int bus_unmap_resource_old(device_t dev, int type, struct resource *r, struct resource_map *map) { return (bus_unmap_resource(dev, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int -bus_release_resource(device_t dev, int type, int rid, struct resource *r) +bus_release_resource(device_t dev, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); - rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); + rv = BUS_RELEASE_RESOURCE(dev->parent, dev, r); return (rv); } int -bus_release_resource_new(device_t dev, struct resource *r) +bus_release_resource_old(device_t dev, int type, int rid, struct resource *r) { - return (bus_release_resource(dev, rman_get_type(r), rman_get_rid(r), - r)); + return (bus_release_resource(dev, r)); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. 
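/*
 * Example (illustrative sketch): establishing a handler-only (no
 * filter) MPSAFE interrupt through the bus_setup_intr() wrapper above.
 * foo_intr and the softc members are hypothetical.
 */
static void foo_intr(void *arg);

static int
foo_setup_irq(device_t dev, struct foo_softc *sc)
{
	return (bus_setup_intr(dev, sc->irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, foo_intr, sc,
	    &sc->intrhand));
}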
*/ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) { va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. 
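/*
 * Example (illustrative sketch): naming per-queue interrupts with
 * bus_describe_intr(); the wrapper above truncates each description to
 * MAXCOMLEN characters.  The arrays are hypothetical softc members.
 */
static void
foo_name_queue_intrs(device_t dev, struct foo_softc *sc)
{
	int i;

	for (i = 0; i < sc->nqueues; i++)
		bus_describe_intr(dev, sc->irq[i], sc->cookie[i], "q%d", i);
}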
*/ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO(). * * This function simply calls the BUS_CHILD_PNPINFO() method of the parent of @p * dev. */ int bus_child_pnpinfo(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_PNPINFO(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_pnpinfo * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_CHILD_LOCATION(). * * This function simply calls the BUS_CHILD_LOCATION() method of the parent of * @p dev. */ int bus_child_location(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (0); return (BUS_CHILD_LOCATION(parent, child, sb)); } /** * @brief Generic implementation that does nothing for bus_child_location * * This function has the right signature and returns 0 since the sbuf is passed * to us to append to. */ int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb) { return (0); } /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. 
We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. */ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devctl2_init(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. 
If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); }
/** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds. It searches first for the specific * bus that's being probed for hinted children (e.g. isa0), and then for * generic children (e.g. isa). * * @param bus the bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); }
#ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_SUSPENDED? "suspended,":""), (dev->ivars? "":"no "), (dev->softc?
"":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. * * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. 
*/ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); if (udev == NULL) return (ENOMEM); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); bus_child_pnpinfo(dev, &sb); sbuf_putc(&sb, '\0'); bus_child_location(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); }
SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree");
int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); }
void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); }
int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); - return (bus_release_resource(dev, type, rman_get_rid(r), r)); + return (bus_release_resource(dev, r)); }
device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); }
/* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. */ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); }
static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); }
static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { device_handle_nomatch(dev); } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } }
static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. Walk the tree and * generate all the pent-up events that are still relevant.
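/*
 * Example (illustrative userland sketch): driving the devctl2 ioctls
 * handled below, roughly what devctl(8)/libdevctl do.  DEV_DETACH and
 * struct devreq come from <sys/bus.h>; opening the device read-only is
 * assumed to suffice since the interface is ioctl-based, and the
 * PRIV_DRIVER check effectively restricts it to root.
 */
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
detach_by_name(const char *name)
{
	struct devreq req;
	int fd, rv;

	fd = open("/dev/devctl2", O_RDONLY);
	if (fd == -1)
		return (-1);
	memset(&req, 0, sizeof(req));
	strlcpy(req.dr_name, name, sizeof(req.dr_name));
	rv = ioctl(fd, DEV_DETACH, &req);
	close(fd);
	return (rv);
}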
*/ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int device_get_path(device_t dev, const char *locator, struct sbuf *sb) { device_t parent; int error; KASSERT(sb != NULL, ("sb is NULL")); parent = device_get_parent(dev); if (parent == NULL) { error = sbuf_putc(sb, '/'); } else { error = BUS_GET_DEVICE_PATH(parent, dev, locator, sb); if (error == 0) { error = sbuf_error(sb); if (error == 0 && sbuf_len(sb) <= 1) error = EIO; } } sbuf_finish(sb); return (error); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ bus_topo_lock(); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; case DEV_GET_PATH: error = find_device(req, &dev); break; default: error = ENOTTY; break; } if (error) { bus_topo_unlock(); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev)) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. 
*/ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. */ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. */ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; case DEV_GET_PATH: { struct sbuf *sb; char locator[64]; ssize_t len; error = copyinstr(req->dr_buffer.buffer, locator, sizeof(locator), NULL); if (error != 0) break; sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL /* | SBUF_WAITOK */); error = device_get_path(dev, locator, sb); if (error == 0) { len = sbuf_len(sb); if (req->dr_buffer.length < len) { error = ENAMETOOLONG; } else { error = copyout(sbuf_data(sb), req->dr_buffer.buffer, len); } req->dr_buffer.length = len; } sbuf_delete(sb); break; } } bus_topo_unlock(); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0644, "devctl2"); } /* * For maintaining device 'at' location info to avoid recomputing it */ struct device_location_node { const char *dln_locator; const char *dln_path; TAILQ_ENTRY(device_location_node) dln_link; }; typedef TAILQ_HEAD(device_location_list, device_location_node) device_location_list_t; struct device_location_cache { device_location_list_t dlc_list; }; /* * Location cache for wired devices. 
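 * Each entry maps a locator name (e.g. "ACPI" or "UEFI") to the path
 * computed by device_get_path(), so dev_wired_cache_match() only has
 * to compare the cached path against the part of the hint after the
 * ':' separator.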
*/ device_location_cache_t * dev_wired_cache_init(void) { device_location_cache_t *dcp; dcp = malloc(sizeof(*dcp), M_BUS, M_WAITOK | M_ZERO); TAILQ_INIT(&dcp->dlc_list); return (dcp); } void dev_wired_cache_fini(device_location_cache_t *dcp) { struct device_location_node *dln, *tdln; TAILQ_FOREACH_SAFE(dln, &dcp->dlc_list, dln_link, tdln) { free(dln, M_BUS); } free(dcp, M_BUS); } static struct device_location_node * dev_wired_cache_lookup(device_location_cache_t *dcp, const char *locator) { struct device_location_node *dln; TAILQ_FOREACH(dln, &dcp->dlc_list, dln_link) { if (strcmp(locator, dln->dln_locator) == 0) return (dln); } return (NULL); } static struct device_location_node * dev_wired_cache_add(device_location_cache_t *dcp, const char *locator, const char *path) { struct device_location_node *dln; size_t loclen, pathlen; loclen = strlen(locator) + 1; pathlen = strlen(path) + 1; dln = malloc(sizeof(*dln) + loclen + pathlen, M_BUS, M_WAITOK | M_ZERO); dln->dln_locator = (char *)(dln + 1); memcpy(__DECONST(char *, dln->dln_locator), locator, loclen); dln->dln_path = dln->dln_locator + loclen; memcpy(__DECONST(char *, dln->dln_path), path, pathlen); TAILQ_INSERT_HEAD(&dcp->dlc_list, dln, dln_link); return (dln); } bool dev_wired_cache_match(device_location_cache_t *dcp, device_t dev, const char *at) { struct sbuf *sb; const char *cp; char locator[32]; int error, len; struct device_location_node *res; cp = strchr(at, ':'); if (cp == NULL) return (false); len = cp - at; if (len > sizeof(locator) - 1) /* Skip too long locator */ return (false); memcpy(locator, at, len); locator[len] = '\0'; cp++; error = 0; /* maybe cache this inside device_t and look that up, but not yet */ res = dev_wired_cache_lookup(dcp, locator); if (res == NULL) { sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND | SBUF_INCLUDENUL | SBUF_NOWAIT); if (sb != NULL) { error = device_get_path(dev, locator, sb); if (error == 0) { res = dev_wired_cache_add(dcp, locator, sbuf_data(sb)); } sbuf_delete(sb); } } if (error != 0 || res == NULL || res->dln_path == NULL) return (false); return (strcmp(res->dln_path, cp) == 0); } /* * APIs to manage deprecation and obsolescence. 
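 * These are normally reached through the gone_in()/gone_in_dev()
 * wrappers; a typical (purely illustrative) call is
 *
 *	gone_in_dev(dev, 15, "foo(4) driver");
 *
 * which warns that the code is deprecated until __FreeBSD_version
 * reaches the named major release and flags it as obsolete afterwards,
 * panicking instead if debug.obsolete_panic says so.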
*/ static int obsolete_panic = 0; SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0, "Panic when obsolete features are used (0 = never, 1 = if obsolete, " "2 = if deprecated)"); static void gone_panic(int major, int running, const char *msg) { switch (obsolete_panic) { case 0: return; case 1: if (running < major) return; /* FALLTHROUGH */ default: panic("%s", msg); } } void _gone_in(int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) printf("Obsolete code will be removed soon: %s\n", msg); else printf("Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } void _gone_in_dev(device_t dev, int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) device_printf(dev, "Obsolete code will be removed soon: %s\n", msg); else device_printf(dev, "Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); } #ifdef DDB DB_SHOW_COMMAND(device, db_show_device) { device_t dev; if (!have_addr) return; dev = (device_t)addr; db_printf("name: %s\n", device_get_nameunit(dev)); db_printf(" driver: %s\n", DRIVERNAME(dev->driver)); db_printf(" class: %s\n", DEVCLANAME(dev->devclass)); db_printf(" addr: %p\n", dev); db_printf(" parent: %p\n", dev->parent); db_printf(" softc: %p\n", dev->softc); db_printf(" ivars: %p\n", dev->ivars); } DB_SHOW_ALL_COMMAND(devices, db_show_all_devices) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { db_show_device((db_expr_t)dev, true, count, modif); } } #endif diff --git a/sys/powerpc/mpc85xx/isa.c b/sys/powerpc/mpc85xx/isa.c index 8b092f895982..d730ecb6f3e9 100644 --- a/sys/powerpc/mpc85xx/isa.c +++ b/sys/powerpc/mpc85xx/isa.c @@ -1,83 +1,82 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Marcel Moolenaar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include void isa_init(device_t dev) { } struct resource * isa_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; int isdefault, passthrough, rids; isdefault = RMAN_IS_DEFAULT_RANGE(start, end) ? 1 : 0; passthrough = (device_get_parent(child) != bus) ? 1 : 0; if (!passthrough && !isdefault && resource_list_find(rl, type, *rid) == NULL) { switch (type) { case SYS_RES_IOPORT: rids = ISA_PNP_NPORT; break; case SYS_RES_IRQ: rids = ISA_PNP_NIRQ; break; case SYS_RES_MEMORY: rids = ISA_PNP_NMEM; break; default: rids = 0; break; } if (*rid < 0 || *rid >= rids) return (NULL); resource_list_add(rl, type, *rid, start, end, count); } return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } int -isa_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *r) +isa_release_resource(device_t bus, device_t child, struct resource *r) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; - return (resource_list_release(rl, bus, child, type, rid, r)); + return (resource_list_release(rl, bus, child, r)); } diff --git a/sys/powerpc/mpc85xx/lbc.c b/sys/powerpc/mpc85xx/lbc.c index 6884df611494..75614b550a85 100644 --- a/sys/powerpc/mpc85xx/lbc.c +++ b/sys/powerpc/mpc85xx/lbc.c @@ -1,928 +1,922 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2006-2008, Juniper Networks, Inc. * Copyright (c) 2008 Semihalf, Rafal Czubak * Copyright (c) 2009 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #include "lbc.h" #ifdef DEBUG #define debugf(fmt, args...) 
do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif static MALLOC_DEFINE(M_LBC, "localbus", "localbus devices information"); static int lbc_probe(device_t); static int lbc_attach(device_t); static int lbc_shutdown(device_t); static int lbc_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int lbc_unmap_resource(device_t, device_t, struct resource *, struct resource_map *map); static int lbc_activate_resource(device_t bus, device_t child, struct resource *r); static int lbc_deactivate_resource(device_t bus, device_t child, struct resource *r); static struct rman *lbc_get_rman(device_t, int, u_int); static struct resource *lbc_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int lbc_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int lbc_print_child(device_t, device_t); -static int lbc_release_resource(device_t, device_t, int, int, - struct resource *); +static int lbc_release_resource(device_t, device_t, struct resource *); static const struct ofw_bus_devinfo *lbc_get_devinfo(device_t, device_t); /* * Bus interface definition */ static device_method_t lbc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lbc_probe), DEVMETHOD(device_attach, lbc_attach), DEVMETHOD(device_shutdown, lbc_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, lbc_print_child), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, NULL), DEVMETHOD(bus_get_rman, lbc_get_rman), DEVMETHOD(bus_alloc_resource, lbc_alloc_resource), DEVMETHOD(bus_adjust_resource, lbc_adjust_resource), DEVMETHOD(bus_release_resource, lbc_release_resource), DEVMETHOD(bus_activate_resource, lbc_activate_resource), DEVMETHOD(bus_deactivate_resource, lbc_deactivate_resource), DEVMETHOD(bus_map_resource, lbc_map_resource), DEVMETHOD(bus_unmap_resource, lbc_unmap_resource), /* OFW bus interface */ DEVMETHOD(ofw_bus_get_devinfo, lbc_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t lbc_driver = { "lbc", lbc_methods, sizeof(struct lbc_softc) }; EARLY_DRIVER_MODULE(lbc, ofwbus, lbc_driver, 0, 0, BUS_PASS_BUS); /* * Calculate address mask used by OR(n) registers. Use memory region size to * determine mask value. The size must be a power of two and within the range * of 32KB - 4GB. Otherwise error code is returned. Value representing * 4GB size can be passed as 0xffffffff. */ static uint32_t lbc_address_mask(uint32_t size) { int n = 15; if (size == ~0) return (0); while (n < 32) { if (size == (1U << n)) break; n++; } if (n == 32) return (EINVAL); return (0xffff8000 << (n - 15)); } static void lbc_banks_unmap(struct lbc_softc *sc) { int r; r = 0; while (r < LBC_DEV_MAX) { if (sc->sc_range[r].size == 0) return; pmap_unmapdev((void *)sc->sc_range[r].kva, sc->sc_range[r].size); law_disable(OCP85XX_TGTIF_LBC, sc->sc_range[r].addr, sc->sc_range[r].size); r++; } } static int lbc_banks_map(struct lbc_softc *sc) { vm_paddr_t end, start; vm_size_t size; u_int i, r, ranges, s; int error; bzero(sc->sc_range, sizeof(sc->sc_range)); /* * Determine number of discontiguous address ranges to program. 
*/ ranges = 0; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { /* Avoid wrap-around bugs. */ end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start > 0 && end == start - 1) { sc->sc_range[r].size += size; break; } /* Avoid wrap-around bugs. */ end = start - 1 + size; if (sc->sc_range[r].addr > 0 && end == sc->sc_range[r].addr - 1) { sc->sc_range[r].addr = start; sc->sc_range[r].size += size; break; } } if (r == ranges) { /* New range; add using insertion sort */ r = 0; while (r < ranges && sc->sc_range[r].addr < start) r++; for (s = ranges; s > r; s--) sc->sc_range[s] = sc->sc_range[s-1]; sc->sc_range[r].addr = start; sc->sc_range[r].size = size; ranges++; } } /* * Ranges are sorted so quickly go over the list to merge ranges * that grew toward each other while building the ranges. */ r = 0; while (r < ranges - 1) { end = sc->sc_range[r].addr + sc->sc_range[r].size; if (end != sc->sc_range[r+1].addr) { r++; continue; } sc->sc_range[r].size += sc->sc_range[r+1].size; for (s = r + 1; s < ranges - 1; s++) sc->sc_range[s] = sc->sc_range[s+1]; bzero(&sc->sc_range[s], sizeof(sc->sc_range[s])); ranges--; } /* * Configure LAW for the LBC ranges and map the physical memory * range into KVA. */ for (r = 0; r < ranges; r++) { start = sc->sc_range[r].addr; size = sc->sc_range[r].size; error = law_enable(OCP85XX_TGTIF_LBC, start, size); if (error) return (error); sc->sc_range[r].kva = (vm_offset_t)pmap_mapdev(start, size); } /* XXX: need something better here? */ if (ranges == 0) return (EINVAL); /* Assign KVA to banks based on the enclosing range. */ for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; start = sc->sc_banks[i].addr; for (r = 0; r < ranges; r++) { end = sc->sc_range[r].addr - 1 + sc->sc_range[r].size; if (start >= sc->sc_range[r].addr && start - 1 + size <= end) break; } if (r < ranges) { sc->sc_banks[i].kva = sc->sc_range[r].kva + (start - sc->sc_range[r].addr); } } return (0); } static int lbc_banks_enable(struct lbc_softc *sc) { uint32_t size; uint32_t regval; int error, i; for (i = 0; i < LBC_DEV_MAX; i++) { size = sc->sc_banks[i].size; if (size == 0) continue; /* * Compute and program BR value. */ regval = sc->sc_banks[i].addr; switch (sc->sc_banks[i].width) { case 8: regval |= (1 << 11); break; case 16: regval |= (2 << 11); break; case 32: regval |= (3 << 11); break; default: error = EINVAL; goto fail; } regval |= (sc->sc_banks[i].decc << 9); regval |= (sc->sc_banks[i].wp << 8); regval |= (sc->sc_banks[i].msel << 5); regval |= (sc->sc_banks[i].atom << 2); regval |= 1; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(i), regval); /* * Compute and program OR value. 
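		 * The high bits come from lbc_address_mask() above: for a
		 * power-of-two size 2^n (32KB..4GB) it returns
		 * 0xffff8000 << (n - 15), e.g. 0xffff0000 for a 64KB bank.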
*/ regval = lbc_address_mask(size); switch (sc->sc_banks[i].msel) { case LBCRES_MSEL_GPCM: /* TODO Add flag support for option registers */ regval |= 0x0ff7; break; case LBCRES_MSEL_FCM: /* TODO Add flag support for options register */ regval |= 0x0796; break; case LBCRES_MSEL_UPMA: case LBCRES_MSEL_UPMB: case LBCRES_MSEL_UPMC: printf("UPM mode not supported yet!"); error = ENOSYS; goto fail; } bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(i), regval); } return (0); fail: lbc_banks_unmap(sc); return (error); } static void fdt_lbc_fixup(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { pcell_t width; int bank; if (OF_getprop(node, "bank-width", (void *)&width, sizeof(width)) <= 0) return; bank = di->di_bank; if (sc->sc_banks[bank].size == 0) return; /* Express width in bits. */ sc->sc_banks[bank].width = width * 8; } static int fdt_lbc_reg_decode(phandle_t node, struct lbc_softc *sc, struct lbc_devinfo *di) { rman_res_t start, end, count; pcell_t *reg, *regptr; pcell_t addr_cells, size_cells; int tuple_size, tuples; int i, j, rv, bank; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells) != 0) return (ENXIO); tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); tuples = OF_getencprop_alloc_multi(node, "reg", tuple_size, (void **)®); debugf("addr_cells = %d, size_cells = %d\n", addr_cells, size_cells); debugf("tuples = %d, tuple size = %d\n", tuples, tuple_size); if (tuples <= 0) /* No 'reg' property in this node. */ return (0); regptr = reg; for (i = 0; i < tuples; i++) { bank = fdt_data_get((void *)reg, 1); di->di_bank = bank; reg += 1; /* Get address/size. */ start = count = 0; for (j = 0; j < addr_cells - 1; j++) { start <<= 32; start |= reg[j]; } for (j = 0; j < size_cells; j++) { count <<= 32; count |= reg[addr_cells + j - 1]; } reg += addr_cells - 1 + size_cells; /* Calculate address range relative to VA base. */ start = sc->sc_banks[bank].kva + start; end = start + count - 1; debugf("reg addr bank = %d, start = %jx, end = %jx, " "count = %jx\n", bank, start, end, count); /* Use bank (CS) cell as rid. 
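		 * so that lbc_alloc_resource() can find the default entry
		 * again through di->di_bank.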
*/ resource_list_add(&di->di_res, SYS_RES_MEMORY, bank, start, end, count); } rv = 0; OF_prop_free(regptr); return (rv); } static void lbc_intr(void *arg) { struct lbc_softc *sc = arg; uint32_t ltesr; ltesr = bus_space_read_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR); sc->sc_ltesr = ltesr; bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ltesr); wakeup(sc->sc_dev); } static int lbc_probe(device_t dev) { if (!(ofw_bus_is_compatible(dev, "fsl,lbc") || ofw_bus_is_compatible(dev, "fsl,elbc"))) return (ENXIO); device_set_desc(dev, "Freescale Local Bus Controller"); return (BUS_PROBE_DEFAULT); } static int lbc_attach(device_t dev) { struct lbc_softc *sc; struct lbc_devinfo *di; struct rman *rm; uintmax_t offset, size; vm_paddr_t start; device_t cdev; phandle_t node, child; pcell_t *ranges, *rangesptr; int tuple_size, tuples; int par_addr_cells; int bank, error, i, j; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_mrid = 0; sc->sc_mres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mrid, RF_ACTIVE); if (sc->sc_mres == NULL) return (ENXIO); sc->sc_bst = rman_get_bustag(sc->sc_mres); sc->sc_bsh = rman_get_bushandle(sc->sc_mres); for (bank = 0; bank < LBC_DEV_MAX; bank++) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(bank), 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(bank), 0); } /* * Initialize configuration register: * - enable Local Bus * - set data buffer control signal function * - disable parity byte select * - set ECC parity type * - set bus monitor timing and timer prescale */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LBCR, 0); /* * Initialize clock ratio register: * - disable PLL bypass mode * - configure LCLK delay cycles for the assertion of LALE * - set system clock divider */ bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LCRR, 0x00030008); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEDR, 0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ~0); bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEIR, 0x64080001); sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_MISC | INTR_MPSAFE, NULL, lbc_intr, sc, &sc->sc_icookie); if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } sc->sc_ltesr = ~0; rangesptr = NULL; rm = &sc->sc_rman; rm->rm_type = RMAN_ARRAY; rm->rm_descr = "Local Bus Space"; error = rman_init(rm); if (error) goto fail; error = rman_manage_region(rm, rm->rm_start, rm->rm_end); if (error) { rman_fini(rm); goto fail; } /* * Process 'ranges' property. 
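	 * Each tuple is <bank, offset..., parent-address..., size>: the
	 * first child cell selects the chip select, the remaining child
	 * cells give an offset into it, followed by the parent bus
	 * address and the size of the window.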
*/ node = ofw_bus_get_node(dev); if ((fdt_addrsize_cells(node, &sc->sc_addr_cells, &sc->sc_size_cells)) != 0) { error = ENXIO; goto fail; } par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) { device_printf(dev, "unsupported parent #addr-cells\n"); error = ERANGE; goto fail; } tuple_size = sizeof(pcell_t) * (sc->sc_addr_cells + par_addr_cells + sc->sc_size_cells); tuples = OF_getencprop_alloc_multi(node, "ranges", tuple_size, (void **)&ranges); if (tuples < 0) { device_printf(dev, "could not retrieve 'ranges' property\n"); error = ENXIO; goto fail; } rangesptr = ranges; debugf("par addr_cells = %d, addr_cells = %d, size_cells = %d, " "tuple_size = %d, tuples = %d\n", par_addr_cells, sc->sc_addr_cells, sc->sc_size_cells, tuple_size, tuples); start = 0; size = 0; for (i = 0; i < tuples; i++) { /* The first cell is the bank (chip select) number. */ bank = fdt_data_get(ranges, 1); if (bank < 0 || bank > LBC_DEV_MAX) { device_printf(dev, "bank out of range: %d\n", bank); error = ERANGE; goto fail; } ranges += 1; /* * Remaining cells of the child address define offset into * this CS. */ offset = 0; for (j = 0; j < sc->sc_addr_cells - 1; j++) { offset <<= sizeof(pcell_t) * 8; offset |= *ranges; ranges++; } /* Parent bus start address of this bank. */ start = 0; for (j = 0; j < par_addr_cells; j++) { start <<= sizeof(pcell_t) * 8; start |= *ranges; ranges++; } size = fdt_data_get((void *)ranges, sc->sc_size_cells); ranges += sc->sc_size_cells; debugf("bank = %d, start = %jx, size = %jx\n", bank, (uintmax_t)start, size); sc->sc_banks[bank].addr = start + offset; sc->sc_banks[bank].size = size; /* * Attributes for the bank. * * XXX Note there are no DT bindings defined for them at the * moment, so we need to provide some defaults. */ sc->sc_banks[bank].width = 16; sc->sc_banks[bank].msel = LBCRES_MSEL_GPCM; sc->sc_banks[bank].decc = LBCRES_DECC_DISABLED; sc->sc_banks[bank].atom = LBCRES_ATOM_DISABLED; sc->sc_banks[bank].wp = 0; } /* * Initialize mem-mappings for the LBC banks (i.e. chip selects). */ error = lbc_banks_map(sc); if (error) goto fail; /* * Walk the localbus and add direct subordinates as our children. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { di = malloc(sizeof(*di), M_LBC, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&di->di_ofw, child) != 0) { free(di, M_LBC); device_printf(dev, "could not set up devinfo\n"); continue; } resource_list_init(&di->di_res); if (fdt_lbc_reg_decode(child, sc, di)) { device_printf(dev, "could not process 'reg' " "property\n"); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } fdt_lbc_fixup(child, sc, di); /* Add newbus device for this FDT node */ cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "could not add child: %s\n", di->di_ofw.obd_name); resource_list_free(&di->di_res); ofw_bus_gen_destroy_devinfo(&di->di_ofw); free(di, M_LBC); continue; } debugf("added child name='%s', node=%x\n", di->di_ofw.obd_name, child); device_set_ivars(cdev, di); } /* * Enable the LBC. 
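	 * This programs the BR(n)/OR(n) pair for every configured bank,
	 * which is what finally makes the chip selects decode.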
*/ lbc_banks_enable(sc); OF_prop_free(rangesptr); return (bus_generic_attach(dev)); fail: OF_prop_free(rangesptr); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mrid, sc->sc_mres); return (error); } static int lbc_shutdown(device_t dev) { /* TODO */ return(0); } static struct rman * lbc_get_rman(device_t bus, int type, u_int flags) { struct lbc_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: return (&sc->sc_rman); default: return (NULL); } } static struct resource * lbc_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct lbc_devinfo *di; struct resource_list_entry *rle; /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == SYS_RES_IRQ) return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; /* * XXX: We are supposed to return a value to the user, so this * doesn't seem right. */ rid = &di->di_bank; rle = resource_list_find(&di->di_res, type, *rid); if (rle == NULL) { device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; count = rle->count; end = start + count - 1; return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int lbc_print_child(device_t dev, device_t child) { struct lbc_devinfo *di; struct resource_list *rl; int rv; di = device_get_ivars(child); rl = &di->di_res; rv = 0; rv += bus_print_child_header(dev, child); rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); rv += bus_print_child_footer(dev, child); return (rv); } static int lbc_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(dev, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(dev, child, r, start, end)); default: return (EINVAL); } } static int -lbc_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *res) +lbc_release_resource(device_t dev, device_t child, struct resource *res) { - switch (type) { - case SYS_RES_IOPORT: - type = SYS_RES_MEMORY; - /* FALLTHROUGH */ + switch (rman_get_type(res)) { case SYS_RES_MEMORY: - return (bus_generic_rman_release_resource(dev, child, type, - rid, res)); + return (bus_generic_rman_release_resource(dev, child, res)); case SYS_RES_IRQ: - return (bus_generic_release_resource(dev, child, type, rid, res)); + return (bus_generic_release_resource(dev, child, res)); default: return (EINVAL); } } static int lbc_activate_resource(device_t bus, device_t child, struct resource *r) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, r)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, r)); default: return (EINVAL); } } static int lbc_deactivate_resource(device_t bus, device_t child, struct resource *r) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, r)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, r)); default: return (EINVAL); } } static int 
lbc_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_bustag = &bs_be_tag; map->r_bushandle = start; map->r_size = length; map->r_vaddr = NULL; return (0); } static int lbc_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } return (0); } static const struct ofw_bus_devinfo * lbc_get_devinfo(device_t bus, device_t child) { struct lbc_devinfo *di; di = device_get_ivars(child); return (&di->di_ofw); } void lbc_write_reg(device_t child, u_int off, uint32_t val) { device_t dev; struct lbc_softc *sc; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return; } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0u) { sc->sc_ltesr ^= (val & sc->sc_ltesr); return; } if (off == LBC85XX_LTEATR && (val & 1) == 0) sc->sc_ltesr = ~0u; bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); } uint32_t lbc_read_reg(device_t child, u_int off) { device_t dev; struct lbc_softc *sc; uint32_t val; dev = device_get_parent(child); if (off >= 0x1000) { device_printf(dev, "%s(%s): invalid offset %#x\n", __func__, device_get_nameunit(child), off); return (~0U); } sc = device_get_softc(dev); if (off == LBC85XX_LTESR && sc->sc_ltesr != ~0U) val = sc->sc_ltesr; else val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return (val); } diff --git a/sys/powerpc/powermac/macio.c b/sys/powerpc/powermac/macio.c index eb6980005904..b3d9e9f73f25 100644 --- a/sys/powerpc/powermac/macio.c +++ b/sys/powerpc/powermac/macio.c @@ -1,791 +1,787 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for KeyLargo/Pangea, the MacPPC south bridge ASIC. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macio softc */ struct macio_softc { phandle_t sc_node; vm_offset_t sc_base; vm_offset_t sc_size; struct rman sc_mem_rman; /* FCR registers */ int sc_memrid; struct resource *sc_memr; /* GPIO offsets */ int sc_timebase; }; static MALLOC_DEFINE(M_MACIO, "macio", "macio device information"); static int macio_probe(device_t); static int macio_attach(device_t); static int macio_print_child(device_t dev, device_t child); static void macio_probe_nomatch(device_t, device_t); static struct rman *macio_get_rman(device_t, int, u_int); static struct resource *macio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macio_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int macio_activate_resource(device_t, device_t, struct resource *); static int macio_deactivate_resource(device_t, device_t, struct resource *); -static int macio_release_resource(device_t, device_t, int, int, - struct resource *); +static int macio_release_resource(device_t, device_t, struct resource *); static int macio_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int macio_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); static struct resource_list *macio_get_resource_list (device_t, device_t); static ofw_bus_get_devinfo_t macio_get_devinfo; #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t, bool); #endif /* * Bus interface definition */ static device_method_t macio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macio_probe), DEVMETHOD(device_attach, macio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macio_print_child), DEVMETHOD(bus_probe_nomatch, macio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, macio_get_rman), DEVMETHOD(bus_alloc_resource, macio_alloc_resource), DEVMETHOD(bus_adjust_resource, macio_adjust_resource), DEVMETHOD(bus_release_resource, macio_release_resource), DEVMETHOD(bus_activate_resource, macio_activate_resource), DEVMETHOD(bus_deactivate_resource, macio_deactivate_resource), DEVMETHOD(bus_map_resource, macio_map_resource), DEVMETHOD(bus_unmap_resource, macio_unmap_resource), DEVMETHOD(bus_get_resource_list, macio_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), 
DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macio_pci_driver = { "macio", macio_methods, sizeof(struct macio_softc) }; EARLY_DRIVER_MODULE(macio, pci, macio_pci_driver, 0, 0, BUS_PASS_BUS); /* * PCI ID search table */ static struct macio_pci_dev { u_int32_t mpd_devid; char *mpd_desc; } macio_pci_devlist[] = { { 0x0017106b, "Paddington I/O Controller" }, { 0x0022106b, "KeyLargo I/O Controller" }, { 0x0025106b, "Pangea I/O Controller" }, { 0x003e106b, "Intrepid I/O Controller" }, { 0x0041106b, "K2 KeyLargo I/O Controller" }, { 0x004f106b, "Shasta I/O Controller" }, { 0, NULL } }; /* * Devices to exclude from the probe * XXX some of these may be required in the future... */ #define MACIO_QUIRK_IGNORE 0x00000001 #define MACIO_QUIRK_CHILD_HAS_INTR 0x00000002 #define MACIO_QUIRK_USE_CHILD_REG 0x00000004 struct macio_quirk_entry { const char *mq_name; int mq_quirks; }; static struct macio_quirk_entry macio_quirks[] = { { "escc-legacy", MACIO_QUIRK_IGNORE }, { "timer", MACIO_QUIRK_IGNORE }, { "escc", MACIO_QUIRK_CHILD_HAS_INTR }, { "i2s", MACIO_QUIRK_CHILD_HAS_INTR | MACIO_QUIRK_USE_CHILD_REG }, { NULL, 0 } }; static int macio_get_quirks(const char *name) { struct macio_quirk_entry *mqe; for (mqe = macio_quirks; mqe->mq_name != NULL; mqe++) if (strcmp(name, mqe->mq_name) == 0) return (mqe->mq_quirks); return (0); } /* * Add an interrupt to the dev's resource list if present */ static void macio_add_intr(phandle_t devnode, struct macio_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->mdi_ninterrupts >= 6) { printf("macio: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_getprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, irq, irq, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = irq; dinfo->mdi_ninterrupts++; } } static void macio_add_reg(phandle_t devnode, struct macio_devinfo *dinfo) { struct macio_reg *reg, *regp; phandle_t child; char buf[8]; int i, layout_id = 0, nreg, res; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; /* * Some G5's have broken properties in the i2s-a area. If so we try * to fix it. Right now we know of two different cases, one for * sound layout-id 36 and the other one for sound layout-id 76. * What is missing is the base address for the memory addresses. * We take them from the parent node (i2s) and use the size * information from the child. 
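	 * Concretely, reg[1] inherits the parent's second base address
	 * and reg[2] is placed directly behind it, at that base plus
	 * reg[1]'s own size.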
*/ if (reg[0].mr_base == 0) { child = OF_child(devnode); while (child != 0) { res = OF_getprop(child, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "sound") == 0) break; child = OF_peer(child); } res = OF_getprop(child, "layout-id", &layout_id, sizeof(layout_id)); if (res > 0 && (layout_id == 36 || layout_id == 76)) { res = OF_getprop_alloc_multi(OF_parent(devnode), "reg", sizeof(*regp), (void **)®p); reg[0] = regp[0]; reg[1].mr_base = regp[1].mr_base; reg[2].mr_base = regp[1].mr_base + reg[1].mr_size; } } for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->mdi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } /* * PCI probe */ static int macio_probe(device_t dev) { int i; u_int32_t devid; devid = pci_get_devid(dev); for (i = 0; macio_pci_devlist[i].mpd_desc != NULL; i++) { if (devid == macio_pci_devlist[i].mpd_devid) { device_set_desc(dev, macio_pci_devlist[i].mpd_desc); return (0); } } return (ENXIO); } /* * PCI attach: scan Open Firmware child nodes, and attach these as children * of the macio bus */ static int macio_attach(device_t dev) { struct macio_softc *sc; struct macio_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t subchild; device_t cdev; u_int reg[3]; char compat[32]; int error, quirks; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); /* * Locate the device node and it's base address */ if (OF_getprop(root, "assigned-addresses", reg, sizeof(reg)) < (ssize_t)sizeof(reg)) { return (ENXIO); } /* Used later to see if we have to enable the I2S part. */ OF_getprop(root, "compatible", compat, sizeof(compat)); sc->sc_base = reg[2]; sc->sc_size = MACIO_REG_SIZE; sc->sc_memrid = PCIR_BAR(0); sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "MacIO Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); if (error) { device_printf(dev, "rman_manage_region() failed. 
error = %d\n", error); return (error); } /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACIO); continue; } quirks = macio_get_quirks(dinfo->mdi_obdinfo.obd_name); if ((quirks & MACIO_QUIRK_IGNORE) != 0) { ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } resource_list_init(&dinfo->mdi_resources); dinfo->mdi_ninterrupts = 0; macio_add_intr(child, dinfo); if ((quirks & MACIO_QUIRK_USE_CHILD_REG) != 0) macio_add_reg(OF_child(child), dinfo); else macio_add_reg(child, dinfo); if ((quirks & MACIO_QUIRK_CHILD_HAS_INTR) != 0) for (subchild = OF_child(child); subchild != 0; subchild = OF_peer(subchild)) macio_add_intr(subchild, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); resource_list_free(&dinfo->mdi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } device_set_ivars(cdev, dinfo); /* Set FCRs to enable some devices */ if (sc->sc_memr == NULL) continue; if (strcmp(ofw_bus_get_name(cdev), "bmac") == 0 || (ofw_bus_get_compat(cdev) != NULL && strcmp(ofw_bus_get_compat(cdev), "bmac+") == 0)) { uint32_t fcr; fcr = bus_read_4(sc->sc_memr, HEATHROW_FCR); fcr |= FCR_ENET_ENABLE & ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr |= FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr &= ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); } /* * Make sure the I2S0 and the I2S0_CLK are enabled. * On certain G5's they are not. */ if ((strcmp(ofw_bus_get_name(cdev), "i2s") == 0) && (strcmp(compat, "K2-Keylargo") == 0)) { uint32_t fcr1; fcr1 = bus_read_4(sc->sc_memr, KEYLARGO_FCR1); fcr1 |= FCR1_I2S0_CLK_ENABLE | FCR1_I2S0_ENABLE; bus_write_4(sc->sc_memr, KEYLARGO_FCR1, fcr1); } } #if !defined(__powerpc64__) && defined(SMP) /* * Detect an SMP G4 machine. * * On SMP G4, timebase freeze is via a GPIO on macio. * * When we are on an SMP G4, we need to install a handler to * perform timebase freeze/unfreeze on behalf of the platform. 
*/ if ((child = OF_finddevice("/cpus/PowerPC,G4@0")) != -1 && OF_peer(child) != -1) { if (OF_getprop(child, "timebase-enable", &sc->sc_timebase, sizeof(sc->sc_timebase)) <= 0) sc->sc_timebase = KEYLARGO_GPIO_BASE + 0x09; powermac_register_timebase(dev, macio_freeze_timebase); device_printf(dev, "GPIO timebase control at 0x%x\n", sc->sc_timebase); } #endif return (bus_generic_attach(dev)); } static int macio_print_child(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macio_probe_nomatch(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * macio_get_rman(device_t bus, int type, u_int flags) { struct macio_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * macio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct macio_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, type, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments like macgpio */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->mdi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, start, start, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = start; dinfo->mdi_ninterrupts++; } return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int macio_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(bus, child, r, 
start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int -macio_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +macio_release_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rman_release_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_rl_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rl_release_resource(bus, child, res)); default: return (EINVAL); } } static int macio_activate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, res)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, res)); default: return (EINVAL); } } static int macio_deactivate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, res)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int macio_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct macio_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_base, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int macio_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * macio_get_resource_list (device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static const struct ofw_bus_devinfo * macio_get_devinfo(device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } int macio_enable_wireless(device_t dev, bool enable) { struct macio_softc *sc = device_get_softc(dev); uint32_t x; if (enable) { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* Enable card slot. 
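		 * by writing 5 and then 4 to its GPIO with a short delay in
		 * between.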
*/ bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5); DELAY(1000); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4); DELAY(1000); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 4); */ bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b, 0); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28); bus_write_4(sc->sc_memr, 0x1c000, 0); /* Initialize the card. */ bus_write_4(sc->sc_memr, 0x1a3e0, 0x41); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); } else { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 0); */ } return (0); } #if !defined(__powerpc64__) && defined(SMP) static void macio_freeze_timebase(device_t dev, bool freeze) { struct macio_softc *sc = device_get_softc(dev); if (freeze) { bus_write_1(sc->sc_memr, sc->sc_timebase, 4); } else { bus_write_1(sc->sc_memr, sc->sc_timebase, 0); } bus_read_1(sc->sc_memr, sc->sc_timebase); } #endif diff --git a/sys/powerpc/powermac/uninorth.c b/sys/powerpc/powermac/uninorth.c index 96caf4f34530..dac0dc46c666 100644 --- a/sys/powerpc/powermac/uninorth.c +++ b/sys/powerpc/powermac/uninorth.c @@ -1,722 +1,718 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Driver for the Uninorth chip itself. */ static MALLOC_DEFINE(M_UNIN, "unin", "unin device information"); /* * Device interface. */ static int unin_chip_probe(device_t); static int unin_chip_attach(device_t); /* * Bus interface. 
*/ static int unin_chip_print_child(device_t dev, device_t child); static void unin_chip_probe_nomatch(device_t, device_t); static struct rman *unin_chip_get_rman(device_t, int, u_int); static struct resource *unin_chip_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int unin_chip_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int unin_chip_activate_resource(device_t, device_t, struct resource *); static int unin_chip_deactivate_resource(device_t, device_t, struct resource *); static int unin_chip_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int unin_chip_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); -static int unin_chip_release_resource(device_t, device_t, int, int, - struct resource *); +static int unin_chip_release_resource(device_t, device_t, struct resource *); static struct resource_list *unin_chip_get_resource_list (device_t, device_t); /* * OFW Bus interface */ static ofw_bus_get_devinfo_t unin_chip_get_devinfo; /* * Local routines */ static void unin_enable_gmac(device_t dev); static void unin_enable_mpic(device_t dev); /* * Driver methods. */ static device_method_t unin_chip_methods[] = { /* Device interface */ DEVMETHOD(device_probe, unin_chip_probe), DEVMETHOD(device_attach, unin_chip_attach), /* Bus interface */ DEVMETHOD(bus_print_child, unin_chip_print_child), DEVMETHOD(bus_probe_nomatch, unin_chip_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, unin_chip_get_rman), DEVMETHOD(bus_alloc_resource, unin_chip_alloc_resource), DEVMETHOD(bus_adjust_resource, unin_chip_adjust_resource), DEVMETHOD(bus_release_resource, unin_chip_release_resource), DEVMETHOD(bus_activate_resource, unin_chip_activate_resource), DEVMETHOD(bus_deactivate_resource, unin_chip_deactivate_resource), DEVMETHOD(bus_map_resource, unin_chip_map_resource), DEVMETHOD(bus_unmap_resource, unin_chip_unmap_resource), DEVMETHOD(bus_get_resource_list, unin_chip_get_resource_list), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, unin_chip_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t unin_chip_driver = { "unin", unin_chip_methods, sizeof(struct unin_chip_softc) }; /* * Assume there is only one unin chip in a PowerMac, so that pmu.c functions can * suspend the chip after the whole rest of the device tree is suspended, not * earlier. 
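 * The first instance to attach records itself in the unin_chip global
 * below for that purpose.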
*/ static device_t unin_chip; EARLY_DRIVER_MODULE(unin, ofwbus, unin_chip_driver, 0, 0, BUS_PASS_BUS); /* * Add an interrupt to the dev's resource list if present */ static void unin_chip_add_intr(phandle_t devnode, struct unin_chip_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->udi_ninterrupts >= 6) { printf("unin: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_searchprop(iparent, "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); if (icells > 1) { powerpc_config_intr(irq, (intr[i+1] & 1) ? INTR_TRIGGER_LEVEL : INTR_TRIGGER_EDGE, INTR_POLARITY_LOW); } dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } } static void unin_chip_add_reg(phandle_t devnode, struct unin_chip_devinfo *dinfo) { struct unin_chip_reg *reg; int i, nreg; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->udi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } static void unin_update_reg(device_t dev, uint32_t regoff, uint32_t set, uint32_t clr) { volatile u_int *reg; struct unin_chip_softc *sc; u_int32_t tmpl; sc = device_get_softc(dev); reg = (void *)(sc->sc_addr + regoff); tmpl = inl(reg); tmpl &= ~clr; tmpl |= set; outl(reg, tmpl); } static void unin_enable_gmac(device_t dev) { unin_update_reg(dev, UNIN_CLOCKCNTL, UNIN_CLOCKCNTL_GMAC, 0); } static void unin_enable_mpic(device_t dev) { unin_update_reg(dev, UNIN_TOGGLE_REG, UNIN_MPIC_RESET | UNIN_MPIC_OUTPUT_ENABLE, 0); } static int unin_chip_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL) return (ENXIO); if (strcmp(name, "uni-n") != 0 && strcmp(name, "u3") != 0 && strcmp(name, "u4") != 0) return (ENXIO); device_set_desc(dev, "Apple UniNorth System Controller"); return (0); } static int unin_chip_attach(device_t dev) { struct unin_chip_softc *sc; struct unin_chip_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t iparent; device_t cdev; cell_t acells, scells; char compat[32]; char name[32]; u_int irq, reg[3]; int error, i = 0; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); if (OF_getprop(root, "reg", reg, sizeof(reg)) < 8) return (ENXIO); acells = scells = 1; OF_getprop(OF_parent(root), "#address-cells", &acells, sizeof(acells)); OF_getprop(OF_parent(root), "#size-cells", &scells, sizeof(scells)); i = 0; sc->sc_physaddr = reg[i++]; if (acells == 2) { sc->sc_physaddr <<= 32; sc->sc_physaddr |= reg[i++]; } sc->sc_size = reg[i++]; if (scells == 2) { sc->sc_size <<= 32; sc->sc_size |= reg[i++]; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "UniNorth Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, sc->sc_physaddr, sc->sc_physaddr + sc->sc_size - 1); if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); return (error); } if (unin_chip == NULL) unin_chip = dev; /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_UNIN, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->udi_obdinfo, child) != 0) { free(dinfo, M_UNIN); continue; } resource_list_init(&dinfo->udi_resources); dinfo->udi_ninterrupts = 0; unin_chip_add_intr(child, dinfo); /* * Some Apple machines do have a bug in OF, they miss * the interrupt entries on the U3 I2C node. That means they * do not have an entry with number of interrupts nor the * entry of the interrupt parent handle. * We define an interrupt and hardwire it to the /u3/mpic * handle. */ if (OF_getprop(child, "name", name, sizeof(name)) <= 0) device_printf(dev, "device has no name!\n"); if (dinfo->udi_ninterrupts == 0 && (strcmp(name, "i2c-bus") == 0 || strcmp(name, "i2c") == 0)) { if (OF_getprop(child, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) { iparent = OF_finddevice("/u3/mpic"); device_printf(dev, "Set /u3/mpic as iparent!\n"); } /* Add an interrupt number 0 to the parent. */ irq = MAP_IRQ(iparent, 0); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } unin_chip_add_reg(child, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->udi_obdinfo.obd_name); resource_list_free(&dinfo->udi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->udi_obdinfo); free(dinfo, M_UNIN); continue; } device_set_ivars(cdev, dinfo); } /* * Only map the first page, since that is where the registers * of interest lie. */ sc->sc_addr = (vm_offset_t)pmap_mapdev(sc->sc_physaddr, PAGE_SIZE); sc->sc_version = *(u_int *)sc->sc_addr; device_printf(dev, "Version %d\n", sc->sc_version); /* * Enable the GMAC Ethernet cell and the integrated OpenPIC * if Open Firmware says they are used. */ for (child = OF_child(root); child; child = OF_peer(child)) { memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); if (strcmp(compat, "chrp,open-pic") == 0) unin_enable_mpic(dev); } /* * GMAC lives under the PCI bus, so just check if enet is gmac. 
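The attach loop above seeds each child's resource list from its Open Firmware properties, and later allocations are validated against those entries. A hedged sketch of the add/find pairing (base and size stand in for decoded reg values; note that unin_chip_add_reg stores base + size as the end address and trims it at allocation time, hence the "Hack?" comment below):

	/* at attach: record the child's memory window as rid 0 */
	resource_list_add(&dinfo->udi_resources, SYS_RES_MEMORY, 0,
	    base, base + size - 1, size);

	/* at allocation: look the window back up by type and rid */
	rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY, 0);
	if (rle == NULL)
		return (NULL);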
*/ child = OF_finddevice("enet"); memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); return (bus_generic_attach(dev)); } static int unin_chip_print_child(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void unin_chip_probe_nomatch(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct rman * unin_chip_get_rman(device_t bus, int type, u_int flags) { struct unin_chip_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * unin_chip_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { rman_res_t adjstart, adjend, adjcount; struct unin_chip_devinfo *dinfo; struct resource_list_entry *rle; dinfo = device_get_ivars(child); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } rle->end = rle->end - 1; /* Hack? */ if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; return (bus_generic_rman_alloc_resource(bus, child, SYS_RES_MEMORY, rid, adjstart, adjend, adjcount, flags)); case SYS_RES_IRQ: /* Check for passthrough from subattachments. 
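From the child driver's side the allocation path is unchanged, while release now takes only the resource pointer. A minimal sketch, assuming a hypothetical softc with memrid/memres fields:

	sc->memrid = 0;
	sc->memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memrid, RF_ACTIVE);
	if (sc->memres == NULL)
		return (ENXIO);
	/* ... and on detach, no type or rid argument: */
	bus_release_resource(dev, sc->memres);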
*/ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->udi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->udi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, start, start, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = start; dinfo->udi_ninterrupts++; } return (resource_list_alloc(&dinfo->udi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int unin_chip_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int -unin_chip_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +unin_chip_release_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: - return (bus_generic_rman_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rman_release_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_rl_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rl_release_resource(bus, child, res)); default: return (EINVAL); } } static int unin_chip_activate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, res)); case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, res)); default: return (EINVAL); } } static int unin_chip_deactivate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, res)); case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int unin_chip_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); if (bootverbose) printf("nexus mapdev: start %jx, len %jd\n", (uintmax_t)start, (uintmax_t)length); map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_be_tag; map->r_size = length; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int unin_chip_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. 
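unin_chip_map_resource() above honors an optional resource_map_request, so a child can map just a sub-range of an active resource instead of the whole window. A hedged consumer-side sketch (FOO_REGS_OFF and FOO_REGS_SZ are hypothetical):

	struct resource_map_request req;
	struct resource_map map;
	int error;

	resource_init_map_request(&req);
	req.offset = FOO_REGS_OFF;	/* map only the register block */
	req.length = FOO_REGS_SZ;
	error = bus_map_resource(dev, sc->memres, &req, &map);
	if (error != 0)
		return (error);
	/* map.r_bustag and map.r_bushandle now back bus_read_*()/bus_write_*() */
	bus_unmap_resource(dev, sc->memres, &map);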
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); break; default: return (EINVAL); } return (0); } static struct resource_list * unin_chip_get_resource_list (device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_resources); } static const struct ofw_bus_devinfo * unin_chip_get_devinfo(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_obdinfo); } int unin_chip_wake(device_t dev) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_NORMAL, UNIN_PWR_MASK); DELAY(10); unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_RUNNING, 0); DELAY(100); return (0); } int unin_chip_sleep(device_t dev, int idle) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_SLEEPING, 0); DELAY(10); if (idle) unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_IDLE2, UNIN_PWR_MASK); else unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_SLEEP, UNIN_PWR_MASK); DELAY(10); return (0); } diff --git a/sys/powerpc/psim/ata_iobus.c b/sys/powerpc/psim/ata_iobus.c index 691e7b6fb958..5a0eea32055a 100644 --- a/sys/powerpc/psim/ata_iobus.c +++ b/sys/powerpc/psim/ata_iobus.c @@ -1,248 +1,246 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * PSIM local bus ATA controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Define the iobus ata bus attachment. 
This creates a pseudo-bus that * the ATA device can be attached to */ static int ata_iobus_attach(device_t dev); static int ata_iobus_probe(device_t dev); static int ata_iobus_print_child(device_t dev, device_t child); struct resource *ata_iobus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); -static int ata_iobus_release_resource(device_t, device_t, int, int, - struct resource *); +static int ata_iobus_release_resource(device_t, device_t, struct resource *); static device_method_t ata_iobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ata_iobus_probe), DEVMETHOD(device_attach, ata_iobus_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus methods */ DEVMETHOD(bus_print_child, ata_iobus_print_child), DEVMETHOD(bus_alloc_resource, ata_iobus_alloc_resource), DEVMETHOD(bus_release_resource, ata_iobus_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t ata_iobus_driver = { "ataiobus", ata_iobus_methods, 0, }; DRIVER_MODULE(ataiobus, iobus, ata_iobus_driver, NULL, NULL); MODULE_DEPEND(ata, ata, 1, 1, 1); static int ata_iobus_probe(device_t dev) { char *type = iobus_get_name(dev); if (strncmp(type, "ata", 3) != 0) return (ENXIO); device_set_desc(dev, "PSIM ATA Controller"); return (0); } static int ata_iobus_attach(device_t dev) { /* * Add a single child per controller. Should be able * to add two */ device_add_child(dev, "ata", -1); return (bus_generic_attach(dev)); } static int ata_iobus_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); /* irq ? 
*/ retval += bus_print_child_footer(dev, child); return (retval); } struct resource * ata_iobus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res = NULL; int myrid; u_int *ofw_regs; ofw_regs = iobus_get_regs(dev); /* * The reg array for the PSIM ata device has 6 start/size entries: * 0 - unused * 1/2/3 - unused * 4/5/6 - primary command * 7/8/9 - secondary command * 10/11/12 - primary control * 13/14/15 - secondary control * 16/17/18 - primary/secondary dma registers, unimplemented * * The resource values are calculated from these registers */ if (type == SYS_RES_IOPORT) { switch (*rid) { case ATA_IOADDR_RID: myrid = 0; start = ofw_regs[4]; end = start + ATA_IOSIZE - 1; count = ATA_IOSIZE; res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_MEMORY, &myrid, start, end, count, flags); break; case ATA_CTLADDR_RID: myrid = 0; start = ofw_regs[10]; end = start + ATA_CTLIOSIZE - 1; count = ATA_CTLIOSIZE; res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_MEMORY, &myrid, start, end, count, flags); break; case ATA_BMADDR_RID: /* DMA not properly supported by psim */ break; } return (res); } else if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) { /* * Pass this on to the parent */ return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IRQ, rid, 0, ~0, 1, flags)); } else { return (NULL); } } static int -ata_iobus_release_resource(device_t dev, device_t child, int type, int rid, - struct resource *r) +ata_iobus_release_resource(device_t dev, device_t child, struct resource *r) { /* no hotplug... */ return (0); } /* * Define the actual ATA device. This is a sub-bus to the ata-iobus layer * to allow the higher layer bus to massage the resource allocation. */ static int ata_iobus_sub_probe(device_t dev); static int ata_iobus_sub_setmode(device_t dev, int target, int mode); static device_method_t ata_iobus_sub_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ata_iobus_sub_probe), DEVMETHOD(device_attach, ata_attach), DEVMETHOD(device_detach, ata_detach), DEVMETHOD(device_resume, ata_resume), /* ATA interface */ DEVMETHOD(ata_setmode, ata_iobus_sub_setmode), DEVMETHOD_END }; static driver_t ata_iobus_sub_driver = { "ata", ata_iobus_sub_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, ataiobus, ata_iobus_sub_driver, NULL, NULL); static int ata_iobus_sub_probe(device_t dev) { struct ata_channel *ch = device_get_softc(dev); /* Only a single unit per controller thus far */ ch->unit = 0; ch->flags = (ATA_USE_16BIT|ATA_NO_SLAVE); ata_generic_hw(dev); return ata_probe(dev); } static int ata_iobus_sub_setmode(device_t parent, int target, int mode) { /* Only ever PIO mode here... */ return (ATA_PIO); } diff --git a/sys/powerpc/psim/iobus.c b/sys/powerpc/psim/iobus.c index cea6fc5fb15e..970ade9f886b 100644 --- a/sys/powerpc/psim/iobus.c +++ b/sys/powerpc/psim/iobus.c @@ -1,457 +1,454 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PSIM 'iobus' local bus. Should be set up in the device tree like: * * /iobus@0x80000000/name psim-iobus * * Code borrowed from various nexus.c and uninorth.c :-) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct iobus_softc { phandle_t sc_node; vm_offset_t sc_addr; vm_offset_t sc_size; struct rman sc_mem_rman; }; static MALLOC_DEFINE(M_IOBUS, "iobus", "iobus device information"); static int iobus_probe(device_t); static int iobus_attach(device_t); static int iobus_print_child(device_t dev, device_t child); static void iobus_probe_nomatch(device_t, device_t); static int iobus_read_ivar(device_t, device_t, int, uintptr_t *); static int iobus_write_ivar(device_t, device_t, int, uintptr_t); static struct rman *iobus_get_rman(device_t, int, u_int); static struct resource *iobus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int iobus_adjust_resource(device_t, device_t, struct resource *, rman_res_t, rman_res_t); static int iobus_activate_resource(device_t, device_t, struct resource *); static int iobus_deactivate_resource(device_t, device_t, struct resource *); static int iobus_map_resource(device_t, device_t, struct resource *, struct resource_map_request *, struct resource_map *); static int iobus_unmap_resource(device_t, device_t, struct resource *, struct resource_map *); -static int iobus_release_resource(device_t, device_t, int, int, - struct resource *); +static int iobus_release_resource(device_t, device_t, struct resource *); /* * Bus interface definition */ static device_method_t iobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iobus_probe), DEVMETHOD(device_attach, iobus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, iobus_print_child), DEVMETHOD(bus_probe_nomatch, iobus_probe_nomatch), DEVMETHOD(bus_read_ivar, iobus_read_ivar), DEVMETHOD(bus_write_ivar, iobus_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_rman, iobus_get_rman), DEVMETHOD(bus_alloc_resource, iobus_alloc_resource), DEVMETHOD(bus_adjust_resource, iobus_adjust_resource), DEVMETHOD(bus_release_resource, 
iobus_release_resource), DEVMETHOD(bus_activate_resource, iobus_activate_resource), DEVMETHOD(bus_deactivate_resource, iobus_deactivate_resource), DEVMETHOD(bus_map_resource, iobus_map_resource), DEVMETHOD(bus_unmap_resource, iobus_unmap_resource), { 0, 0 } }; static driver_t iobus_driver = { "iobus", iobus_methods, sizeof(struct iobus_softc) }; DRIVER_MODULE(iobus, ofwbus, iobus_driver, 0, 0); static int iobus_probe(device_t dev) { const char *type = ofw_bus_get_name(dev); if (strcmp(type, "psim-iobus") != 0) return (ENXIO); device_set_desc(dev, "PSIM local bus"); return (0); } /* * Add interrupt/addr range to the dev's resource list if present */ static void iobus_add_intr(phandle_t devnode, struct iobus_devinfo *dinfo) { u_int intr = -1; if (OF_getprop(devnode, "interrupt", &intr, sizeof(intr)) != -1) { resource_list_add(&dinfo->id_resources, SYS_RES_IRQ, 0, intr, intr, 1); } dinfo->id_interrupt = intr; } static void iobus_add_reg(phandle_t devnode, struct iobus_devinfo *dinfo, vm_offset_t iobus_off) { u_int size; int i; size = OF_getprop(devnode, "reg", dinfo->id_reg,sizeof(dinfo->id_reg)); if (size != -1) { dinfo->id_nregs = size / (sizeof(dinfo->id_reg[0])); for (i = 0; i < dinfo->id_nregs; i+= 3) { /* * Scale the absolute addresses back to iobus * relative offsets. This is to better simulate * macio */ dinfo->id_reg[i+1] -= iobus_off; resource_list_add(&dinfo->id_resources, SYS_RES_MEMORY, 0, dinfo->id_reg[i+1], dinfo->id_reg[i+1] + dinfo->id_reg[i+2], dinfo->id_reg[i+2]); } } } static int iobus_attach(device_t dev) { struct iobus_softc *sc; struct iobus_devinfo *dinfo; phandle_t root; phandle_t child; device_t cdev; char *name; u_int reg[2]; int size; sc = device_get_softc(dev); sc->sc_node = ofw_bus_get_node(dev); /* * Find the base addr/size of the iobus, and initialize the * resource manager */ size = OF_getprop(sc->sc_node, "reg", reg, sizeof(reg)); if (size == sizeof(reg)) { sc->sc_addr = reg[0]; sc->sc_size = reg[1]; } else { return (ENXIO); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "IOBus Device Memory"; if (rman_init(&sc->sc_mem_rman) != 0) { device_printf(dev, "failed to init mem range resources\n"); return (ENXIO); } rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); /* * Iterate through the sub-devices */ root = sc->sc_node; for (child = OF_child(root); child != 0; child = OF_peer(child)) { OF_getprop_alloc(child, "name", (void **)&name); cdev = device_add_child(dev, NULL, -1); if (cdev != NULL) { dinfo = malloc(sizeof(*dinfo), M_IOBUS, M_WAITOK); memset(dinfo, 0, sizeof(*dinfo)); resource_list_init(&dinfo->id_resources); dinfo->id_node = child; dinfo->id_name = name; iobus_add_intr(child, dinfo); iobus_add_reg(child, dinfo, sc->sc_addr); device_set_ivars(cdev, dinfo); } else { OF_prop_free(name); } } return (bus_generic_attach(dev)); } static int iobus_print_child(device_t dev, device_t child) { struct iobus_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->id_resources; retval += bus_print_child_header(dev, child); retval += printf(" offset 0x%x", dinfo->id_reg[1]); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void iobus_probe_nomatch(device_t dev, device_t child) { } static int iobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct iobus_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == NULL) return (ENOENT); switch (which) { case IOBUS_IVAR_NODE: *result = 
dinfo->id_node; break; case IOBUS_IVAR_NAME: *result = (uintptr_t)dinfo->id_name; break; case IOBUS_IVAR_NREGS: *result = dinfo->id_nregs; break; case IOBUS_IVAR_REGS: *result = (uintptr_t)dinfo->id_reg; break; default: return (ENOENT); } return (0); } static int iobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct rman * iobus_get_rman(device_t bus, int type, u_int flags) { struct iobus_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&sc->sc_mem_rman); default: return (NULL); } } static struct resource * iobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); case SYS_RES_IRQ: return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } } static int iobus_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_adjust_resource(bus, child, r, start, end)); case SYS_RES_IRQ: return (bus_generic_adjust_resource(bus, child, r, start, end)); default: return (EINVAL); } } static int -iobus_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) +iobus_release_resource(device_t bus, device_t child, struct resource *res) { - switch (type) { + switch (rman_get_type(res)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: - return (bus_generic_rman_release_resource(bus, child, type, rid, - res)); + return (bus_generic_rman_release_resource(bus, child, res)); case SYS_RES_IRQ: - return (bus_generic_release_resource(bus, child, type, rid, res)); + return (bus_generic_release_resource(bus, child, res)); default: return (EINVAL); } } static int iobus_activate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IRQ: return (bus_generic_activate_resource(bus, child, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_activate_resource(bus, child, res)); default: return (EINVAL); } } static int iobus_deactivate_resource(device_t bus, device_t child, struct resource *res) { switch (rman_get_type(res)) { case SYS_RES_IRQ: return (bus_generic_deactivate_resource(bus, child, res)); case SYS_RES_IOPORT: case SYS_RES_MEMORY: return (bus_generic_rman_deactivate_resource(bus, child, res)); default: return (EINVAL); } } static int iobus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct iobus_softc *sc; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. 
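The read_ivar method above is what backs accessors such as the iobus_get_name() call in the psim ATA probe; such wrappers are conventionally generated with the __BUS_ACCESSOR macro declared later in sys/bus.h. A sketch of the presumed pattern from the iobus headers (not shown in this diff):

/* generates iobus_get_name()/iobus_set_name() around BUS_READ/WRITE_IVAR */
__BUS_ACCESSOR(iobus, name, IOBUS, NAME, char *)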
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sc = device_get_softc(bus); map->r_vaddr = pmap_mapdev_attr((vm_paddr_t)start + sc->sc_addr, (vm_size_t)length, args.memattr); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = &bs_le_tag; map->r_bushandle = (vm_offset_t)map->r_vaddr; map->r_size = length; return (0); } static int iobus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } diff --git a/sys/sys/bus.h b/sys/sys/bus.h index 76b1aa4ad2f5..e0ad2dc447fe 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -1,1082 +1,1080 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_BUS_H_ #define _SYS_BUS_H_ #include #include #include #include /** * @defgroup NEWBUS newbus - a generic framework for managing devices * @{ */ /** * @brief Interface information structure. */ struct u_businfo { int ub_version; /**< @brief interface version */ #define BUS_USER_VERSION 2 int ub_generation; /**< @brief generation count */ }; /** * @brief State of the device. */ typedef enum device_state { DS_NOTPRESENT = 10, /**< @brief not probed or probe failed */ DS_ALIVE = 20, /**< @brief probe succeeded */ DS_ATTACHING = 25, /**< @brief currently attaching */ DS_ATTACHED = 30, /**< @brief attach method called */ } device_state_t; /** * @brief Device proprty types. * * Those are used by bus logic to encode requested properties, * e.g. in DT all properties are stored as BE and need to be converted * to host endianness. */ typedef enum device_property_type { DEVICE_PROP_ANY = 0, DEVICE_PROP_BUFFER = 1, DEVICE_PROP_UINT32 = 2, DEVICE_PROP_UINT64 = 3, DEVICE_PROP_HANDLE = 4, } device_property_type_t; /** * @brief Device information exported to userspace. * The strings are placed one after the other, separated by NUL characters. 
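These property types feed device_get_property()/device_has_property(), declared further down; the type tells the bus how to convert the stored representation (for example big-endian FDT cells) to host order. A hedged usage sketch with an illustrative property name:

	uint32_t depth;

	if (device_get_property(dev, "fifo-depth", &depth, sizeof(depth),
	    DEVICE_PROP_UINT32) > 0)
		sc->fifo_depth = depth;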
* Fields should be added after the last one and order maintained for compatibility */ #define BUS_USER_BUFFER (3*1024) struct u_device { uintptr_t dv_handle; uintptr_t dv_parent; uint32_t dv_devflags; /**< @brief API Flags for device */ uint16_t dv_flags; /**< @brief flags for dev state */ device_state_t dv_state; /**< @brief State of attachment */ char dv_fields[BUS_USER_BUFFER]; /**< @brief NUL terminated fields */ /* name (name of the device in tree) */ /* desc (driver description) */ /* drivername (Name of driver without unit number) */ /* pnpinfo (Plug and play information from bus) */ /* location (Location of device on parent */ /* NUL */ }; /* Flags exported via dv_flags. */ #define DF_ENABLED 0x01 /* device should be probed/attached */ #define DF_FIXEDCLASS 0x02 /* devclass specified at create time */ #define DF_WILDCARD 0x04 /* unit was originally wildcard */ #define DF_DESCMALLOCED 0x08 /* description was malloced */ #define DF_QUIET 0x10 /* don't print verbose attach message */ #define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */ #define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */ #define DF_SUSPENDED 0x100 /* Device is suspended. */ #define DF_QUIET_CHILDREN 0x200 /* Default to quiet for all my children */ #define DF_ATTACHED_ONCE 0x400 /* Has been attached at least once */ #define DF_NEEDNOMATCH 0x800 /* Has a pending NOMATCH event */ /** * @brief Device request structure used for ioctl's. * * Used for ioctl's on /dev/devctl2. All device ioctl's * must have parameter definitions which begin with dr_name. */ struct devreq_buffer { void *buffer; size_t length; }; struct devreq { char dr_name[128]; int dr_flags; /* request-specific flags */ union { struct devreq_buffer dru_buffer; void *dru_data; } dr_dru; #define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */ #define dr_data dr_dru.dru_data /* fixed-size buffer */ }; #define DEV_ATTACH _IOW('D', 1, struct devreq) #define DEV_DETACH _IOW('D', 2, struct devreq) #define DEV_ENABLE _IOW('D', 3, struct devreq) #define DEV_DISABLE _IOW('D', 4, struct devreq) #define DEV_SUSPEND _IOW('D', 5, struct devreq) #define DEV_RESUME _IOW('D', 6, struct devreq) #define DEV_SET_DRIVER _IOW('D', 7, struct devreq) #define DEV_CLEAR_DRIVER _IOW('D', 8, struct devreq) #define DEV_RESCAN _IOW('D', 9, struct devreq) #define DEV_DELETE _IOW('D', 10, struct devreq) #define DEV_FREEZE _IOW('D', 11, struct devreq) #define DEV_THAW _IOW('D', 12, struct devreq) #define DEV_RESET _IOW('D', 13, struct devreq) #define DEV_GET_PATH _IOWR('D', 14, struct devreq) /* Flags for DEV_DETACH and DEV_DISABLE. */ #define DEVF_FORCE_DETACH 0x0000001 /* Flags for DEV_SET_DRIVER. */ #define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_CLEAR_DRIVER. */ #define DEVF_CLEAR_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_DELETE. */ #define DEVF_FORCE_DELETE 0x0000001 /* Flags for DEV_RESET */ #define DEVF_RESET_DETACH 0x0000001 /* Detach drivers vs suspend device */ #ifdef _KERNEL #include #include #include #include /** * Device name parsers. Hook to allow device enumerators to map * scheme-specific names to a device. */ typedef void (*dev_lookup_fn)(void *arg, const char *name, device_t *result); EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn); /** * @brief A device driver. * * Provides an abstraction layer for driver dispatch. */ typedef struct kobj_class driver_t; /** * @brief A device class * * The devclass object has two main functions in the system. 
The first * is to manage the allocation of unit numbers for device instances * and the second is to hold the list of device drivers for a * particular bus type. Each devclass has a name and there cannot be * two devclasses with the same name. This ensures that unique unit * numbers are allocated to device instances. * * Drivers that support several different bus attachments (e.g. isa, * pci, pccard) should all use the same devclass to ensure that unit * numbers do not conflict. * * Each devclass may also have a parent devclass. This is used when * searching for device drivers to allow a form of inheritance. When * matching drivers with devices, first the driver list of the parent * device's devclass is searched. If no driver is found in that list, * the search continues in the parent devclass (if any). */ typedef struct devclass *devclass_t; /** * @brief A device method */ #define device_method_t kobj_method_t /** * @brief Driver interrupt filter return values * * If a driver provides an interrupt filter routine it must return an * integer consisting of oring together zero or more of the following * flags: * * FILTER_STRAY - this device did not trigger the interrupt * FILTER_HANDLED - the interrupt has been fully handled and can be EOId * FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be * scheduled to execute * * If the driver does not provide a filter, then the interrupt code will * act is if the filter had returned FILTER_SCHEDULE_THREAD. Note that it * is illegal to specify any other flag with FILTER_STRAY and that it is * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD * if FILTER_STRAY is not specified. */ #define FILTER_STRAY 0x01 #define FILTER_HANDLED 0x02 #define FILTER_SCHEDULE_THREAD 0x04 /** * @brief Driver interrupt service routines * * The filter routine is run in primary interrupt context and may not * block or use regular mutexes. It may only use spin mutexes for * synchronization. The filter may either completely handle the * interrupt or it may perform some of the work and defer more * expensive work to the regular interrupt handler. If a filter * routine is not registered by the driver, then the regular interrupt * handler is always used to handle interrupts from this device. * * The regular interrupt handler executes in its own thread context * and may use regular mutexes. However, it is prohibited from * sleeping on a sleep queue. */ typedef int driver_filter_t(void*); typedef void driver_intr_t(void*); /** * @brief Interrupt type bits. * * These flags may be passed by drivers to bus_setup_intr(9) when * registering a new interrupt handler. The field is overloaded to * specify both the interrupt's type and any special properties. * * The INTR_TYPE* bits will be passed to intr_priority(9) to determine * the scheduling priority of the handler's ithread. Historically, each * type was assigned a unique scheduling preference, but now only * INTR_TYPE_CLK receives a default priority higher than other * interrupts. See sys/priority.h. * * Buses may choose to modify or augment these flags as appropriate, * e.g. nexus may apply INTR_EXCL. 
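A minimal filter and registration honoring the contract above (the status register and its pending bit are hypothetical):

static int
foo_filter(void *arg)
{
	struct foo_softc *sc = arg;

	if ((bus_read_4(sc->memres, FOO_ISR) & FOO_ISR_PEND) == 0)
		return (FILTER_STRAY);		/* not our interrupt */
	return (FILTER_SCHEDULE_THREAD);	/* run foo_intr() in its thread */
}

	error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_MISC | INTR_MPSAFE,
	    foo_filter, foo_intr, sc, &sc->intrhand);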
*/ enum intr_type { INTR_TYPE_TTY = 1, INTR_TYPE_BIO = 2, INTR_TYPE_NET = 4, INTR_TYPE_CAM = 8, INTR_TYPE_MISC = 16, INTR_TYPE_CLK = 32, INTR_TYPE_AV = 64, INTR_EXCL = 256, /* exclusive interrupt */ INTR_MPSAFE = 512, /* this interrupt is SMP safe */ INTR_ENTROPY = 1024, /* this interrupt provides entropy */ INTR_MD1 = 4096, /* flag reserved for MD use */ INTR_MD2 = 8192, /* flag reserved for MD use */ INTR_MD3 = 16384, /* flag reserved for MD use */ INTR_MD4 = 32768 /* flag reserved for MD use */ }; enum intr_trigger { INTR_TRIGGER_INVALID = -1, INTR_TRIGGER_CONFORM = 0, INTR_TRIGGER_EDGE = 1, INTR_TRIGGER_LEVEL = 2 }; enum intr_polarity { INTR_POLARITY_CONFORM = 0, INTR_POLARITY_HIGH = 1, INTR_POLARITY_LOW = 2 }; /** * CPU sets supported by bus_get_cpus(). Note that not all sets may be * supported for a given device. If a request is not supported by a * device (or its parents), then bus_get_cpus() will fail with EINVAL. */ enum cpu_sets { LOCAL_CPUS = 0, INTR_CPUS }; typedef int (*devop_t)(void); /** * @brief This structure is deprecated. * * Use the kobj(9) macro DEFINE_CLASS to * declare classes which implement device drivers. */ struct driver { KOBJ_CLASS_FIELDS; }; struct resource; /** * @brief A resource mapping. */ struct resource_map { bus_space_tag_t r_bustag; bus_space_handle_t r_bushandle; bus_size_t r_size; void *r_vaddr; }; /** * @brief Optional properties of a resource mapping request. */ struct resource_map_request { size_t size; rman_res_t offset; rman_res_t length; vm_memattr_t memattr; }; void resource_init_map_request_impl(struct resource_map_request *_args, size_t _sz); #define resource_init_map_request(rmr) \ resource_init_map_request_impl((rmr), sizeof(*(rmr))) int resource_validate_map_request(struct resource *r, struct resource_map_request *in, struct resource_map_request *out, rman_res_t *startp, rman_res_t *lengthp); /* * Definitions for drivers which need to keep simple lists of resources * for their child devices. */ /** * @brief An entry for a single resource in a resource list. */ struct resource_list_entry { STAILQ_ENTRY(resource_list_entry) link; int type; /**< @brief type argument to alloc_resource */ int rid; /**< @brief resource identifier */ int flags; /**< @brief resource flags */ struct resource *res; /**< @brief the real resource when allocated */ rman_res_t start; /**< @brief start of resource range */ rman_res_t end; /**< @brief end of resource range */ rman_res_t count; /**< @brief count within range */ }; STAILQ_HEAD(resource_list, resource_list_entry); #define RLE_RESERVED 0x0001 /* Reserved by the parent bus. */ #define RLE_ALLOCATED 0x0002 /* Reserved resource is allocated. */ #define RLE_PREFETCH 0x0004 /* Resource is a prefetch range. 
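resource_list is a STAILQ of the entries above, so a bus may also walk it directly, much as resource_list_print_type() does. A hedged sketch:

	struct resource_list_entry *rle;

	STAILQ_FOREACH(rle, rl, link)
		if (rle->type == SYS_RES_IRQ && rle->res != NULL)
			device_printf(dev, "irq %ju is allocated\n",
			    (uintmax_t)rle->start);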
*/ void resource_list_init(struct resource_list *rl); void resource_list_free(struct resource_list *rl); struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count); int resource_list_busy(struct resource_list *rl, int type, int rid); int resource_list_reserved(struct resource_list *rl, int type, int rid); struct resource_list_entry* resource_list_find(struct resource_list *rl, int type, int rid); void resource_list_delete(struct resource_list *rl, int type, int rid); struct resource * resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_release(struct resource_list *rl, device_t bus, device_t child, - int type, int rid, struct resource *res); + struct resource *res); int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type); struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child, int type, int rid); void resource_list_purge(struct resource_list *rl); int resource_list_print_type(struct resource_list *rl, const char *name, int type, const char *format); /* * The root bus, to which all top-level buses are attached. */ extern device_t root_bus; extern devclass_t root_devclass; void root_bus_configure(void); /* * Useful functions for implementing buses. */ struct _cpuset; int bus_generic_activate_resource(device_t dev, device_t child, struct resource *r); device_t bus_generic_add_child(device_t dev, u_int order, const char *name, int unit); int bus_generic_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end); struct resource * bus_generic_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_translate_resource(device_t dev, int type, rman_res_t start, rman_res_t *newstart); int bus_generic_attach(device_t dev); int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu); int bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb); int bus_generic_child_present(device_t dev, device_t child); int bus_generic_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); int bus_generic_deactivate_resource(device_t dev, device_t child, struct resource *r); int bus_generic_detach(device_t dev); void bus_generic_driver_added(device_t dev, driver_t *driver); int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child); bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child); int bus_generic_get_domain(device_t dev, device_t child, int *domain); ssize_t bus_generic_get_property(device_t dev, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type); struct resource_list * 
bus_generic_get_resource_list(device_t, device_t); int bus_generic_map_resource(device_t dev, device_t child, struct resource *r, struct resource_map_request *args, struct resource_map *map); void bus_generic_new_pass(device_t dev); int bus_print_child_header(device_t dev, device_t child); int bus_print_child_domain(device_t dev, device_t child); int bus_print_child_footer(device_t dev, device_t child); int bus_generic_print_child(device_t dev, device_t child); int bus_generic_probe(device_t dev); int bus_generic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int bus_generic_release_resource(device_t bus, device_t child, - int type, int rid, struct resource *r); + struct resource *r); int bus_generic_resume(device_t dev); int bus_generic_resume_child(device_t dev, device_t child); int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); struct resource * bus_generic_rl_alloc_resource (device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); void bus_generic_rl_delete_resource (device_t, device_t, int, int); int bus_generic_rl_get_resource (device_t, device_t, int, int, rman_res_t *, rman_res_t *); int bus_generic_rl_set_resource (device_t, device_t, int, int, rman_res_t, rman_res_t); -int bus_generic_rl_release_resource (device_t, device_t, int, int, - struct resource *); +int bus_generic_rl_release_resource (device_t, device_t, struct resource *); struct resource * bus_generic_rman_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_generic_rman_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_generic_rman_release_resource(device_t dev, device_t child, - int type, int rid, struct resource *r); + struct resource *r); int bus_generic_rman_activate_resource(device_t dev, device_t child, struct resource *r); int bus_generic_rman_deactivate_resource(device_t dev, device_t child, struct resource *r); int bus_generic_shutdown(device_t dev); int bus_generic_suspend(device_t dev); int bus_generic_suspend_child(device_t dev, device_t child); int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq); int bus_generic_unmap_resource(device_t dev, device_t child, struct resource *r, struct resource_map *map); int bus_generic_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int bus_generic_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb); int bus_helper_reset_post(device_t dev, int flags); int bus_helper_reset_prepare(device_t dev, int flags); int bus_null_rescan(device_t dev); /* * Wrapper functions for the BUS_*_RESOURCE methods to make client code * a little simpler.
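The bus_alloc_resources()/bus_release_resources() helpers below drive these methods from a table; the spec still names type and rid for allocation even though release no longer passes them explicitly. A typical table for a hypothetical foo(4) whose softc carries struct resource *res[2]:

static struct resource_spec foo_res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	RESOURCE_SPEC_END
};

	if (bus_alloc_resources(dev, foo_res_spec, sc->res) != 0)
		return (ENXIO);
	/* ... */
	bus_release_resources(dev, foo_res_spec, sc->res);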
*/ struct resource_spec { int type; int rid; int flags; }; #define RESOURCE_SPEC_END {-1, 0, 0} int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res); void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res); int bus_adjust_resource(device_t child, struct resource *r, rman_res_t start, rman_res_t end); int bus_translate_resource(device_t child, int type, rman_res_t start, rman_res_t *newstart); struct resource *bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int bus_activate_resource(device_t dev, struct resource *r); int bus_deactivate_resource(device_t dev, struct resource *r); int bus_map_resource(device_t dev, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource(device_t dev, struct resource *r, struct resource_map *map); int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, struct _cpuset *cpuset); bus_dma_tag_t bus_get_dma_tag(device_t dev); bus_space_tag_t bus_get_bus_tag(device_t dev); int bus_get_domain(device_t dev, int *domain); -int bus_release_resource(device_t dev, int type, int rid, - struct resource *r); +int bus_release_resource(device_t dev, struct resource *r); int bus_free_resource(device_t dev, int type, struct resource *r); int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep); int bus_teardown_intr(device_t dev, struct resource *r, void *cookie); int bus_suspend_intr(device_t dev, struct resource *r); int bus_resume_intr(device_t dev, struct resource *r); int bus_bind_intr(device_t dev, struct resource *r, int cpu); int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) __printflike(4, 5); int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count); int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp); rman_res_t bus_get_resource_start(device_t dev, int type, int rid); rman_res_t bus_get_resource_count(device_t dev, int type, int rid); void bus_delete_resource(device_t dev, int type, int rid); int bus_child_present(device_t child); int bus_child_pnpinfo(device_t child, struct sbuf *sb); int bus_child_location(device_t child, struct sbuf *sb); void bus_enumerate_hinted_children(device_t bus); int bus_delayed_attach_children(device_t bus); static __inline struct resource * bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, 1, flags)); } static __inline struct resource * bus_alloc_resource_anywhere(device_t dev, int type, int *rid, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, 0, ~0, count, flags)); } /* Compat shims for simpler bus resource API. 
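The variadic _BUS_API_MACRO below dispatches on argument count, so both calling conventions compile during the transition; counting arguments against the macro shows a four-argument call selecting the _old shim and a two-argument call selecting the new function:

	bus_release_resource(dev, SYS_RES_MEMORY, rid, r);	/* -> bus_release_resource_old() */
	bus_release_resource(dev, r);				/* -> bus_release_resource() */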
*/ int bus_adjust_resource_old(device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); int bus_activate_resource_old(device_t dev, int type, int rid, struct resource *r); int bus_deactivate_resource_old(device_t dev, int type, int rid, struct resource *r); int bus_map_resource_old(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map); int bus_unmap_resource_old(device_t dev, int type, struct resource *r, struct resource_map *map); -int bus_release_resource_new(device_t dev, struct resource *r); +int bus_release_resource_old(device_t dev, int type, int rid, + struct resource *r); #define _BUS_API_MACRO(_1, _2, _3, _4, _5, NAME, ...) NAME #define bus_adjust_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, bus_adjust_resource_old, \ bus_adjust_resource)(__VA_ARGS__) #define bus_activate_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_activate_resource_old, \ INVALID, bus_activate_resource)(__VA_ARGS__) #define bus_deactivate_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_deactivate_resource_old, \ INVALID, bus_deactivate_resource)(__VA_ARGS__) #define bus_map_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, bus_map_resource_old, \ bus_map_resource)(__VA_ARGS__) #define bus_unmap_resource(...) \ _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_unmap_resource_old, \ bus_unmap_resource)(__VA_ARGS__) #define bus_release_resource(...) \ - _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_release_resource, \ - INVALID, bus_release_resource_new)(__VA_ARGS__) + _BUS_API_MACRO(__VA_ARGS__, INVALID, bus_release_resource_old, \ + INVALID, bus_release_resource)(__VA_ARGS__) /* * Access functions for device. */ device_t device_add_child(device_t dev, const char *name, int unit); device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit); void device_busy(device_t dev); int device_delete_child(device_t dev, device_t child); int device_delete_children(device_t dev); int device_attach(device_t dev); int device_detach(device_t dev); void device_disable(device_t dev); void device_enable(device_t dev); device_t device_find_child(device_t dev, const char *classname, int unit); const char *device_get_desc(device_t dev); devclass_t device_get_devclass(device_t dev); driver_t *device_get_driver(device_t dev); u_int32_t device_get_flags(device_t dev); device_t device_get_parent(device_t dev); int device_get_children(device_t dev, device_t **listp, int *countp); void *device_get_ivars(device_t dev); void device_set_ivars(device_t dev, void *ivars); const char *device_get_name(device_t dev); const char *device_get_nameunit(device_t dev); void *device_get_softc(device_t dev); device_state_t device_get_state(device_t dev); int device_get_unit(device_t dev); struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev); struct sysctl_oid *device_get_sysctl_tree(device_t dev); int device_has_quiet_children(device_t dev); int device_is_alive(device_t dev); /* did probe succeed? */ int device_is_attached(device_t dev); /* did attach succeed? */ int device_is_enabled(device_t dev); int device_is_suspended(device_t dev); int device_is_quiet(device_t dev); device_t device_lookup_by_name(const char *name); int device_print_prettyname(device_t dev); int device_printf(device_t dev, const char *, ...) __printflike(2, 3); int device_log(device_t dev, int pri, const char *, ...) 
__printflike(3, 4); int device_probe(device_t dev); int device_probe_and_attach(device_t dev); int device_probe_child(device_t bus, device_t dev); int device_quiesce(device_t dev); void device_quiet(device_t dev); void device_quiet_children(device_t dev); void device_set_desc(device_t dev, const char* desc); void device_set_descf(device_t dev, const char* fmt, ...) __printflike(2, 3); void device_set_desc_copy(device_t dev, const char* desc); int device_set_devclass(device_t dev, const char *classname); int device_set_devclass_fixed(device_t dev, const char *classname); bool device_is_devclass_fixed(device_t dev); int device_set_driver(device_t dev, driver_t *driver); void device_set_flags(device_t dev, u_int32_t flags); void device_set_softc(device_t dev, void *softc); void device_free_softc(void *softc); void device_claim_softc(device_t dev); int device_set_unit(device_t dev, int unit); /* XXX DONT USE XXX */ int device_shutdown(device_t dev); void device_unbusy(device_t dev); void device_verbose(device_t dev); ssize_t device_get_property(device_t dev, const char *prop, void *val, size_t sz, device_property_type_t type); bool device_has_property(device_t dev, const char *prop); /* * Access functions for devclass. */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp); devclass_t devclass_create(const char *classname); int devclass_delete_driver(devclass_t busclass, driver_t *driver); devclass_t devclass_find(const char *classname); const char *devclass_get_name(devclass_t dc); device_t devclass_get_device(devclass_t dc, int unit); void *devclass_get_softc(devclass_t dc, int unit); int devclass_get_devices(devclass_t dc, device_t **listp, int *countp); int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp); int devclass_get_count(devclass_t dc); int devclass_get_maxunit(devclass_t dc); int devclass_find_free_unit(devclass_t dc, int unit); void devclass_set_parent(devclass_t dc, devclass_t pdc); devclass_t devclass_get_parent(devclass_t dc); struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc); struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc); /* * Access functions for device resources. */ int resource_int_value(const char *name, int unit, const char *resname, int *result); int resource_long_value(const char *name, int unit, const char *resname, long *result); int resource_string_value(const char *name, int unit, const char *resname, const char **result); int resource_disabled(const char *name, int unit); int resource_find_match(int *anchor, const char **name, int *unit, const char *resname, const char *value); int resource_find_dev(int *anchor, const char *name, int *unit, const char *resname, const char *value); int resource_unset_value(const char *name, int unit, const char *resname); /* * Functions for maintaining and checking consistency of * bus information exported to userspace. */ int bus_data_generation_check(int generation); void bus_data_generation_update(void); /** * Some convenience defines for probe routines to return. These are just * suggested values, and there's nothing magical about them. * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and that no * possible other driver may exist (typically legacy drivers who don't follow * all the rules, or special needs drivers). BUS_PROBE_VENDOR is the * suggested value that vendor supplied drivers use. This is for source or * binary drivers that are not yet integrated into the FreeBSD tree. Its use * in the base OS is prohibited. 
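A conventional probe routine returning one of these values (the identification check is hypothetical):

static int
foo_probe(device_t dev)
{
	if (!foo_match(dev))	/* hypothetical device identification */
		return (ENXIO);
	device_set_desc(dev, "Foo example controller");
	return (BUS_PROBE_DEFAULT);
}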
BUS_PROBE_DEFAULT is the normal return value * for drivers to use. It is intended that nearly all of the drivers in the * tree should return this value. BUS_PROBE_LOW_PRIORITY are for drivers that * have special requirements like when there are two drivers that support * overlapping series of hardware devices. In this case the one that supports * the older part of the line would return this value, while the one that * supports the newer ones would return BUS_PROBE_DEFAULT. BUS_PROBE_GENERIC * is for drivers that wish to have a generic form and a specialized form, * like is done with the pci bus and the acpi pci bus. BUS_PROBE_HOOVER is * for those buses that implement a generic device placeholder for devices on * the bus that have no more specific driver for them (aka ugen). * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding * for a device node, but accepts only devices that its parent has told it * use this driver. */ #define BUS_PROBE_SPECIFIC 0 /* Only I can use this device */ #define BUS_PROBE_VENDOR (-10) /* Vendor supplied driver */ #define BUS_PROBE_DEFAULT (-20) /* Base OS default driver */ #define BUS_PROBE_LOW_PRIORITY (-40) /* Older, less desirable drivers */ #define BUS_PROBE_GENERIC (-100) /* generic driver for dev */ #define BUS_PROBE_HOOVER (-1000000) /* Driver for any dev on bus */ #define BUS_PROBE_NOWILDCARD (-2000000000) /* No wildcard device matches */ /** * During boot, the device tree is scanned multiple times. Each scan, * or pass, drivers may be attached to devices. Each driver * attachment is assigned a pass number. Drivers may only probe and * attach to devices if their pass number is less than or equal to the * current system-wide pass number. The default pass is the last pass * and is used by most drivers. Drivers needed by the scheduler are * probed in earlier passes. */ #define BUS_PASS_ROOT 0 /* Used to attach root0. */ #define BUS_PASS_BUS 10 /* Buses and bridges. */ #define BUS_PASS_CPU 20 /* CPU devices. */ #define BUS_PASS_RESOURCE 30 /* Resource discovery. */ #define BUS_PASS_INTERRUPT 40 /* Interrupt controllers. */ #define BUS_PASS_TIMER 50 /* Timers and clocks. */ #define BUS_PASS_SCHEDULER 60 /* Start scheduler. */ #define BUS_PASS_SUPPORTDEV 100000 /* Drivers which support DEFAULT drivers. */ #define BUS_PASS_DEFAULT __INT_MAX /* Everything else. */ #define BUS_PASS_ORDER_FIRST 0 #define BUS_PASS_ORDER_EARLY 2 #define BUS_PASS_ORDER_MIDDLE 5 #define BUS_PASS_ORDER_LATE 7 #define BUS_PASS_ORDER_LAST 9 #define BUS_LOCATOR_ACPI "ACPI" #define BUS_LOCATOR_FREEBSD "FreeBSD" #define BUS_LOCATOR_UEFI "UEFI" #define BUS_LOCATOR_OFW "OFW" extern int bus_current_pass; void bus_set_pass(int pass); /** * Routines to lock / unlock the newbus lock. * Must be taken out to interact with newbus. */ void bus_topo_lock(void); void bus_topo_unlock(void); struct mtx * bus_topo_mtx(void); void bus_topo_assert(void); /** * Shorthands for constructing method tables. */ #define DEVMETHOD KOBJMETHOD #define DEVMETHOD_END KOBJMETHOD_END /* * Some common device interfaces. */ #include "device_if.h" #include "bus_if.h" struct module; int driver_module_handler(struct module *, int, void *); /** * Module support for automatically adding drivers to buses. 
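These are the macros the drivers earlier in this change use to register themselves; compare DRIVER_MODULE(iobus, ofwbus, ...) with the EARLY_DRIVER_MODULE(unin, ofwbus, ..., BUS_PASS_BUS) registration of the UniNorth bridge. A sketch for a hypothetical driver (the parent bus name is illustrative):

DRIVER_MODULE(foo, simplebus, foo_driver, NULL, NULL);
/* a bus needed before the default pass registers early instead: */
EARLY_DRIVER_MODULE(foo, ofwbus, foo_driver, NULL, NULL, BUS_PASS_BUS);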
/**
 * Module support for automatically adding drivers to buses.
 */
struct driver_module_data {
	int		(*dmd_chainevh)(struct module *, int, void *);
	void		*dmd_chainarg;
	const char	*dmd_busname;
	kobj_class_t	dmd_driver;
	devclass_t	*dmd_devclass;
	int		dmd_pass;
};

#define	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg,	\
    order, pass)							\
									\
static struct driver_module_data name##_##busname##_driver_mod = {	\
	evh, arg,							\
	#busname,							\
	(kobj_class_t) &driver,						\
	NULL,								\
	pass								\
};									\
									\
static moduledata_t name##_##busname##_mod = {				\
	#busname "/" #name,						\
	driver_module_handler,						\
	&name##_##busname##_driver_mod					\
};									\
DECLARE_MODULE(name##_##busname, name##_##busname##_mod,		\
    SI_SUB_DRIVERS, order)

#define	EARLY_DRIVER_MODULE(name, busname, driver, evh, arg, pass)	\
	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg,	\
	    SI_ORDER_MIDDLE, pass)

#define	DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg, order)	\
	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, evh, arg,	\
	    order, BUS_PASS_DEFAULT)

#define	DRIVER_MODULE(name, busname, driver, evh, arg)			\
	EARLY_DRIVER_MODULE(name, busname, driver, evh, arg,		\
	    BUS_PASS_DEFAULT)

/**
 * Generic ivar accessor generation macros for bus drivers
 */
#define	__BUS_ACCESSOR(varp, var, ivarp, ivar, type)			\
									\
static __inline type varp ## _get_ ## var(device_t dev)		\
{									\
	uintptr_t v;							\
	int e __diagused;						\
	e = BUS_READ_IVAR(device_get_parent(dev), dev,			\
	    ivarp ## _IVAR_ ## ivar, &v);				\
	KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d",	\
	    __func__, device_get_nameunit(dev),				\
	    device_get_nameunit(device_get_parent(dev)), e));		\
	return ((type) v);						\
}									\
									\
static __inline void varp ## _set_ ## var(device_t dev, type t)	\
{									\
	uintptr_t v = (uintptr_t) t;					\
	int e __diagused;						\
	e = BUS_WRITE_IVAR(device_get_parent(dev), dev,			\
	    ivarp ## _IVAR_ ## ivar, v);				\
	KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d",	\
	    __func__, device_get_nameunit(dev),				\
	    device_get_nameunit(device_get_parent(dev)), e));		\
}

struct device_location_cache;
typedef struct device_location_cache device_location_cache_t;
device_location_cache_t *dev_wired_cache_init(void);
void	dev_wired_cache_fini(device_location_cache_t *dcp);
bool	dev_wired_cache_match(device_location_cache_t *dcp, device_t dev,
	    const char *at);
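/*
 * (Editor's illustration.)  __BUS_ACCESSOR() is typically wrapped by a
 * bus-specific macro, as sys/x86/include/legacyvar.h does later in this
 * patch.  A hypothetical "foo" bus exporting one instance variable might
 * generate typed accessors like this:
 */
#if 0	/* example only */
enum foo_device_ivars {
	FOO_IVAR_PORT,		/* hypothetical ivar */
};
__BUS_ACCESSOR(foo, port, FOO, PORT, int)
/* ...which yields foo_get_port(dev) and foo_set_port(dev, v). */
#endif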
/**
 * Shorthand macros, taking resource argument
 * Generated with sys/tools/bus_macro.sh
 */
#define	bus_barrier(r, o, l, f) \
	bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f))
#define	bus_poke_1(r, o, v) \
	bus_space_poke_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_1(r, o, vp) \
	bus_space_peek_1((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_1(r, o) \
	bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_1(r, o, d, c) \
	bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_1(r, o, d, c) \
	bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_1(r, o, v, c) \
	bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_1(r, o, v, c) \
	bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_1(r, o, v) \
	bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_1(r, o, d, c) \
	bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_1(r, o, d, c) \
	bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_1(r, o) \
	bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_1(r, o, d, c) \
	bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_1(r, o, d, c) \
	bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_1(r, o, v, c) \
	bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_1(r, o, v, c) \
	bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_1(r, o, v) \
	bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_1(r, o, d, c) \
	bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_1(r, o, d, c) \
	bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_2(r, o, v) \
	bus_space_poke_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_2(r, o, vp) \
	bus_space_peek_2((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_2(r, o) \
	bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_2(r, o, d, c) \
	bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_2(r, o, d, c) \
	bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_2(r, o, v, c) \
	bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_2(r, o, v, c) \
	bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_2(r, o, v) \
	bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_2(r, o, d, c) \
	bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_2(r, o, d, c) \
	bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_2(r, o) \
	bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_2(r, o, d, c) \
	bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_2(r, o, d, c) \
	bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_2(r, o, v, c) \
	bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_2(r, o, v, c) \
	bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_2(r, o, v) \
	bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_2(r, o, d, c) \
	bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_2(r, o, d, c) \
	bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_4(r, o, v) \
	bus_space_poke_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_4(r, o, vp) \
	bus_space_peek_4((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_4(r, o) \
	bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_4(r, o, d, c) \
	bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_4(r, o, d, c) \
	bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_4(r, o, v, c) \
	bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_4(r, o, v, c) \
	bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_4(r, o, v) \
	bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_4(r, o, d, c) \
	bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_4(r, o, d, c) \
	bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_4(r, o) \
	bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_4(r, o, d, c) \
	bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_4(r, o, d, c) \
	bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_4(r, o, v, c) \
	bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_4(r, o, v, c) \
	bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_4(r, o, v) \
	bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_4(r, o, d, c) \
	bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_4(r, o, d, c) \
	bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_8(r, o, v) \
	bus_space_poke_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_8(r, o, vp) \
	bus_space_peek_8((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_8(r, o) \
	bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_8(r, o, d, c) \
	bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_8(r, o, d, c) \
	bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_8(r, o, v, c) \
	bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_8(r, o, v, c) \
	bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_8(r, o, v) \
	bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_8(r, o, d, c) \
	bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_8(r, o, d, c) \
	bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_8(r, o) \
	bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_8(r, o, d, c) \
	bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_8(r, o, d, c) \
	bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_8(r, o, v, c) \
	bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_8(r, o, v, c) \
	bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_8(r, o, v) \
	bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_8(r, o, d, c) \
	bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_8(r, o, d, c) \
	bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#endif /* _KERNEL */

#endif /* !_SYS_BUS_H_ */
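A quick illustration of the shorthand macros above (editor's sketch;
`sc->mem_res` and the register offsets are invented): a driver accesses its
memory-mapped registers through the struct resource it allocated, instead of
carrying a raw bus tag/handle pair around:

	/* Hypothetical MMIO access via an allocated SYS_RES_MEMORY resource. */
	uint32_t v;

	v = bus_read_4(sc->mem_res, 0x10);		/* assumed status register */
	bus_write_4(sc->mem_res, 0x14, v | 0x1);	/* assumed control register */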
diff --git a/sys/x86/include/legacyvar.h b/sys/x86/include/legacyvar.h
index 4f05315c3949..919650db6d71 100644
--- a/sys/x86/include/legacyvar.h
+++ b/sys/x86/include/legacyvar.h
@@ -1,75 +1,75 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _X86_LEGACYVAR_H_
#define	_X86_LEGACYVAR_H_

enum legacy_device_ivars {
	LEGACY_IVAR_PCIDOMAIN,
	LEGACY_IVAR_PCIBUS,
	LEGACY_IVAR_PCISLOT,
	LEGACY_IVAR_PCIFUNC
};

#define	LEGACY_ACCESSOR(var, ivar, type)				\
    __BUS_ACCESSOR(legacy, var, LEGACY, ivar, type)

LEGACY_ACCESSOR(pcidomain,	PCIDOMAIN,	uint32_t)
LEGACY_ACCESSOR(pcibus,		PCIBUS,		uint32_t)
LEGACY_ACCESSOR(pcislot,	PCISLOT,	int)
LEGACY_ACCESSOR(pcifunc,	PCIFUNC,	int)

#undef LEGACY_ACCESSOR

int	legacy_pcib_maxslots(device_t dev);
uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
int	legacy_pcib_read_ivar(device_t dev, device_t child, int which,
    uintptr_t *result);
void	legacy_pcib_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t data, int bytes);
int	legacy_pcib_write_ivar(device_t dev, device_t child, int which,
    uintptr_t value);
struct resource *legacy_pcib_alloc_resource(device_t dev, device_t child,
    int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
    u_int flags);
int	legacy_pcib_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end);
-int	legacy_pcib_release_resource(device_t dev, device_t child, int type,
-    int rid, struct resource *r);
+int	legacy_pcib_release_resource(device_t dev, device_t child,
+    struct resource *r);
int	legacy_pcib_activate_resource(device_t dev, device_t child,
    struct resource *r);
int	legacy_pcib_deactivate_resource(device_t dev, device_t child,
    struct resource *r);
int	legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count,
    int maxcount, int *irqs);
int	legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq);
int	legacy_pcib_map_msi(device_t pcib, device_t dev, int irq,
    uint64_t *addr, uint32_t *data);

#endif /* !_X86_LEGACYVAR_H_ */
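The signature change above propagates to every caller of the release path.
A hedged sketch of what a consumer looks like after this change (the softc
fields and the "foo" driver are hypothetical; the two-argument call follows
the conversions made throughout this patch):

	/* Hypothetical detach path: the resource now identifies itself. */
	static int
	foo_detach(device_t dev)
	{
		struct foo_softc *sc = device_get_softc(dev);

		bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
		bus_release_resource(dev, sc->irq_res);	/* no type/rid args */
		bus_release_resource(dev, sc->mem_res);
		return (0);
	}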
diff --git a/sys/x86/isa/isa.c b/sys/x86/isa/isa.c
index 10da0fac0072..8e13f466a8e0 100644
--- a/sys/x86/isa/isa.c
+++ b/sys/x86/isa/isa.c
@@ -1,150 +1,149 @@
/*-
 * SPDX-License-Identifier: (BSD-2-Clause AND ISC)
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include

/*-
 * Modifications for Intel architecture by Garrett A. Wollman.
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

void
isa_init(device_t dev)
{
}

/*
 * This implementation simply passes the request up to the parent
 * bus, which in our case is the special i386 nexus, substituting any
 * configured values if the caller defaulted.  We can get away with
 * this because there is no special mapping for ISA resources on an Intel
 * platform.  When porting this code to another architecture, it may be
 * necessary to interpose a mapping layer here.
 */
struct resource *
isa_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	/*
	 * Consider adding a resource definition.
	 */
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
	struct isa_device* idev = DEVTOISA(child);
	struct resource_list *rl = &idev->id_resources;
	struct resource_list_entry *rle;

	if (!passthrough && !isdefault) {
		rle = resource_list_find(rl, type, *rid);
		if (!rle) {
			if (*rid < 0)
				return 0;
			switch (type) {
			case SYS_RES_IRQ:
				if (*rid >= ISA_NIRQ)
					return 0;
				break;
			case SYS_RES_DRQ:
				if (*rid >= ISA_NDRQ)
					return 0;
				break;
			case SYS_RES_MEMORY:
				if (*rid >= ISA_NMEM)
					return 0;
				break;
			case SYS_RES_IOPORT:
				if (*rid >= ISA_NPORT)
					return 0;
				break;
			default:
				return 0;
			}
			resource_list_add(rl, type, *rid, start, end, count);
		}
	}

	return resource_list_alloc(rl, bus, child, type, rid,
	    start, end, count, flags);
}

int
-isa_release_resource(device_t bus, device_t child, int type, int rid,
-    struct resource *r)
+isa_release_resource(device_t bus, device_t child, struct resource *r)
{
	struct isa_device* idev = DEVTOISA(child);
	struct resource_list *rl = &idev->id_resources;

-	return resource_list_release(rl, bus, child, type, rid, r);
+	return resource_list_release(rl, bus, child, r);
}

/*
 * On this platform, isa can also attach to the legacy bus.
 */
DRIVER_MODULE(isa, legacy, isa_driver, 0, 0);

/*
 * Attach the ISA bus to the xenpv bus in order to get syscons.
 */
DRIVER_MODULE(isa, xenpv, isa_driver, 0, 0);
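isa_alloc_resource() above fills the child's resource_list on demand when a
caller passes explicit ranges; device hints are the other common source of
such values.  A hedged sketch of reading a hint directly with
resource_int_value(), declared earlier in sys/sys/bus.h (the driver name
"foo" and the hint name "port" are hypothetical):

	/* Corresponds to a hint such as: hint.foo.0.port="0x300" */
	int port;

	if (resource_int_value("foo", 0, "port", &port) == 0)
		printf("foo0: configured port 0x%x\n", port);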
diff --git a/sys/x86/pci/pci_bus.c b/sys/x86/pci/pci_bus.c
index 67be4d61fa4a..4ac3e904f26a 100644
--- a/sys/x86/pci/pci_bus.c
+++ b/sys/x86/pci/pci_bus.c
@@ -1,775 +1,774 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, Stefan Esser
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
#include "opt_cpu.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef CPU_ELAN
#include
#endif
#include
#include
#include
#include "pcib_if.h"

int
legacy_pcib_maxslots(device_t dev)
{
	return 31;
}

/* read configuration space register */
uint32_t
legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	return(pci_cfgregread(0, bus, slot, func, reg, bytes));
}

/* write configuration space register */
void
legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, uint32_t data, int bytes)
{
	pci_cfgregwrite(0, bus, slot, func, reg, data, bytes);
}

/* route interrupt */
static int
legacy_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
{
#ifdef __HAVE_PIR
	return (pci_pir_route_interrupt(pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev), pin));
#else
	/* No routing possible */
	return (PCI_INVALID_IRQ);
#endif
}

/* Pass MSI requests up to the nexus. */
int
legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
    int *irqs)
{
	device_t bus;

	bus = device_get_parent(pcib);
	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
	    irqs));
}

int
legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
{
	device_t bus;

	bus = device_get_parent(pcib);
	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
}

int
legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{
	device_t bus, hostb;
	int error, func, slot;

	bus = device_get_parent(pcib);
	error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data);
	if (error)
		return (error);

	slot = legacy_get_pcislot(pcib);
	func = legacy_get_pcifunc(pcib);
	if (slot == -1 || func == -1)
		return (0);
	hostb = pci_find_bsf(0, slot, func);
	KASSERT(hostb != NULL, ("%s: missing hostb for 0:%d:%d", __func__,
	    slot, func));
	pci_ht_map_msi(hostb, *addr);
	return (0);
}
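/*
 * (Editor's illustration.)  The config-space helpers above are thin
 * wrappers around pci_cfgregread()/pci_cfgregwrite().  Reading a device's
 * vendor/device ID word might look like this (bus/slot values and the dev
 * variable are hypothetical; an all-ones word means "no device", as the
 * identify routine below also assumes):
 */
#if 0	/* example only */
	uint32_t id;

	id = legacy_pcib_read_config(dev, 0 /* bus */, 10 /* slot */,
	    0 /* func */, PCIR_DEVVENDOR, 4);
	if (id != 0xffffffff)
		printf("found device, id=0x%08x\n", id);
#endif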
static const char *
legacy_pcib_is_host_bridge(int bus, int slot, int func,
    uint32_t id, uint8_t class, uint8_t subclass, uint8_t *busnum)
{
#ifdef __i386__
	const char *s = NULL;
	static uint8_t pxb[4];	/* hack for 450nx */

	*busnum = 0;

	switch (id) {
	case 0x12258086:
		s = "Intel 824?? host to PCI bridge";
		/* XXX This is a guess */
		/* *busnum = legacy_pcib_read_config(0, bus, slot, func,
		    0x41, 1); */
		*busnum = bus;
		break;
	case 0x71208086:
		s = "Intel 82810 (i810 GMCH) Host To Hub bridge";
		break;
	case 0x71228086:
		s = "Intel 82810-DC100 (i810-DC100 GMCH) Host To Hub bridge";
		break;
	case 0x71248086:
		s = "Intel 82810E (i810E GMCH) Host To Hub bridge";
		break;
	case 0x11308086:
		s = "Intel 82815 (i815 GMCH) Host To Hub bridge";
		break;
	case 0x71808086:
		s = "Intel 82443LX (440 LX) host to PCI bridge";
		break;
	case 0x71908086:
		s = "Intel 82443BX (440 BX) host to PCI bridge";
		break;
	case 0x71928086:
		s = "Intel 82443BX host to PCI bridge (AGP disabled)";
		break;
	case 0x71948086:
		s = "Intel 82443MX host to PCI bridge";
		break;
	case 0x71a08086:
		s = "Intel 82443GX host to PCI bridge";
		break;
	case 0x71a18086:
		s = "Intel 82443GX host to AGP bridge";
		break;
	case 0x71a28086:
		s = "Intel 82443GX host to PCI bridge (AGP disabled)";
		break;
	case 0x84c48086:
		s = "Intel 82454KX/GX (Orion) host to PCI bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x4a, 1);
		break;
	case 0x84ca8086:
		/*
		 * For the 450nx chipset, there is a whole bundle of
		 * things pretending to be host bridges.  The MIOC will
		 * be seen first and isn't really a pci bridge (the
		 * actual buses are attached to the PXB's).  We need to
		 * read the registers of the MIOC to figure out the
		 * bus numbers for the PXB channels.
		 *
		 * Since the MIOC doesn't have a pci bus attached, we
		 * pretend it wasn't there.
		 */
		pxb[0] = legacy_pcib_read_config(0, bus, slot, func,
		    0xd0, 1);		/* BUSNO[0] */
		pxb[1] = legacy_pcib_read_config(0, bus, slot, func,
		    0xd1, 1) + 1;	/* SUBA[0]+1 */
		pxb[2] = legacy_pcib_read_config(0, bus, slot, func,
		    0xd3, 1);		/* BUSNO[1] */
		pxb[3] = legacy_pcib_read_config(0, bus, slot, func,
		    0xd4, 1) + 1;	/* SUBA[1]+1 */
		return NULL;
	case 0x84cb8086:
		switch (slot) {
		case 0x12:
			s = "Intel 82454NX PXB#0, Bus#A";
			*busnum = pxb[0];
			break;
		case 0x13:
			s = "Intel 82454NX PXB#0, Bus#B";
			*busnum = pxb[1];
			break;
		case 0x14:
			s = "Intel 82454NX PXB#1, Bus#A";
			*busnum = pxb[2];
			break;
		case 0x15:
			s = "Intel 82454NX PXB#1, Bus#B";
			*busnum = pxb[3];
			break;
		}
		break;
	case 0x1A308086:
		s = "Intel 82845 Host to PCI bridge";
		break;

	/* AMD -- vendor 0x1022 */
	case 0x30001022:
		s = "AMD Elan SC520 host to PCI bridge";
#ifdef CPU_ELAN
		init_AMD_Elan_sc520();
#else
		printf(
"*** WARNING: missing CPU_ELAN -- timekeeping may be wrong\n");
#endif
		break;
	case 0x70061022:
		s = "AMD-751 host to PCI bridge";
		break;
	case 0x700e1022:
		s = "AMD-761 host to PCI bridge";
		break;

	/* SiS -- vendor 0x1039 */
	case 0x04961039:
		s = "SiS 85c496";
		break;
	case 0x04061039:
		s = "SiS 85c501";
		break;
	case 0x06011039:
		s = "SiS 85c601";
		break;
	case 0x55911039:
		s = "SiS 5591 host to PCI bridge";
		break;
	case 0x00011039:
		s = "SiS 5591 host to AGP bridge";
		break;

	/* VLSI -- vendor 0x1004 */
	case 0x00051004:
		s = "VLSI 82C592 Host to PCI bridge";
		break;

	/* XXX Here is MVP3, I got the datasheet but NO M/B to test it  */
	/* totally. Please let me know if anything wrong.            -F */
	/* XXX need info on the MVP3 -- any takers? */
	case 0x05981106:
		s = "VIA 82C598MVP (Apollo MVP3) host bridge";
		break;

	/* AcerLabs -- vendor 0x10b9 */
	/* Funny : The datasheet told me vendor id is "10b8",sub-vendor */
	/* id is '10b9" but the register always shows "10b9". -Foxfair  */
	case 0x154110b9:
		s = "AcerLabs M1541 (Aladdin-V) PCI host bridge";
		break;

	/* OPTi -- vendor 0x1045 */
	case 0xc7011045:
		s = "OPTi 82C700 host to PCI bridge";
		break;
	case 0xc8221045:
		s = "OPTi 82C822 host to PCI Bridge";
		break;

	/* ServerWorks -- vendor 0x1166 */
	case 0x00051166:
		s = "ServerWorks NB6536 2.0HE host to PCI bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;
	case 0x00061166:
		/* FALLTHROUGH */
	case 0x00081166:
		/* FALLTHROUGH */
	case 0x02011166:
		/* FALLTHROUGH */
	case 0x010f1014: /* IBM re-badged ServerWorks chipset */
		s = "ServerWorks host to PCI bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;
	case 0x00091166:
		s = "ServerWorks NB6635 3.0LE host to PCI bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;
	case 0x00101166:
		s = "ServerWorks CIOB30 host to PCI bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;
	case 0x00111166:
		/* FALLTHROUGH */
	case 0x03021014: /* IBM re-badged ServerWorks chipset */
		s = "ServerWorks CMIC-HE host to PCI-X bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;

	/* XXX unknown chipset, but working */
	case 0x00171166:
		/* FALLTHROUGH */
	case 0x01011166:
	case 0x01101166:
	case 0x02251166:
		s = "ServerWorks host to PCI bridge(unknown chipset)";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1);
		break;

	/* Compaq/HP -- vendor 0x0e11 */
	case 0x60100e11:
		s = "Compaq/HP Model 6010 HotPlug PCI Bridge";
		*busnum = legacy_pcib_read_config(0, bus, slot, func, 0xc8, 1);
		break;

	/* Integrated Micro Solutions -- vendor 0x10e0 */
	case 0x884910e0:
		s = "Integrated Micro Solutions VL Bridge";
		break;

	default:
		if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST)
			s = "Host to PCI bridge";
		break;
	}

	return s;
#else
	const char *s = NULL;

	*busnum = 0;
	if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST)
		s = "Host to PCI bridge";
	return s;
#endif
}

/*
 * Scan the first pci bus for host-pci bridges and add pcib instances
 * to the nexus for each bridge.
 */
static void
legacy_pcib_identify(driver_t *driver, device_t parent)
{
	int bus, slot, func;
	uint8_t hdrtype;
	int found = 0;
	int pcifunchigh;
	int found824xx = 0;
	int found_orion = 0;
	device_t child;
	devclass_t pci_devclass;

	if (pci_cfgregopen() == 0)
		return;

	/*
	 * Check to see if we haven't already had a PCI bus added
	 * via some other means.  If we have, bail since otherwise
	 * we're going to end up duplicating it.
	 */
	if ((pci_devclass = devclass_find("pci")) &&
	    devclass_get_device(pci_devclass, 0))
		return;

	bus = 0;
 retry:
	for (slot = 0; slot <= PCI_SLOTMAX; slot++) {
		func = 0;
		hdrtype = legacy_pcib_read_config(0, bus, slot, func,
		    PCIR_HDRTYPE, 1);
		/*
		 * When enumerating bus devices, the standard says that
		 * one should check the header type and ignore slots whose
		 * header types the software doesn't know about.  We use
		 * this to filter out devices.
		 */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		if ((hdrtype & PCIM_MFDEV) &&
		    (!found_orion || hdrtype != 0xff))
			pcifunchigh = PCI_FUNCMAX;
		else
			pcifunchigh = 0;
		for (func = 0; func <= pcifunchigh; func++) {
			/*
			 * Read the IDs and class from the device.
			 */
			uint32_t id;
			uint8_t class, subclass, busnum;
			const char *s;
			device_t *devs;
			int ndevs, i;

			id = legacy_pcib_read_config(0, bus, slot, func,
			    PCIR_DEVVENDOR, 4);
			if (id == -1)
				continue;
			class = legacy_pcib_read_config(0, bus, slot, func,
			    PCIR_CLASS, 1);
			subclass = legacy_pcib_read_config(0, bus, slot, func,
			    PCIR_SUBCLASS, 1);

			s = legacy_pcib_is_host_bridge(bus, slot, func, id,
			    class, subclass, &busnum);
			if (s == NULL)
				continue;

			/*
			 * Check to see if the physical bus has already
			 * been seen.  Eg: hybrid 32 and 64 bit host
			 * bridges to the same logical bus.
			 */
			if (device_get_children(parent, &devs, &ndevs) == 0) {
				for (i = 0; s != NULL && i < ndevs; i++) {
					if (strcmp(device_get_name(devs[i]),
					    "pcib") != 0)
						continue;
					if (legacy_get_pcibus(devs[i]) ==
					    busnum)
						s = NULL;
				}
				free(devs, M_TEMP);
			}

			if (s == NULL)
				continue;

			/*
			 * Add at priority 100 to make sure we
			 * go after any motherboard resources
			 */
			child = BUS_ADD_CHILD(parent, 100, "pcib", busnum);
			device_set_desc(child, s);
			legacy_set_pcibus(child, busnum);
			legacy_set_pcislot(child, slot);
			legacy_set_pcifunc(child, func);

			found = 1;
			if (id == 0x12258086)
				found824xx = 1;
			if (id == 0x84c48086)
				found_orion = 1;
		}
	}
	if (found824xx && bus == 0) {
		bus++;
		goto retry;
	}

	/*
	 * Make sure we add at least one bridge since some old
	 * hardware doesn't actually have a host-pci bridge device.
	 * Note that pci_cfgregopen() thinks we have PCI devices.
	 */
	if (!found) {
#ifndef NO_LEGACY_PCIB
		if (bootverbose)
			printf(
	"legacy_pcib_identify: no bridge found, adding pcib0 anyway\n");
		child = BUS_ADD_CHILD(parent, 100, "pcib", 0);
		legacy_set_pcibus(child, 0);
#endif
	}
}

static int
legacy_pcib_probe(device_t dev)
{

	if (pci_cfgregopen() == 0)
		return ENXIO;
	return -100;
}

static int
legacy_pcib_attach(device_t dev)
{
#ifdef __HAVE_PIR
	device_t pir;
	int bus;

	bus = pcib_get_bus(dev);

	/*
	 * Look for a PCI BIOS interrupt routing table as that will be
	 * our method of routing interrupts if we have one.
	 */
	if (pci_pir_probe(bus, 0)) {
		pir = BUS_ADD_CHILD(device_get_parent(dev), 0, "pir", 0);
		if (pir != NULL)
			device_probe_and_attach(pir);
	}
#endif
	device_add_child(dev, "pci", -1);
	return bus_generic_attach(dev);
}

int
legacy_pcib_read_ivar(device_t dev, device_t child, int which,
    uintptr_t *result)
{

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		*result = 0;
		return 0;
	case PCIB_IVAR_BUS:
		*result = legacy_get_pcibus(dev);
		return 0;
	}
	return ENOENT;
}

int
legacy_pcib_write_ivar(device_t dev, device_t child, int which,
    uintptr_t value)
{

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		return EINVAL;
	case PCIB_IVAR_BUS:
		legacy_set_pcibus(dev, value);
		return 0;
	}
	return ENOENT;
}

/*
 * Helper routine for x86 Host-PCI bridge driver resource allocation.
 * This is used to adjust the start address of wildcard allocation
 * requests to avoid low addresses that are known to be problematic.
 *
 * If no memory preference is given, use the upper 32MB slot that most
 * BIOSes use for their memory window.  This is typically only used on
 * older laptops that don't have PCI buses behind a PCI bridge, so
 * assuming > 32MB is likely OK.
 *
 * However, this can cause problems for other chipsets, so we make
 * this tunable by hw.pci.host_mem_start.
 */
SYSCTL_DECL(_hw_pci);

static unsigned long host_mem_start = 0x80000000;
SYSCTL_ULONG(_hw_pci, OID_AUTO, host_mem_start, CTLFLAG_RDTUN,
    &host_mem_start, 0,
    "Limit the host bridge memory to being above this address.");

rman_res_t
hostb_alloc_start(int type, rman_res_t start, rman_res_t end, rman_res_t count)
{

	if (start + count - 1 != end) {
		if (type == SYS_RES_MEMORY && start < host_mem_start)
			start = host_mem_start;
		if (type == SYS_RES_IOPORT && start < 0x1000)
			start = 0x1000;
	}
	return (start);
}

struct resource *
legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_alloc_bus(0, child, rid, start, end, count,
		    flags));
#endif
	start = hostb_alloc_start(type, start, end, count);
	return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
	    count, flags));
}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
int
legacy_pcib_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{

	if (rman_get_type(r) == PCI_RES_BUS)
		return (pci_domain_adjust_bus(0, child, r, start, end));
	return (bus_generic_adjust_resource(dev, child, r, start, end));
}

int
-legacy_pcib_release_resource(device_t dev, device_t child, int type,
-    int rid, struct resource *r)
+legacy_pcib_release_resource(device_t dev, device_t child, struct resource *r)
{
-	if (type == PCI_RES_BUS)
-		return (pci_domain_release_bus(0, child, rid, r));
-	return (bus_generic_release_resource(dev, child, type, rid, r));
+	if (rman_get_type(r) == PCI_RES_BUS)
+		return (pci_domain_release_bus(0, child, r));
+	return (bus_generic_release_resource(dev, child, r));
}

int
legacy_pcib_activate_resource(device_t dev, device_t child, struct resource *r)
{

	if (rman_get_type(r) == PCI_RES_BUS)
		return (pci_domain_activate_bus(0, child, r));
	return (bus_generic_activate_resource(dev, child, r));
}

int
legacy_pcib_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{

	if (rman_get_type(r) == PCI_RES_BUS)
		return (pci_domain_deactivate_bus(0, child, r));
	return (bus_generic_deactivate_resource(dev, child, r));
}
#endif
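/*
 * (Editor's illustration.)  Worked values for hostb_alloc_start() above,
 * with the default hw.pci.host_mem_start tunable: wildcard requests
 * (start + count - 1 != end) are nudged above the 2GB floor, while an
 * explicit, exact-fit range is left alone:
 *
 *   hostb_alloc_start(SYS_RES_MEMORY, 0, ~0ul, PAGE_SIZE)        -> 0x80000000
 *   hostb_alloc_start(SYS_RES_IOPORT, 0, ~0ul, 16)               -> 0x1000
 *   hostb_alloc_start(SYS_RES_MEMORY, 0xa0000, 0xaffff, 0x10000) -> 0xa0000
 */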
static device_method_t legacy_pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	legacy_pcib_identify),
	DEVMETHOD(device_probe,		legacy_pcib_probe),
	DEVMETHOD(device_attach,	legacy_pcib_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	legacy_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,	legacy_pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,	legacy_pcib_alloc_resource),
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	DEVMETHOD(bus_adjust_resource,	legacy_pcib_adjust_resource),
	DEVMETHOD(bus_release_resource,	legacy_pcib_release_resource),
	DEVMETHOD(bus_activate_resource, legacy_pcib_activate_resource),
	DEVMETHOD(bus_deactivate_resource, legacy_pcib_deactivate_resource),
#else
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
#endif
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	legacy_pcib_maxslots),
	DEVMETHOD(pcib_read_config,	legacy_pcib_read_config),
	DEVMETHOD(pcib_write_config,	legacy_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,	legacy_pcib_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	legacy_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix,	legacy_pcib_alloc_msix),
	DEVMETHOD(pcib_release_msix,	pcib_release_msix),
	DEVMETHOD(pcib_map_msi,		legacy_pcib_map_msi),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, legacy_pcib_driver, legacy_pcib_methods, 1);
DRIVER_MODULE(pcib, legacy, legacy_pcib_driver, 0, 0);

/*
 * Install placeholder to claim the resources owned by the
 * PCI bus interface.  This could be used to extract the
 * config space registers in the extreme case where the PnP
 * ID is available and the PCI BIOS isn't, but for now we just
 * eat the PnP ID and do nothing else.
 *
 * We silence this probe, as it will generally confuse people.
 */
static struct isa_pnp_id pcibus_pnp_ids[] = {
	{ 0x030ad041 /* PNP0A03 */, "PCI Bus" },
	{ 0x080ad041 /* PNP0A08 */, "PCIe Bus" },
	{ 0 }
};

static int
pcibus_pnp_probe(device_t dev)
{
	int result;

	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev,
	    pcibus_pnp_ids)) <= 0)
		device_quiet(dev);
	return(result);
}

static int
pcibus_pnp_attach(device_t dev)
{
	return(0);
}

static device_method_t pcibus_pnp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcibus_pnp_probe),
	DEVMETHOD(device_attach,	pcibus_pnp_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),
	{ 0, 0 }
};

DEFINE_CLASS_0(pcibus_pnp, pcibus_pnp_driver, pcibus_pnp_methods, 1);
DRIVER_MODULE(pcibus_pnp, isa, pcibus_pnp_driver, 0, 0);

#ifdef __HAVE_PIR
/*
 * Provide a PCI-PCI bridge driver for PCI buses behind PCI-PCI bridges
 * that appear in the PCIBIOS Interrupt Routing Table to use the routing
 * table for interrupt routing when possible.
 */
static int	pcibios_pcib_probe(device_t bus);

static device_method_t pcibios_pcib_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcibios_pcib_probe),

	/* pcib interface */
	DEVMETHOD(pcib_route_interrupt,	legacy_pcib_route_interrupt),

	{0, 0}
};

DEFINE_CLASS_1(pcib, pcibios_pcib_driver, pcibios_pcib_pci_methods,
    sizeof(struct pcib_softc), pcib_driver);
DRIVER_MODULE(pcibios_pcib, pci, pcibios_pcib_driver, 0, 0);
ISA_PNP_INFO(pcibus_pnp_ids);

static int
pcibios_pcib_probe(device_t dev)
{
	int bus;

	if ((pci_get_class(dev) != PCIC_BRIDGE) ||
	    (pci_get_subclass(dev) != PCIS_BRIDGE_PCI))
		return (ENXIO);
	bus = pci_read_config(dev, PCIR_SECBUS_1, 1);
	if (bus == 0)
		return (ENXIO);
	if (!pci_pir_probe(bus, 1))
		return (ENXIO);
	device_set_desc(dev, "PCIBIOS PCI-PCI bridge");
	return (-2000);
}
#endif
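The pattern in the hunks above is consistent: the type/rid parameters
disappear from the release path and, where the type still matters, it is
recovered from the resource itself via rman_get_type().  A hedged sketch of
the converted shape for a hypothetical bus driver (the "foo" names and the
teardown hook are invented):

	/* Hypothetical bus driver release method, after the conversion. */
	static int
	foo_release_resource(device_t dev, device_t child, struct resource *r)
	{
		/* The type now travels with the resource. */
		if (rman_get_type(r) == SYS_RES_IRQ)
			foo_irq_teardown_hook(dev, r);	/* hypothetical hook */
		return (bus_generic_release_resource(dev, child, r));
	}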