diff --git a/sys/amd64/acpica/acpi_machdep.c b/sys/amd64/acpica/acpi_machdep.c
index 081fe89537bb..67e71cb2de55 100644
--- a/sys/amd64/acpica/acpi_machdep.c
+++ b/sys/amd64/acpica/acpi_machdep.c
@@ -1,286 +1,286 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Mitsuru IWASAKI
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

int acpi_resume_beep;
SYSCTL_INT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RWTUN,
    &acpi_resume_beep, 0, "Beep the PC speaker when resuming");

int acpi_reset_video;
TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video);

static int intr_model = ACPI_INTR_PIC;

int
acpi_machdep_init(device_t dev)
{
	struct acpi_softc *sc;

	sc = device_get_softc(dev);

	acpi_apm_init(sc);
	acpi_install_wakeup_handler(sc);

	if (intr_model != ACPI_INTR_PIC)
		acpi_SetIntrModel(intr_model);

	SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO,
	    "reset_video", CTLFLAG_RW, &acpi_reset_video, 0,
	    "Call the VESA reset BIOS vector on the resume path");

	return (0);
}

void
acpi_SetDefaultIntrModel(int model)
{

	intr_model = model;
}

int
acpi_machdep_quirks(int *quirks)
{

	return (0);
}

/*
 * Map a table.  First map the header to determine the table length and
 * then map the entire table.
 */
static void *
map_table(vm_paddr_t pa, const char *sig)
{
	ACPI_TABLE_HEADER *header;
	vm_size_t length;
	void *table;

	header = pmap_mapbios(pa, sizeof(ACPI_TABLE_HEADER));
	if (strncmp(header->Signature, sig, ACPI_NAMESEG_SIZE) != 0) {
		pmap_unmapbios(header, sizeof(ACPI_TABLE_HEADER));
		return (NULL);
	}
	length = header->Length;
	pmap_unmapbios(header, sizeof(ACPI_TABLE_HEADER));
	table = pmap_mapbios(pa, length);
	if (ACPI_FAILURE(AcpiUtChecksum(table, length))) {
		if (bootverbose)
			printf("ACPI: Failed checksum for table %s\n", sig);
#if (ACPI_CHECKSUM_ABORT)
		pmap_unmapbios(table, length);
		return (NULL);
#endif
	}
	return (table);
}

/*
 * See if a given ACPI table is the requested table.  Returns 1 if the
 * table's signature matches, 0 otherwise.
*/ static int probe_table(vm_paddr_t address, const char *sig) { ACPI_TABLE_HEADER *table; int ret; table = pmap_mapbios(address, sizeof(ACPI_TABLE_HEADER)); ret = strncmp(table->Signature, sig, ACPI_NAMESEG_SIZE) == 0; pmap_unmapbios(table, sizeof(ACPI_TABLE_HEADER)); return (ret); } /* * Try to map a table at a given physical address previously returned * by acpi_find_table(). */ void * acpi_map_table(vm_paddr_t pa, const char *sig) { return (map_table(pa, sig)); } /* Unmap a table previously mapped via acpi_map_table(). */ void acpi_unmap_table(void *table) { ACPI_TABLE_HEADER *header; header = (ACPI_TABLE_HEADER *)table; pmap_unmapbios(table, header->Length); } /* * Return the physical address of the requested table or zero if one * is not found. */ vm_paddr_t acpi_find_table(const char *sig) { ACPI_PHYSICAL_ADDRESS rsdp_ptr; ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_RSDT *rsdt; ACPI_TABLE_XSDT *xsdt; ACPI_TABLE_HEADER *table; vm_paddr_t addr; int i, count; if (resource_disabled("acpi", 0)) return (0); /* * Map in the RSDP. Since ACPI uses AcpiOsMapMemory() which in turn * calls pmap_mapbios() to find the RSDP, we assume that we can use * pmap_mapbios() to map the RSDP. */ if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0) return (0); rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP)); if (rsdp == NULL) { printf("ACPI: Failed to map RSDP\n"); return (0); } /* * For ACPI >= 2.0, use the XSDT if it is available. * Otherwise, use the RSDT. */ addr = 0; if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) { /* * AcpiOsGetRootPointer only verifies the checksum for * the version 1.0 portion of the RSDP. Version 2.0 has * an additional checksum that we verify first. */ if (AcpiUtChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) { printf("ACPI: RSDP failed extended checksum\n"); pmap_unmapbios(rsdp, sizeof(ACPI_TABLE_RSDP)); return (0); } xsdt = map_table(rsdp->XsdtPhysicalAddress, ACPI_SIG_XSDT); if (xsdt == NULL) { printf("ACPI: Failed to map XSDT\n"); pmap_unmapbios(rsdp, sizeof(ACPI_TABLE_RSDP)); return (0); } count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(UINT64); for (i = 0; i < count; i++) if (probe_table(xsdt->TableOffsetEntry[i], sig)) { addr = xsdt->TableOffsetEntry[i]; break; } acpi_unmap_table(xsdt); } else { rsdt = map_table(rsdp->RsdtPhysicalAddress, ACPI_SIG_RSDT); if (rsdt == NULL) { printf("ACPI: Failed to map RSDT\n"); pmap_unmapbios(rsdp, sizeof(ACPI_TABLE_RSDP)); return (0); } count = (rsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(UINT32); for (i = 0; i < count; i++) if (probe_table(rsdt->TableOffsetEntry[i], sig)) { addr = rsdt->TableOffsetEntry[i]; break; } acpi_unmap_table(rsdt); } pmap_unmapbios(rsdp, sizeof(ACPI_TABLE_RSDP)); if (addr == 0) return (0); /* * Verify that we can map the full table and that its checksum is * correct, etc. */ table = map_table(addr, sig); if (table == NULL) return (0); acpi_unmap_table(table); return (addr); } /* * ACPI nexus(4) driver. 
*/ static int nexus_acpi_probe(device_t dev) { int error; error = acpi_identify(); if (error) return (error); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_acpi_attach(device_t dev) { nexus_init_resources(); - bus_generic_probe(dev); + bus_identify_children(dev); if (BUS_ADD_CHILD(dev, 10, "acpi", 0) == NULL) panic("failed to add acpi0 device"); return (bus_generic_attach(dev)); } static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), { 0, 0 } }; DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, 0, 0); diff --git a/sys/arm/arm/gic_acpi.c b/sys/arm/arm/gic_acpi.c index 6fd6f5271375..f30f51e1a1b7 100644 --- a/sys/arm/arm/gic_acpi.c +++ b/sys/arm/arm/gic_acpi.c @@ -1,360 +1,360 @@ /*- * Copyright (c) 2011,2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Developed by Damjan Marion * * Based on OMAP4 GIC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include struct gic_acpi_devinfo { struct resource_list rl; }; static device_identify_t gic_acpi_identify; static device_probe_t gic_acpi_probe; static device_attach_t gic_acpi_attach; static bus_get_resource_list_t gic_acpi_get_resource_list; static bool arm_gic_add_children(device_t); static device_method_t gic_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, gic_acpi_identify), DEVMETHOD(device_probe, gic_acpi_probe), DEVMETHOD(device_attach, gic_acpi_attach), /* Bus interface */ DEVMETHOD(bus_get_resource_list, gic_acpi_get_resource_list), DEVMETHOD_END, }; DEFINE_CLASS_1(gic, gic_acpi_driver, gic_acpi_methods, sizeof(struct arm_gic_softc), arm_gic_driver); EARLY_DRIVER_MODULE(gic, acpi, gic_acpi_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); struct madt_table_data { device_t parent; ACPI_MADT_GENERIC_DISTRIBUTOR *dist; ACPI_MADT_GENERIC_INTERRUPT *intr[GIC_MAXCPU]; }; static void madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { struct madt_table_data *madt_data; ACPI_MADT_GENERIC_INTERRUPT *intr; madt_data = (struct madt_table_data *)arg; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR: if (madt_data->dist != NULL) { if (bootverbose) device_printf(madt_data->parent, "gic: Already have a distributor table"); } else madt_data->dist = (ACPI_MADT_GENERIC_DISTRIBUTOR *)entry; break; case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; if (intr->CpuInterfaceNumber < GIC_MAXCPU) madt_data->intr[intr->CpuInterfaceNumber] = intr; break; } } static void gic_acpi_identify(driver_t *driver, device_t parent) { struct madt_table_data madt_data; ACPI_MADT_GENERIC_INTERRUPT *intr; ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; device_t dev; int i; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return; madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { device_printf(parent, "gic: Unable to map the MADT\n"); return; } bzero(&madt_data, sizeof(madt_data)); madt_data.parent = parent; madt_data.dist = NULL; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, madt_handler, &madt_data); /* Check the version of the GIC we have */ switch (madt_data.dist->Version) { case ACPI_MADT_GIC_VERSION_NONE: case ACPI_MADT_GIC_VERSION_V1: case ACPI_MADT_GIC_VERSION_V2: break; default: goto out; } intr = NULL; for (i = 0; i < GIC_MAXCPU; i++) { if (madt_data.intr[i] != NULL) { if (intr == NULL) { intr = madt_data.intr[i]; } else if (intr->BaseAddress != madt_data.intr[i]->BaseAddress) { device_printf(parent, "gic: Not all CPU interfaces at the same address, this may fail\n"); } } } if (intr == NULL) { device_printf(parent, "gic: No CPU interfaces found\n"); goto out; } dev = BUS_ADD_CHILD(parent, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE, "gic", -1); if (dev == NULL) { device_printf(parent, "add gic child failed\n"); goto out; } BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 0, madt_data.dist->BaseAddress, 4 * 1024); BUS_SET_RESOURCE(parent, dev, SYS_RES_MEMORY, 1, intr->BaseAddress, 4 * 1024); acpi_set_private(dev, (void *)(uintptr_t)madt_data.dist->Version); out: acpi_unmap_table(madt); } static int gic_acpi_probe(device_t dev) { switch((uintptr_t)acpi_get_private(dev)) { case ACPI_MADT_GIC_VERSION_NONE: case ACPI_MADT_GIC_VERSION_V1: case ACPI_MADT_GIC_VERSION_V2: break; default: return (ENXIO); } device_set_desc(dev, "ARM Generic Interrupt Controller"); 
	return (BUS_PROBE_NOWILDCARD);
}

static int
gic_acpi_attach(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	intptr_t xref;
	int err;

	sc->gic_bus = GIC_BUS_ACPI;

	err = arm_gic_attach(dev);
	if (err != 0)
		return (err);

	xref = ACPI_INTR_XREF;

	/*
	 * Now, when everything is initialized, it's the right time to
	 * register the interrupt controller with the interrupt framework.
	 */
	if (intr_pic_register(dev, xref) == NULL) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup;
	}

	/*
	 * Controller is root:
	 */
	if (intr_pic_claim_root(dev, xref, arm_gic_intr, sc,
	    INTR_ROOT_IRQ) != 0) {
		device_printf(dev, "could not set PIC as a root\n");
		intr_pic_deregister(dev, xref);
		goto cleanup;
	}
#ifdef SMP
	if (intr_ipi_pic_register(dev, 0) != 0) {
		device_printf(dev, "could not register for IPIs\n");
		goto cleanup;
	}
#endif

	/* If we have children, probe and attach them. */
	if (arm_gic_add_children(dev)) {
-		bus_generic_probe(dev);
+		bus_identify_children(dev);
		return (bus_generic_attach(dev));
	}

	return (0);

cleanup:
	arm_gic_detach(dev);
	return (ENXIO);
}

static struct resource_list *
gic_acpi_get_resource_list(device_t bus, device_t child)
{
	struct gic_acpi_devinfo *di;

	di = device_get_ivars(child);
	KASSERT(di != NULL, ("gic_acpi_get_resource_list: No devinfo"));

	return (&di->rl);
}

static void
madt_gicv2m_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	struct arm_gic_softc *sc;
	ACPI_MADT_GENERIC_MSI_FRAME *msi;
	struct gic_acpi_devinfo *dinfo;
	device_t dev, cdev;

	if (entry->Type == ACPI_MADT_TYPE_GENERIC_MSI_FRAME) {
		sc = arg;
		dev = sc->gic_dev;
		msi = (ACPI_MADT_GENERIC_MSI_FRAME *)entry;

		device_printf(dev, "frame: %x %lx %x %u %u\n",
		    msi->MsiFrameId, msi->BaseAddress, msi->Flags,
		    msi->SpiCount, msi->SpiBase);

		cdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
		if (cdev == NULL)
			return;

		dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
		resource_list_init(&dinfo->rl);
		resource_list_add(&dinfo->rl, SYS_RES_MEMORY, 0,
		    msi->BaseAddress, msi->BaseAddress + PAGE_SIZE - 1,
		    PAGE_SIZE);
		device_set_ivars(cdev, dinfo);
	}
}

static bool
arm_gic_add_children(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;

	/* This should return a valid address as it did in gic_acpi_identify */
	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (false);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		device_printf(dev, "gic: Unable to map the MADT\n");
		return (false);
	}

	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_gicv2m_handler, sc);

	acpi_unmap_table(madt);

	return (true);
}

static int
arm_gicv2m_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) > 2)
		return (EINVAL);

	device_set_desc(dev, "ARM Generic Interrupt Controller MSI/MSIX");
	return (BUS_PROBE_DEFAULT);
}

static int
arm_gicv2m_acpi_attach(device_t dev)
{
	struct arm_gicv2m_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_xref = ACPI_MSI_XREF;

	return (arm_gicv2m_attach(dev));
}

static device_method_t arm_gicv2m_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arm_gicv2m_acpi_probe),
	DEVMETHOD(device_attach,	arm_gicv2m_acpi_attach),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_1(gicv2m, arm_gicv2m_acpi_driver, arm_gicv2m_acpi_methods,
    sizeof(struct arm_gicv2m_softc), arm_gicv2m_driver);

EARLY_DRIVER_MODULE(gicv2m_acpi, gic, arm_gicv2m_acpi_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

diff --git a/sys/arm/arm/gic_fdt.c b/sys/arm/arm/gic_fdt.c
index 06a869eaae42..6c5b5be43175
100644 --- a/sys/arm/arm/gic_fdt.c +++ b/sys/arm/arm/gic_fdt.c @@ -1,364 +1,364 @@ /*- * Copyright (c) 2011,2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Developed by Damjan Marion * * Based on OMAP4 GIC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include struct arm_gic_devinfo { struct ofw_bus_devinfo obdinfo; struct resource_list rl; }; struct arm_gic_fdt_softc { struct arm_gic_softc base; pcell_t addr_cells; pcell_t size_cells; }; static device_probe_t gic_fdt_probe; static device_attach_t gic_fdt_attach; static ofw_bus_get_devinfo_t gic_ofw_get_devinfo; static bus_get_resource_list_t gic_fdt_get_resource_list; static bool arm_gic_add_children(device_t); static struct ofw_compat_data compat_data[] = { {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. 
*/ {"arm,gic-400", true}, {"arm,cortex-a15-gic", true}, {"arm,cortex-a9-gic", true}, {"arm,cortex-a7-gic", true}, {"arm,arm11mp-gic", true}, {"brcm,brahma-b15-gic", true}, {"qcom,msm-qgic2", true}, {NULL, false} }; static device_method_t gic_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gic_fdt_probe), DEVMETHOD(device_attach, gic_fdt_attach), /* Bus interface */ DEVMETHOD(bus_get_resource_list,gic_fdt_get_resource_list), DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, gic_ofw_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END, }; DEFINE_CLASS_1(gic, gic_fdt_driver, gic_fdt_methods, sizeof(struct arm_gic_fdt_softc), arm_gic_driver); EARLY_DRIVER_MODULE(gic, simplebus, gic_fdt_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); EARLY_DRIVER_MODULE(gic, ofwbus, gic_fdt_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); static int gic_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ARM Generic Interrupt Controller"); return (BUS_PROBE_DEFAULT); } static int gic_fdt_attach(device_t dev) { struct arm_gic_fdt_softc *sc = device_get_softc(dev); phandle_t pxref; intptr_t xref; int err; sc->base.gic_bus = GIC_BUS_FDT; err = arm_gic_attach(dev); if (err != 0) return (err); xref = OF_xref_from_node(ofw_bus_get_node(dev)); /* * Now, when everything is initialized, it's right time to * register interrupt controller to interrupt framefork. 
	 */
	if (intr_pic_register(dev, xref) == NULL) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup;
	}

	/*
	 * The controller is root if:
	 * - it has no interrupt parent, or
	 * - its interrupt parent is this controller
	 */
	pxref = ofw_bus_find_iparent(ofw_bus_get_node(dev));
	if (pxref == 0 || xref == pxref) {
		if (intr_pic_claim_root(dev, xref, arm_gic_intr, sc,
		    INTR_ROOT_IRQ) != 0) {
			device_printf(dev, "could not set PIC as a root\n");
			intr_pic_deregister(dev, xref);
			goto cleanup;
		}
#ifdef SMP
		if (intr_ipi_pic_register(dev, 0) != 0) {
			device_printf(dev, "could not register for IPIs\n");
			goto cleanup;
		}
#endif
	} else {
		if (sc->base.gic_res[2] == NULL) {
			device_printf(dev,
			    "not root PIC must have defined interrupt\n");
			intr_pic_deregister(dev, xref);
			goto cleanup;
		}
		if (bus_setup_intr(dev, sc->base.gic_res[2], INTR_TYPE_CLK,
		    arm_gic_intr, NULL, sc, &sc->base.gic_intrhand)) {
			device_printf(dev, "could not setup irq handler\n");
			intr_pic_deregister(dev, xref);
			goto cleanup;
		}
	}

	OF_device_register_xref(xref, dev);

	/* If we have children, probe and attach them. */
	if (arm_gic_add_children(dev)) {
-		bus_generic_probe(dev);
+		bus_identify_children(dev);
		return (bus_generic_attach(dev));
	}

	return (0);

cleanup:
	arm_gic_detach(dev);
	return (ENXIO);
}

static struct resource_list *
gic_fdt_get_resource_list(device_t bus, device_t child)
{
	struct arm_gic_devinfo *di;

	di = device_get_ivars(child);
	KASSERT(di != NULL, ("gic_fdt_get_resource_list: No devinfo"));

	return (&di->rl);
}

static int
arm_gic_fill_ranges(phandle_t node, struct arm_gic_fdt_softc *sc)
{
	pcell_t host_cells;
	cell_t *base_ranges;
	ssize_t nbase_ranges;
	int i, j, k;

	host_cells = 1;
	OF_getencprop(OF_parent(node), "#address-cells", &host_cells,
	    sizeof(host_cells));
	sc->addr_cells = 2;
	OF_getencprop(node, "#address-cells", &sc->addr_cells,
	    sizeof(sc->addr_cells));
	sc->size_cells = 2;
	OF_getencprop(node, "#size-cells", &sc->size_cells,
	    sizeof(sc->size_cells));

	nbase_ranges = OF_getproplen(node, "ranges");
	if (nbase_ranges < 0)
		return (-1);
	sc->base.nranges = nbase_ranges / sizeof(cell_t) /
	    (sc->addr_cells + host_cells + sc->size_cells);
	if (sc->base.nranges == 0)
		return (0);

	sc->base.ranges = malloc(sc->base.nranges * sizeof(sc->base.ranges[0]),
	    M_DEVBUF, M_WAITOK);
	base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->base.nranges; i++) {
		sc->base.ranges[i].bus = 0;
		for (k = 0; k < sc->addr_cells; k++) {
			sc->base.ranges[i].bus <<= 32;
			sc->base.ranges[i].bus |= base_ranges[j++];
		}
		sc->base.ranges[i].host = 0;
		for (k = 0; k < host_cells; k++) {
			sc->base.ranges[i].host <<= 32;
			sc->base.ranges[i].host |= base_ranges[j++];
		}
		sc->base.ranges[i].size = 0;
		for (k = 0; k < sc->size_cells; k++) {
			sc->base.ranges[i].size <<= 32;
			sc->base.ranges[i].size |= base_ranges[j++];
		}
	}

	free(base_ranges, M_DEVBUF);
	return (sc->base.nranges);
}

static bool
arm_gic_add_children(device_t dev)
{
	struct arm_gic_fdt_softc *sc;
	struct arm_gic_devinfo *dinfo;
	phandle_t child, node;
	device_t cdev;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	/* If we have no children don't probe for them */
	child = OF_child(node);
	if (child == 0)
		return (false);

	if (arm_gic_fill_ranges(node, sc) < 0) {
		device_printf(dev, "Have a child, but no ranges\n");
		return (false);
	}

	for (; child != 0; child = OF_peer(child)) {
		dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
		if (ofw_bus_gen_setup_devinfo(&dinfo->obdinfo, child) != 0) {
			free(dinfo, M_DEVBUF);
			continue;
		}
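		/*
		 * Describe the child: translate its "reg" property into a
		 * resource list using this bus's #address-cells/#size-cells.
		 */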
resource_list_init(&dinfo->rl); ofw_bus_reg_to_rl(dev, child, sc->addr_cells, sc->size_cells, &dinfo->rl); cdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->obdinfo.obd_name); resource_list_free(&dinfo->rl); ofw_bus_gen_destroy_devinfo(&dinfo->obdinfo); free(dinfo, M_DEVBUF); continue; } device_set_ivars(cdev, dinfo); } return (true); } static const struct ofw_bus_devinfo * gic_ofw_get_devinfo(device_t bus __unused, device_t child) { struct arm_gic_devinfo *di; di = device_get_ivars(child); return (&di->obdinfo); } static struct ofw_compat_data gicv2m_compat_data[] = { {"arm,gic-v2m-frame", true}, {NULL, false} }; static int arm_gicv2m_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, gicv2m_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "ARM Generic Interrupt Controller MSI/MSIX"); return (BUS_PROBE_DEFAULT); } static int arm_gicv2m_fdt_attach(device_t dev) { struct arm_gicv2m_softc *sc; sc = device_get_softc(dev); sc->sc_xref = OF_xref_from_node(ofw_bus_get_node(dev)); return (arm_gicv2m_attach(dev)); } static device_method_t arm_gicv2m_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arm_gicv2m_fdt_probe), DEVMETHOD(device_attach, arm_gicv2m_fdt_attach), /* End */ DEVMETHOD_END }; DEFINE_CLASS_1(gicv2m, arm_gicv2m_fdt_driver, arm_gicv2m_fdt_methods, sizeof(struct arm_gicv2m_softc), arm_gicv2m_driver); EARLY_DRIVER_MODULE(gicv2m, gic, arm_gicv2m_fdt_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c index 8274a792839d..06f599652698 100644 --- a/sys/arm/arm/nexus.c +++ b/sys/arm/arm/nexus.c @@ -1,431 +1,431 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. 
In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static device_probe_t nexus_probe; static device_attach_t nexus_attach; static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_deactivate_resource_t nexus_deactivate_resource; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; static bus_get_dma_tag_t nexus_get_dma_tag; #ifdef FDT static ofw_bus_map_intr_t nexus_ofw_map_intr; #endif /* * Normally NULL (which results in defaults which are handled in * busdma_machdep), platform init code can use nexus_set_dma_tag() to set this * to a tag that will be inherited by all busses and devices on the platform. */ static bus_dma_tag_t nexus_dma_tag; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rman_alloc_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD(bus_get_dma_tag, nexus_get_dma_tag), #ifdef FDT /* ofw_bus interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif DEVMETHOD_END }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus, root, nexus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_EARLY); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_probe mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) 
|| rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); /* First, add ofwbus0. */ device_add_child(dev, "ofwbus", 0); /* * Next, deal with the children we know about already. */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { #ifdef FDT return (fdtbus_bs_tag); #else return ((void *)1); #endif } static bus_dma_tag_t nexus_get_dma_tag(device_t dev, device_t child) { return (nexus_dma_tag); } void nexus_set_dma_tag(bus_dma_tag_t tag) { nexus_dma_tag = tag; } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { int ret = ENODEV; device_printf(dev, "bus_config_intr is obsolete and not supported!\n"); ret = EOPNOTSUPP; return (ret); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; return (intr_setup_irq(child, res, filt, intr, arg, flags, cookiep)); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif static int nexus_activate_resource(device_t bus, device_t child, struct resource *r) { int err; switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_activate_resource(bus, child, r)); case SYS_RES_IRQ: err = rman_activate_resource(r); if (err != 0) return (err); err = intr_activate_irq(child, r); if (err != 0) { rman_deactivate_resource(r); return (err); } return (0); default: return (EINVAL); } } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. 
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); #ifdef FDT error = bus_space_map(fdtbus_bs_tag, start, length, 0, &map->r_bushandle); if (error) return (error); map->r_bustag = fdtbus_bs_tag; map->r_vaddr = (void *)map->r_bushandle; #else map->r_vaddr = pmap_mapdev(start, length); if (map->r_vaddr == NULL) return (ENOMEM); map->r_bustag = (void *)1; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; #endif map->r_size = length; return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: #ifdef FDT bus_space_unmap(map->r_bustag, map->r_bushandle, map->r_size); #else pmap_unmapdev(map->r_vaddr, map->r_size); #endif return (0); default: return (EINVAL); } } static int nexus_deactivate_resource(device_t bus, device_t child, struct resource *r) { int error; switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (bus_generic_rman_deactivate_resource(bus, child, r)); case SYS_RES_IRQ: error = rman_deactivate_resource(r); if (error) return (error); intr_deactivate_irq(child, r); return (0); default: return (EINVAL); } } #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } #endif /* FDT */ diff --git a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c index b9f788b2af9b..5ff7b5069c6b 100644 --- a/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c +++ b/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c @@ -1,866 +1,866 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #include "opt_mmccam.h" #include "bcm2835_dma.h" #include #ifdef NOTYET #include #endif #include #define BCM2835_DEFAULT_SDHCI_FREQ 50 #define BCM2838_DEFAULT_SDHCI_FREQ 100 #define BCM_SDHCI_BUFFER_SIZE 512 /* * NUM_DMA_SEGS is the number of DMA segments we want to accommodate on average. * We add in a number of segments based on how much we may need to spill into * another segment due to crossing page boundaries. e.g. up to PAGE_SIZE, an * extra page is needed as we can cross a page boundary exactly once. */ #define NUM_DMA_SEGS 1 #define NUM_DMA_SPILL_SEGS \ ((((NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE) - 1) / PAGE_SIZE) + 1) #define ALLOCATED_DMA_SEGS (NUM_DMA_SEGS + NUM_DMA_SPILL_SEGS) #define BCM_DMA_MAXSIZE (NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE) #define BCM_SDHCI_SLOT_LEFT(slot) \ ((slot)->curcmd->data->len - (slot)->offset) #define BCM_SDHCI_SEGSZ_LEFT(slot) \ min(BCM_DMA_MAXSIZE, \ rounddown(BCM_SDHCI_SLOT_LEFT(slot), BCM_SDHCI_BUFFER_SIZE)) #define DATA_PENDING_MASK (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL) #define DATA_XFER_MASK (DATA_PENDING_MASK | SDHCI_INT_DATA_END) #ifdef DEBUG static int bcm2835_sdhci_debug = 0; TUNABLE_INT("hw.bcm2835.sdhci.debug", &bcm2835_sdhci_debug); SYSCTL_INT(_hw_sdhci, OID_AUTO, bcm2835_sdhci_debug, CTLFLAG_RWTUN, &bcm2835_sdhci_debug, 0, "bcm2835 SDHCI debug level"); #define dprintf(fmt, args...) \ do { \ if (bcm2835_sdhci_debug) \ printf("%s: " fmt, __func__, ##args); \ } while (0) #else #define dprintf(fmt, args...) 
#endif static int bcm2835_sdhci_hs = 1; static int bcm2835_sdhci_pio_mode = 0; struct bcm_mmc_conf { int clock_id; int clock_src; int default_freq; int quirks; int emmc_dreq; }; struct bcm_mmc_conf bcm2835_sdhci_conf = { .clock_id = BCM2835_MBOX_CLOCK_ID_EMMC, .clock_src = -1, .default_freq = BCM2835_DEFAULT_SDHCI_FREQ, .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DONT_SET_HISPD_BIT | SDHCI_QUIRK_MISSING_CAPS, .emmc_dreq = BCM_DMA_DREQ_EMMC, }; struct bcm_mmc_conf bcm2838_emmc2_conf = { .clock_id = BCM2838_MBOX_CLOCK_ID_EMMC2, .clock_src = -1, .default_freq = BCM2838_DEFAULT_SDHCI_FREQ, .quirks = 0, .emmc_dreq = BCM_DMA_DREQ_NONE, }; static struct ofw_compat_data compat_data[] = { {"broadcom,bcm2835-sdhci", (uintptr_t)&bcm2835_sdhci_conf}, {"brcm,bcm2835-sdhci", (uintptr_t)&bcm2835_sdhci_conf}, {"brcm,bcm2835-mmc", (uintptr_t)&bcm2835_sdhci_conf}, {"brcm,bcm2711-emmc2", (uintptr_t)&bcm2838_emmc2_conf}, {"brcm,bcm2838-emmc2", (uintptr_t)&bcm2838_emmc2_conf}, {NULL, 0} }; TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs); TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode); struct bcm_sdhci_softc { device_t sc_dev; struct resource * sc_mem_res; struct resource * sc_irq_res; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; void * sc_intrhand; struct mmc_request * sc_req; struct sdhci_slot sc_slot; struct mmc_helper sc_mmc_helper; int sc_dma_ch; bus_dma_tag_t sc_dma_tag; bus_dmamap_t sc_dma_map; vm_paddr_t sc_sdhci_buffer_phys; bus_addr_t dmamap_seg_addrs[ALLOCATED_DMA_SEGS]; bus_size_t dmamap_seg_sizes[ALLOCATED_DMA_SEGS]; int dmamap_seg_count; int dmamap_seg_index; int dmamap_status; uint32_t blksz_and_count; uint32_t cmd_and_mode; bool need_update_blk; #ifdef NOTYET device_t clkman; #endif struct bcm_mmc_conf * conf; }; static int bcm_sdhci_probe(device_t); static int bcm_sdhci_attach(device_t); static int bcm_sdhci_detach(device_t); static void bcm_sdhci_intr(void *); static int bcm_sdhci_get_ro(device_t, device_t); static void bcm_sdhci_dma_intr(int ch, void *arg); static void bcm_sdhci_start_dma(struct sdhci_slot *slot); static void bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { struct bcm_sdhci_softc *sc = arg; int i; /* Sanity check: we can only ever have one mapping at a time. */ KASSERT(sc->dmamap_seg_count == 0, ("leaked DMA segment")); sc->dmamap_status = err; sc->dmamap_seg_count = nseg; /* Note nseg is guaranteed to be zero if err is non-zero. 
*/ for (i = 0; i < nseg; i++) { sc->dmamap_seg_addrs[i] = segs[i].ds_addr; sc->dmamap_seg_sizes[i] = segs[i].ds_len; } } static int bcm_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Broadcom 2708 SDHCI controller"); return (BUS_PROBE_DEFAULT); } static int bcm_sdhci_attach(device_t dev) { struct bcm_sdhci_softc *sc = device_get_softc(dev); int rid, err; phandle_t node; pcell_t cell; u_int default_freq; sc->sc_dev = dev; sc->sc_req = NULL; sc->conf = (struct bcm_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (sc->conf == 0) return (ENXIO); err = bcm2835_mbox_set_power_state(BCM2835_MBOX_POWER_ID_EMMC, TRUE); if (err != 0) { if (bootverbose) device_printf(dev, "Unable to enable the power\n"); return (err); } default_freq = 0; err = bcm2835_mbox_get_clock_rate(sc->conf->clock_id, &default_freq); if (err == 0) { /* Convert to MHz */ default_freq /= 1000000; } if (default_freq == 0) { node = ofw_bus_get_node(sc->sc_dev); if ((OF_getencprop(node, "clock-frequency", &cell, sizeof(cell))) > 0) default_freq = cell / 1000000; } if (default_freq == 0) default_freq = sc->conf->default_freq; if (bootverbose) device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq); #ifdef NOTYET if (sc->conf->clock_src > 0) { uint32_t f; sc->clkman = devclass_get_device( devclass_find("bcm2835_clkman"), 0); if (sc->clkman == NULL) { device_printf(dev, "cannot find Clock Manager\n"); return (ENXIO); } f = bcm2835_clkman_set_frequency(sc->clkman, sc->conf->clock_src, default_freq); if (f == 0) return (EINVAL); if (bootverbose) device_printf(dev, "Clock source frequency: %dMHz\n", f); } #endif rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } if (!bcm2835_sdhci_pio_mode) sc->sc_slot.opt = SDHCI_PLATFORM_TRANSFER; sc->sc_slot.caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180; if (bcm2835_sdhci_hs) sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD; sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT); sc->sc_slot.quirks = sc->conf->quirks; sdhci_init_slot(dev, &sc->sc_slot, 0); mmc_fdt_parse(dev, 0, &sc->sc_mmc_helper, &sc->sc_slot.host); sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_ANY); if (sc->sc_dma_ch == BCM_DMA_CH_INVALID) goto fail; err = bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc); if (err != 0) { device_printf(dev, "cannot setup dma interrupt handler\n"); err = ENXIO; goto fail; } /* Allocate bus_dma resources. 
*/ err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, bcm283x_dmabus_peripheral_lowaddr(), BUS_SPACE_MAXADDR, NULL, NULL, BCM_DMA_MAXSIZE, ALLOCATED_DMA_SEGS, BCM_SDHCI_BUFFER_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_dma_tag); if (err) { device_printf(dev, "failed allocate DMA tag"); goto fail; } err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map); if (err) { device_printf(dev, "bus_dmamap_create failed\n"); goto fail; } /* FIXME: Fix along with other BUS_SPACE_PHYSADDR instances */ sc->sc_sdhci_buffer_phys = rman_get_start(sc->sc_mem_res) + SDHCI_BUFFER; - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->sc_slot); /* Seed our copies. */ sc->blksz_and_count = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_BLOCK_SIZE); sc->cmd_and_mode = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_TRANSFER_MODE); return (0); fail: if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (err); } static int bcm_sdhci_detach(device_t dev) { return (EBUSY); } static void bcm_sdhci_intr(void *arg) { struct bcm_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->sc_slot); } static int bcm_sdhci_update_ios(device_t bus, device_t child) { struct bcm_sdhci_softc *sc; struct mmc_ios *ios; int rv; sc = device_get_softc(bus); ios = &sc->sc_slot.host.ios; if (ios->power_mode == power_up) { if (sc->sc_mmc_helper.vmmc_supply) regulator_enable(sc->sc_mmc_helper.vmmc_supply); if (sc->sc_mmc_helper.vqmmc_supply) regulator_enable(sc->sc_mmc_helper.vqmmc_supply); } rv = sdhci_generic_update_ios(bus, child); if (rv != 0) return (rv); if (ios->power_mode == power_off) { if (sc->sc_mmc_helper.vmmc_supply) regulator_disable(sc->sc_mmc_helper.vmmc_supply); if (sc->sc_mmc_helper.vqmmc_supply) regulator_disable(sc->sc_mmc_helper.vqmmc_supply); } return (0); } static int bcm_sdhci_get_ro(device_t bus, device_t child) { return (0); } static inline uint32_t RD4(struct bcm_sdhci_softc *sc, bus_size_t off) { uint32_t val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return val; } static inline void WR4(struct bcm_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); /* * The Arasan HC has a bug where it may lose the content of * consecutive writes to registers that are within two SD-card * clock cycles of each other (a clock domain crossing problem). */ if (sc->sc_slot.clock > 0) DELAY(((2 * 1000000) / sc->sc_slot.clock) + 1); } static uint8_t bcm_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val = RD4(sc, off & ~3); return ((val >> (off & 3)*8) & 0xff); } static uint16_t bcm_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* * Standard 32-bit handling of command and transfer mode, as * well as block size and count. 
*/ if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) && sc->need_update_blk) val32 = sc->blksz_and_count; else if (off == SDHCI_TRANSFER_MODE || off == SDHCI_COMMAND_FLAGS) val32 = sc->cmd_and_mode; else val32 = RD4(sc, off & ~3); return ((val32 >> (off & 3)*8) & 0xffff); } static uint32_t bcm_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhci_softc *sc = device_get_softc(dev); return RD4(sc, off); } static void bcm_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhci_softc *sc = device_get_softc(dev); bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count); } static void bcm_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3)*8); val32 |= (val << (off & 3)*8); WR4(sc, off & ~3, val32); } static void bcm_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* * If we have a queued up 16bit value for blk size or count, use and * update the saved value rather than doing any real register access. * If we did not touch either since the last write, then read from * register as at least block count can change. * Similarly, if we are about to issue a command, always use the saved * value for transfer mode as we can never write that without issuing * a command. */ if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) && sc->need_update_blk) val32 = sc->blksz_and_count; else if (off == SDHCI_COMMAND_FLAGS) val32 = sc->cmd_and_mode; else val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3)*8); val32 |= (val << (off & 3)*8); if (off == SDHCI_TRANSFER_MODE) sc->cmd_and_mode = val32; else if (off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) { sc->blksz_and_count = val32; sc->need_update_blk = true; } else { if (off == SDHCI_COMMAND_FLAGS) { /* If we saved blk writes, do them now before cmd. */ if (sc->need_update_blk) { WR4(sc, SDHCI_BLOCK_SIZE, sc->blksz_and_count); sc->need_update_blk = false; } /* Always save cmd and mode registers. */ sc->cmd_and_mode = val32; } WR4(sc, off & ~3, val32); } } static void bcm_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct bcm_sdhci_softc *sc = device_get_softc(dev); WR4(sc, off, val); } static void bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhci_softc *sc = device_get_softc(dev); bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count); } static void bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc) { struct sdhci_slot *slot; vm_paddr_t pdst, psrc; int err __diagused, idx, len, sync_op, width; slot = &sc->sc_slot; mtx_assert(&slot->mtx, MA_OWNED); idx = sc->dmamap_seg_index++; len = sc->dmamap_seg_sizes[idx]; slot->offset += len; width = (len & 0xf ? BCM_DMA_32BIT : BCM_DMA_128BIT); if (slot->curcmd->data->flags & MMC_DATA_READ) { /* * Peripherals on the AXI bus do not need DREQ pacing for reads * from the ARM core, so we can safely set this to NONE. 
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		psrc = sc->sc_sdhci_buffer_phys;
		pdst = sc->dmamap_seg_addrs[idx];
		sync_op = BUS_DMASYNC_PREREAD;
	} else {
		/*
		 * The ordering here is important, because the last write to
		 * dst/src in the dma control block writes the real dreq value.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		bcm_dma_setup_dst(sc->sc_dma_ch, sc->conf->emmc_dreq,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		psrc = sc->dmamap_seg_addrs[idx];
		pdst = sc->sc_sdhci_buffer_phys;
		sync_op = BUS_DMASYNC_PREWRITE;
	}

	/*
	 * When starting a new DMA operation do the busdma sync operation, and
	 * disable SDHCI data interrupts because we'll be driven by DMA
	 * interrupts (or SDHCI error interrupts) until the I/O is done.
	 */
	if (idx == 0) {
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);

		slot->intmask &= ~DATA_XFER_MASK;
		bcm_sdhci_write_4(sc->sc_dev, slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask);
	}

	/*
	 * Start the DMA transfer.  Only programming errors (like failing to
	 * allocate a channel) cause a non-zero return from bcm_dma_start().
	 */
	err = bcm_dma_start(sc->sc_dma_ch, psrc, pdst, len);
	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
}

static void
bcm_sdhci_dma_exit(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	mtx_assert(&slot->mtx, MA_OWNED);

	/* Re-enable interrupts */
	slot->intmask |= DATA_XFER_MASK;
	bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
	    slot->intmask);
}

static void
bcm_sdhci_dma_unload(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	if (sc->dmamap_seg_count == 0)
		return;
	if ((slot->curcmd->data->flags & MMC_DATA_READ) != 0)
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTREAD);
	else
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);

	sc->dmamap_seg_count = 0;
	sc->dmamap_seg_index = 0;
}

static void
bcm_sdhci_dma_intr(int ch, void *arg)
{
	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
	struct sdhci_slot *slot = &sc->sc_slot;
	uint32_t reg;

	mtx_lock(&slot->mtx);
	if (slot->curcmd == NULL)
		goto out;
	/*
	 * If there are more segments for the current dma, start the next one.
	 * Otherwise unload the dma map and decide what to do next based on the
	 * status of the sdhci controller and whether there's more data left.
	 */
	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
		bcm_sdhci_start_dma_seg(sc);
		goto out;
	}

	bcm_sdhci_dma_unload(sc);

	/*
	 * If we had no further segments pending, we need to determine how to
	 * proceed next.  If the 'data/space pending' bit is already set and we
	 * can continue via DMA, do so.  Otherwise, re-enable interrupts and
	 * return.
	 */
	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS) &
	    DATA_XFER_MASK;
	if ((reg & DATA_PENDING_MASK) != 0 &&
	    BCM_SDHCI_SEGSZ_LEFT(slot) >= BCM_SDHCI_BUFFER_SIZE) {
		/* ACK any pending interrupts */
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    DATA_PENDING_MASK);

		bcm_sdhci_start_dma(slot);
		if (slot->curcmd->error != 0) {
			/* We won't recover from this error for this command.
*/ bcm_sdhci_dma_unload(sc); bcm_sdhci_dma_exit(sc); sdhci_finish_data(slot); } } else if ((reg & SDHCI_INT_DATA_END) != 0) { bcm_sdhci_dma_exit(sc); bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS, reg); slot->flags &= ~PLATFORM_DATA_STARTED; sdhci_finish_data(slot); } else { bcm_sdhci_dma_exit(sc); } out: mtx_unlock(&slot->mtx); } static void bcm_sdhci_start_dma(struct sdhci_slot *slot) { struct bcm_sdhci_softc *sc = device_get_softc(slot->bus); uint8_t *buf; size_t left; mtx_assert(&slot->mtx, MA_OWNED); left = BCM_SDHCI_SEGSZ_LEFT(slot); buf = (uint8_t *)slot->curcmd->data->data + slot->offset; KASSERT(left != 0, ("%s: DMA handling incorrectly indicated", __func__)); /* * No need to check segment count here; if we've not yet unloaded * previous segments, we'll catch that in bcm_sdhci_dmacb. */ if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buf, left, bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 || sc->dmamap_status != 0) { slot->curcmd->error = MMC_ERR_NO_MEMORY; return; } /* DMA start */ bcm_sdhci_start_dma_seg(sc); } static int bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot) { #ifdef INVARIANTS struct bcm_sdhci_softc *sc = device_get_softc(slot->bus); #endif /* * We don't want to perform DMA in this context -- interrupts are * disabled, and a transaction may already be in progress. */ if (dumping) return (0); /* * This indicates that we somehow let a data interrupt slip by into the * SDHCI framework, when it should not have. This really needs to be * caught and fixed ASAP, as it really shouldn't happen. */ KASSERT(sc->dmamap_seg_count == 0, ("data pending interrupt pushed through SDHCI framework")); /* * Do not use DMA for transfers less than our block size. Checking * alignment serves little benefit, as we round transfer sizes down to * a multiple of the block size and push the transfer back to * SDHCI-driven PIO once we're below the block size. */ if (BCM_SDHCI_SEGSZ_LEFT(slot) < BCM_DMA_BLOCK_SIZE) return (0); return (1); } static void bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot, uint32_t *intmask) { /* DMA transfer FIFO 1KB */ bcm_sdhci_start_dma(slot); } static void bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot) { struct bcm_sdhci_softc *sc = device_get_softc(slot->bus); /* * Clean up. Interrupts are clearly enabled, because we received an * SDHCI_INT_DATA_END to get this far -- just make sure we don't leave * anything laying around. */ if (sc->dmamap_seg_count != 0) { /* * Our segment math should have worked out such that we would * never finish the transfer without having used up all of the * segments. If we haven't, that means we must have erroneously * regressed to SDHCI-driven PIO to finish the operation and * this is certainly caused by developer-error. 
*/ bcm_sdhci_dma_unload(sc); } sdhci_finish_data(slot); } static device_method_t bcm_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcm_sdhci_probe), DEVMETHOD(device_attach, bcm_sdhci_attach), DEVMETHOD(device_detach, bcm_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), DEVMETHOD(bus_add_child, bus_generic_add_child), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, bcm_sdhci_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, bcm_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* Platform transfer methods */ DEVMETHOD(sdhci_platform_will_handle, bcm_sdhci_will_handle_transfer), DEVMETHOD(sdhci_platform_start_transfer, bcm_sdhci_start_transfer), DEVMETHOD(sdhci_platform_finish_transfer, bcm_sdhci_finish_transfer), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, bcm_sdhci_read_1), DEVMETHOD(sdhci_read_2, bcm_sdhci_read_2), DEVMETHOD(sdhci_read_4, bcm_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, bcm_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, bcm_sdhci_write_1), DEVMETHOD(sdhci_write_2, bcm_sdhci_write_2), DEVMETHOD(sdhci_write_4, bcm_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, bcm_sdhci_write_multi_4), DEVMETHOD_END }; static driver_t bcm_sdhci_driver = { "sdhci_bcm", bcm_sdhci_methods, sizeof(struct bcm_sdhci_softc), }; DRIVER_MODULE(sdhci_bcm, simplebus, bcm_sdhci_driver, NULL, NULL); #ifdef NOTYET MODULE_DEPEND(sdhci_bcm, bcm2835_clkman, 1, 1, 1); #endif SDHCI_DEPEND(sdhci_bcm); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_bcm); #endif diff --git a/sys/arm/broadcom/bcm2835/bcm2835_sdhost.c b/sys/arm/broadcom/bcm2835/bcm2835_sdhost.c index 232948fbb17f..29f062d6ab2b 100644 --- a/sys/arm/broadcom/bcm2835/bcm2835_sdhost.c +++ b/sys/arm/broadcom/bcm2835/bcm2835_sdhost.c @@ -1,1285 +1,1285 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Klaus P. Ohrhallinger * All rights reserved. * * Based on bcm2835_sdhci.c: * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include /* * pin 48-53 - card slot * pin 34-39 - radio module * * alt-0 - rubbish SDHCI (0x7e202000) aka sdhost * alt-3 - advanced SDHCI (0x7e300000) aka sdhci/mmc/sdio * * driving card slot with mmc: * * sdhost_pins { * brcm,pins = <0x30 0x31 0x32 0x33 0x34 0x35>; * brcm,function = <0x7>; * brcm,pull = <0x0 0x2 0x2 0x2 0x2 0x2>; * phandle = <0x17>; * }; * sdio_pins { * brcm,pins = <0x22 0x23 0x24 0x25 0x26 0x27>; * brcm,function = <0x4>; * brcm,pull = <0x0 0x2 0x2 0x2 0x2 0x2>; * phandle = <0x18>; * }; * * driving card slot with sdhost: * * sdhost_pins { * brcm,pins = <0x30 0x31 0x32 0x33 0x34 0x35>; * brcm,function = <0x4>; * brcm,pull = <0x0 0x2 0x2 0x2 0x2 0x2>; * phandle = <0x17>; * }; * sdio_pins { * brcm,pins = <0x22 0x23 0x24 0x25 0x26 0x27>; * brcm,function = <0x7>; * brcm,pull = <0x0 0x2 0x2 0x2 0x2 0x2>; * phandle = <0x18>; * }; * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" #include "opt_mmccam.h" #include "bcm2835_dma.h" #include #include "bcm2835_vcbus.h" /* #define SDHOST_DEBUG */ /* Registers */ #define HC_COMMAND 0x00 /* Command and flags */ #define HC_ARGUMENT 0x04 #define HC_TIMEOUTCOUNTER 0x08 #define HC_CLOCKDIVISOR 0x0c #define HC_RESPONSE_0 0x10 #define HC_RESPONSE_1 0x14 #define HC_RESPONSE_2 0x18 #define HC_RESPONSE_3 0x1c #define HC_HOSTSTATUS 0x20 #define HC_POWER 0x30 #define HC_DEBUG 0x34 #define HC_HOSTCONFIG 0x38 #define HC_BLOCKSIZE 0x3c #define HC_DATAPORT 0x40 #define HC_BLOCKCOUNT 0x50 /* Flags for HC_COMMAND register */ #define HC_CMD_ENABLE 0x8000 #define HC_CMD_FAILED 0x4000 #define HC_CMD_BUSY 0x0800 #define HC_CMD_RESPONSE_NONE 0x0400 #define HC_CMD_RESPONSE_LONG 0x0200 #define HC_CMD_WRITE 0x0080 #define HC_CMD_READ 0x0040 #define HC_CMD_COMMAND_MASK 0x003f #define HC_CLOCKDIVISOR_MAXVAL 0x07ff /* Flags for HC_HOSTSTATUS register */ #define HC_HSTST_HAVEDATA 0x0001 #define HC_HSTST_ERROR_FIFO 0x0008 #define HC_HSTST_ERROR_CRC7 0x0010 #define HC_HSTST_ERROR_CRC16 0x0020 #define HC_HSTST_TIMEOUT_CMD 0x0040 #define HC_HSTST_TIMEOUT_DATA 0x0080 #define HC_HSTST_INT_BLOCK 0x0200 #define HC_HSTST_INT_BUSY 0x0400 #define HC_HSTST_RESET 0xffff #define HC_HSTST_MASK_ERROR_DATA (HC_HSTST_ERROR_FIFO | \ HC_HSTST_ERROR_CRC7 | HC_HSTST_ERROR_CRC16 | HC_HSTST_TIMEOUT_DATA) #define HC_HSTST_MASK_ERROR_ALL (HC_HSTST_MASK_ERROR_DATA | \ HC_HSTST_TIMEOUT_CMD) /* Flags for HC_HOSTCONFIG register */ #define HC_HSTCF_INTBUS_WIDE 0x0002 #define HC_HSTCF_EXTBUS_4BIT 0x0004 #define HC_HSTCF_SLOW_CARD 0x0008 #define HC_HSTCF_INT_DATA 0x0010 #define HC_HSTCF_INT_BLOCK 0x0100 #define HC_HSTCF_INT_BUSY 0x0400 /* Flags for HC_DEBUG register */ #define HC_DBG_FIFO_THRESH_WRITE_SHIFT 9 #define HC_DBG_FIFO_THRESH_READ_SHIFT 14 #define HC_DBG_FIFO_THRESH_MASK 0x001f /* Settings */ #define HC_FIFO_SIZE 16 #define HC_FIFO_THRESH_READ 4 #define HC_FIFO_THRESH_WRITE 4 #define HC_TIMEOUT_DEFAULT 0x00f00000 #define BCM2835_DEFAULT_SDHCI_FREQ 50 static int bcm2835_sdhost_debug = 0; #ifdef SDHOST_DEBUG TUNABLE_INT("hw.bcm2835.sdhost.debug", &bcm2835_sdhost_debug); SYSCTL_INT(_hw_sdhci, OID_AUTO, bcm2835_sdhost_debug, CTLFLAG_RWTUN, &bcm2835_sdhost_debug, 0, "bcm2835-sdhost Debug level"); #define dprintf(fmt, args...) \ do { \ if (bcm2835_sdhost_debug > 0) \ printf(fmt,##args); \ } while (0) #else #define dprintf(fmt, args...) #endif /* ! 
SDHOST_DEBUG */ static struct ofw_compat_data compat_data[] = { {"brcm,bcm2835-sdhost", 1}, {NULL, 0} }; struct bcm_sdhost_softc { device_t sc_dev; struct resource * sc_mem_res; struct resource * sc_irq_res; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; void * sc_intrhand; struct mmc_request * sc_req; struct sdhci_slot sc_slot; struct mtx mtx; char cmdbusy; char mmc_app_cmd; u_int32_t sdhci_int_status; u_int32_t sdhci_signal_enable; u_int32_t sdhci_present_state; u_int32_t sdhci_blocksize; u_int32_t sdhci_blockcount; u_int32_t sdcard_rca; }; static int bcm_sdhost_probe(device_t); static int bcm_sdhost_attach(device_t); static int bcm_sdhost_detach(device_t); static void bcm_sdhost_intr(void *); static int bcm_sdhost_get_ro(device_t, device_t); static inline uint32_t RD4(struct bcm_sdhost_softc *sc, bus_size_t off) { uint32_t val; val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); return (val); } static inline void WR4(struct bcm_sdhost_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val); } #ifdef notyet static inline uint16_t RD2(struct bcm_sdhost_softc *sc, bus_size_t off) { uint32_t val; val = RD4(sc, off & ~3); return ((val >> (off & 3)*8) & 0xffff); } #endif static inline uint8_t RD1(struct bcm_sdhost_softc *sc, bus_size_t off) { uint32_t val; val = RD4(sc, off & ~3); return ((val >> (off & 3)*8) & 0xff); } static inline void WR2(struct bcm_sdhost_softc *sc, bus_size_t off, uint16_t val) { uint32_t val32; val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3)*8); val32 |= (val << (off & 3)*8); WR4(sc, off & ~3, val32); } static inline void WR1(struct bcm_sdhost_softc *sc, bus_size_t off, uint8_t val) { uint32_t val32; val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3)*8); val32 |= (val << (off & 3)*8); WR4(sc, off & ~3, val32); } static void bcm_sdhost_print_regs(struct bcm_sdhost_softc *sc, struct sdhci_slot *slot, int line, int error) { if (bcm2835_sdhost_debug > 0 || error > 0) { printf("%s: sc=%p slot=%p\n", __func__, sc, slot); printf("HC_COMMAND: 0x%08x\n", RD4(sc, HC_COMMAND)); printf("HC_ARGUMENT: 0x%08x\n", RD4(sc, HC_ARGUMENT)); printf("HC_TIMEOUTCOUNTER: 0x%08x\n", RD4(sc, HC_TIMEOUTCOUNTER)); printf("HC_CLOCKDIVISOR: 0x%08x\n", RD4(sc, HC_CLOCKDIVISOR)); printf("HC_RESPONSE_0: 0x%08x\n", RD4(sc, HC_RESPONSE_0)); printf("HC_RESPONSE_1: 0x%08x\n", RD4(sc, HC_RESPONSE_1)); printf("HC_RESPONSE_2: 0x%08x\n", RD4(sc, HC_RESPONSE_2)); printf("HC_RESPONSE_3: 0x%08x\n", RD4(sc, HC_RESPONSE_3)); printf("HC_HOSTSTATUS: 0x%08x\n", RD4(sc, HC_HOSTSTATUS)); printf("HC_POWER: 0x%08x\n", RD4(sc, HC_POWER)); printf("HC_DEBUG: 0x%08x\n", RD4(sc, HC_DEBUG)); printf("HC_HOSTCONFIG: 0x%08x\n", RD4(sc, HC_HOSTCONFIG)); printf("HC_BLOCKSIZE: 0x%08x\n", RD4(sc, HC_BLOCKSIZE)); printf("HC_BLOCKCOUNT: 0x%08x\n", RD4(sc, HC_BLOCKCOUNT)); } else { /* printf("%04d | HC_COMMAND: 0x%08x HC_ARGUMENT: 0x%08x " "HC_HOSTSTATUS: 0x%08x HC_HOSTCONFIG: 0x%08x\n", line, RD4(sc, HC_COMMAND), RD4(sc, HC_ARGUMENT), RD4(sc, HC_HOSTSTATUS), RD4(sc, HC_HOSTCONFIG)); */ } } static void bcm_sdhost_reset(device_t dev, struct sdhci_slot *slot) { struct bcm_sdhost_softc *sc = device_get_softc(dev); u_int32_t dbg; WR4(sc, HC_POWER, 0); WR4(sc, HC_COMMAND, 0); WR4(sc, HC_ARGUMENT, 0); WR4(sc, HC_TIMEOUTCOUNTER, HC_TIMEOUT_DEFAULT); WR4(sc, HC_CLOCKDIVISOR, 0); WR4(sc, HC_HOSTSTATUS, HC_HSTST_RESET); WR4(sc, HC_HOSTCONFIG, 0); WR4(sc, HC_BLOCKSIZE, 0); WR4(sc, HC_BLOCKCOUNT, 0); dbg = RD4(sc, HC_DEBUG); dbg &= ~( (HC_DBG_FIFO_THRESH_MASK << 
HC_DBG_FIFO_THRESH_READ_SHIFT) | (HC_DBG_FIFO_THRESH_MASK << HC_DBG_FIFO_THRESH_WRITE_SHIFT) ); dbg |= (HC_FIFO_THRESH_READ << HC_DBG_FIFO_THRESH_READ_SHIFT) | (HC_FIFO_THRESH_WRITE << HC_DBG_FIFO_THRESH_WRITE_SHIFT); WR4(sc, HC_DEBUG, dbg); DELAY(250000); WR4(sc, HC_POWER, 1); DELAY(250000); sc->sdhci_present_state = SDHCI_CARD_PRESENT | SDHCI_CARD_STABLE | SDHCI_WRITE_PROTECT; WR4(sc, HC_CLOCKDIVISOR, HC_CLOCKDIVISOR_MAXVAL); WR4(sc, HC_HOSTCONFIG, HC_HSTCF_INT_BUSY); } static int bcm_sdhost_probe(device_t dev) { dprintf("%s:\n", __func__); if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Broadcom 2708 SDHOST controller"); return (BUS_PROBE_DEFAULT); } static int bcm_sdhost_attach(device_t dev) { struct bcm_sdhost_softc *sc = device_get_softc(dev); int rid, err; u_int default_freq; dprintf("%s: dev=%p sc=%p unit=%d\n", __func__, dev, sc, device_get_unit(dev)); mtx_init(&sc->mtx, "BCM SDHOST mtx", "bcm_sdhost", MTX_DEF | MTX_RECURSE); sc->sc_dev = dev; sc->sc_req = NULL; sc->cmdbusy = 0; sc->mmc_app_cmd = 0; sc->sdhci_int_status = 0; sc->sdhci_signal_enable = 0; sc->sdhci_present_state = 0; sc->sdhci_blocksize = 0; sc->sdhci_blockcount = 0; sc->sdcard_rca = 0; default_freq = 50; err = 0; if (bootverbose) device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); bcm_sdhost_reset(dev, &sc->sc_slot); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 0); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, bcm_sdhost_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } sc->sc_slot.caps = 0; sc->sc_slot.caps |= SDHCI_CAN_VDD_330; sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD; sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT); sc->sc_slot.quirks = 0; sc->sc_slot.quirks |= SDHCI_QUIRK_MISSING_CAPS; sc->sc_slot.quirks |= SDHCI_QUIRK_DONT_SHIFT_RESPONSE; sc->sc_slot.opt = 0; /* XXX ? sc->slot->timeout_clk = ...; */ sdhci_init_slot(dev, &sc->sc_slot, 0); - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->sc_slot); return (0); fail: if (sc->sc_intrhand) bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (err); } static int bcm_sdhost_detach(device_t dev) { dprintf("%s:\n", __func__); return (EBUSY); } /* * rv 0 --> command finished * rv 1 --> command timed out */ static inline int bcm_sdhost_waitcommand(struct bcm_sdhost_softc *sc) { int timeout = 1000; mtx_assert(&sc->mtx, MA_OWNED); while ((RD4(sc, HC_COMMAND) & HC_CMD_ENABLE) && --timeout > 0) { DELAY(100); } return ((timeout > 0) ? 0 : 1); } static int bcm_sdhost_waitcommand_status(struct bcm_sdhost_softc *sc) { u_int32_t cdst; int i; /* wait for card to change status from * ''prg'' to ''trn'' * card status: sd specs p. 
103 */ i = 0; do { DELAY(1000); WR4(sc, HC_ARGUMENT, sc->sdcard_rca << 16); WR4(sc, HC_COMMAND, MMC_SEND_STATUS | HC_CMD_ENABLE); bcm_sdhost_waitcommand(sc); cdst = RD4(sc, HC_RESPONSE_0); dprintf("%s: card status %08x (cs %d)\n", __func__, cdst, (cdst & 0x0e00) >> 9); if (i++ > 100) { printf("%s: giving up, " "card status %08x (cs %d)\n", __func__, cdst, (cdst & 0x0e00) >> 9); return (1); break; } } while (((cdst & 0x0e00) >> 9) != 4); return (0); } static void bcm_sdhost_intr(void *arg) { struct bcm_sdhost_softc *sc = arg; struct sdhci_slot *slot = &sc->sc_slot; uint32_t hstst; uint32_t cmd; mtx_lock(&sc->mtx); hstst = RD4(sc, HC_HOSTSTATUS); cmd = RD4(sc, HC_COMMAND); if (hstst & HC_HSTST_HAVEDATA) { if (cmd & HC_CMD_READ) { sc->sdhci_present_state |= SDHCI_DATA_AVAILABLE; sc->sdhci_int_status |= SDHCI_INT_DATA_AVAIL; } else if (cmd & HC_CMD_WRITE) { sc->sdhci_present_state |= SDHCI_SPACE_AVAILABLE; sc->sdhci_int_status |= SDHCI_INT_SPACE_AVAIL; } else { panic("%s: hstst & HC_HSTST_HAVEDATA but no " "HC_CMD_READ or HC_CMD_WRITE: cmd=%0x8 " "hstst=%08x\n", __func__, cmd, hstst); } } else { sc->sdhci_present_state &= ~(SDHCI_DATA_AVAILABLE|SDHCI_SPACE_AVAILABLE); sc->sdhci_int_status &= ~(SDHCI_INT_DATA_AVAIL|SDHCI_INT_SPACE_AVAIL); } if (hstst & HC_HSTST_MASK_ERROR_ALL) { printf("%s: ERROR: HC_HOSTSTATUS: %08x\n", __func__, hstst); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); sc->sdhci_int_status |= SDHCI_INT_ERROR; } else { sc->sdhci_int_status &= ~SDHCI_INT_ERROR; } dprintf("%s: hstst=%08x offset=%08lx sdhci_present_state=%08x " "sdhci_int_status=%08x\n", __func__, hstst, slot->offset, sc->sdhci_present_state, sc->sdhci_int_status); sdhci_generic_intr(&sc->sc_slot); sc->sdhci_int_status &= ~(SDHCI_INT_ERROR|SDHCI_INT_DATA_AVAIL|SDHCI_INT_DATA_END); sc->sdhci_present_state &= ~SDHCI_DATA_AVAILABLE; if ((hstst & HC_HSTST_HAVEDATA) && (sc->sdhci_blocksize * sc->sdhci_blockcount == slot->offset)) { dprintf("%s: offset=%08lx sdhci_blocksize=%08x " "sdhci_blockcount=%08x\n", __func__, slot->offset, sc->sdhci_blocksize, sc->sdhci_blockcount); sc->sdhci_int_status &= ~(SDHCI_INT_DATA_AVAIL|SDHCI_INT_SPACE_AVAIL); sc->sdhci_int_status |= SDHCI_INT_DATA_END; sdhci_generic_intr(&sc->sc_slot); sc->sdhci_int_status &= ~SDHCI_INT_DATA_END; if ((cmd & HC_CMD_COMMAND_MASK) == MMC_READ_MULTIPLE_BLOCK || (cmd & HC_CMD_COMMAND_MASK) == MMC_WRITE_MULTIPLE_BLOCK) { WR4(sc, HC_ARGUMENT, 0x00000000); WR4(sc, HC_COMMAND, MMC_STOP_TRANSMISSION | HC_CMD_ENABLE); if (bcm_sdhost_waitcommand(sc)) { printf("%s: timeout #1\n", __func__); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); } } if (cmd & HC_CMD_WRITE) { if (bcm_sdhost_waitcommand_status(sc) != 0) sc->sdhci_int_status |= SDHCI_INT_ERROR; } slot->data_done = 1; sc->sdhci_int_status |= SDHCI_INT_RESPONSE; sdhci_generic_intr(&sc->sc_slot); sc->sdhci_int_status &= ~(SDHCI_INT_RESPONSE|SDHCI_INT_ERROR); } /* this resets the interrupt */ WR4(sc, HC_HOSTSTATUS, (HC_HSTST_INT_BUSY|HC_HSTST_INT_BLOCK|HC_HSTST_HAVEDATA)); mtx_unlock(&sc->mtx); } static int bcm_sdhost_get_ro(device_t bus, device_t child) { dprintf("%s:\n", __func__); return (0); } static bool bcm_sdhost_get_card_present(device_t dev, struct sdhci_slot *slot) { dprintf("%s:\n", __func__); return (1); } static void bcm_sdhost_command(device_t dev, struct sdhci_slot *slot, uint16_t val) { struct bcm_sdhost_softc *sc = device_get_softc(dev); struct mmc_data *data = slot->curcmd->data; uint16_t val2; uint8_t opcode; uint8_t flags; mtx_assert(&sc->mtx, MA_OWNED); if (RD4(sc, HC_COMMAND) & 
HC_CMD_ENABLE) { panic("%s: HC_CMD_ENABLE on entry\n", __func__); } if (sc->cmdbusy == 1) panic("%s: cmdbusy\n", __func__); sc->cmdbusy = 1; val2 = ((val >> 8) & HC_CMD_COMMAND_MASK) | HC_CMD_ENABLE; opcode = val >> 8; flags = val & 0xff; if (opcode == MMC_APP_CMD) sc->mmc_app_cmd = 1; if ((flags & SDHCI_CMD_RESP_MASK) == SDHCI_CMD_RESP_LONG) val2 |= HC_CMD_RESPONSE_LONG; else if ((flags & SDHCI_CMD_RESP_MASK) == SDHCI_CMD_RESP_SHORT_BUSY) /* XXX XXX when enabled, cmd 7 (select card) blocks forever */ ;/*val2 |= HC_CMD_BUSY; */ else if ((flags & SDHCI_CMD_RESP_MASK) == SDHCI_CMD_RESP_SHORT) ; else val2 |= HC_CMD_RESPONSE_NONE; if (val2 & HC_CMD_BUSY) sc->sdhci_present_state |= SDHCI_CMD_INHIBIT | SDHCI_DAT_INHIBIT; if (data != NULL && data->flags & MMC_DATA_READ) val2 |= HC_CMD_READ; else if (data != NULL && data->flags & MMC_DATA_WRITE) val2 |= HC_CMD_WRITE; dprintf("%s: SDHCI_COMMAND_FLAGS --> HC_COMMAND %04x --> %04x\n", __func__, val, val2); if (opcode == MMC_READ_MULTIPLE_BLOCK || opcode == MMC_WRITE_MULTIPLE_BLOCK) { u_int32_t save_sdarg; dprintf("%s: issuing MMC_SET_BLOCK_COUNT: CMD %08x ARG %08x\n", __func__, MMC_SET_BLOCK_COUNT | HC_CMD_ENABLE, sc->sdhci_blockcount); save_sdarg = RD4(sc, HC_ARGUMENT); WR4(sc, HC_ARGUMENT, sc->sdhci_blockcount); WR4(sc, HC_COMMAND, MMC_SET_BLOCK_COUNT | HC_CMD_ENABLE); /* Seems to always return timeout */ if (bcm_sdhost_waitcommand(sc)) { printf("%s: timeout #2\n", __func__); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); } else { bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 0); } WR4(sc, HC_ARGUMENT, save_sdarg); } else if (opcode == MMC_SELECT_CARD) { sc->sdcard_rca = (RD4(sc, HC_ARGUMENT) >> 16); } /* actually issuing the command */ WR4(sc, HC_COMMAND, val2); if (val2 & HC_CMD_READ || val2 & HC_CMD_WRITE) { u_int8_t hstcfg; hstcfg = RD4(sc, HC_HOSTCONFIG); hstcfg |= (HC_HSTCF_INT_BUSY | HC_HSTCF_INT_DATA); WR4(sc, HC_HOSTCONFIG, hstcfg); slot->data_done = 0; if (bcm_sdhost_waitcommand(sc)) { printf("%s: timeout #3\n", __func__); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); } } else if (opcode == MMC_ERASE) { if (bcm_sdhost_waitcommand_status(sc) != 0) { printf("%s: timeout #4\n", __func__); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); } slot->data_done = 1; sc->sdhci_present_state &= ~(SDHCI_CMD_INHIBIT | SDHCI_DAT_INHIBIT); } else { if (bcm_sdhost_waitcommand(sc)) { printf("%s: timeout #5\n", __func__); bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); } slot->data_done = 1; sc->sdhci_present_state &= ~(SDHCI_CMD_INHIBIT | SDHCI_DAT_INHIBIT); } bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 0); if (RD4(sc, HC_HOSTSTATUS) & HC_HSTST_TIMEOUT_CMD) slot->curcmd->error = MMC_ERR_TIMEOUT; else if (RD4(sc, HC_COMMAND) & HC_CMD_FAILED) slot->curcmd->error = MMC_ERR_FAILED; dprintf("%s: curcmd->flags=%d data_done=%d\n", __func__, slot->curcmd->flags, slot->data_done); if (val2 & HC_CMD_RESPONSE_NONE) slot->curcmd->error = 0; if (sc->mmc_app_cmd == 1 && opcode != MMC_APP_CMD) sc->mmc_app_cmd = 0; if (RD4(sc, HC_COMMAND) & HC_CMD_ENABLE) { bcm_sdhost_print_regs(sc, &sc->sc_slot, __LINE__, 1); panic("%s: still HC_CMD_ENABLE on exit\n", __func__); } sc->cmdbusy = 0; if (!(val2 & HC_CMD_READ || val2 & HC_CMD_WRITE)) sc->sdhci_int_status |= SDHCI_INT_RESPONSE; /* HACK, so sdhci_finish_command() does not * have to be exported */ mtx_unlock(&slot->mtx); sdhci_generic_intr(slot); mtx_lock(&slot->mtx); sc->sdhci_int_status &= ~SDHCI_INT_RESPONSE; } static uint8_t bcm_sdhost_read_1(device_t dev, struct sdhci_slot *slot, 
bus_size_t off) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint32_t val1, val2; mtx_lock(&sc->mtx); switch (off) { case SDHCI_HOST_CONTROL: val1 = RD4(sc, HC_HOSTCONFIG); val2 = 0; if (val1 & HC_HSTCF_EXTBUS_4BIT) val2 |= SDHCI_CTRL_4BITBUS; dprintf("%s: SDHCI_HOST_CONTROL --> HC_HOSTCONFIG val2 %02x\n", __func__, val2); break; case SDHCI_POWER_CONTROL: val1 = RD1(sc, HC_POWER); val2 = (val1 == 1) ? 0x0f : 0; dprintf("%s: SDHCI_POWER_CONTROL --> HC_POWER val2 %02x\n", __func__, val2); break; case SDHCI_BLOCK_GAP_CONTROL: dprintf("%s: SDHCI_BLOCK_GAP_CONTROL\n", __func__); val2 = 0; break; case SDHCI_WAKE_UP_CONTROL: dprintf("%s: SDHCI_WAKE_UP_CONTROL\n", __func__); val2 = 0; break; case SDHCI_TIMEOUT_CONTROL: dprintf("%s: SDHCI_TIMEOUT_CONTROL\n", __func__); val2 = 0; break; case SDHCI_SOFTWARE_RESET: dprintf("%s: SDHCI_SOFTWARE_RESET\n", __func__); val2 = 0; break; case SDHCI_ADMA_ERR: dprintf("%s: SDHCI_ADMA_ERR\n", __func__); val2 = 0; break; default: dprintf("%s: UNKNOWN off=%08lx\n", __func__, off); val2 = 0; break; } mtx_unlock(&sc->mtx); return (val2); } static uint16_t bcm_sdhost_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint32_t val2, val; /* = RD4(sc, off & ~3); */ mtx_lock(&sc->mtx); switch (off) { case SDHCI_BLOCK_SIZE: val2 = sc->sdhci_blocksize; dprintf("%s: SDHCI_BLOCK_SIZE --> HC_BLOCKSIZE %08x\n", __func__, val2); break; case SDHCI_BLOCK_COUNT: val2 = sc->sdhci_blockcount; dprintf("%s: SDHCI_BLOCK_COUNT --> HC_BLOCKCOUNT %08x\n", __func__, val2); break; case SDHCI_TRANSFER_MODE: dprintf("%s: SDHCI_TRANSFER_MODE\n", __func__); val2 = 0; break; case SDHCI_CLOCK_CONTROL: val = RD4(sc, HC_CLOCKDIVISOR); val2 = (val << SDHCI_DIVIDER_SHIFT) | SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN | SDHCI_CLOCK_INT_STABLE; dprintf("%s: SDHCI_CLOCK_CONTROL %04x --> %04x\n", __func__, val, val2); break; case SDHCI_ACMD12_ERR: dprintf("%s: SDHCI_ACMD12_ERR\n", __func__); val2 = 0; break; case SDHCI_HOST_CONTROL2: dprintf("%s: SDHCI_HOST_CONTROL2\n", __func__); val2 = 0; break; case SDHCI_SLOT_INT_STATUS: dprintf("%s: SDHCI_SLOT_INT_STATUS\n", __func__); val2 = 0; break; case SDHCI_HOST_VERSION: dprintf("%s: SDHCI_HOST_VERSION\n", __func__); val2 = 0; break; default: dprintf("%s: UNKNOWN off=%08lx\n", __func__, off); val2 = 0; break; } mtx_unlock(&sc->mtx); return (val2); } static uint32_t bcm_sdhost_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint32_t val2; mtx_lock(&sc->mtx); switch (off) { case SDHCI_DMA_ADDRESS: dprintf("%s: SDHCI_DMA_ADDRESS\n", __func__); val2 = 0; break; case SDHCI_ARGUMENT: dprintf("%s: SDHCI_ARGUMENT\n", __func__); val2 = (RD4(sc, HC_COMMAND) << 16) | (RD4(sc, HC_ARGUMENT) & 0x0000ffff); break; case SDHCI_RESPONSE + 0: val2 = RD4(sc, HC_RESPONSE_0); dprintf("%s: SDHCI_RESPONSE+0 %08x\n", __func__, val2); break; case SDHCI_RESPONSE + 4: val2 = RD4(sc, HC_RESPONSE_1); dprintf("%s: SDHCI_RESPONSE+4 %08x\n", __func__, val2); break; case SDHCI_RESPONSE + 8: val2 = RD4(sc, HC_RESPONSE_2); dprintf("%s: SDHCI_RESPONSE+8 %08x\n", __func__, val2); break; case SDHCI_RESPONSE + 12: val2 = RD4(sc, HC_RESPONSE_3); dprintf("%s: SDHCI_RESPONSE+12 %08x\n", __func__, val2); break; case SDHCI_BUFFER: dprintf("%s: SDHCI_BUFFER\n", __func__); val2 = 0; break; case SDHCI_PRESENT_STATE: dprintf("%s: SDHCI_PRESENT_STATE %08x\n", __func__, sc->sdhci_present_state); val2 = sc->sdhci_present_state; break; case SDHCI_INT_STATUS: dprintf("%s: 
SDHCI_INT_STATUS %08x\n", __func__, sc->sdhci_int_status); val2 = sc->sdhci_int_status; break; case SDHCI_INT_ENABLE: dprintf("%s: SDHCI_INT_ENABLE\n", __func__); val2 = 0; break; case SDHCI_SIGNAL_ENABLE: dprintf("%s: SDHCI_SIGNAL_ENABLE %08x\n", __func__, sc->sdhci_signal_enable); val2 = sc->sdhci_signal_enable; break; case SDHCI_CAPABILITIES: val2 = 0; break; case SDHCI_CAPABILITIES2: dprintf("%s: SDHCI_CAPABILITIES2\n", __func__); val2 = 0; break; case SDHCI_MAX_CURRENT: dprintf("%s: SDHCI_MAX_CURRENT\n", __func__); val2 = 0; break; case SDHCI_ADMA_ADDRESS_LO: dprintf("%s: SDHCI_ADMA_ADDRESS_LO\n", __func__); val2 = 0; break; default: dprintf("%s: UNKNOWN off=%08lx\n", __func__, off); val2 = 0; break; } mtx_unlock(&sc->mtx); return (val2); } static void bcm_sdhost_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhost_softc *sc = device_get_softc(dev); bus_size_t i; bus_size_t avail; uint32_t edm; mtx_lock(&sc->mtx); dprintf("%s: off=%08lx count=%08lx\n", __func__, off, count); for (i = 0; i < count;) { edm = RD4(sc, HC_DEBUG); avail = ((edm >> 4) & 0x1f); if (i + avail > count) avail = count - i; if (avail > 0) bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, HC_DATAPORT, data + i, avail); i += avail; DELAY(1); } mtx_unlock(&sc->mtx); } static void bcm_sdhost_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint32_t val2; mtx_lock(&sc->mtx); switch (off) { case SDHCI_HOST_CONTROL: val2 = RD4(sc, HC_HOSTCONFIG); val2 |= HC_HSTCF_INT_BUSY; val2 |= HC_HSTCF_INTBUS_WIDE | HC_HSTCF_SLOW_CARD; if (val & SDHCI_CTRL_4BITBUS) val2 |= HC_HSTCF_EXTBUS_4BIT; dprintf("%s: SDHCI_HOST_CONTROL --> HC_HOSTC %04x --> %04x\n", __func__, val, val2); WR4(sc, HC_HOSTCONFIG, val2); break; case SDHCI_POWER_CONTROL: val2 = (val != 0) ? 
1 : 0; dprintf("%s: SDHCI_POWER_CONTROL --> HC_POWER %02x --> %02x\n", __func__, val, val2); WR1(sc, HC_POWER, val2); break; case SDHCI_BLOCK_GAP_CONTROL: dprintf("%s: SDHCI_BLOCK_GAP_CONTROL val=%02x\n", __func__, val); break; case SDHCI_TIMEOUT_CONTROL: dprintf("%s: SDHCI_TIMEOUT_CONTROL val=%02x\n", __func__, val); break; case SDHCI_SOFTWARE_RESET: dprintf("%s: SDHCI_SOFTWARE_RESET val=%02x\n", __func__, val); break; case SDHCI_ADMA_ERR: dprintf("%s: SDHCI_ADMA_ERR val=%02x\n", __func__, val); break; default: dprintf("%s: UNKNOWN off=%08lx val=%08x\n", __func__, off, val); break; } mtx_unlock(&sc->mtx); } static void bcm_sdhost_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint16_t val2; mtx_lock(&sc->mtx); switch (off) { case SDHCI_BLOCK_SIZE: dprintf("%s: SDHCI_BLOCK_SIZE val=%04x\n" , __func__, val); sc->sdhci_blocksize = val; WR2(sc, HC_BLOCKSIZE, val); break; case SDHCI_BLOCK_COUNT: dprintf("%s: SDHCI_BLOCK_COUNT val=%04x\n" , __func__, val); sc->sdhci_blockcount = val; WR2(sc, HC_BLOCKCOUNT, val); break; case SDHCI_TRANSFER_MODE: dprintf("%s: SDHCI_TRANSFER_MODE val=%04x\n" , __func__, val); break; case SDHCI_COMMAND_FLAGS: bcm_sdhost_command(dev, slot, val); break; case SDHCI_CLOCK_CONTROL: val2 = (val & ~SDHCI_DIVIDER_MASK) >> SDHCI_DIVIDER_SHIFT; /* get crc16 errors with cdiv=0 */ if (val2 == 0) val2 = 1; dprintf("%s: SDHCI_CLOCK_CONTROL %04x --> SCDIV %04x\n", __func__, val, val2); WR4(sc, HC_CLOCKDIVISOR, val2); break; case SDHCI_ACMD12_ERR: dprintf("%s: SDHCI_ACMD12_ERR val=%04x\n" , __func__, val); break; case SDHCI_HOST_CONTROL2: dprintf("%s: SDHCI_HOST_CONTROL2 val=%04x\n" , __func__, val); break; case SDHCI_SLOT_INT_STATUS: dprintf("%s: SDHCI_SLOT_INT_STATUS val=%04x\n" , __func__, val); break; default: dprintf("%s: UNKNOWN off=%08lx val=%04x\n", __func__, off, val); break; } mtx_unlock(&sc->mtx); } static void bcm_sdhost_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct bcm_sdhost_softc *sc = device_get_softc(dev); uint32_t val2; uint32_t hstcfg; mtx_lock(&sc->mtx); switch (off) { case SDHCI_ARGUMENT: val2 = val; dprintf("%s: SDHCI_ARGUMENT --> HC_ARGUMENT val=%08x\n", __func__, val); WR4(sc, HC_ARGUMENT, val2); break; case SDHCI_INT_STATUS: dprintf("%s: SDHCI_INT_STATUS val=%08x\n", __func__, val); sc->sdhci_int_status = val; break; case SDHCI_INT_ENABLE: dprintf("%s: SDHCI_INT_ENABLE val=%08x\n" , __func__, val); break; case SDHCI_SIGNAL_ENABLE: sc->sdhci_signal_enable = val; hstcfg = RD4(sc, HC_HOSTCONFIG); if (val != 0) hstcfg &= ~(HC_HSTCF_INT_BLOCK | HC_HSTCF_INT_DATA); else hstcfg |= (HC_HSTCF_INT_BUSY|HC_HSTCF_INT_BLOCK| HC_HSTCF_INT_DATA); hstcfg |= HC_HSTCF_INT_BUSY; dprintf("%s: SDHCI_SIGNAL_ENABLE --> HC_HOSTC %08x --> %08x\n" , __func__, val, hstcfg); WR4(sc, HC_HOSTCONFIG, hstcfg); break; case SDHCI_CAPABILITIES: dprintf("%s: SDHCI_CAPABILITIES val=%08x\n", __func__, val); break; case SDHCI_CAPABILITIES2: dprintf("%s: SDHCI_CAPABILITIES2 val=%08x\n", __func__, val); break; case SDHCI_MAX_CURRENT: dprintf("%s: SDHCI_MAX_CURRENT val=%08x\n", __func__, val); break; case SDHCI_ADMA_ADDRESS_LO: dprintf("%s: SDHCI_ADMA_ADDRESS_LO val=%08x\n", __func__, val); break; default: dprintf("%s: UNKNOWN off=%08lx val=%08x\n", __func__, off, val); break; } mtx_unlock(&sc->mtx); } static void bcm_sdhost_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct bcm_sdhost_softc *sc = 
device_get_softc(dev); bus_size_t i; bus_size_t space; uint32_t edm; mtx_lock(&sc->mtx); dprintf("%s: off=%08lx count=%02lx\n", __func__, off, count); for (i = 0; i < count;) { edm = RD4(sc, HC_DEBUG); space = HC_FIFO_SIZE - ((edm >> 4) & 0x1f); if (i + space > count) space = count - i; if (space > 0) bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, HC_DATAPORT, data + i, space); i += space; DELAY(1); } /* wait until FIFO is really empty */ while (((RD4(sc, HC_DEBUG) >> 4) & 0x1f) > 0) DELAY(1); mtx_unlock(&sc->mtx); } static device_method_t bcm_sdhost_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bcm_sdhost_probe), DEVMETHOD(device_attach, bcm_sdhost_attach), DEVMETHOD(device_detach, bcm_sdhost_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, bcm_sdhost_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, bcm_sdhost_read_1), DEVMETHOD(sdhci_read_2, bcm_sdhost_read_2), DEVMETHOD(sdhci_read_4, bcm_sdhost_read_4), DEVMETHOD(sdhci_read_multi_4, bcm_sdhost_read_multi_4), DEVMETHOD(sdhci_write_1, bcm_sdhost_write_1), DEVMETHOD(sdhci_write_2, bcm_sdhost_write_2), DEVMETHOD(sdhci_write_4, bcm_sdhost_write_4), DEVMETHOD(sdhci_write_multi_4, bcm_sdhost_write_multi_4), DEVMETHOD(sdhci_get_card_present,bcm_sdhost_get_card_present), DEVMETHOD_END }; static driver_t bcm_sdhost_driver = { "sdhost_bcm", bcm_sdhost_methods, sizeof(struct bcm_sdhost_softc), }; DRIVER_MODULE(sdhost_bcm, simplebus, bcm_sdhost_driver, NULL, NULL); SDHCI_DEPEND(sdhost_bcm); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhost_bcm); #endif diff --git a/sys/arm/nvidia/tegra_sdhci.c b/sys/arm/nvidia/tegra_sdhci.c index 2b152227d95f..d37f71a150a2 100644 --- a/sys/arm/nvidia/tegra_sdhci.c +++ b/sys/arm/nvidia/tegra_sdhci.c @@ -1,472 +1,472 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * SDHCI driver glue for NVIDIA Tegra family * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include "opt_mmccam.h" /* Tegra SDHOST controller vendor register definitions */ #define SDMMC_VENDOR_CLOCK_CNTRL 0x100 #define VENDOR_CLOCK_CNTRL_CLK_SHIFT 8 #define VENDOR_CLOCK_CNTRL_CLK_MASK 0xFF #define SDMMC_VENDOR_SYS_SW_CNTRL 0x104 #define SDMMC_VENDOR_CAP_OVERRIDES 0x10C #define SDMMC_VENDOR_BOOT_CNTRL 0x110 #define SDMMC_VENDOR_BOOT_ACK_TIMEOUT 0x114 #define SDMMC_VENDOR_BOOT_DAT_TIMEOUT 0x118 #define SDMMC_VENDOR_DEBOUNCE_COUNT 0x11C #define SDMMC_VENDOR_MISC_CNTRL 0x120 #define VENDOR_MISC_CTRL_ENABLE_SDR104 0x8 #define VENDOR_MISC_CTRL_ENABLE_SDR50 0x10 #define VENDOR_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 #define VENDOR_MISC_CTRL_ENABLE_DDR50 0x200 #define SDMMC_MAX_CURRENT_OVERRIDE 0x124 #define SDMMC_MAX_CURRENT_OVERRIDE_HI 0x128 #define SDMMC_VENDOR_CLK_GATE_HYSTERESIS_COUNT 0x1D0 #define SDMMC_VENDOR_PHWRESET_VAL0 0x1D4 #define SDMMC_VENDOR_PHWRESET_VAL1 0x1D8 #define SDMMC_VENDOR_PHWRESET_VAL2 0x1DC #define SDMMC_SDMEMCOMPPADCTRL_0 0x1E0 #define SDMMC_AUTO_CAL_CONFIG 0x1E4 #define SDMMC_AUTO_CAL_INTERVAL 0x1E8 #define SDMMC_AUTO_CAL_STATUS 0x1EC #define SDMMC_SDMMC_MCCIF_FIFOCTRL 0x1F4 #define SDMMC_TIMEOUT_WCOAL_SDMMC 0x1F8 /* Compatible devices. */ static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-sdhci", 1}, {"nvidia,tegra210-sdhci", 1}, {NULL, 0}, }; struct tegra_sdhci_softc { device_t dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; u_int quirks; /* Chip specific quirks */ u_int caps; /* If we override SDHCI_CAPABILITIES */ uint32_t max_clk; /* Max possible freq */ clk_t clk; hwreset_t reset; gpio_pin_t gpio_power; struct sdhci_fdt_gpio *gpio; int force_card_present; struct sdhci_slot slot; }; static inline uint32_t RD4(struct tegra_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static uint8_t tegra_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_1(sc->mem_res, off)); } static uint16_t tegra_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); return (bus_read_2(sc->mem_res, off)); } static uint32_t tegra_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct tegra_sdhci_softc *sc; uint32_t val32; sc = device_get_softc(dev); val32 = bus_read_4(sc->mem_res, off); /* Force the card-present state if necessary. 
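A note on the override just below: a soldered-down eMMC has no card-detect line, so the device tree marks the node non-removable and the glue driver simply ORs the card-present bit into reads of SDHCI_PRESENT_STATE. Condensed into a standalone helper (illustrative, not part of the driver):

/* Sketch: fake card presence for non-removable media. */
static uint32_t
ex_present_state(struct tegra_sdhci_softc *sc, uint32_t raw)
{
        if (sc->force_card_present)
                raw |= SDHCI_CARD_PRESENT;      /* no card-detect line exists */
        return (raw);
}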
*/ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void tegra_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_1(sc->mem_res, off, val); } static void tegra_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_2(sc->mem_res, off, val); } static void tegra_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->mem_res, off, val); } static void tegra_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct tegra_sdhci_softc *sc; sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off, data, count); } static void tegra_sdhci_intr(void *arg) { struct tegra_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); RD4(sc, SDHCI_INT_STATUS); } static int tegra_sdhci_get_ro(device_t brdev, device_t reqdev) { struct tegra_sdhci_softc *sc = device_get_softc(brdev); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool tegra_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct tegra_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } static int tegra_sdhci_probe(device_t dev) { struct tegra_sdhci_softc *sc; phandle_t node; pcell_t cid; const struct ofw_compat_data *cd; sc = device_get_softc(dev); if (!ofw_bus_status_okay(dev)) return (ENXIO); cd = ofw_bus_search_compatible(dev, compat_data); if (cd->ocd_data == 0) return (ENXIO); node = ofw_bus_get_node(dev); device_set_desc(dev, "Tegra SDHCI controller"); /* Allow dts to patch quirks, slots, and max-frequency. 
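On the property reads that follow: OF_getencprop() returns the number of bytes copied out, or -1 on failure, so a strictly positive return means the cell exists and has already been converted to host endianness. The idiom, wrapped in a hypothetical helper:

/* Sketch: apply optional DT overrides only when the properties exist. */
static void
ex_apply_dt_overrides(phandle_t node, struct tegra_sdhci_softc *sc)
{
        pcell_t cell;

        if (OF_getencprop(node, "quirks", &cell, sizeof(cell)) > 0)
                sc->quirks = cell;              /* property present and valid */
        if (OF_getencprop(node, "max-frequency", &cell, sizeof(cell)) > 0)
                sc->max_clk = cell;
}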
*/ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0) sc->quirks = cid; if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0) sc->max_clk = cid; return (BUS_PROBE_DEFAULT); } static int tegra_sdhci_attach(device_t dev) { struct tegra_sdhci_softc *sc; int rid, rv; uint64_t freq; phandle_t node, prop; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); rv = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); rv = ENXIO; goto fail; } rv = hwreset_get_by_ofw_name(sc->dev, 0, "sdhci", &sc->reset); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sdhci' reset\n"); goto fail; } rv = hwreset_assert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot reset 'sdhci' reset\n"); goto fail; } gpio_pin_get_by_ofw_property(sc->dev, node, "power-gpios", &sc->gpio_power); if (OF_hasprop(node, "assigned-clocks")) { rv = clk_set_assigned(sc->dev, node); if (rv != 0) { device_printf(dev, "Cannot set assigned clocks\n"); goto fail; } } rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk); if (rv != 0) { device_printf(dev, "Cannot get clock\n"); goto fail; } rv = clk_enable(sc->clk); if (rv != 0) { device_printf(dev, "Cannot enable clock\n"); goto fail; } rv = clk_set_freq(sc->clk, 48000000, CLK_SET_ROUND_DOWN); if (rv != 0) { device_printf(dev, "Cannot set clock\n"); } rv = clk_get_freq(sc->clk, &freq); if (rv != 0) { device_printf(dev, "Cannot get clock frequency\n"); goto fail; } DELAY(4000); rv = hwreset_deassert(sc->reset); if (rv != 0) { device_printf(dev, "Cannot unreset 'sdhci' reset\n"); goto fail; } if (bootverbose) device_printf(dev, " Base MMC clock: %jd\n", (uintmax_t)freq); /* Fill slot information. */ sc->max_clk = (int)freq; sc->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_MISSING_CAPS; /* Limit real slot capabilities. */ sc->caps = RD4(sc, SDHCI_CAPABILITIES); if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } if (OF_hasprop(node, "non-removable")) sc->force_card_present = 1; /* * Clear clock field, so SDHCI driver uses supplied frequency. 
* in sc->slot.max_clk */ sc->caps &= ~SDHCI_CLOCK_V3_BASE_MASK; sc->slot.quirks = sc->quirks; sc->slot.max_clk = sc->max_clk; sc->slot.caps = sc->caps; if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, tegra_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); rv = ENXIO; goto fail; } rv = sdhci_init_slot(dev, &sc->slot, 0); if (rv != 0) { goto fail; } sc->gpio = sdhci_fdt_gpio_setup(sc->dev, &sc->slot); - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: if (sc->gpio != NULL) sdhci_fdt_gpio_teardown(sc->gpio); if (sc->intr_cookie != NULL) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->gpio_power != NULL) gpio_pin_release(sc->gpio_power); if (sc->clk != NULL) clk_release(sc->clk); if (sc->reset != NULL) hwreset_release(sc->reset); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (rv); } static int tegra_sdhci_detach(device_t dev) { struct tegra_sdhci_softc *sc = device_get_softc(dev); struct sdhci_slot *slot = &sc->slot; bus_generic_detach(dev); sdhci_fdt_gpio_teardown(sc->gpio); clk_release(sc->clk); bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); sdhci_cleanup_slot(slot); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); return (0); } static device_method_t tegra_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tegra_sdhci_probe), DEVMETHOD(device_attach, tegra_sdhci_attach), DEVMETHOD(device_detach, tegra_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, tegra_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, tegra_sdhci_read_1), DEVMETHOD(sdhci_read_2, tegra_sdhci_read_2), DEVMETHOD(sdhci_read_4, tegra_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, tegra_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, tegra_sdhci_write_1), DEVMETHOD(sdhci_write_2, tegra_sdhci_write_2), DEVMETHOD(sdhci_write_4, tegra_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, tegra_sdhci_write_multi_4), DEVMETHOD(sdhci_get_card_present, tegra_sdhci_get_card_present), DEVMETHOD_END }; static DEFINE_CLASS_0(sdhci, tegra_sdhci_driver, tegra_sdhci_methods, sizeof(struct tegra_sdhci_softc)); DRIVER_MODULE(sdhci_tegra, simplebus, tegra_sdhci_driver, NULL, NULL); SDHCI_DEPEND(sdhci_tegra); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci); #endif diff --git a/sys/arm/ti/am335x/am335x_ehrpwm.c b/sys/arm/ti/am335x/am335x_ehrpwm.c index 2296b050517f..77b6384b17a7 100644 --- a/sys/arm/ti/am335x/am335x_ehrpwm.c +++ b/sys/arm/ti/am335x/am335x_ehrpwm.c @@ -1,598 +1,598 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" #include "am335x_pwm.h" /******************************************************************************* * Enhanced resolution PWM driver. Many of the advanced featues of the hardware * are not supported by this driver. What is implemented here is simple * variable-duty-cycle PWM output. ******************************************************************************/ /* In ticks */ #define DEFAULT_PWM_PERIOD 1000 #define PWM_CLOCK 100000000UL #define NS_PER_SEC 1000000000 #define PWM_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define PWM_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define PWM_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define PWM_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \ device_get_nameunit(_sc->sc_dev), "am335x_ehrpwm softc", MTX_DEF) #define PWM_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx) #define EPWM_READ2(_sc, reg) bus_read_2((_sc)->sc_mem_res, reg) #define EPWM_WRITE2(_sc, reg, value) \ bus_write_2((_sc)->sc_mem_res, reg, value) #define EPWM_TBCTL 0x00 /* see 15.2.2.11 for the first two, used in debug situations */ #define TBCTL_FREERUN_STOP_NEXT_TBC_INCREMENT (0 << 14) #define TBCTL_FREERUN_STOP_COMPLETE_CYCLE (1 << 14) /* ignore suspend control signal */ #define TBCTL_FREERUN (2 << 14) #define TBCTL_PHDIR_UP (1 << 13) #define TBCTL_PHDIR_DOWN (0 << 13) #define TBCTL_CLKDIV(x) ((x) << 10) #define TBCTL_CLKDIV_MASK (7 << 10) #define TBCTL_HSPCLKDIV(x) ((x) << 7) #define TBCTL_HSPCLKDIV_MASK (7 << 7) #define TBCTL_SYNCOSEL_DISABLED (3 << 4) #define TBCTL_PRDLD_SHADOW (0 << 3) #define TBCTL_PRDLD_IMMEDIATE (1 << 3) #define TBCTL_PHSEN_DISABLED (0 << 2) #define TBCTL_PHSEN_ENABLED (1 << 2) #define TBCTL_CTRMODE_MASK (3) #define TBCTL_CTRMODE_UP (0 << 0) #define TBCTL_CTRMODE_DOWN (1 << 0) #define TBCTL_CTRMODE_UPDOWN (2 << 0) #define TBCTL_CTRMODE_FREEZE (3 << 0) #define EPWM_TBSTS 0x02 #define EPWM_TBPHSHR 0x04 #define EPWM_TBPHS 0x06 #define EPWM_TBCNT 0x08 #define EPWM_TBPRD 0x0a /* Counter-compare */ #define EPWM_CMPCTL 0x0e #define CMPCTL_SHDWBMODE_SHADOW (1 << 6) #define CMPCTL_SHDWBMODE_IMMEDIATE (0 << 6) #define CMPCTL_SHDWAMODE_SHADOW (1 << 4) #define CMPCTL_SHDWAMODE_IMMEDIATE (0 << 4) #define CMPCTL_LOADBMODE_ZERO (0 << 2) #define CMPCTL_LOADBMODE_PRD (1 << 2) #define CMPCTL_LOADBMODE_EITHER (2 << 2) #define CMPCTL_LOADBMODE_FREEZE (3 << 2) #define CMPCTL_LOADAMODE_ZERO (0 << 0) #define CMPCTL_LOADAMODE_PRD (1 << 0) #define CMPCTL_LOADAMODE_EITHER (2 << 
0) #define CMPCTL_LOADAMODE_FREEZE (3 << 0) #define EPWM_CMPAHR 0x10 #define EPWM_CMPA 0x12 #define EPWM_CMPB 0x14 /* CMPCTL_LOADAMODE_ZERO */ #define EPWM_AQCTLA 0x16 #define EPWM_AQCTLB 0x18 #define AQCTL_CBU_NONE (0 << 8) #define AQCTL_CBU_CLEAR (1 << 8) #define AQCTL_CBU_SET (2 << 8) #define AQCTL_CBU_TOGGLE (3 << 8) #define AQCTL_CAU_NONE (0 << 4) #define AQCTL_CAU_CLEAR (1 << 4) #define AQCTL_CAU_SET (2 << 4) #define AQCTL_CAU_TOGGLE (3 << 4) #define AQCTL_ZRO_NONE (0 << 0) #define AQCTL_ZRO_CLEAR (1 << 0) #define AQCTL_ZRO_SET (2 << 0) #define AQCTL_ZRO_TOGGLE (3 << 0) #define EPWM_AQSFRC 0x1a #define EPWM_AQCSFRC 0x1c #define AQCSFRC_OFF 0 #define AQCSFRC_LO 1 #define AQCSFRC_HI 2 #define AQCSFRC_MASK 3 #define AQCSFRC(chan, hilo) ((hilo) << (2 * chan)) /* Trip-Zone module */ #define EPWM_TZSEL 0x24 #define EPWM_TZCTL 0x28 #define EPWM_TZFLG 0x2C /* Dead band */ #define EPWM_DBCTL 0x1E #define DBCTL_MASK (3 << 0) #define DBCTL_BYPASS 0 #define DBCTL_RISING_EDGE 1 #define DBCTL_FALLING_EDGE 2 #define DBCTL_BOTH_EDGE 3 /* PWM-chopper */ #define EPWM_PCCTL 0x3C #define PCCTL_CHPEN_MASK (1 << 0) #define PCCTL_CHPEN_DISABLE 0 #define PCCTL_CHPEN_ENABLE 1 /* High-Resolution PWM */ #define EPWM_HRCTL 0x40 #define HRCTL_DELMODE_BOTH 3 #define HRCTL_DELMODE_FALL 2 #define HRCTL_DELMODE_RISE 1 static device_probe_t am335x_ehrpwm_probe; static device_attach_t am335x_ehrpwm_attach; static device_detach_t am335x_ehrpwm_detach; struct ehrpwm_channel { u_int duty; /* on duration, in ns */ bool enabled; /* channel enabled? */ bool inverted; /* signal inverted? */ }; #define NUM_CHANNELS 2 struct am335x_ehrpwm_softc { device_t sc_dev; device_t sc_busdev; struct mtx sc_mtx; struct resource *sc_mem_res; int sc_mem_rid; /* Things used for configuration via pwm(9) api. */ u_int sc_clkfreq; /* frequency in Hz */ u_int sc_clktick; /* duration in ns */ u_int sc_period; /* duration in ns */ struct ehrpwm_channel sc_channels[NUM_CHANNELS]; }; static struct ofw_compat_data compat_data[] = { {"ti,am3352-ehrpwm", true}, {"ti,am33xx-ehrpwm", true}, {NULL, false}, }; SIMPLEBUS_PNP_INFO(compat_data); static void am335x_ehrpwm_cfg_duty(struct am335x_ehrpwm_softc *sc, u_int chan, u_int duty) { u_int tbcmp; if (duty == 0) tbcmp = 0; else tbcmp = max(1, duty / sc->sc_clktick); sc->sc_channels[chan].duty = tbcmp * sc->sc_clktick; PWM_LOCK_ASSERT(sc); EPWM_WRITE2(sc, (chan == 0) ? EPWM_CMPA : EPWM_CMPB, tbcmp); } static void am335x_ehrpwm_cfg_enable(struct am335x_ehrpwm_softc *sc, u_int chan, bool enable) { uint16_t regval; sc->sc_channels[chan].enabled = enable; /* * Turn off any existing software-force of the channel, then force * it in the right direction (high or low) if it's not being enabled. */ PWM_LOCK_ASSERT(sc); regval = EPWM_READ2(sc, EPWM_AQCSFRC); regval &= ~AQCSFRC(chan, AQCSFRC_MASK); if (!sc->sc_channels[chan].enabled) { if (sc->sc_channels[chan].inverted) regval |= AQCSFRC(chan, AQCSFRC_HI); else regval |= AQCSFRC(chan, AQCSFRC_LO); } EPWM_WRITE2(sc, EPWM_AQCSFRC, regval); } static bool am335x_ehrpwm_cfg_period(struct am335x_ehrpwm_softc *sc, u_int period) { uint16_t regval; u_int clkdiv, hspclkdiv, pwmclk, pwmtick, tbprd; /* Can't do a period shorter than 2 clock ticks. */ if (period < 2 * NS_PER_SEC / PWM_CLOCK) { sc->sc_clkfreq = 0; sc->sc_clktick = 0; sc->sc_period = 0; return (false); } /* * Figure out how much we have to divide down the base 100MHz clock so * that we can express the requested period as a 16-bit tick count. 
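The search that follows deserves a sketch of its own: the time-base clock is the 100 MHz system clock divided by CLKDIV (a power of two, 1 to 128) times HSPCLKDIV (1, or 2 through 14 in steps of two), and the loop picks the first pair whose tick is coarse enough for the period to fit the 16-bit TBPRD counter. An equivalent standalone version, with hypothetical ex_* names:

/* Sketch of the divisor search; returns the TBPRD tick count, 0 on failure. */
static u_int
ex_pick_divisors(u_int period_ns, u_int *a, u_int *b)
{
        u_int cd, cdhs, pwmclk, tick;

        for (*a = 0; *a < 8; (*a)++) {
                cd = 1U << *a;                  /* CLKDIV = 2^a */
                for (*b = 0; *b < 8; (*b)++) {
                        cdhs = (*b == 0) ? 1 : 2 * (*b);        /* HSPCLKDIV */
                        pwmclk = PWM_CLOCK / (cd * cdhs);
                        tick = NS_PER_SEC / pwmclk;     /* ns per counter tick */
                        if (period_ns / tick < 65536)
                                return (period_ns / tick);
                }
        }
        return (0);             /* period too long for the available divisors */
}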
*/ tbprd = 0; for (clkdiv = 0; clkdiv < 8; ++clkdiv) { const u_int cd = 1 << clkdiv; for (hspclkdiv = 0; hspclkdiv < 8; ++hspclkdiv) { const u_int cdhs = max(1, hspclkdiv * 2); pwmclk = PWM_CLOCK / (cd * cdhs); pwmtick = NS_PER_SEC / pwmclk; if (period / pwmtick < 65536) { tbprd = period / pwmtick; break; } } if (tbprd != 0) break; } /* Handle requested period too long for available clock divisors. */ if (tbprd == 0) return (false); /* * If anything has changed from the current settings, reprogram the * clock divisors and period register. */ if (sc->sc_clkfreq != pwmclk || sc->sc_clktick != pwmtick || sc->sc_period != tbprd * pwmtick) { sc->sc_clkfreq = pwmclk; sc->sc_clktick = pwmtick; sc->sc_period = tbprd * pwmtick; PWM_LOCK_ASSERT(sc); regval = EPWM_READ2(sc, EPWM_TBCTL); regval &= ~(TBCTL_CLKDIV_MASK | TBCTL_HSPCLKDIV_MASK); regval |= TBCTL_CLKDIV(clkdiv) | TBCTL_HSPCLKDIV(hspclkdiv); EPWM_WRITE2(sc, EPWM_TBCTL, regval); EPWM_WRITE2(sc, EPWM_TBPRD, tbprd - 1); #if 0 device_printf(sc->sc_dev, "clkdiv %u hspclkdiv %u tbprd %u " "clkfreq %u Hz clktick %u ns period got %u requested %u\n", clkdiv, hspclkdiv, tbprd - 1, sc->sc_clkfreq, sc->sc_clktick, sc->sc_period, period); #endif /* * If the period changed, that invalidates the current CMP * registers (duty values), just zero them out. */ am335x_ehrpwm_cfg_duty(sc, 0, 0); am335x_ehrpwm_cfg_duty(sc, 1, 0); } return (true); } static int am335x_ehrpwm_channel_count(device_t dev, u_int *nchannel) { *nchannel = NUM_CHANNELS; return (0); } static int am335x_ehrpwm_channel_config(device_t dev, u_int channel, u_int period, u_int duty) { struct am335x_ehrpwm_softc *sc; bool status; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); status = am335x_ehrpwm_cfg_period(sc, period); if (status) am335x_ehrpwm_cfg_duty(sc, channel, duty); PWM_UNLOCK(sc); return (status ? 
0 : EINVAL); } static int am335x_ehrpwm_channel_get_config(device_t dev, u_int channel, u_int *period, u_int *duty) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); *period = sc->sc_period; *duty = sc->sc_channels[channel].duty; return (0); } static int am335x_ehrpwm_channel_set_flags(device_t dev, u_int channel, uint32_t flags) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); if (flags & PWM_POLARITY_INVERTED) { sc->sc_channels[channel].inverted = true; /* Action-Qualifier 15.2.2.5 */ if (channel == 0) EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_CLEAR | AQCTL_CAU_SET)); else EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_CLEAR | AQCTL_CBU_SET)); } else { sc->sc_channels[channel].inverted = false; if (channel == 0) EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_SET | AQCTL_CAU_CLEAR)); else EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_SET | AQCTL_CBU_CLEAR)); } PWM_UNLOCK(sc); return (0); } static int am335x_ehrpwm_channel_get_flags(device_t dev, u_int channel, uint32_t *flags) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_channels[channel].inverted == true) *flags = PWM_POLARITY_INVERTED; else *flags = 0; return (0); } static int am335x_ehrpwm_channel_enable(device_t dev, u_int channel, bool enable) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); PWM_LOCK(sc); am335x_ehrpwm_cfg_enable(sc, channel, enable); PWM_UNLOCK(sc); return (0); } static int am335x_ehrpwm_channel_is_enabled(device_t dev, u_int channel, bool *enabled) { struct am335x_ehrpwm_softc *sc; if (channel >= NUM_CHANNELS) return (EINVAL); sc = device_get_softc(dev); *enabled = sc->sc_channels[channel].enabled; return (0); } static int am335x_ehrpwm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "AM335x EHRPWM"); return (BUS_PROBE_DEFAULT); } static int am335x_ehrpwm_attach(device_t dev) { struct am335x_ehrpwm_softc *sc; uint16_t reg; sc = device_get_softc(dev); sc->sc_dev = dev; PWM_LOCK_INIT(sc); sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(dev, "cannot allocate memory resources\n"); goto fail; } /* CONFIGURE EPWM */ reg = EPWM_READ2(sc, EPWM_TBCTL); reg &= ~(TBCTL_CLKDIV_MASK | TBCTL_HSPCLKDIV_MASK); EPWM_WRITE2(sc, EPWM_TBCTL, reg); EPWM_WRITE2(sc, EPWM_TBPRD, DEFAULT_PWM_PERIOD - 1); EPWM_WRITE2(sc, EPWM_CMPA, 0); EPWM_WRITE2(sc, EPWM_CMPB, 0); /* Action-Qualifier 15.2.2.5 */ EPWM_WRITE2(sc, EPWM_AQCTLA, (AQCTL_ZRO_SET | AQCTL_CAU_CLEAR)); EPWM_WRITE2(sc, EPWM_AQCTLB, (AQCTL_ZRO_SET | AQCTL_CBU_CLEAR)); /* Dead band 15.2.2.6 */ reg = EPWM_READ2(sc, EPWM_DBCTL); reg &= ~DBCTL_MASK; reg |= DBCTL_BYPASS; EPWM_WRITE2(sc, EPWM_DBCTL, reg); /* PWM-chopper described in 15.2.2.7 */ /* According to the TRM it is used in pulse-transformer-based gate * drivers to control the power switching elements. */ reg = EPWM_READ2(sc, EPWM_PCCTL); reg &= ~PCCTL_CHPEN_MASK; reg |= PCCTL_CHPEN_DISABLE; EPWM_WRITE2(sc, EPWM_PCCTL, PCCTL_CHPEN_DISABLE); /* Trip zones are described in 15.2.2.8. * Essentially they are used to detect faults and can be configured * to react to such faults.
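For reference, the duty math used by am335x_ehrpwm_cfg_duty() earlier reduces to a one-liner: convert nanoseconds to time-base ticks and clamp a non-zero request to at least one tick so it produces a visible pulse. A hypothetical standalone version:

/* Sketch: duty cycle in ns to a counter-compare value in TB ticks. */
static uint16_t
ex_duty_to_cmp(u_int duty_ns, u_int clktick_ns)
{
        if (duty_ns == 0)
                return (0);
        /*
         * At least one tick; the caller guarantees the period (and hence
         * the duty) already fits in 16 bits.
         */
        return (max(1, duty_ns / clktick_ns));
}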
*/ /* disable TZn as one-shot / CVC trip source 15.2.4.18 */ EPWM_WRITE2(sc, EPWM_TZSEL, 0x0); /* reg described in 15.2.4.19 */ EPWM_WRITE2(sc, EPWM_TZCTL, 0xf); reg = EPWM_READ2(sc, EPWM_TZFLG); /* START EPWM */ reg &= ~TBCTL_CTRMODE_MASK; reg |= TBCTL_CTRMODE_UP | TBCTL_FREERUN; EPWM_WRITE2(sc, EPWM_TBCTL, reg); if ((sc->sc_busdev = device_add_child(dev, "pwmbus", -1)) == NULL) { device_printf(dev, "Cannot add child pwmbus\n"); // This driver can still do things even without the bus child. } - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); fail: PWM_LOCK_DESTROY(sc); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); return(ENXIO); } static int am335x_ehrpwm_detach(device_t dev) { struct am335x_ehrpwm_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(sc->sc_dev)) != 0) return (error); PWM_LOCK(sc); if (sc->sc_busdev != NULL) device_delete_child(dev, sc->sc_busdev); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); PWM_UNLOCK(sc); PWM_LOCK_DESTROY(sc); return (0); } static phandle_t am335x_ehrpwm_get_node(device_t bus, device_t dev) { /* * Share our controller node with our pwmbus child; it instantiates * devices by walking the children contained within our node. */ return ofw_bus_get_node(bus); } static device_method_t am335x_ehrpwm_methods[] = { DEVMETHOD(device_probe, am335x_ehrpwm_probe), DEVMETHOD(device_attach, am335x_ehrpwm_attach), DEVMETHOD(device_detach, am335x_ehrpwm_detach), /* ofw_bus_if */ DEVMETHOD(ofw_bus_get_node, am335x_ehrpwm_get_node), /* pwm interface */ DEVMETHOD(pwmbus_channel_count, am335x_ehrpwm_channel_count), DEVMETHOD(pwmbus_channel_config, am335x_ehrpwm_channel_config), DEVMETHOD(pwmbus_channel_get_config, am335x_ehrpwm_channel_get_config), DEVMETHOD(pwmbus_channel_set_flags, am335x_ehrpwm_channel_set_flags), DEVMETHOD(pwmbus_channel_get_flags, am335x_ehrpwm_channel_get_flags), DEVMETHOD(pwmbus_channel_enable, am335x_ehrpwm_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, am335x_ehrpwm_channel_is_enabled), DEVMETHOD_END }; static driver_t am335x_ehrpwm_driver = { "pwm", am335x_ehrpwm_methods, sizeof(struct am335x_ehrpwm_softc), }; DRIVER_MODULE(am335x_ehrpwm, am335x_pwmss, am335x_ehrpwm_driver, 0, 0); MODULE_VERSION(am335x_ehrpwm, 1); MODULE_DEPEND(am335x_ehrpwm, am335x_pwmss, 1, 1, 1); MODULE_DEPEND(am335x_ehrpwm, pwmbus, 1, 1, 1); diff --git a/sys/arm/ti/am335x/am335x_pwmss.c b/sys/arm/ti/am335x/am335x_pwmss.c index 8766c706b807..9d584910ee58 100644 --- a/sys/arm/ti/am335x/am335x_pwmss.c +++ b/sys/arm/ti/am335x/am335x_pwmss.c @@ -1,175 +1,175 @@ /*- * Copyright (c) 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
diff --git a/sys/arm/ti/am335x/am335x_pwmss.c b/sys/arm/ti/am335x/am335x_pwmss.c
index 8766c706b807..9d584910ee58 100644
--- a/sys/arm/ti/am335x/am335x_pwmss.c
+++ b/sys/arm/ti/am335x/am335x_pwmss.c
@@ -1,175 +1,175 @@
/*- * Copyright (c) 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include "am335x_pwm.h" #include "am335x_scm.h"
#define PWMSS_IDVER 0x00 #define PWMSS_SYSCONFIG 0x04 #define PWMSS_CLKCONFIG 0x08 #define CLKCONFIG_EPWMCLK_EN (1 << 8) #define PWMSS_CLKSTATUS 0x0C
/* TRM chapter 2 memory map table 2-3 + VER register location */ #define PWMSS_REV_0 0x0000 #define PWMSS_REV_1 0x2000 #define PWMSS_REV_2 0x4000
static device_probe_t am335x_pwmss_probe; static device_attach_t am335x_pwmss_attach; static device_detach_t am335x_pwmss_detach;
struct am335x_pwmss_softc { struct simplebus_softc sc_simplebus; device_t sc_dev; struct syscon *syscon; };
static device_method_t am335x_pwmss_methods[] = { DEVMETHOD(device_probe, am335x_pwmss_probe), DEVMETHOD(device_attach, am335x_pwmss_attach), DEVMETHOD(device_detach, am335x_pwmss_detach), DEVMETHOD_END };
static int am335x_pwmss_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,am33xx-pwmss")) return (ENXIO); device_set_desc(dev, "AM335x PWM"); return (BUS_PROBE_DEFAULT); }
static int am335x_pwmss_attach(device_t dev) { struct am335x_pwmss_softc *sc; uint32_t reg, id; uint64_t rev_address; phandle_t node, opp_table; sc = device_get_softc(dev); sc->sc_dev = dev; /* FIXME: For now; Go and kidnap syscon from opp-table */ opp_table = OF_finddevice("/opp-table"); if (opp_table == -1) { device_printf(dev, "Can't find /opp-table\n"); return (ENXIO); } if (!OF_hasprop(opp_table, "syscon")) { device_printf(dev, "/opp-table doesn't have required syscon property\n"); return (ENXIO); } if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &sc->syscon) != 0) { device_printf(dev, "Failed to get syscon\n"); return (ENXIO); } ti_sysc_clock_enable(device_get_parent(dev)); rev_address = ti_sysc_get_rev_address(device_get_parent(dev)); switch (rev_address) { case PWMSS_REV_0: id = 0; break; case PWMSS_REV_1: id = 1; break; case PWMSS_REV_2: id = 2; break; default: device_printf(dev, "unknown PWMSS revision register offset\n"); return (ENXIO); } reg = SYSCON_READ_4(sc->syscon, SCM_PWMSS_CTRL); reg |= (1 << id); SYSCON_WRITE_4(sc->syscon, SCM_PWMSS_CTRL, reg); node = ofw_bus_get_node(dev); if (node == -1) return (ENXIO); simplebus_init(dev, node); /* * Allow devices to identify. */
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/* * Now walk the OFW tree and attach top-level devices.
*/ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); } static int am335x_pwmss_detach(device_t dev) { return (0); } DEFINE_CLASS_1(am335x_pwmss, am335x_pwmss_driver, am335x_pwmss_methods, sizeof(struct am335x_pwmss_softc), simplebus_driver); DRIVER_MODULE(am335x_pwmss, simplebus, am335x_pwmss_driver, 0, 0); MODULE_VERSION(am335x_pwmss, 1); MODULE_DEPEND(am335x_pwmss, ti_sysc, 1, 1, 1); diff --git a/sys/arm/ti/cpsw/if_cpsw.c b/sys/arm/ti/cpsw/if_cpsw.c index 8012f8441e76..7756940bb28d 100644 --- a/sys/arm/ti/cpsw/if_cpsw.c +++ b/sys/arm/ti/cpsw/if_cpsw.c @@ -1,3016 +1,3016 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Damjan Marion * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * TI Common Platform Ethernet Switch (CPSW) Driver * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. * * This controller is documented in the AM335x Technical Reference * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. * * It is basically a single Ethernet port (port 0) wired internally to * a 3-port store-and-forward switch connected to two independent * "sliver" controllers (port 1 and port 2). You can operate the * controller in a variety of different ways by suitably configuring * the slivers and the Address Lookup Engine (ALE) that routes packets * between the ports. * * This code was developed and tested on a BeagleBone with * an AM335x SoC. */ #include #include "opt_cpsw.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include #include #include #include #include #include #ifdef CPSW_ETHERSWITCH #include #include "etherswitch_if.h" #endif #include "if_cpswreg.h" #include "if_cpswvar.h" #include "miibus_if.h" /* Device probe/attach/detach. 
*/ static int cpsw_probe(device_t); static int cpsw_attach(device_t); static int cpsw_detach(device_t); static int cpswp_probe(device_t); static int cpswp_attach(device_t); static int cpswp_detach(device_t); static phandle_t cpsw_get_node(device_t, device_t); /* Device Init/shutdown. */ static int cpsw_shutdown(device_t); static void cpswp_init(void *); static void cpswp_init_locked(void *); static void cpswp_stop_locked(struct cpswp_softc *); /* Device Suspend/Resume. */ static int cpsw_suspend(device_t); static int cpsw_resume(device_t); /* Ioctl. */ static int cpswp_ioctl(if_t, u_long command, caddr_t data); static int cpswp_miibus_readreg(device_t, int phy, int reg); static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); static void cpswp_miibus_statchg(device_t); /* Send/Receive packets. */ static void cpsw_intr_rx(void *arg); static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); static void cpsw_rx_enqueue(struct cpsw_softc *); static void cpswp_start(if_t); static void cpsw_intr_tx(void *); static void cpswp_tx_enqueue(struct cpswp_softc *); static int cpsw_tx_dequeue(struct cpsw_softc *); /* Misc interrupts and watchdog. */ static void cpsw_intr_rx_thresh(void *); static void cpsw_intr_misc(void *); static void cpswp_tick(void *); static void cpswp_ifmedia_sts(if_t, struct ifmediareq *); static int cpswp_ifmedia_upd(if_t); static void cpsw_tx_watchdog(void *); /* ALE support */ static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); static void cpsw_ale_dump_table(struct cpsw_softc *); static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, int); static int cpswp_ale_update_addresses(struct cpswp_softc *, int); /* Statistics and sysctls. */ static void cpsw_add_sysctls(struct cpsw_softc *); static void cpsw_stats_collect(struct cpsw_softc *); static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); #ifdef CPSW_ETHERSWITCH static etherswitch_info_t *cpsw_getinfo(device_t); static int cpsw_getport(device_t, etherswitch_port_t *); static int cpsw_setport(device_t, etherswitch_port_t *); static int cpsw_getconf(device_t, etherswitch_conf_t *); static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_readreg(device_t, int); static int cpsw_writereg(device_t, int, int); static int cpsw_readphy(device_t, int, int); static int cpsw_writephy(device_t, int, int, int); #endif /* * Arbitrary limit on number of segments in an mbuf to be transmitted. * Packets with more segments than this will be defragmented before * they are queued. */ #define CPSW_TXFRAGS 16 /* Shared resources. 
*/ static device_method_t cpsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpsw_probe), DEVMETHOD(device_attach, cpsw_attach), DEVMETHOD(device_detach, cpsw_detach), DEVMETHOD(device_shutdown, cpsw_shutdown), DEVMETHOD(device_suspend, cpsw_suspend), DEVMETHOD(device_resume, cpsw_resume), /* Bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, cpsw_get_node), #ifdef CPSW_ETHERSWITCH /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), DEVMETHOD(etherswitch_readreg, cpsw_readreg), DEVMETHOD(etherswitch_writereg, cpsw_writereg), DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), DEVMETHOD(etherswitch_getport, cpsw_getport), DEVMETHOD(etherswitch_setport, cpsw_setport), DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), DEVMETHOD(etherswitch_getconf, cpsw_getconf), #endif DEVMETHOD_END }; static driver_t cpsw_driver = { "cpswss", cpsw_methods, sizeof(struct cpsw_softc), }; DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0); /* Port/Slave resources. */ static device_method_t cpswp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpswp_probe), DEVMETHOD(device_attach, cpswp_attach), DEVMETHOD(device_detach, cpswp_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), DEVMETHOD_END }; static driver_t cpswp_driver = { "cpsw", cpswp_methods, sizeof(struct cpswp_softc), }; #ifdef CPSW_ETHERSWITCH DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0); MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); #endif DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); MODULE_DEPEND(cpsw, miibus, 1, 1, 1); #ifdef CPSW_ETHERSWITCH static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; #endif static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; static struct resource_spec irq_res_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct { void (*cb)(void *); } cpsw_intr_cb[] = { { cpsw_intr_rx_thresh }, { cpsw_intr_rx }, { cpsw_intr_tx }, { cpsw_intr_misc }, }; /* Number of entries here must match size of stats * array in struct cpswp_softc. 
*/ static struct cpsw_stat { int reg; char *oid; } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { {0x00, "GoodRxFrames"}, {0x04, "BroadcastRxFrames"}, {0x08, "MulticastRxFrames"}, {0x0C, "PauseRxFrames"}, {0x10, "RxCrcErrors"}, {0x14, "RxAlignErrors"}, {0x18, "OversizeRxFrames"}, {0x1c, "RxJabbers"}, {0x20, "ShortRxFrames"}, {0x24, "RxFragments"}, {0x30, "RxOctets"}, {0x34, "GoodTxFrames"}, {0x38, "BroadcastTxFrames"}, {0x3c, "MulticastTxFrames"}, {0x40, "PauseTxFrames"}, {0x44, "DeferredTxFrames"}, {0x48, "CollisionsTxFrames"}, {0x4c, "SingleCollisionTxFrames"}, {0x50, "MultipleCollisionTxFrames"}, {0x54, "ExcessiveCollisions"}, {0x58, "LateCollisions"}, {0x5c, "TxUnderrun"}, {0x60, "CarrierSenseErrors"}, {0x64, "TxOctets"}, {0x68, "RxTx64OctetFrames"}, {0x6c, "RxTx65to127OctetFrames"}, {0x70, "RxTx128to255OctetFrames"}, {0x74, "RxTx256to511OctetFrames"}, {0x78, "RxTx512to1024OctetFrames"}, {0x7c, "RxTx1024upOctetFrames"}, {0x80, "NetOctets"}, {0x84, "RxStartOfFrameOverruns"}, {0x88, "RxMiddleOfFrameOverruns"}, {0x8c, "RxDmaOverruns"} }; /* * Basic debug support. */ static void cpsw_debugf_head(const char *funcname) { int t = (int)(time_second % (24 * 60 * 60)); printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); } static void cpsw_debugf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } #define CPSW_DEBUGF(_sc, a) do { \ if ((_sc)->debug) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) /* * Locking macros */ #define CPSW_TX_LOCK(sc) do { \ mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->tx.lock); \ } while (0) #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_PORT_LOCK(_sc) do { \ mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ mtx_lock(&(_sc)->lock); \ } while (0) #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) /* * Read/Write macros */ #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) #define cpsw_write_4(_sc, _reg, _val) \ bus_write_4((_sc)->mem_res, (_reg), (_val)) #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) #define cpsw_cpdma_bd_paddr(sc, slot) \ BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) #define cpsw_cpdma_read_bd(sc, slot, val) \ bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd(sc, slot, val) \ bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ bus_write_2(sc->mem_res, slot->bd_offset + 14, val) #define cpsw_cpdma_read_bd_flags(sc, slot) \ bus_read_2(sc->mem_res, slot->bd_offset + 14) #define cpsw_write_hdp_slot(sc, queue, slot) \ cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) #define cpsw_read_cp(sc, queue) \ cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) #define cpsw_write_cp(sc, queue, val) \ cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) #define cpsw_write_cp_slot(sc, queue, slot) \ cpsw_write_cp(sc, queue, 
cpsw_cpdma_bd_paddr(sc, slot)) #if 0 /* XXX temporary function versions for debugging. */ static void cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t reg = queue->hdp_offset; uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); cpsw_write_4(sc, reg, v); } static void cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); cpsw_write_cp(sc, queue, v); } #endif /* * Expanded dump routines for verbose debugging. */ static void cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", "Port0"}; struct cpsw_cpdma_bd bd; const char *sep; int i; cpsw_cpdma_read_bd(sc, slot, &bd); printf("BD Addr : 0x%08x Next : 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); printf(" Flags: "); sep = ""; for (i = 0; i < 16; ++i) { if (bd.flags & (1 << (15 - i))) { printf("%s%s", sep, flags[i]); sep = ","; } } printf("\n"); if (slot->mbuf) { printf(" Ether: %14D\n", (char *)(slot->mbuf->m_data), " "); printf(" Packet: %16D\n", (char *)(slot->mbuf->m_data) + 14, " "); } } #define CPSW_DUMP_SLOT(cs, slot) do { \ IF_DEBUG(sc) { \ cpsw_dump_slot(sc, slot); \ } \ } while (0) static void cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) { struct cpsw_slot *slot; int i = 0; int others = 0; STAILQ_FOREACH(slot, q, next) { if (i > CPSW_TXFRAGS) ++others; else cpsw_dump_slot(sc, slot); ++i; } if (others) printf(" ... and %d more.\n", others); printf("\n"); } #define CPSW_DUMP_QUEUE(sc, q) do { \ IF_DEBUG(sc) { \ cpsw_dump_queue(sc, q); \ } \ } while (0) static void cpsw_init_slots(struct cpsw_softc *sc) { struct cpsw_slot *slot; int i; STAILQ_INIT(&sc->avail); /* Put the slot descriptors onto the global avail list. */ for (i = 0; i < nitems(sc->_slots); i++) { slot = &sc->_slots[i]; slot->bd_offset = cpsw_cpdma_bd_offset(i); STAILQ_INSERT_TAIL(&sc->avail, slot, next); } } static int cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) { const int max_slots = nitems(sc->_slots); struct cpsw_slot *slot; int i; if (requested < 0) requested = max_slots; for (i = 0; i < requested; ++i) { slot = STAILQ_FIRST(&sc->avail); if (slot == NULL) return (0); if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { device_printf(sc->dev, "failed to create dmamap\n"); return (ENOMEM); } STAILQ_REMOVE_HEAD(&sc->avail, next); STAILQ_INSERT_TAIL(&queue->avail, slot, next); ++queue->avail_queue_len; ++queue->queue_slots; } return (0); } static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { int error __diagused; if (slot->dmamap) { if (slot->mbuf) bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); KASSERT(error == 0, ("Mapping still active")); slot->dmamap = NULL; } if (slot->mbuf) { m_freem(slot->mbuf); slot->mbuf = NULL; } } static void cpsw_reset(struct cpsw_softc *sc) { int i; callout_stop(&sc->watchdog.callout); /* Reset RMII/RGMII wrapper. 
*/ cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) ; /* Disable TX and RX interrupts for all cores. */ for (i = 0; i < 3; ++i) { cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); } /* Reset CPSW subsystem. */ cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) ; /* Reset sliver ports 1 and 2 */ for (i = 0; i < 2; i++) { /* Reset */ cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) ; } /* Reset DMA controller. */ cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) ; /* Disable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); /* Clear all queues. */ for (i = 0; i < 8; i++) { cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); } /* Clear all interrupt masks */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); }
static void cpsw_init(struct cpsw_softc *sc) { struct cpsw_slot *slot; uint32_t reg; /* Disable the interrupt pacing. */ reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); /* Clear ALE */ cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); /* Enable ALE */ reg = CPSW_ALE_CTL_ENABLE; if (sc->dualemac) reg |= CPSW_ALE_CTL_VLAN_AWARE; cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); /* Set Host Port Mapping. */ cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); /* Initialize ALE: set host port to forwarding(3). */ cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpsw_write_4(sc, CPSW_SS_PTYPE, 0); /* Enable statistics for ports 0, 1 and 2 */ cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); /* Turn off flow control. */ cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); /* Offset RX buffers by two bytes so the IP header is 4-byte aligned. */ cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); /* Initialize RX Buffer Descriptors */ cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); /* Enable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); /* Enable Interrupts for core 0 */ cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); /* Enable host Error Interrupt */ cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); /* Enable interrupts for RX and TX on Channel 0 */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); /* Select MII in GMII_SEL, Internal Delay mode */ //ti_scm_reg_write_4(0x650, 0); /* Initialize active queues. 
*/ slot = STAILQ_FIRST(&sc->tx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->tx, slot); slot = STAILQ_FIRST(&sc->rx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->rx, slot); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); /* Activate network interface. */ sc->rx.running = 1; sc->tx.running = 1; sc->watchdog.timer = 0; callout_init(&sc->watchdog.callout, 0); callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * Device Probe, Attach, Detach. * */ static int cpsw_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,cpsw")) return (ENXIO); device_set_desc(dev, "3-port Switch Ethernet Subsystem"); return (BUS_PROBE_DEFAULT); } static int cpsw_intr_attach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (bus_setup_intr(sc->dev, sc->irq_res[i], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { return (-1); } } return (0); } static void cpsw_intr_detach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (sc->ih_cookie[i]) { bus_teardown_intr(sc->dev, sc->irq_res[i], sc->ih_cookie[i]); } } } static int cpsw_get_fdt_data(struct cpsw_softc *sc, int port) { char *name; int len, phy, vlan; pcell_t phy_id[3], vlan_id; phandle_t child; unsigned long mdio_child_addr; /* Find any slave with phy-handle/phy_id */ phy = -1; vlan = -1; for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { if (OF_getprop_alloc(child, "name", (void **)&name) < 0) continue; if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) { OF_prop_free(name); continue; } OF_prop_free(name); if (mdio_child_addr != slave_mdio_addr[port] && mdio_child_addr != (slave_mdio_addr[port] & 0xFFF)) continue; if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0){ /* Users with old DTB will have phy_id instead */ phy = -1; len = OF_getproplen(child, "phy_id"); if (len / sizeof(pcell_t) == 2) { /* Get phy address from fdt */ if (OF_getencprop(child, "phy_id", phy_id, len) > 0) phy = phy_id[1]; } } len = OF_getproplen(child, "dual_emac_res_vlan"); if (len / sizeof(pcell_t) == 1) { /* Get phy address from fdt */ if (OF_getencprop(child, "dual_emac_res_vlan", &vlan_id, len) > 0) { vlan = vlan_id; } } break; } if (phy == -1) return (ENXIO); sc->port[port].phy = phy; sc->port[port].vlan = vlan; return (0); } static int cpsw_attach(device_t dev) { int error, i; struct cpsw_softc *sc; uint32_t reg; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(dev); getbinuptime(&sc->attach_uptime); if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, sizeof(sc->active_slave)) <= 0) { sc->active_slave = 0; } if (sc->active_slave > 1) sc->active_slave = 1; if (OF_hasprop(sc->node, "dual_emac")) sc->dualemac = 1; for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; if (cpsw_get_fdt_data(sc, i) != 0) { device_printf(dev, "failed to get PHY address from FDT\n"); return (ENXIO); } } /* Initialize mutexes */ mtx_init(&sc->tx.lock, device_get_nameunit(dev), "cpsw TX lock", MTX_DEF); mtx_init(&sc->rx.lock, device_get_nameunit(dev), "cpsw RX lock", MTX_DEF); /* Allocate IRQ resources */ error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); if (error) { device_printf(dev, "could not allocate IRQ resources\n"); cpsw_detach(dev); return (ENXIO); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
&sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(sc->dev, "failed to allocate memory resource\n"); cpsw_detach(dev); return (ENXIO); } reg = cpsw_read_4(sc, CPSW_SS_IDVER); device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), reg & 0xFF, (reg >> 11) & 0x1F); cpsw_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for mbufs. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); return (error); } /* Allocate a NULL buffer for padding. */ sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO); cpsw_init_slots(sc); /* Allocate slots to TX and RX queues. */ STAILQ_INIT(&sc->rx.avail); STAILQ_INIT(&sc->rx.active); STAILQ_INIT(&sc->tx.avail); STAILQ_INIT(&sc->tx.active); // For now: 128 slots to TX, rest to RX. // XXX TODO: start with 32/64 and grow dynamically based on demand. if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { device_printf(dev, "failed to allocate dmamaps\n"); cpsw_detach(dev); return (ENOMEM); } device_printf(dev, "Initial queue size TX=%d RX=%d\n", sc->tx.queue_slots, sc->rx.queue_slots); sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); if (cpsw_intr_attach(sc) == -1) { device_printf(dev, "failed to setup interrupts\n"); cpsw_detach(dev); return (ENXIO); } #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) cpsw_vgroups[i].vid = -1; #endif /* Reset the controller. */ cpsw_reset(sc); cpsw_init(sc); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; sc->port[i].dev = device_add_child(dev, "cpsw", i); if (sc->port[i].dev == NULL) { cpsw_detach(dev); return (ENXIO); } } - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int cpsw_detach(device_t dev) { struct cpsw_softc *sc; int error, i; bus_generic_detach(dev); sc = device_get_softc(dev); for (i = 0; i < CPSW_PORTS; i++) { if (sc->port[i].dev) device_delete_child(dev, sc->port[i].dev); } if (device_is_attached(dev)) { callout_stop(&sc->watchdog.callout); callout_drain(&sc->watchdog.callout); } /* Stop and release all interrupts */ cpsw_intr_detach(sc); /* Free dmamaps and mbufs */ for (i = 0; i < nitems(sc->_slots); ++i) cpsw_free_slot(sc, &sc->_slots[i]); /* Free null padding buffer. */ if (sc->nullpad) free(sc->nullpad, M_DEVBUF); /* Free DMA tag */ if (sc->mbuf_dtag) { error = bus_dma_tag_destroy(sc->mbuf_dtag); KASSERT(error == 0, ("Unable to destroy DMA tag")); } /* Free IO memory handler */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resources(dev, irq_res_spec, sc->irq_res); /* Destroy mutexes */ mtx_destroy(&sc->rx.lock); mtx_destroy(&sc->tx.lock); /* Detach the switch device, if present. */ error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } static phandle_t cpsw_get_node(device_t bus, device_t dev) { /* Share controller node with port device. 
*/ return (ofw_bus_get_node(bus)); }
static int cpswp_probe(device_t dev) { if (device_get_unit(dev) > 1) { device_printf(dev, "Only two ports are supported.\n"); return (ENXIO); } device_set_desc(dev, "Ethernet Switch Port"); return (BUS_PROBE_DEFAULT); }
static int cpswp_attach(device_t dev) { int error; if_t ifp; struct cpswp_softc *sc; uint32_t reg; uint8_t mac_addr[ETHER_ADDR_LEN]; phandle_t opp_table; struct syscon *syscon; sc = device_get_softc(dev); sc->dev = dev; sc->pdev = device_get_parent(dev); sc->swsc = device_get_softc(sc->pdev); sc->unit = device_get_unit(dev); sc->phy = sc->swsc->port[sc->unit].phy; sc->vlan = sc->swsc->port[sc->unit].vlan; if (sc->swsc->dualemac && sc->vlan == -1) sc->vlan = sc->unit + 1; if (sc->unit == 0) { sc->physel = MDIOUSERPHYSEL0; sc->phyaccess = MDIOUSERACCESS0; } else { sc->physel = MDIOUSERPHYSEL1; sc->phyaccess = MDIOUSERACCESS1; } mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", MTX_DEF); /* Allocate network interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if_initname(ifp, device_get_name(sc->dev), sc->unit); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST); if_setcapenable(ifp, if_getcapabilities(ifp)); if_setinitfn(ifp, cpswp_init); if_setstartfn(ifp, cpswp_start); if_setioctlfn(ifp, cpswp_ioctl); if_setsendqlen(ifp, sc->swsc->tx.queue_slots); if_setsendqready(ifp); /* FIXME: For now; Go and kidnap syscon from opp-table */ /* ti,cpsw actually has an optional syscon reference, but only for am33xx? */ opp_table = OF_finddevice("/opp-table"); if (opp_table == -1) { device_printf(dev, "Can't find /opp-table\n"); cpswp_detach(dev); return (ENXIO); } if (!OF_hasprop(opp_table, "syscon")) { device_printf(dev, "/opp-table doesn't have required syscon property\n"); cpswp_detach(dev); return (ENXIO); } if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) { device_printf(dev, "Failed to get syscon\n"); cpswp_detach(dev); return (ENXIO); } /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8); mac_addr[0] = reg & 0xFF; mac_addr[1] = (reg >> 8) & 0xFF; mac_addr[2] = (reg >> 16) & 0xFF; mac_addr[3] = (reg >> 24) & 0xFF; /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8); mac_addr[4] = reg & 0xFF; mac_addr[5] = (reg >> 8) & 0xFF; error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); if (error) { device_printf(dev, "attaching PHYs failed\n"); cpswp_detach(dev); return (error); } sc->mii = device_get_softc(sc->miibus); /* Select PHY and enable interrupts */ cpsw_write_4(sc->swsc, sc->physel, MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); ether_ifattach(sc->ifp, mac_addr); callout_init(&sc->mii_callout, 0); return (0); }
static int cpswp_detach(device_t dev) { struct cpswp_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); if (device_is_attached(dev)) { ether_ifdetach(sc->ifp); CPSW_PORT_LOCK(sc); cpswp_stop_locked(sc); CPSW_PORT_UNLOCK(sc); callout_drain(&sc->mii_callout); } bus_generic_detach(dev); if_free(sc->ifp); mtx_destroy(&sc->lock); return (0); }
/* * * Init/Shutdown. 
* */ static int cpsw_ports_down(struct cpsw_softc *sc) { struct cpswp_softc *psc; if_t ifp1, ifp2; if (!sc->dualemac) return (1); psc = device_get_softc(sc->port[0].dev); ifp1 = psc->ifp; psc = device_get_softc(sc->port[1].dev); ifp2 = psc->ifp; if ((if_getflags(ifp1) & IFF_UP) == 0 && (if_getflags(ifp2) & IFF_UP) == 0) return (1); return (0); } static void cpswp_init(void *arg) { struct cpswp_softc *sc = arg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); cpswp_init_locked(arg); CPSW_PORT_UNLOCK(sc); } static void cpswp_init_locked(void *arg) { #ifdef CPSW_ETHERSWITCH int i; #endif struct cpswp_softc *sc = arg; if_t ifp; uint32_t reg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); ifp = sc->ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; getbinuptime(&sc->init_uptime); if (!sc->swsc->rx.running && !sc->swsc->tx.running) { /* Reset the controller. */ cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } /* Set Slave Mapping. */ cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 0x33221100); cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* Enable MAC RX/TX modules. */ /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg |= CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); /* Initialize ALE: set port to forwarding, initialize addrs */ cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpswp_ale_update_addresses(sc, 1); if (sc->swsc->dualemac) { /* Set Port VID. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), sc->vlan & 0xfff); cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) { if (cpsw_vgroups[i].vid != -1) continue; cpsw_vgroups[i].vid = sc->vlan; break; } #endif } mii_mediachg(sc->mii); callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } static int cpsw_shutdown(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static void cpsw_rx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_RX_LOCK(sc); CPSW_DEBUGF(sc, ("starting RX teardown")); sc->rx.teardown = 1; cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); CPSW_RX_UNLOCK(sc); while (sc->rx.running) { if (++i > 10) { device_printf(sc->dev, "Unable to cleanly shutdown receiver\n"); return; } DELAY(200); } if (!sc->rx.running) CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); } static void cpsw_tx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_TX_LOCK(sc); CPSW_DEBUGF(sc, ("starting TX teardown")); /* Start the TX queue teardown if queue is not empty. 
*/ if (STAILQ_FIRST(&sc->tx.active) != NULL) cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); else sc->tx.teardown = 1; cpsw_tx_dequeue(sc); while (sc->tx.running && ++i < 10) { DELAY(200); cpsw_tx_dequeue(sc); } if (sc->tx.running) { device_printf(sc->dev, "Unable to cleanly shutdown transmitter\n"); } CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)", i, sc->tx.active_queue_len)); CPSW_TX_UNLOCK(sc); } static void cpswp_stop_locked(struct cpswp_softc *sc) { if_t ifp; uint32_t reg; ifp = sc->ifp; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); /* Stop ticker */ callout_stop(&sc->mii_callout); /* Tear down the RX/TX queues. */ if (cpsw_ports_down(sc->swsc)) { cpsw_rx_teardown(sc->swsc); cpsw_tx_teardown(sc->swsc); } /* Stop MAC RX/TX modules. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg &= ~CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); if (cpsw_ports_down(sc->swsc)) { /* Capture stats before we reset controller. */ cpsw_stats_collect(sc->swsc); cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } } /* * Suspend/Resume. */ static int cpsw_suspend(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static int cpsw_resume(device_t dev) { struct cpsw_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); return (0); } /* * * IOCTL * */ static void cpsw_set_promisc(struct cpswp_softc *sc, int set) { uint32_t reg; /* * Enabling promiscuous mode requires ALE_BYPASS to be enabled. * That disables the ALE forwarding logic and causes every * packet to be sent only to the host port. In bypass mode, * the ALE processes host port transmit packets the same as in * normal mode. 
*/ reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); reg &= ~CPSW_ALE_CTL_BYPASS; if (set) reg |= CPSW_ALE_CTL_BYPASS; cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); } static void cpsw_set_allmulti(struct cpswp_softc *sc, int set) { if (set) { printf("All-multicast mode unimplemented\n"); } } static int cpswp_ioctl(if_t ifp, u_long command, caddr_t data) { struct cpswp_softc *sc; struct ifreq *ifr; int error; uint32_t changed; error = 0; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; switch (command) { case SIOCSIFCAP: changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); else if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); } error = 0; break; case SIOCSIFFLAGS: CPSW_PORT_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { changed = if_getflags(ifp) ^ sc->if_flags; CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); if (changed & IFF_PROMISC) cpsw_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC); if (changed & IFF_ALLMULTI) cpsw_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI); } else { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: starting up")); cpswp_init_locked(sc); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); cpswp_stop_locked(sc); } sc->if_flags = if_getflags(ifp); CPSW_PORT_UNLOCK(sc); break; case SIOCADDMULTI: cpswp_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: /* Ugh. DELMULTI doesn't provide the specific address being removed, so the best we can do is remove everything and rebuild it all. */ cpswp_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * * MIIBUS * */ static int cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) { uint32_t r, retries = CPSW_MIIBUS_RETRIES; while (--retries) { r = cpsw_read_4(sc, reg); if ((r & MDIO_PHYACCESS_GO) == 0) return (1); DELAY(CPSW_MIIBUS_DELAY); } return (0); } static int cpswp_miibus_readreg(device_t dev, int phy, int reg) { struct cpswp_softc *sc; uint32_t cmd, r; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to read\n"); return (0); } /* Set GO, reg, phy */ cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during read\n"); return (0); } r = cpsw_read_4(sc->swsc, sc->phyaccess); if ((r & MDIO_PHYACCESS_ACK) == 0) { device_printf(dev, "Failed to read from PHY.\n"); r = 0; } return (r & 0xFFFF); } static int cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct cpswp_softc *sc; uint32_t cmd; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to write\n"); return (0); } /* Set GO, WRITE, reg, phy, and value */ cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during write\n"); return (0); } return (0); } static void cpswp_miibus_statchg(device_t dev) { struct cpswp_softc *sc; uint32_t mac_control, reg; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); 
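/*
 * Reprogram the sliver's MACCONTROL register to match the media the
 * PHY just resolved: GIG for gigabit media, IFCTL_A for 100 Mbit/s,
 * and FULLDUPLEX whenever full duplex was negotiated.
 */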
reg = CPSW_SL_MACCONTROL(sc->unit); mac_control = cpsw_read_4(sc->swsc, reg); mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); switch (IFM_SUBTYPE(sc->mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: mac_control |= CPSW_SL_MACTL_GIG; break; case IFM_100_TX: mac_control |= CPSW_SL_MACTL_IFCTL_A; break; } if (sc->mii->mii_media_active & IFM_FDX) mac_control |= CPSW_SL_MACTL_FULLDUPLEX; cpsw_write_4(sc->swsc, reg, mac_control); }
/* * * Transmit/Receive Packets. * */
static void cpsw_intr_rx(void *arg) { struct cpsw_softc *sc; if_t ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); if (sc->rx.teardown) { sc->rx.running = 0; sc->rx.teardown = 0; cpsw_write_cp(sc, &sc->rx, 0xfffffffc); } received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; if_input(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } }
static struct mbuf * cpsw_rx_dequeue(struct cpsw_softc *sc) { int nsegs, port, removed; struct cpsw_cpdma_bd bd; struct cpsw_slot *last, *slot; struct cpswp_softc *psc; struct mbuf *m, *m0, *mb_head, *mb_tail; uint16_t m0_flags; nsegs = 0; m0 = NULL; last = NULL; mb_head = NULL; mb_tail = NULL; removed = 0; /* Pull completed packets off hardware RX queue. */ while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { cpsw_cpdma_read_bd(sc, slot, &bd); /* * Stop on packets still in use by hardware, but do not stop * on packets with the teardown complete flag, they will be * discarded later. */ if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == CPDMA_BD_OWNER) break; last = slot; ++removed; STAILQ_REMOVE_HEAD(&sc->rx.active, next); STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m = slot->mbuf; slot->mbuf = NULL; if (bd.flags & CPDMA_BD_TDOWNCMPLT) { CPSW_DEBUGF(sc, ("RX teardown is complete")); m_freem(m); sc->rx.running = 0; sc->rx.teardown = 0; break; } port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; KASSERT(port >= 0 && port <= 1, ("packet received with invalid port: %d", port)); psc = device_get_softc(sc->port[port].dev); /* Set up mbuf */ m->m_data += bd.bufoff; m->m_len = bd.buflen; if (bd.flags & CPDMA_BD_SOP) { m->m_pkthdr.len = bd.pktlen; m->m_pkthdr.rcvif = psc->ifp; m->m_flags |= M_PKTHDR; m0_flags = bd.flags; m0 = m; } nsegs++; m->m_next = NULL; m->m_nextpkt = NULL; if (bd.flags & CPDMA_BD_EOP && m0 != NULL) { if (m0_flags & CPDMA_BD_PASS_CRC) m_adj(m0, -ETHER_CRC_LEN); m0_flags = 0; m0 = NULL; if (nsegs > sc->rx.longest_chain) sc->rx.longest_chain = nsegs; nsegs = 0; } if ((if_getcapenable(psc->ifp) & IFCAP_RXCSUM) != 0) { /* check for valid CRC by looking into pkt_err[5:4] */ if ((bd.flags & (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) == CPDMA_BD_SOP) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = 0xffff; } } if (STAILQ_FIRST(&sc->rx.active) != NULL && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->rx, STAILQ_FIRST(&sc->rx.active)); sc->rx.queue_restart++; }
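/*
 * Completed packets are linked to one another through m_nextpkt,
 * while the buffers that make up a single multi-segment packet are
 * linked through m_next.  A buffer that arrives without SOP while no
 * packet is in progress is an orphan fragment and is dropped below.
 */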
/* Add mbuf to packet list to be returned. */ if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) { mb_tail->m_nextpkt = m; } else if (mb_tail != NULL) { mb_tail->m_next = m; } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) { if (bootverbose) printf( "%s: %s: discarding fragment packet w/o header\n", __func__, if_name(psc->ifp)); m_freem(m); continue; } else { mb_head = m; } mb_tail = m; } if (removed != 0) { cpsw_write_cp_slot(sc, &sc->rx, last); sc->rx.queue_removes += removed; sc->rx.avail_queue_len += removed; sc->rx.active_queue_len -= removed; if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); } return (mb_head); }
static void cpsw_rx_enqueue(struct cpsw_softc *sc) { bus_dma_segment_t seg[1]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; int error, nsegs, added = 0; /* Register new mbufs with hardware. */ first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { if (first_new_slot == NULL) first_new_slot = slot; if (slot->mbuf == NULL) { slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (slot->mbuf == NULL) { device_printf(sc->dev, "Unable to fill RX queue\n"); break; } slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size; } error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); KASSERT(error == 0, ("DMA error (error=%d)", error)); if (error != 0 || nsegs != 1) { device_printf(sc->dev, "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", __func__, nsegs, error); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); /* Create and submit new rx descriptor. */ if ((next = STAILQ_NEXT(slot, next)) != NULL) bd.next = cpsw_cpdma_bd_paddr(sc, next); else bd.next = 0; bd.bufptr = seg->ds_addr; bd.bufoff = 0; bd.buflen = MCLBYTES - 1; bd.pktlen = bd.buflen; bd.flags = CPDMA_BD_OWNER; cpsw_cpdma_write_bd(sc, slot, &bd); ++added; STAILQ_REMOVE_HEAD(&sc->rx.avail, next); STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); } if (added == 0 || first_new_slot == NULL) return; CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); /* Link new entries to hardware RX queue. */ if (last_old_slot == NULL) { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } else { /* Add buffers to end of current queue. 
*/ cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); } sc->rx.queue_adds += added; sc->rx.avail_queue_len -= added; sc->rx.active_queue_len += added; cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) sc->rx.max_active_queue_len = sc->rx.active_queue_len; } static void cpswp_start(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || sc->swsc->tx.running == 0) { return; } CPSW_TX_LOCK(sc->swsc); cpswp_tx_enqueue(sc); cpsw_tx_dequeue(sc->swsc); CPSW_TX_UNLOCK(sc->swsc); } static void cpsw_intr_tx(void *arg) { struct cpsw_softc *sc; sc = (struct cpsw_softc *)arg; CPSW_TX_LOCK(sc); if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) cpsw_write_cp(sc, &sc->tx, 0xfffffffc); cpsw_tx_dequeue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); CPSW_TX_UNLOCK(sc); } static void cpswp_tx_enqueue(struct cpswp_softc *sc) { bus_dma_segment_t segs[CPSW_TXFRAGS]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; struct mbuf *m0; int error, nsegs, seg, added = 0, padlen; /* Pull pending packets from IF queue and prep them for DMA. */ last = NULL; first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { m0 = if_dequeue(sc->ifp); if (m0 == NULL) break; slot->mbuf = m0; padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len; if (padlen < 0) padlen = 0; else if (padlen > 0) m_append(slot->mbuf, padlen, sc->swsc->nullpad); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); /* If the packet is too fragmented, try to simplify. */ if (error == EFBIG || (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) { bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m0 = m_defrag(slot->mbuf, M_NOWAIT); if (m0 == NULL) { device_printf(sc->dev, "Can't defragment packet; dropping\n"); m_freem(slot->mbuf); } else { CPSW_DEBUGF(sc->swsc, ("Requeueing defragmented packet")); if_sendq_prepend(sc->ifp, m0); } slot->mbuf = NULL; continue; } if (error != 0) { device_printf(sc->dev, "%s: Can't setup DMA (error=%d), dropping packet\n", __func__, error); bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREWRITE); CPSW_DEBUGF(sc->swsc, ("Queueing TX packet: %d segments + %d pad bytes", nsegs, padlen)); if (first_new_slot == NULL) first_new_slot = slot; /* Link from the previous descriptor. */ if (last != NULL) cpsw_cpdma_write_bd_next(sc->swsc, last, slot); slot->ifp = sc->ifp; /* If there is only one segment, the for() loop * gets skipped and the single buffer gets set up * as both SOP and EOP. */ if (nsegs > 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; /* Start by setting up the first buffer. 
*/ bd.bufptr = segs[0].ds_addr; bd.bufoff = 0; bd.buflen = segs[0].ds_len; bd.pktlen = m_length(slot->mbuf, NULL); bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; if (sc->swsc->dualemac) { bd.flags |= CPDMA_BD_TO_PORT; bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); } for (seg = 1; seg < nsegs; ++seg) { /* Save the previous buffer (which isn't EOP) */ cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); slot = STAILQ_FIRST(&sc->swsc->tx.avail); /* Setup next buffer (which isn't SOP) */ if (nsegs > seg + 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; bd.bufptr = segs[seg].ds_addr; bd.bufoff = 0; bd.buflen = segs[seg].ds_len; bd.pktlen = 0; bd.flags = CPDMA_BD_OWNER; } /* Save the final buffer. */ bd.flags |= CPDMA_BD_EOP; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); last = slot; added += nsegs; if (nsegs > sc->swsc->tx.longest_chain) sc->swsc->tx.longest_chain = nsegs; BPF_MTAP(sc->ifp, m0); } if (first_new_slot == NULL) return; /* Attach the list of new buffers to the hardware TX queue. */ if (last_old_slot != NULL && (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & CPDMA_BD_EOQ) == 0) { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, first_new_slot); } else { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } sc->swsc->tx.queue_adds += added; sc->swsc->tx.avail_queue_len -= added; sc->swsc->tx.active_queue_len += added; if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; } CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); } static int cpsw_tx_dequeue(struct cpsw_softc *sc) { struct cpsw_slot *slot, *last_removed_slot = NULL; struct cpsw_cpdma_bd bd; uint32_t flags, removed = 0; /* Pull completed buffers off the hardware TX queue. */ slot = STAILQ_FIRST(&sc->tx.active); while (slot != NULL) { flags = cpsw_cpdma_read_bd_flags(sc, slot); /* TearDown complete is only marked on the SOP for the packet. */ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { sc->tx.teardown = 1; } if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) == (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0) break; /* Hardware is still using this packet. */ bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; if (slot->ifp) { if (sc->tx.teardown == 0) if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); } /* Dequeue any additional buffers used by this packet. */ while (slot != NULL && slot->mbuf == NULL) { STAILQ_REMOVE_HEAD(&sc->tx.active, next); STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); ++removed; last_removed_slot = slot; slot = STAILQ_FIRST(&sc->tx.active); } cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); /* Restart the TX queue if necessary. 
*/ cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); if (slot != NULL && bd.next != 0 && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->tx, slot); sc->tx.queue_restart++; break; } } if (removed != 0) { sc->tx.queue_removes += removed; sc->tx.active_queue_len -= removed; sc->tx.avail_queue_len += removed; if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); } if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { CPSW_DEBUGF(sc, ("TX teardown is complete")); sc->tx.teardown = 0; sc->tx.running = 0; } return (removed); } /* * * Miscellaneous interrupts. * */ static void cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc; if_t ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; if_input(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static void cpsw_intr_misc_host_error(struct cpsw_softc *sc) { uint32_t intstat; uint32_t dmastat; int txerr, rxerr, txchan, rxchan; printf("\n\n"); device_printf(sc->dev, "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); printf("\n\n"); intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); txerr = (dmastat >> 20) & 15; txchan = (dmastat >> 16) & 7; rxerr = (dmastat >> 12) & 15; rxchan = (dmastat >> 8) & 7; switch (txerr) { case 0: break; case 1: printf("SOP error on TX channel %d\n", txchan); break; case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); break; case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); break; case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); break; case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); break; case 6: printf("Packet length error on TX channel %d\n", txchan); break; default: printf("Unknown error on TX channel %d\n", txchan); break; } if (txerr != 0) { printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); cpsw_dump_queue(sc, &sc->tx.active); } switch (rxerr) { case 0: break; case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); break; case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); break; case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); break; case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); break; default: printf("Unknown RX error on RX channel %d\n", rxchan); break; } if (rxerr != 0) { printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); printf("CPSW_CPDMA_RX%d_CP=0x%x\n", rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); cpsw_dump_queue(sc, &sc->rx.active); } printf("\nALE Table\n"); cpsw_ale_dump_table(sc); // XXX do something useful here?? panic("CPSW HOST ERROR INTERRUPT"); // Suppress this interrupt in the future. 
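// Note: panic(9) does not return, so the suppression below is only
// reached if the panic above is removed, e.g. while debugging.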
cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); // The watchdog will probably reset the controller // in a little while. It will probably fail again. } static void cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); if (stat & CPSW_WR_C_MISC_EVNT_PEND) CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); if (stat & CPSW_WR_C_MISC_STAT_PEND) cpsw_stats_collect(sc); if (stat & CPSW_WR_C_MISC_HOST_PEND) cpsw_intr_misc_host_error(sc); if (stat & CPSW_WR_C_MISC_MDIOLINK) { cpsw_write_4(sc, MDIOLINKINTMASKED, cpsw_read_4(sc, MDIOLINKINTMASKED)); } if (stat & CPSW_WR_C_MISC_MDIOUSER) { CPSW_DEBUGF(sc, ("MDIO operation completed interrupt unimplemented")); } cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } /* * * Periodic Checks and Watchdog. * */ static void cpswp_tick(void *msc) { struct cpswp_softc *sc = msc; /* Check for media type change */ mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); cpswp_ifmedia_upd(sc->ifp); } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); } static void cpswp_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct cpswp_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; CPSW_PORT_UNLOCK(sc); } static int cpswp_ifmedia_upd(if_t ifp) { struct cpswp_softc *sc; sc = if_getsoftc(ifp); CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; CPSW_PORT_UNLOCK(sc); return (0); } static void cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) { struct cpswp_softc *psc; int i; cpsw_debugf_head("CPSW watchdog"); device_printf(sc->dev, "watchdog timeout\n"); printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); cpsw_dump_queue(sc, &sc->tx.active); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } } static void cpsw_tx_watchdog(void *msc) { struct cpsw_softc *sc; sc = msc; CPSW_TX_LOCK(sc); if (sc->tx.active_queue_len == 0 || !sc->tx.running) { sc->watchdog.timer = 0; /* Nothing to do. */ } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ } else if (cpsw_tx_dequeue(sc) > 0) { sc->watchdog.timer = 0; /* We just did something. */ } else { /* There was something to do but it didn't get done. */ ++sc->watchdog.timer; if (sc->watchdog.timer > 5) { sc->watchdog.timer = 0; ++sc->watchdog.resets; cpsw_tx_watchdog_full_reset(sc); } } sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; CPSW_TX_UNLOCK(sc); /* Schedule another timeout one second from now */ callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * ALE support routines. 
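 * The ALE (Address Lookup Engine) table has 1024 entries of three 32-bit
 * words each, accessed indirectly: TBLW0..TBLW2 stage the words and TBLCTL
 * selects the index, with bit 31 set to commit a write.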
* */ static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static void cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; /* First four entries are link address and broadcast. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); } } } static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, uint8_t *mac) { int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3], ale_type; /* Find a matching entry or a free entry. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && (((ale_entry[0] >>24) & 0xFF) == mac[2]) && (((ale_entry[0] >>16) & 0xFF) == mac[3]) && (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (ENOMEM); i = free_index; } if (vlan != -1) ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; else ale_type = ALE_TYPE_ADDR << 28; /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ ale_entry[1] |= ALE_MCAST_FWD | ale_type; /* Set portmask [68:66] */ ale_entry[2] = (portmap & 7) << 2; cpsw_ale_write_entry(sc, i, ale_entry); return 0; } static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); switch (ALE_TYPE(ale_entry)) { case ALE_TYPE_VLAN: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); printf("\n"); break; case ALE_TYPE_ADDR: case ALE_TYPE_VLAN_ADDR: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", (ale_entry[1] >> 8) & 0xFF, (ale_entry[1] >> 0) & 0xFF, (ale_entry[0] >>24) & 0xFF, (ale_entry[0] >>16) & 0xFF, (ale_entry[0] >> 8) & 0xFF, (ale_entry[0] >> 0) & 0xFF); printf(ALE_MCAST(ale_entry) ? 
"mcast " : "ucast "); if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("port: %u ", ALE_PORTS(ale_entry)); printf("\n"); break; } } printf("\n"); } static u_int cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct cpswp_softc *sc = arg; uint32_t portmask; if (sc->swsc->dualemac) portmask = 1 << (sc->unit + 1) | 1 << 0; else portmask = 7; cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl)); return (1); } static int cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) { uint8_t *mac; uint32_t ale_entry[3], ale_type, portmask; if (sc->swsc->dualemac) { ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; portmask = 1 << (sc->unit + 1) | 1 << 0; } else { ale_type = ALE_TYPE_ADDR << 28; portmask = 7; } /* * Route incoming packets for our MAC address to Port 0 (host). * For simplicity, keep this entry at table index 0 for port 1 and * at index 2 for port 2 in the ALE. */ mac = LLADDR((struct sockaddr_dl *)if_getifaddr(sc->ifp)->ifa_addr); ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ ale_entry[2] = 0; /* port = 0 */ cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); /* Set outgoing MAC Address for slave port. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), mac[5] << 8 | mac[4]); /* Keep the broadcast address at table entry 1 (or 3). */ ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; ale_entry[2] = portmask << 2; cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); /* SIOCDELMULTI doesn't specify the particular address being removed, so we have to remove all and rebuild. */ if (purge) cpsw_ale_remove_all_mc_entries(sc->swsc); /* Set other multicast addrs desired. */ if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc); return (0); } static int cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, int untag, int mcregflood, int mcunregflood) { int free_index, i, matching_index; uint32_t ale_entry[3]; free_index = matching_index = -1; /* Find a matching entry or a free entry. */ for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if (ALE_VLAN(ale_entry) == vlan) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (-1); i = free_index; } ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | (mcunregflood & 7) << 8 | (ports & 7); ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); return (0); } /* * * Statistics and Sysctls. 
* */ #if 0 static void cpsw_stats_dump(struct cpsw_softc *sc) { int i; uint32_t r; for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, (intmax_t)sc->shadow_stats[i], r, (intmax_t)sc->shadow_stats[i] + r)); } } #endif static void cpsw_stats_collect(struct cpsw_softc *sc) { int i; uint32_t r; CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); sc->shadow_stats[i] += r; cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); } } static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct cpsw_stat *stat; uint64_t result; sc = (struct cpsw_softc *)arg1; stat = &cpsw_stat_sysctls[oidp->oid_number]; result = sc->shadow_stats[oidp->oid_number]; result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); return (sysctl_handle_64(oidp, &result, 0, req)); } static int cpsw_stat_attached(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct bintime t; unsigned result; sc = (struct cpsw_softc *)arg1; getbinuptime(&t); bintime_sub(&t, &sc->attach_uptime); result = t.sec; return (sysctl_handle_int(oidp, &result, 0, req)); } static int cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) { int error; struct cpsw_softc *sc; uint32_t ctrl, intr_per_ms; sc = (struct cpsw_softc *)arg1; error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); if (error != 0 || req->newptr == NULL) return (error); ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); if (sc->coal_us == 0) { /* Disable the interrupt pace hardware. */ cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); return (0); } if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) sc->coal_us = CPSW_WR_C_IMAX_US_MAX; if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) sc->coal_us = CPSW_WR_C_IMAX_US_MIN; intr_per_ms = 1000 / sc->coal_us; /* Just to make sure... */ if (intr_per_ms > CPSW_WR_C_IMAX_MAX) intr_per_ms = CPSW_WR_C_IMAX_MAX; if (intr_per_ms < CPSW_WR_C_IMAX_MIN) intr_per_ms = CPSW_WR_C_IMAX_MIN; /* Set the prescale to produce 4us pulses from the 125 MHz clock. */ ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; /* Enable the interrupt pace hardware.
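 * The prescaler programmed above turns the 125 MHz reference clock into
 * 4 us pacing ticks, and the IMAX registers written below cap the rate at
 * intr_per_ms interrupts per millisecond; e.g. coal_us = 500 allows at
 * most 2 interrupts per millisecond.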
*/ cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); return (0); } static int cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *swsc; struct cpswp_softc *sc; struct bintime t; unsigned result; swsc = arg1; sc = device_get_softc(swsc->port[arg2].dev); if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { getbinuptime(&t); bintime_sub(&t, &sc->init_uptime); result = t.sec; } else result = 0; return (sysctl_handle_int(oidp, &result, 0, req)); } static void cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", CTLFLAG_RD, &queue->queue_slots, 0, "Total buffers currently assigned to this queue"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", CTLFLAG_RD, &queue->active_queue_len, 0, "Buffers currently registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", CTLFLAG_RD, &queue->max_active_queue_len, 0, "Max value of activeBuffers since last driver reset"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", CTLFLAG_RD, &queue->avail_queue_len, 0, "Buffers allocated to this queue but not currently " "registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", CTLFLAG_RD, &queue->max_avail_queue_len, 0, "Max value of availBuffers since last driver reset"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", CTLFLAG_RD, &queue->queue_adds, 0, "Total buffers added to queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", CTLFLAG_RD, &queue->queue_removes, 0, "Total buffers removed from queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", CTLFLAG_RD, &queue->queue_restart, 0, "Total times the queue has been restarted"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", CTLFLAG_RD, &queue->longest_chain, 0, "Max buffers used for a single packet"); } static void cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", CTLFLAG_RD, &sc->watchdog.resets, 0, "Total number of watchdog resets"); } static void cpsw_add_sysctls(struct cpsw_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *stats_node, *queue_node, *node; struct sysctl_oid_list *parent, *stats_parent, *queue_parent; struct sysctl_oid_list *ports_parent, *port_parent; char port[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stat_attached, "IU", "Time since driver attach"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0, cpsw_intr_coalesce, "IU", "minimum time between interrupts"); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics"); ports_parent = SYSCTL_CHILDREN(node); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; port[0] = '0' + i; port[1] = '\0'; node = SYSCTL_ADD_NODE(ctx, ports_parent, 
OID_AUTO, port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Port Statistics"); port_parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i, cpsw_stat_uptime, "IU", "Seconds since driver init"); } stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics"); stats_parent = SYSCTL_CHILDREN(stats_node); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { SYSCTL_ADD_PROC(ctx, stats_parent, i, cpsw_stat_sysctls[i].oid, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0, cpsw_stats_sysctl, "IU", cpsw_stat_sysctls[i].oid); } queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics"); queue_parent = SYSCTL_CHILDREN(queue_node); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->tx); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->rx); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics"); cpsw_add_watchdog_sysctls(ctx, node, sc); } #ifdef CPSW_ETHERSWITCH static etherswitch_info_t etherswitch_info = { .es_nports = CPSW_PORTS + 1, .es_nvlangroups = CPSW_VLANS, .es_name = "TI Common Platform Ethernet Switch (CPSW)", .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, }; static etherswitch_info_t * cpsw_getinfo(device_t dev) { return (ðerswitch_info); } static int cpsw_getport(device_t dev, etherswitch_port_t *p) { int err; struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmediareq *ifmr; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); err = 0; sc = device_get_softc(dev); if (p->es_port == CPSW_CPU_PORT) { p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; } else { psc = device_get_softc(sc->port[p->es_port - 1].dev); err = ifmedia_ioctl(psc->ifp, &p->es_ifr, &psc->mii->mii_media, SIOCGIFMEDIA); } reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); p->es_pvid = reg & ETHERSWITCH_VID_MASK; reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (reg & ALE_PORTCTL_DROP_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; if (reg & ALE_PORTCTL_INGRESS) p->es_flags |= ETHERSWITCH_PORT_INGRESS; return (err); } static int cpsw_setport(device_t dev, etherswitch_port_t *p) { struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmedia *ifm; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); sc = device_get_softc(dev); if (p->es_pvid != 0) { cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), p->es_pvid & ETHERSWITCH_VID_MASK); } reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= ALE_PORTCTL_DROP_UNTAGGED; else reg &= ~ALE_PORTCTL_DROP_UNTAGGED; if (p->es_flags & ETHERSWITCH_PORT_INGRESS) reg |= ALE_PORTCTL_INGRESS; else reg &= ~ALE_PORTCTL_INGRESS; cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); /* CPU port does not allow media settings. 
*/ if (p->es_port == CPSW_CPU_PORT) return (0); psc = device_get_softc(sc->port[p->es_port - 1].dev); ifm = &psc->mii->mii_media; return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static int cpsw_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i, vid; uint32_t ale_entry[3]; struct cpsw_softc *sc; sc = device_get_softc(dev); if (vg->es_vlangroup >= CPSW_VLANS) return (EINVAL); vg->es_vid = 0; vid = cpsw_vgroups[vg->es_vlangroup].vid; if (vid == -1) return (0); for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vid != ALE_VLAN(ale_entry)) continue; vg->es_fid = 0; vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); } return (0); } static void cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vlan != ALE_VLAN(ale_entry)) continue; ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); break; } } static int cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i; struct cpsw_softc *sc; sc = device_get_softc(dev); for (i = 0; i < CPSW_VLANS; i++) { /* Is this VLAN ID in use by another vlangroup? */ if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) return (EINVAL); } if (vg->es_vid == 0) { if (cpsw_vgroups[vg->es_vlangroup].vid == -1) return (0); cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); cpsw_vgroups[vg->es_vlangroup].vid = -1; vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_vid = 0; return (0); } vg->es_vid &= ETHERSWITCH_VID_MASK; vg->es_member_ports &= CPSW_PORTS_MASK; vg->es_untagged_ports &= CPSW_PORTS_MASK; if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) return (EINVAL); cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, vg->es_untagged_ports, vg->es_member_ports, 0); return (0); } static int cpsw_readreg(device_t dev, int addr) { /* Not supported. */ return (0); } static int cpsw_writereg(device_t dev, int addr, int value) { /* Not supported. */ return (0); } static int cpsw_readphy(device_t dev, int phy, int reg) { /* Not supported. */ return (0); } static int cpsw_writephy(device_t dev, int phy, int reg, int data) { /* Not supported. */ return (0); } #endif diff --git a/sys/arm/ti/ti_omap4_cm.c b/sys/arm/ti/ti_omap4_cm.c index b558d6262158..67c17ec4df98 100644 --- a/sys/arm/ti/ti_omap4_cm.c +++ b/sys/arm/ti/ti_omap4_cm.c @@ -1,143 +1,143 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Emmanuel Vadot * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Based on sys/arm/ti/ti_sysc.c */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct ofw_compat_data compat_data[] = { { "ti,omap4-cm", 1 }, { NULL, 0 } }; struct ti_omap4_cm_softc { struct simplebus_softc sc; device_t dev; }; uint64_t ti_omap4_cm_get_simplebus_base_host(device_t dev) { struct ti_omap4_cm_softc *sc; sc = device_get_softc(dev); if (sc->sc.nranges == 0) return (0); return (sc->sc.ranges[0].host); } static int ti_omap4_cm_probe(device_t dev); static int ti_omap4_cm_attach(device_t dev); static int ti_omap4_cm_detach(device_t dev); static int ti_omap4_cm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI OMAP4-CM"); if (!bootverbose) device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int ti_omap4_cm_attach(device_t dev) { struct ti_omap4_cm_softc *sc; device_t cdev; phandle_t node, child; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); simplebus_init(dev, node); if (simplebus_fill_ranges(node, &sc->sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } - bus_generic_probe(sc->dev); + bus_identify_children(sc->dev); for (child = OF_child(node); child > 0; child = OF_peer(child)) { cdev = simplebus_add_device(dev, child, 0, NULL, -1, NULL); if (cdev != NULL) device_probe_and_attach(cdev); } return (bus_generic_attach(dev)); } static int ti_omap4_cm_detach(device_t dev) { return (EBUSY); } static device_method_t ti_omap4_cm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_omap4_cm_probe), DEVMETHOD(device_attach, ti_omap4_cm_attach), DEVMETHOD(device_detach, ti_omap4_cm_detach), DEVMETHOD_END }; DEFINE_CLASS_1(ti_omap4_cm, ti_omap4_cm_driver, ti_omap4_cm_methods, sizeof(struct ti_omap4_cm_softc), simplebus_driver); EARLY_DRIVER_MODULE(ti_omap4_cm, simplebus, ti_omap4_cm_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); EARLY_DRIVER_MODULE(ti_omap4_cm, ofwbus, ti_omap4_cm_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); diff --git a/sys/arm/ti/ti_prcm.c b/sys/arm/ti/ti_prcm.c index 0006d0eaec9b..7afb9a2a5678 100644 --- a/sys/arm/ti/ti_prcm.c +++ b/sys/arm/ti/ti_prcm.c @@ -1,336 +1,336 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 Damjan Marion * All rights reserved. 
* * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Based on sys/arm/ti/am335x/am335x_prcm.c */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clkdev_if.h" #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) #endif struct ti_prcm_softc { struct simplebus_softc sc_simplebus; device_t dev; struct resource * mem_res; bus_space_tag_t bst; bus_space_handle_t bsh; int attach_done; struct mtx mtx; }; static struct ti_prcm_softc *ti_prcm_sc = NULL; static void omap4_prcm_reset(void); static void am335x_prcm_reset(void); #define TI_AM3_PRCM 18 #define TI_AM4_PRCM 17 #define TI_OMAP2_PRCM 16 #define TI_OMAP3_PRM 15 #define TI_OMAP3_CM 14 #define TI_OMAP4_CM1 13 #define TI_OMAP4_PRM 12 #define TI_OMAP4_CM2 11 #define TI_OMAP4_SCRM 10 #define TI_OMAP5_PRM 9 #define TI_OMAP5_CM_CORE_AON 8 #define TI_OMAP5_SCRM 7 #define TI_OMAP5_CM_CORE 6 #define TI_DRA7_PRM 5 #define TI_DRA7_CM_CORE_AON 4 #define TI_DRA7_CM_CORE 3 #define TI_DM814_PRCM 2 #define TI_DM816_PRCM 1 #define TI_PRCM_END 0 static struct ofw_compat_data compat_data[] = { { "ti,am3-prcm", TI_AM3_PRCM }, { "ti,am4-prcm", TI_AM4_PRCM }, { "ti,omap2-prcm", TI_OMAP2_PRCM }, { "ti,omap3-prm", TI_OMAP3_PRM }, { "ti,omap3-cm", TI_OMAP3_CM }, { "ti,omap4-cm1", TI_OMAP4_CM1 }, { "ti,omap4-prm", TI_OMAP4_PRM }, { "ti,omap4-cm2", TI_OMAP4_CM2 }, { "ti,omap4-scrm", TI_OMAP4_SCRM }, { "ti,omap5-prm", TI_OMAP5_PRM }, { "ti,omap5-cm-core-aon", TI_OMAP5_CM_CORE_AON }, { "ti,omap5-scrm", TI_OMAP5_SCRM }, { "ti,omap5-cm-core", TI_OMAP5_CM_CORE }, { "ti,dra7-prm", TI_DRA7_PRM }, { "ti,dra7-cm-core-aon", TI_DRA7_CM_CORE_AON }, { "ti,dra7-cm-core", TI_DRA7_CM_CORE }, { "ti,dm814-prcm", TI_DM814_PRCM }, { "ti,dm816-prcm", TI_DM816_PRCM }, { NULL, TI_PRCM_END} }; static int ti_prcm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) { return (ENXIO); } device_set_desc(dev, "TI Power and Clock Management"); return(BUS_PROBE_DEFAULT); } static int ti_prcm_attach(device_t dev) { struct ti_prcm_softc *sc; phandle_t node, child; int rid; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); 
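	/*
	 * A minimal, disabled sketch of how a child clock driver might use
	 * the clkdev interface this driver exports (defined further below);
	 * the 0x0 register offset is a hypothetical placeholder, not a real
	 * PRCM register.
	 */
#if 0
	{
		uint32_t reg;

		ti_prcm_device_lock(sc->dev);
		ti_prcm_read_4(sc->dev, 0x0, &reg);
		/* Clear bits 1:0, then set bit 1. */
		ti_prcm_modify_4(sc->dev, 0x0, 0x3, 0x2);
		ti_prcm_device_unlock(sc->dev);
	}
#endif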
simplebus_init(sc->dev, node); if (simplebus_fill_ranges(node, &sc->sc_simplebus) < 0) { device_printf(sc->dev, "could not get ranges\n"); return (ENXIO); } if (sc->sc_simplebus.nranges == 0) { device_printf(sc->dev, "nranges == 0\n"); return (ENXIO); } sc->mem_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, &rid, sc->sc_simplebus.ranges[0].host, (sc->sc_simplebus.ranges[0].host + sc->sc_simplebus.ranges[0].size - 1), sc->sc_simplebus.ranges[0].size, RF_ACTIVE | RF_SHAREABLE); if (sc->mem_res == NULL) { return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Fixme: for xxx_prcm_reset functions. * Get rid of global variables? */ ti_prcm_sc = sc; switch(ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: ti_cpu_reset = omap4_prcm_reset; break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: ti_cpu_reset = am335x_prcm_reset; break; #endif } - bus_generic_probe(sc->dev); + bus_identify_children(sc->dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { simplebus_add_device(dev, child, 0, NULL, -1, NULL); } return (bus_generic_attach(sc->dev)); } int ti_prcm_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct ti_prcm_softc *sc; sc = device_get_softc(dev); DPRINTF(sc->dev, "offset=%lx write %x\n", addr, val); bus_space_write_4(sc->bst, sc->bsh, addr, val); return (0); } int ti_prcm_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct ti_prcm_softc *sc; sc = device_get_softc(dev); *val = bus_space_read_4(sc->bst, sc->bsh, addr); DPRINTF(sc->dev, "offset=%lx Read %x\n", addr, *val); return (0); } int ti_prcm_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct ti_prcm_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = bus_space_read_4(sc->bst, sc->bsh, addr); reg &= ~clr; reg |= set; bus_space_write_4(sc->bst, sc->bsh, addr, reg); DPRINTF(sc->dev, "offset=%lx reg: %x (clr %x set %x)\n", addr, reg, clr, set); return (0); } void ti_prcm_device_lock(device_t dev) { struct ti_prcm_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } void ti_prcm_device_unlock(device_t dev) { struct ti_prcm_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static device_method_t ti_prcm_methods[] = { DEVMETHOD(device_probe, ti_prcm_probe), DEVMETHOD(device_attach, ti_prcm_attach), /* clkdev interface */ DEVMETHOD(clkdev_write_4, ti_prcm_write_4), DEVMETHOD(clkdev_read_4, ti_prcm_read_4), DEVMETHOD(clkdev_modify_4, ti_prcm_modify_4), DEVMETHOD(clkdev_device_lock, ti_prcm_device_lock), DEVMETHOD(clkdev_device_unlock, ti_prcm_device_unlock), DEVMETHOD_END }; DEFINE_CLASS_1(ti_prcm, ti_prcm_driver, ti_prcm_methods, sizeof(struct ti_prcm_softc), simplebus_driver); EARLY_DRIVER_MODULE(ti_prcm, ofwbus, ti_prcm_driver, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(ti_prcm, simplebus, ti_prcm_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_prcm, 1); MODULE_DEPEND(ti_prcm, ti_scm, 1, 1, 1); /* From sys/arm/ti/am335x/am335x_prcm.c * Copyright (c) 2012 Damjan Marion */ #define PRM_DEVICE_OFFSET 0xF00 #define AM335x_PRM_RSTCTRL (PRM_DEVICE_OFFSET + 0x00) static void am335x_prcm_reset(void) { ti_prcm_write_4(ti_prcm_sc->dev, AM335x_PRM_RSTCTRL, (1<<1)); } /* FIXME: Is this correct - or should the license part be on top? */ /* From sys/arm/ti/omap4/omap4_prcm_clks.c */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2011 * Ben Gray . * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define PRM_RSTCTRL 0x1b00 #define PRM_RSTCTRL_RESET 0x2 static void omap4_prcm_reset(void) { uint32_t reg; ti_prcm_read_4(ti_prcm_sc->dev, PRM_RSTCTRL, ®); reg = reg | PRM_RSTCTRL_RESET; ti_prcm_write_4(ti_prcm_sc->dev, PRM_RSTCTRL, reg); ti_prcm_read_4(ti_prcm_sc->dev, PRM_RSTCTRL, ®); } diff --git a/sys/arm/ti/ti_scm_syscon.c b/sys/arm/ti/ti_scm_syscon.c index 7286be7b8f2c..729af03c4abd 100644 --- a/sys/arm/ti/ti_scm_syscon.c +++ b/sys/arm/ti/ti_scm_syscon.c @@ -1,290 +1,290 @@ /*- * Copyright (c) 2019 Emmanuel Vadot * * Copyright (c) 2020 Oskar Holmlund * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Based on sys/arm/ti/ti_sysc.c */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "syscon_if.h" #include #include "clkdev_if.h" #include #if 0 #define DPRINTF(dev, msg...) device_printf(dev, msg) #else #define DPRINTF(dev, msg...) 
#endif MALLOC_DECLARE(M_SYSCON); struct ti_scm_syscon_softc { struct simplebus_softc sc_simplebus; device_t dev; struct syscon * syscon; struct resource * res[1]; bus_space_tag_t bst; bus_space_handle_t bsh; struct mtx mtx; }; static struct resource_spec ti_scm_syscon_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; /* Device */ static struct ofw_compat_data compat_data[] = { { "syscon", 1 }, { NULL, 0 } }; /* --- dev/extres/syscon syscon_method_t interface --- */ static int ti_scm_syscon_write_4(struct syscon *syscon, bus_size_t offset, uint32_t val) { struct ti_scm_syscon_softc *sc; sc = device_get_softc(syscon->pdev); DPRINTF(sc->dev, "offset=%lx write %x\n", offset, val); mtx_lock(&sc->mtx); bus_space_write_4(sc->bst, sc->bsh, offset, val); mtx_unlock(&sc->mtx); return (0); } static uint32_t ti_scm_syscon_read_4(struct syscon *syscon, bus_size_t offset) { struct ti_scm_syscon_softc *sc; uint32_t val; sc = device_get_softc(syscon->pdev); mtx_lock(&sc->mtx); val = bus_space_read_4(sc->bst, sc->bsh, offset); mtx_unlock(&sc->mtx); DPRINTF(sc->dev, "offset=%lx Read %x\n", offset, val); return (val); } static int ti_scm_syscon_modify_4(struct syscon *syscon, bus_size_t offset, uint32_t clr, uint32_t set) { struct ti_scm_syscon_softc *sc; uint32_t reg; sc = device_get_softc(syscon->pdev); mtx_lock(&sc->mtx); reg = bus_space_read_4(sc->bst, sc->bsh, offset); reg &= ~clr; reg |= set; bus_space_write_4(sc->bst, sc->bsh, offset, reg); mtx_unlock(&sc->mtx); DPRINTF(sc->dev, "offset=%lx reg: %x (clr %x set %x)\n", offset, reg, clr, set); return (0); } static syscon_method_t ti_scm_syscon_reg_methods[] = { SYSCONMETHOD(syscon_read_4, ti_scm_syscon_read_4), SYSCONMETHOD(syscon_write_4, ti_scm_syscon_write_4), SYSCONMETHOD(syscon_modify_4, ti_scm_syscon_modify_4), SYSCONMETHOD_END }; DEFINE_CLASS_1(ti_scm_syscon_reg, ti_scm_syscon_reg_class, ti_scm_syscon_reg_methods, 0, syscon_class); /* device interface */ static int ti_scm_syscon_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ti_soc_is_supported()) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "TI OMAP Control Module Syscon"); return(BUS_PROBE_DEFAULT); } static int ti_scm_syscon_attach(device_t dev) { struct ti_scm_syscon_softc *sc; phandle_t node, child; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, ti_scm_syscon_res_spec, sc->res)) { device_printf(sc->dev, "Can't allocate resources\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); mtx_init(&sc->mtx, device_get_nameunit(sc->dev), NULL, MTX_DEF); node = ofw_bus_get_node(sc->dev); /* dev/extres/syscon interface */ sc->syscon = syscon_create_ofw_node(dev, &ti_scm_syscon_reg_class, node); if (sc->syscon == NULL) { device_printf(dev, "Failed to create/register syscon\n"); return (ENXIO); } simplebus_init(sc->dev, node); - bus_generic_probe(sc->dev); + bus_identify_children(sc->dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { simplebus_add_device(sc->dev, child, 0, NULL, -1, NULL); } return (bus_generic_attach(sc->dev)); } /* syscon interface */ static int ti_scm_syscon_get_handle(device_t dev, struct syscon **syscon) { struct ti_scm_syscon_softc *sc; sc = device_get_softc(dev); *syscon = sc->syscon; if (*syscon == NULL) return (ENODEV); return (0); } /* clkdev interface */ static int ti_scm_syscon_clk_write_4(device_t dev, bus_addr_t addr, uint32_t val) { struct
ti_scm_syscon_softc *sc; sc = device_get_softc(dev); DPRINTF(sc->dev, "offset=%lx write %x\n", addr, val); bus_space_write_4(sc->bst, sc->bsh, addr, val); return (0); } static int ti_scm_syscon_clk_read_4(device_t dev, bus_addr_t addr, uint32_t *val) { struct ti_scm_syscon_softc *sc; sc = device_get_softc(dev); *val = bus_space_read_4(sc->bst, sc->bsh, addr); DPRINTF(sc->dev, "offset=%lx Read %x\n", addr, *val); return (0); } static int ti_scm_syscon_clk_modify_4(device_t dev, bus_addr_t addr, uint32_t clr, uint32_t set) { struct ti_scm_syscon_softc *sc; uint32_t reg; sc = device_get_softc(dev); reg = bus_space_read_4(sc->bst, sc->bsh, addr); reg &= ~clr; reg |= set; bus_space_write_4(sc->bst, sc->bsh, addr, reg); DPRINTF(sc->dev, "offset=%lx reg: %x (clr %x set %x)\n", addr, reg, clr, set); return (0); } static void ti_scm_syscon_clk_device_lock(device_t dev) { struct ti_scm_syscon_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->mtx); } static void ti_scm_syscon_clk_device_unlock(device_t dev) { struct ti_scm_syscon_softc *sc; sc = device_get_softc(dev); mtx_unlock(&sc->mtx); } static device_method_t ti_scm_syscon_methods[] = { DEVMETHOD(device_probe, ti_scm_syscon_probe), DEVMETHOD(device_attach, ti_scm_syscon_attach), /* syscon interface */ DEVMETHOD(syscon_get_handle, ti_scm_syscon_get_handle), /* clkdev interface */ DEVMETHOD(clkdev_write_4, ti_scm_syscon_clk_write_4), DEVMETHOD(clkdev_read_4, ti_scm_syscon_clk_read_4), DEVMETHOD(clkdev_modify_4, ti_scm_syscon_clk_modify_4), DEVMETHOD(clkdev_device_lock, ti_scm_syscon_clk_device_lock), DEVMETHOD(clkdev_device_unlock, ti_scm_syscon_clk_device_unlock), DEVMETHOD_END }; DEFINE_CLASS_1(ti_scm_syscon, ti_scm_syscon_driver, ti_scm_syscon_methods, sizeof(struct ti_scm_syscon_softc), simplebus_driver); EARLY_DRIVER_MODULE(ti_scm_syscon, simplebus, ti_scm_syscon_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ti_scm_syscon, 1); MODULE_DEPEND(ti_scm_syscon, ti_scm, 1, 1, 1); diff --git a/sys/arm/ti/ti_sdhci.c b/sys/arm/ti/ti_sdhci.c index 29035fee77c9..597d2a35fba6 100644 --- a/sys/arm/ti/ti_sdhci.c +++ b/sys/arm/ti/ti_sdhci.c @@ -1,765 +1,765 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Ian Lepore * Copyright (c) 2011 Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include #include #include #include #include #include #include #include #include "sdhci_if.h" #include #include #include #include "opt_mmccam.h" struct ti_sdhci_softc { device_t dev; struct sdhci_fdt_gpio * gpio; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; struct sdhci_slot slot; uint32_t mmchs_reg_off; uint32_t sdhci_reg_off; uint64_t baseclk_hz; uint32_t cmd_and_mode; uint32_t sdhci_clkdiv; boolean_t disable_highspeed; boolean_t force_card_present; boolean_t disable_readonly; }; /* * Table of supported FDT compat strings. * * Note that "ti,mmchs" is our own invention, and should be phased out in favor * of the documented names. * * Note that vendor Beaglebone dtsi files use "ti,omap3-hsmmc" for the am335x. */ static struct ofw_compat_data compat_data[] = { {"ti,am335-sdhci", 1}, {"ti,omap3-hsmmc", 1}, {"ti,omap4-hsmmc", 1}, {"ti,mmchs", 1}, {NULL, 0}, }; /* * The MMCHS hardware has a few control and status registers at the beginning of * the device's memory map, followed by the standard sdhci register block. * Different SoCs have the register blocks at different offsets from the * beginning of the device. Define some constants to map out the registers we * access, and the various per-SoC offsets. The SDHCI_REG_OFFSET is how far * beyond the MMCHS block the SDHCI block is found; it's the same on all SoCs. */ #define OMAP3_MMCHS_REG_OFFSET 0x000 #define OMAP4_MMCHS_REG_OFFSET 0x100 #define AM335X_MMCHS_REG_OFFSET 0x100 #define SDHCI_REG_OFFSET 0x100 #define MMCHS_SYSCONFIG 0x010 #define MMCHS_SYSCONFIG_RESET (1 << 1) #define MMCHS_SYSSTATUS 0x014 #define MMCHS_SYSSTATUS_RESETDONE (1 << 0) #define MMCHS_CON 0x02C #define MMCHS_CON_DW8 (1 << 5) #define MMCHS_CON_DVAL_8_4MS (3 << 9) #define MMCHS_CON_OD (1 << 0) #define MMCHS_SYSCTL 0x12C #define MMCHS_SYSCTL_CLKD_MASK 0x3FF #define MMCHS_SYSCTL_CLKD_SHIFT 6 #define MMCHS_SD_CAPA 0x140 #define MMCHS_SD_CAPA_VS18 (1 << 26) #define MMCHS_SD_CAPA_VS30 (1 << 25) #define MMCHS_SD_CAPA_VS33 (1 << 24) /* Forward declarations, CAM-related */ // static void ti_sdhci_cam_poll(struct cam_sim *); // static void ti_sdhci_cam_action(struct cam_sim *, union ccb *); // static int ti_sdhci_cam_settran_settings(struct ti_sdhci_softc *sc, union ccb *); static inline uint32_t ti_mmchs_read_4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->mmchs_reg_off)); } static inline void ti_mmchs_write_4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->mmchs_reg_off, val); } static inline uint32_t RD4(struct ti_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off + sc->sdhci_reg_off)); } static inline void WR4(struct ti_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off + sc->sdhci_reg_off, val); } static uint8_t ti_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xff); } static uint16_t ti_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * The MMCHS hardware has a non-standard interpretation of the sdclock * divisor bits. It uses the same bit positions as SDHCI 3.0 (15..6) * but doesn't split them into low:high fields.
Instead they're a * single number in the range 0..1023 and the number is exactly the * clock divisor (with 0 and 1 both meaning divide by 1). The SDHCI * driver code expects a v2.0 or v3.0 divisor. The shifting and masking * here extracts the MMCHS representation from the hardware word, cleans * those bits out, applies the 2N adjustment, and plugs the result into * the bit positions for the 2.0 or 3.0 divisor in the returned register * value. The ti_sdhci_write_2() routine performs the opposite * transformation when the SDHCI driver writes to the register. */ if (off == SDHCI_CLOCK_CONTROL) { val32 = RD4(sc, SDHCI_CLOCK_CONTROL); clkdiv = ((val32 >> MMCHS_SYSCTL_CLKD_SHIFT) & MMCHS_SYSCTL_CLKD_MASK) / 2; val32 &= ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= (clkdiv & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; if (slot->version >= SDHCI_SPEC_300) val32 |= ((clkdiv >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_HI_SHIFT; return (val32 & 0xffff); } /* * Standard 32-bit handling of command and transfer mode. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode >> 16); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode & 0x0000ffff); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xffff); } static uint32_t ti_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; val32 = RD4(sc, off); /* * If we need to disallow highspeed mode due to the OMAP4 erratum, strip * that flag from the returned capabilities. */ if (off == SDHCI_CAPABILITIES && sc->disable_highspeed) val32 &= ~SDHCI_CAN_DO_HISPD; /* * Force the card-present state if necessary. */ if (off == SDHCI_PRESENT_STATE && sc->force_card_present) val32 |= SDHCI_CARD_PRESENT; return (val32); } static void ti_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; #ifdef MMCCAM uint32_t newval32; if (off == SDHCI_HOST_CONTROL) { val32 = ti_mmchs_read_4(sc, MMCHS_CON); newval32 = val32; if (val & SDHCI_CTRL_8BITBUS) { device_printf(dev, "Custom-enabling 8-bit bus\n"); newval32 |= MMCHS_CON_DW8; } else { device_printf(dev, "Custom-disabling 8-bit bus\n"); newval32 &= ~MMCHS_CON_DW8; } if (newval32 != val32) ti_mmchs_write_4(sc, MMCHS_CON, newval32); } #endif val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3) * 8); val32 |= (val << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t clkdiv, val32; /* * Translate between the hardware and SDHCI 2.0 or 3.0 representations * of the clock divisor. See the comments in ti_sdhci_read_2() for * details. 
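 * Worked example: with a 96 MHz base clock, an MMCHS CLKD value of 4
 * yields 24 MHz; the SDHCI layer sees the same setting as divisor 2,
 * since SDHCI computes the output clock as base / (2 * N).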
*/ if (off == SDHCI_CLOCK_CONTROL) { clkdiv = (val >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; if (slot->version >= SDHCI_SPEC_300) clkdiv |= ((val >> SDHCI_DIVIDER_HI_SHIFT) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_MASK_LEN; clkdiv *= 2; if (clkdiv > MMCHS_SYSCTL_CLKD_MASK) clkdiv = MMCHS_SYSCTL_CLKD_MASK; val32 = RD4(sc, SDHCI_CLOCK_CONTROL); val32 &= 0xffff0000; val32 |= val & ~(MMCHS_SYSCTL_CLKD_MASK << MMCHS_SYSCTL_CLKD_SHIFT); val32 |= clkdiv << MMCHS_SYSCTL_CLKD_SHIFT; WR4(sc, SDHCI_CLOCK_CONTROL, val32); return; } /* * Standard 32-bit handling of command and transfer mode. */ if (off == SDHCI_TRANSFER_MODE) { sc->cmd_and_mode = (sc->cmd_and_mode & 0xffff0000) | ((uint32_t)val & 0x0000ffff); return; } else if (off == SDHCI_COMMAND_FLAGS) { sc->cmd_and_mode = (sc->cmd_and_mode & 0x0000ffff) | ((uint32_t)val << 16); WR4(sc, SDHCI_TRANSFER_MODE, sc->cmd_and_mode); return; } val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3) * 8); val32 |= ((val & 0xffff) << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void ti_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct ti_sdhci_softc *sc = device_get_softc(dev); WR4(sc, off, val); } static void ti_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct ti_sdhci_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off + sc->sdhci_reg_off, data, count); } static void ti_sdhci_intr(void *arg) { struct ti_sdhci_softc *sc = arg; sdhci_generic_intr(&sc->slot); } static int ti_sdhci_update_ios(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); struct sdhci_slot *slot; struct mmc_ios *ios; uint32_t val32, newval32; slot = device_get_ivars(reqdev); ios = &slot->host.ios; /* * There is an 8-bit-bus bit in the MMCHS control register which, when * set, overrides the 1 vs 4 bit setting in the standard SDHCI * registers. Set that bit first according to whether an 8-bit bus is * requested, then let the standard driver handle everything else. 
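 * (MMCHS_CON lives in the TI-specific block below the standard SDHCI
 * window, so it is accessed with ti_mmchs_read_4()/ti_mmchs_write_4()
 * rather than the generic SDHCI accessors.)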
*/ val32 = ti_mmchs_read_4(sc, MMCHS_CON); newval32 = val32; if (ios->bus_width == bus_width_8) newval32 |= MMCHS_CON_DW8; else newval32 &= ~MMCHS_CON_DW8; if (ios->bus_mode == opendrain) newval32 |= MMCHS_CON_OD; else /* if (ios->bus_mode == pushpull) */ newval32 &= ~MMCHS_CON_OD; if (newval32 != val32) ti_mmchs_write_4(sc, MMCHS_CON, newval32); return (sdhci_generic_update_ios(brdev, reqdev)); } static int ti_sdhci_get_ro(device_t brdev, device_t reqdev) { struct ti_sdhci_softc *sc = device_get_softc(brdev); if (sc->disable_readonly) return (0); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool ti_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct ti_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } static int ti_sdhci_detach(device_t dev) { /* sdhci_fdt_gpio_teardown(sc->gpio); */ return (EBUSY); } static int ti_sdhci_hw_init(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); uint32_t regval; unsigned long timeout; clk_t mmc_clk; int err; /* Enable the controller and interface/functional clocks */ if (ti_sysc_clock_enable(device_get_parent(dev)) != 0) { device_printf(dev, "Error: failed to enable MMC clock\n"); return (ENXIO); } /* FIXME: Devicetree doesn't have any reference to mmc_clk */ err = clk_get_by_name(dev, "mmc_clk", &mmc_clk); if (err) { device_printf(dev, "Cannot find mmc_clk\n"); return (ENXIO); } err = clk_get_freq(mmc_clk, &sc->baseclk_hz); if (err) { device_printf(dev, "Can't get mmc_clk frequency\n"); /* AM335x TRM 8.1.6.8 table 8-24 96MHz @ OPP100 */ sc->baseclk_hz = 96000000; } /* Issue a softreset to the controller */ ti_mmchs_write_4(sc, MMCHS_SYSCONFIG, MMCHS_SYSCONFIG_RESET); timeout = 1000; while (!(ti_mmchs_read_4(sc, MMCHS_SYSSTATUS) & MMCHS_SYSSTATUS_RESETDONE)) { if (--timeout == 0) { device_printf(dev, "Error: Controller reset operation timed out\n"); break; } DELAY(100); } /* * Reset the command and data state machines and also other aspects of * the controller such as bus clock and power. * * If we read the software reset register too fast after writing it we * can get back a zero that means the reset hasn't started yet rather * than that the reset is complete. Per TI recommendations, work around * it by reading until we see the reset bit asserted, then read until * it's clear. We also set the SDHCI_QUIRK_WAITFOR_RESET_ASSERTED quirk * so that the main sdhci driver uses this same logic in its resets. */ ti_sdhci_write_1(dev, NULL, SDHCI_SOFTWARE_RESET, SDHCI_RESET_ALL); timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL) != SDHCI_RESET_ALL) { if (--timeout == 0) { break; } DELAY(1); } timeout = 10000; while ((ti_sdhci_read_1(dev, NULL, SDHCI_SOFTWARE_RESET) & SDHCI_RESET_ALL)) { if (--timeout == 0) { device_printf(dev, "Error: Software reset operation timed out\n"); break; } DELAY(100); } /* * The attach() routine has examined fdt data and set flags in * slot.host.caps to reflect what voltages we can handle. Set those * values in the CAPA register. Empirical testing shows that the * values in this register can be overwritten at any time, but the * manual says that these values should only be set once, "before * initialization" whatever that means, and that they survive a reset.
*/ regval = ti_mmchs_read_4(sc, MMCHS_SD_CAPA); if (sc->slot.host.caps & MMC_OCR_LOW_VOLTAGE) regval |= MMCHS_SD_CAPA_VS18; if (sc->slot.host.caps & (MMC_OCR_290_300 | MMC_OCR_300_310)) regval |= MMCHS_SD_CAPA_VS30; ti_mmchs_write_4(sc, MMCHS_SD_CAPA, regval); /* Set initial host configuration (1-bit, std speed, pwr off). */ ti_sdhci_write_1(dev, NULL, SDHCI_HOST_CONTROL, 0); ti_sdhci_write_1(dev, NULL, SDHCI_POWER_CONTROL, 0); /* Set the initial controller configuration. */ ti_mmchs_write_4(sc, MMCHS_CON, MMCHS_CON_DVAL_8_4MS); return (0); } static int ti_sdhci_attach(device_t dev) { struct ti_sdhci_softc *sc = device_get_softc(dev); int rid, err; pcell_t prop; phandle_t node; sc->dev = dev; /* * Get the MMCHS device id from FDT. Use rev address to identify the unit. */ node = ofw_bus_get_node(dev); /* * The hardware can inherently do dual-voltage (1p8v, 3p0v) on the first * device, and only 1p8v on other devices unless an external transceiver * is used. The only way we could know about a transceiver is fdt data. * Note that we have to do this before calling ti_sdhci_hw_init() so * that it can set the right values in the CAPA register. */ sc->slot.host.caps |= MMC_OCR_LOW_VOLTAGE; if (OF_hasprop(node, "ti,dual-volt")) { sc->slot.host.caps |= MMC_OCR_290_300 | MMC_OCR_300_310; } /* * Set the offset from the device's memory start to the MMCHS registers. * Also for OMAP4 disable high speed mode due to erratum ID i626. */ switch (ti_chip()) { #ifdef SOC_OMAP4 case CHIP_OMAP_4: sc->mmchs_reg_off = OMAP4_MMCHS_REG_OFFSET; sc->disable_highspeed = true; break; #endif #ifdef SOC_TI_AM335X case CHIP_AM335X: sc->mmchs_reg_off = AM335X_MMCHS_REG_OFFSET; break; #endif default: panic("Unknown OMAP device\n"); } /* * The standard SDHCI registers are at a fixed offset (the same on all * SoCs) beyond the MMCHS registers. */ sc->sdhci_reg_off = sc->mmchs_reg_off + SDHCI_REG_OFFSET; /* Resource setup. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, ti_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } /* * Set up handling of card-detect and write-protect gpio lines. * * If there is no write protect info in the fdt data, fall back to the * historical practice of assuming that the card is writable. This * works around bad fdt data from the upstream source. The alternative * would be to trust the sdhci controller's PRESENT_STATE register WP * bit, but it may say write protect is in effect when it's not if the * pinmux setup doesn't route the WP signal into the sdhci block. */ sc->gpio = sdhci_fdt_gpio_setup(sc->dev, &sc->slot); if (!OF_hasprop(node, "wp-gpios") && !OF_hasprop(node, "wp-disable")) sc->disable_readonly = true; /* Initialise the MMCHS hardware. */ err = ti_sdhci_hw_init(dev); if (err != 0) { /* err should already contain ENXIO from ti_sdhci_hw_init() */ goto fail; } /* * The capabilities register can only express base clock frequencies in * the range of 0-63MHz for a v2.0 controller. Since our clock runs * faster than that, the hardware sets the frequency to zero in the * register.
When the register contains zero, the sdhci driver expects * slot.max_clk to already have the right value in it. */ sc->slot.max_clk = sc->baseclk_hz; /* * The MMCHS timeout counter is based on the output sdclock. Tell the * sdhci driver to recalculate the timeout clock whenever the output * sdclock frequency changes. */ sc->slot.quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; /* * The MMCHS hardware shifts the 136-bit response data (in violation of * the spec), so tell the sdhci driver not to do the same in software. */ sc->slot.quirks |= SDHCI_QUIRK_DONT_SHIFT_RESPONSE; /* * Reset bits are broken, have to wait to see the bits asserted * before waiting to see them de-asserted. */ sc->slot.quirks |= SDHCI_QUIRK_WAITFOR_RESET_ASSERTED; /* * The controller waits for busy responses. */ sc->slot.quirks |= SDHCI_QUIRK_WAIT_WHILE_BUSY; /* * DMA is not really broken, I just haven't implemented it yet. */ sc->slot.quirks |= SDHCI_QUIRK_BROKEN_DMA; /* * Set up the hardware and go. Note that this sets many of the * slot.host.* fields, so we have to do this before overriding any of * those values based on fdt data, below. */ sdhci_init_slot(dev, &sc->slot, 0); /* * The SDHCI controller doesn't realize it, but we can support 8-bit * even though we're not a v3.0 controller. If there's an fdt bus-width * property, honor it. */ if (OF_getencprop(node, "bus-width", &prop, sizeof(prop)) > 0) { sc->slot.host.caps &= ~(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); switch (prop) { case 8: sc->slot.host.caps |= MMC_CAP_8_BIT_DATA; /* FALLTHROUGH */ case 4: sc->slot.host.caps |= MMC_CAP_4_BIT_DATA; break; case 1: break; default: device_printf(dev, "Bad bus-width value %u\n", prop); break; } } /* * If the slot is flagged with the non-removable property, set our flag * to always force the SDHCI_CARD_PRESENT bit on. 
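 * A hypothetical fdt node for a soldered-down eMMC might carry, e.g.,
 * "non-removable;" alongside "bus-width = <8>;".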
*/ node = ofw_bus_get_node(dev); if (OF_hasprop(node, "non-removable")) sc->force_card_present = true; - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: if (sc->intr_cookie) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); return (err); } static int ti_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data != 0) { device_set_desc(dev, "TI MMCHS (SDHCI 2.0)"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static device_method_t ti_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_sdhci_probe), DEVMETHOD(device_attach, ti_sdhci_attach), DEVMETHOD(device_detach, ti_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, ti_sdhci_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, ti_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI registers accessors */ DEVMETHOD(sdhci_read_1, ti_sdhci_read_1), DEVMETHOD(sdhci_read_2, ti_sdhci_read_2), DEVMETHOD(sdhci_read_4, ti_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, ti_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, ti_sdhci_write_1), DEVMETHOD(sdhci_write_2, ti_sdhci_write_2), DEVMETHOD(sdhci_write_4, ti_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, ti_sdhci_write_multi_4), DEVMETHOD(sdhci_get_card_present, ti_sdhci_get_card_present), DEVMETHOD_END }; static driver_t ti_sdhci_driver = { "sdhci_ti", ti_sdhci_methods, sizeof(struct ti_sdhci_softc), }; DRIVER_MODULE(sdhci_ti, simplebus, ti_sdhci_driver, NULL, NULL); MODULE_DEPEND(sdhci_ti, ti_sysc, 1, 1, 1); SDHCI_DEPEND(sdhci_ti); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_ti); #endif diff --git a/sys/arm/ti/usb/omap_host.c b/sys/arm/ti/usb/omap_host.c index 631c74e19686..2e4677a2a488 100644 --- a/sys/arm/ti/usb/omap_host.c +++ b/sys/arm/ti/usb/omap_host.c @@ -1,463 +1,463 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * Copyright (c) 2011 Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include /* * USB Host Module */ /* UHH */ #define OMAP_USBHOST_UHH_REVISION 0x0000 #define OMAP_USBHOST_UHH_SYSCONFIG 0x0010 #define OMAP_USBHOST_UHH_SYSSTATUS 0x0014 #define OMAP_USBHOST_UHH_HOSTCONFIG 0x0040 #define OMAP_USBHOST_UHH_DEBUG_CSR 0x0044 /* UHH Register Set */ #define UHH_SYSCONFIG_MIDLEMODE_MASK (3UL << 12) #define UHH_SYSCONFIG_MIDLEMODE_SMARTSTANDBY (2UL << 12) #define UHH_SYSCONFIG_MIDLEMODE_NOSTANDBY (1UL << 12) #define UHH_SYSCONFIG_MIDLEMODE_FORCESTANDBY (0UL << 12) #define UHH_SYSCONFIG_CLOCKACTIVITY (1UL << 8) #define UHH_SYSCONFIG_SIDLEMODE_MASK (3UL << 3) #define UHH_SYSCONFIG_SIDLEMODE_SMARTIDLE (2UL << 3) #define UHH_SYSCONFIG_SIDLEMODE_NOIDLE (1UL << 3) #define UHH_SYSCONFIG_SIDLEMODE_FORCEIDLE (0UL << 3) #define UHH_SYSCONFIG_ENAWAKEUP (1UL << 2) #define UHH_SYSCONFIG_SOFTRESET (1UL << 1) #define UHH_SYSCONFIG_AUTOIDLE (1UL << 0) #define UHH_HOSTCONFIG_APP_START_CLK (1UL << 31) #define UHH_HOSTCONFIG_P3_CONNECT_STATUS (1UL << 10) #define UHH_HOSTCONFIG_P2_CONNECT_STATUS (1UL << 9) #define UHH_HOSTCONFIG_P1_CONNECT_STATUS (1UL << 8) #define UHH_HOSTCONFIG_ENA_INCR_ALIGN (1UL << 5) #define UHH_HOSTCONFIG_ENA_INCR16 (1UL << 4) #define UHH_HOSTCONFIG_ENA_INCR8 (1UL << 3) #define UHH_HOSTCONFIG_ENA_INCR4 (1UL << 2) #define UHH_HOSTCONFIG_AUTOPPD_ON_OVERCUR_EN (1UL << 1) #define UHH_HOSTCONFIG_P1_ULPI_BYPASS (1UL << 0) /* The following are on rev2 (OMAP44xx) of the EHCI only */ #define UHH_SYSCONFIG_IDLEMODE_MASK (3UL << 2) #define UHH_SYSCONFIG_IDLEMODE_NOIDLE (1UL << 2) #define UHH_SYSCONFIG_STANDBYMODE_MASK (3UL << 4) #define UHH_SYSCONFIG_STANDBYMODE_NOSTDBY (1UL << 4) #define UHH_HOSTCONFIG_P1_MODE_MASK (3UL << 16) #define UHH_HOSTCONFIG_P1_MODE_ULPI_PHY (0UL << 16) #define UHH_HOSTCONFIG_P1_MODE_UTMI_PHY (1UL << 16) #define UHH_HOSTCONFIG_P1_MODE_HSIC (3UL << 16) #define UHH_HOSTCONFIG_P2_MODE_MASK (3UL << 18) #define UHH_HOSTCONFIG_P2_MODE_ULPI_PHY (0UL << 18) #define UHH_HOSTCONFIG_P2_MODE_UTMI_PHY (1UL << 18) #define UHH_HOSTCONFIG_P2_MODE_HSIC (3UL << 18) /* * Values of UHH_REVISION - Note: these are not given in the TRM but taken * from the linux OMAP EHCI driver (thanks guys). It has been verified on * a Panda and Beagle board. 
*/ #define OMAP_UHH_REV1 0x00000010 /* OMAP3 */ #define OMAP_UHH_REV2 0x50700100 /* OMAP4 */ struct omap_uhh_softc { struct simplebus_softc simplebus_sc; device_t sc_dev; /* UHH register set */ struct resource* uhh_mem_res; /* The revision of the HS USB HOST read from UHH_REVISION */ uint32_t uhh_rev; /* The following details are provided by fdt data */ int port_mode[3]; }; static device_attach_t omap_uhh_attach; static device_detach_t omap_uhh_detach; static inline uint32_t omap_uhh_read_4(struct omap_uhh_softc *sc, bus_size_t off) { return bus_read_4(sc->uhh_mem_res, off); } static inline void omap_uhh_write_4(struct omap_uhh_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->uhh_mem_res, off, val); } static int omap_uhh_init(struct omap_uhh_softc *isc) { uint8_t tll_ch_mask; uint32_t reg; int i; /* Enable Clocks for high speed USBHOST */ ti_sysc_clock_enable(device_get_parent(isc->sc_dev)); /* Read the UHH revision */ isc->uhh_rev = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_REVISION); device_printf(isc->sc_dev, "UHH revision 0x%08x\n", isc->uhh_rev); /* FIXME */ #if 0 if (isc->uhh_rev == OMAP_UHH_REV2) { /* For OMAP44xx devices you have to enable the per-port clocks: * PHY_MODE - External ULPI clock * TLL_MODE - Internal UTMI clock * HSIC_MODE - Internal 480MHz and 60MHz clocks */ switch(isc->port_mode[0]) { case EHCI_HCD_OMAP_MODE_UNKNOWN: break; case EHCI_HCD_OMAP_MODE_PHY: if (ti_prcm_clk_set_source(USBP1_PHY_CLK, EXT_CLK)) device_printf(isc->sc_dev, "failed to set clock source for port 0\n"); if (ti_prcm_clk_enable(USBP1_PHY_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP1_PHY_CLK source for port 0\n"); break; case EHCI_HCD_OMAP_MODE_TLL: if (ti_prcm_clk_enable(USBP1_UTMI_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP1_UTMI_CLK source for port 0\n"); break; case EHCI_HCD_OMAP_MODE_HSIC: if (ti_prcm_clk_enable(USBP1_HSIC_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP1_HSIC_CLK source for port 0\n"); break; default: device_printf(isc->sc_dev, "unknown port mode %d for port 0\n", isc->port_mode[0]); } switch(isc->port_mode[1]) { case EHCI_HCD_OMAP_MODE_UNKNOWN: break; case EHCI_HCD_OMAP_MODE_PHY: if (ti_prcm_clk_set_source(USBP2_PHY_CLK, EXT_CLK)) device_printf(isc->sc_dev, "failed to set clock source for port 1\n"); if (ti_prcm_clk_enable(USBP2_PHY_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP2_PHY_CLK source for port 1\n"); break; case EHCI_HCD_OMAP_MODE_TLL: if (ti_prcm_clk_enable(USBP2_UTMI_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP2_UTMI_CLK source for port 1\n"); break; case EHCI_HCD_OMAP_MODE_HSIC: if (ti_prcm_clk_enable(USBP2_HSIC_CLK)) device_printf(isc->sc_dev, "failed to set clock USBP2_HSIC_CLK source for port 1\n"); break; default: device_printf(isc->sc_dev, "unknown port mode %d for port 1\n", isc->port_mode[1]); } } #endif /* Put UHH in SmartIdle/SmartStandby mode */ reg = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_SYSCONFIG); if (isc->uhh_rev == OMAP_UHH_REV1) { reg &= ~(UHH_SYSCONFIG_SIDLEMODE_MASK | UHH_SYSCONFIG_MIDLEMODE_MASK); reg |= (UHH_SYSCONFIG_ENAWAKEUP | UHH_SYSCONFIG_AUTOIDLE | UHH_SYSCONFIG_CLOCKACTIVITY | UHH_SYSCONFIG_SIDLEMODE_SMARTIDLE | UHH_SYSCONFIG_MIDLEMODE_SMARTSTANDBY); } else if (isc->uhh_rev == OMAP_UHH_REV2) { reg &= ~UHH_SYSCONFIG_IDLEMODE_MASK; reg |= UHH_SYSCONFIG_IDLEMODE_NOIDLE; reg &= ~UHH_SYSCONFIG_STANDBYMODE_MASK; reg |= UHH_SYSCONFIG_STANDBYMODE_NOSTDBY; } omap_uhh_write_4(isc, OMAP_USBHOST_UHH_SYSCONFIG, reg); device_printf(isc->sc_dev, "OMAP_UHH_SYSCONFIG: 0x%08x\n",
reg); reg = omap_uhh_read_4(isc, OMAP_USBHOST_UHH_HOSTCONFIG); /* Set up ULPI bypass and burst configurations */ reg |= (UHH_HOSTCONFIG_ENA_INCR4 | UHH_HOSTCONFIG_ENA_INCR8 | UHH_HOSTCONFIG_ENA_INCR16); reg &= ~UHH_HOSTCONFIG_ENA_INCR_ALIGN; if (isc->uhh_rev == OMAP_UHH_REV1) { if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN) reg &= ~UHH_HOSTCONFIG_P1_CONNECT_STATUS; if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN) reg &= ~UHH_HOSTCONFIG_P2_CONNECT_STATUS; if (isc->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN) reg &= ~UHH_HOSTCONFIG_P3_CONNECT_STATUS; /* Bypass the TLL module for PHY mode operation */ if ((isc->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) || (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) || (isc->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)) reg &= ~UHH_HOSTCONFIG_P1_ULPI_BYPASS; else reg |= UHH_HOSTCONFIG_P1_ULPI_BYPASS; } else if (isc->uhh_rev == OMAP_UHH_REV2) { reg |= UHH_HOSTCONFIG_APP_START_CLK; /* Clear port mode fields for PHY mode */ reg &= ~UHH_HOSTCONFIG_P1_MODE_MASK; reg &= ~UHH_HOSTCONFIG_P2_MODE_MASK; if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) reg |= UHH_HOSTCONFIG_P1_MODE_UTMI_PHY; else if (isc->port_mode[0] == EHCI_HCD_OMAP_MODE_HSIC) reg |= UHH_HOSTCONFIG_P1_MODE_HSIC; if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) reg |= UHH_HOSTCONFIG_P2_MODE_UTMI_PHY; else if (isc->port_mode[1] == EHCI_HCD_OMAP_MODE_HSIC) reg |= UHH_HOSTCONFIG_P2_MODE_HSIC; } omap_uhh_write_4(isc, OMAP_USBHOST_UHH_HOSTCONFIG, reg); device_printf(isc->sc_dev, "UHH setup done, uhh_hostconfig=0x%08x\n", reg); /* I found the code and comments in the Linux EHCI driver - thanks guys :) * * "An undocumented "feature" in the OMAP3 EHCI controller, causes suspended * ports to be taken out of suspend when the USBCMD.Run/Stop bit is cleared * (for example when we do omap_uhh_bus_suspend). This breaks suspend-resume if * the root-hub is allowed to suspend. Writing 1 to this undocumented * register bit disables this feature and restores normal behavior." */ #if 0 omap_uhh_write_4(isc, OMAP_USBHOST_INSNREG04, OMAP_USBHOST_INSNREG04_DISABLE_UNSUSPEND); #endif tll_ch_mask = 0; for (i = 0; i < OMAP_HS_USB_PORTS; i++) { if (isc->port_mode[i] == EHCI_HCD_OMAP_MODE_TLL) tll_ch_mask |= (1 << i); } if (tll_ch_mask) omap_tll_utmi_enable(tll_ch_mask); return(0); } /** * omap_uhh_fini - shut down the EHCI controller * @isc: omap ehci device context * * Resets the UHH module and disables the USB host clocks. * * LOCKING: * none * * RETURNS: * nothing (the function is void).
*/ static void omap_uhh_fini(struct omap_uhh_softc *isc) { unsigned long timeout; device_printf(isc->sc_dev, "Stopping TI EHCI USB Controller\n"); /* Set the timeout */ if (hz < 10) timeout = 1; else timeout = (100 * hz) / 1000; /* Reset the UHH, OHCI and EHCI modules */ omap_uhh_write_4(isc, OMAP_USBHOST_UHH_SYSCONFIG, 0x0002); while ((omap_uhh_read_4(isc, OMAP_USBHOST_UHH_SYSSTATUS) & 0x07) == 0x00) { /* Sleep for a tick */ pause("USBRESET", 1); if (timeout-- == 0) { device_printf(isc->sc_dev, "operation timed out\n"); break; } } /* Disable functional and interface clocks for the TLL and HOST modules */ ti_sysc_clock_disable(device_get_parent(isc->sc_dev)); device_printf(isc->sc_dev, "Clock to USB host has been disabled\n"); } int omap_usb_port_mode(device_t dev, int port) { struct omap_uhh_softc *isc; isc = device_get_softc(dev); if ((port < 0) || (port >= OMAP_HS_USB_PORTS)) return (-1); return isc->port_mode[port]; } static int omap_uhh_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,usbhs-host")) return (ENXIO); device_set_desc(dev, "TI OMAP USB 2.0 Host module"); return (BUS_PROBE_DEFAULT); } static int omap_uhh_attach(device_t dev) { struct omap_uhh_softc *isc = device_get_softc(dev); int err; int rid; int i; phandle_t node; char propname[16]; char *mode; /* save the device */ isc->sc_dev = dev; /* Allocate resource for the UHH register set */ rid = 0; isc->uhh_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!isc->uhh_mem_res) { device_printf(dev, "Error: Could not map UHH memory\n"); goto error; } node = ofw_bus_get_node(dev); if (node == -1) goto error; /* Get port modes from FDT */ for (i = 0; i < OMAP_HS_USB_PORTS; i++) { isc->port_mode[i] = EHCI_HCD_OMAP_MODE_UNKNOWN; snprintf(propname, sizeof(propname), "port%d-mode", i+1); if (OF_getprop_alloc(node, propname, (void**)&mode) <= 0) continue; if (strcmp(mode, "ehci-phy") == 0) isc->port_mode[i] = EHCI_HCD_OMAP_MODE_PHY; else if (strcmp(mode, "ehci-tll") == 0) isc->port_mode[i] = EHCI_HCD_OMAP_MODE_TLL; else if (strcmp(mode, "ehci-hsic") == 0) isc->port_mode[i] = EHCI_HCD_OMAP_MODE_HSIC; } /* Initialise the EHCI registers */ err = omap_uhh_init(isc); if (err) { device_printf(dev, "Error: could not set up OMAP EHCI, %d\n", err); goto error; } simplebus_init(dev, node); /* * Allow devices to identify. */ - bus_generic_probe(dev); + bus_identify_children(dev); /* * Now walk the OFW tree and attach top-level devices.
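 * (bus_identify_children() above only lets drivers add their own children
 * via DEVICE_IDENTIFY; the loop below creates a simplebus child for every
 * fdt node under this bus.)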
*/ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); error: omap_uhh_detach(dev); return (ENXIO); } static int omap_uhh_detach(device_t dev) { struct omap_uhh_softc *isc = device_get_softc(dev); /* during module unload there are lots of children leftover */ device_delete_children(dev); if (isc->uhh_mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, isc->uhh_mem_res); isc->uhh_mem_res = NULL; } omap_uhh_fini(isc); return (0); } static device_method_t omap_uhh_methods[] = { /* Device interface */ DEVMETHOD(device_probe, omap_uhh_probe), DEVMETHOD(device_attach, omap_uhh_attach), DEVMETHOD(device_detach, omap_uhh_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; DEFINE_CLASS_1(omap_uhh, omap_uhh_driver, omap_uhh_methods, sizeof(struct omap_uhh_softc), simplebus_driver); DRIVER_MODULE(omap_uhh, simplebus, omap_uhh_driver, 0, 0); diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c index d780c9950e63..8628e29bd94c 100644 --- a/sys/arm64/arm64/nexus.c +++ b/sys/arm64/arm64/nexus.c @@ -1,592 +1,592 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for Arm Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #include "ofw_bus_if.h" #endif #ifdef DEV_ACPI #include #include #include "acpi_bus_if.h" #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; static int force_np; SYSCTL_INT(_kern, OID_AUTO, force_nonposted, CTLFLAG_RDTUN, &force_np, 0, "Force all devices to use non-posted device memory"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static int nexus_attach(device_t); #ifdef FDT static device_probe_t nexus_fdt_probe; static device_attach_t nexus_fdt_attach; static bus_activate_resource_t nexus_fdt_activate_resource; #endif #ifdef DEV_ACPI static device_probe_t nexus_acpi_probe; static device_attach_t nexus_acpi_attach; #endif static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_alloc_resource_t nexus_alloc_resource; static bus_get_resource_list_t nexus_get_reslist; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; #ifdef FDT static ofw_bus_map_intr_t nexus_ofw_map_intr; #endif static device_method_t nexus_methods[] = { /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD_END }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); - 
bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource_list_entry *rle; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return (NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* * On arm64 (due to INTRNG), ACPI interrupt configuration is * done in nexus_acpi_map_intr(). */ return (0); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. */ error = rman_activate_resource(res); if (error) return (error); error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind_irq(child, irq, cpu)); } #endif static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return (&memmap_bus); } static int nexus_activate_resource_flags(device_t bus, device_t child, struct resource *r, int flags) { struct resource_map_request args; struct resource_map map; int err, use_np; /* * If this is a memory resource, map it into the kernel. 
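 * A non-posted mapping is used when the caller passes
 * BUS_SPACE_MAP_NONPOSTED, when the kern.force_nonposted tunable is set,
 * or when a per-device hint asks for it (e.g. the hypothetical
 * hint.foo.0.force_nonposted="1").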
*/ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: if ((err = rman_activate_resource(r)) != 0) return (err); if ((rman_get_flags(r) & RF_UNMAPPED) == 0) { resource_init_map_request(&args); use_np = (flags & BUS_SPACE_MAP_NONPOSTED) != 0 || force_np; if (!use_np) resource_int_value(device_get_name(child), device_get_unit(child), "force_nonposted", &use_np); if (use_np) args.memattr = VM_MEMATTR_DEVICE_NP; err = nexus_map_resource(bus, child, r, &args, &map); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_mapping(r, &map); } break; default: return (bus_generic_rman_activate_resource(bus, child, r)); } return (0); } static int nexus_activate_resource(device_t dev, device_t child, struct resource *r) { return (nexus_activate_resource_flags(dev, child, r, 0)); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if ((rman_get_flags(r) & RF_ACTIVE) == 0) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); map->r_bustag = &memmap_bus; map->r_size = length; /* * The handle is the virtual address. */ map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } #ifdef FDT static device_method_t nexus_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_fdt_attach), /* Bus interface */ DEVMETHOD(bus_activate_resource, nexus_fdt_activate_resource), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), DEVMETHOD_END, }; #define nexus_baseclasses nexus_fdt_baseclasses DEFINE_CLASS_1(nexus, nexus_fdt_driver, nexus_fdt_methods, 1, nexus_driver); #undef nexus_baseclasses EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { if (arm64_bus_method != ARM64_BUS_FDT) return (ENXIO); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_fdt_attach(device_t dev) { nexus_add_child(dev, 10, "ofwbus", 0); return (nexus_attach(dev)); } static int nexus_fdt_activate_resource(device_t bus, device_t child, struct resource *r) { phandle_t node, parent; int flags; flags = 0; switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * If the fdt parent has the nonposted-mmio property we * need to use non-posted IO to access the device. When * we find this property set the BUS_SPACE_MAP_NONPOSTED * flag to be passed to bus_space_map. 
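 * A hypothetical fragment selecting non-posted mappings for all
 * children of a node:
 *
 *	soc {
 *		nonposted-mmio;
 *		...
 *	};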
*/ node = ofw_bus_get_node(child); if (node != -1) { parent = OF_parent(node); if (parent != 0 && OF_hasprop(parent, "nonposted-mmio")) { flags |= BUS_SPACE_MAP_NONPOSTED; } } break; default: break; } return (nexus_activate_resource_flags(bus, child, r, flags)); } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { u_int irq; struct intr_map_data_fdt *fdt_data; size_t len; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } #endif #ifdef DEV_ACPI static int nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol); static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), /* ACPI interface */ DEVMETHOD(acpi_bus_map_intr, nexus_acpi_map_intr), DEVMETHOD_END, }; #define nexus_baseclasses nexus_acpi_baseclasses DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); #undef nexus_baseclasses EARLY_DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_acpi_probe(device_t dev) { if (arm64_bus_method != ARM64_BUS_ACPI || acpi_identify() != 0) return (ENXIO); device_quiet(dev); return (BUS_PROBE_LOW_PRIORITY); } static int nexus_acpi_attach(device_t dev) { nexus_add_child(dev, 10, "acpi", 0); return (nexus_attach(dev)); } static int nexus_acpi_map_intr(device_t dev, device_t child, u_int irq, int trig, int pol) { struct intr_map_data_acpi *acpi_data; size_t len; len = sizeof(*acpi_data); acpi_data = (struct intr_map_data_acpi *)intr_alloc_map_data( INTR_MAP_DATA_ACPI, len, M_WAITOK | M_ZERO); acpi_data->irq = irq; acpi_data->pol = pol; acpi_data->trig = trig; /* * TODO: This will only handle a single interrupt controller. * ACPI will map multiple controllers into a single virtual IRQ * space. Each controller has a System Vector Base to hold the * first irq it handles in this space. As such the correct way * to handle interrupts with ACPI is to search through the * controllers for the largest base value that is no larger than * the IRQ value. */ irq = intr_map_irq(NULL, ACPI_INTR_XREF, (struct intr_map_data *)acpi_data); return (irq); } #endif diff --git a/sys/arm64/rockchip/rk_pinctrl.c b/sys/arm64/rockchip/rk_pinctrl.c index 4cf266df3cbf..f77e7965a549 100644 --- a/sys/arm64/rockchip/rk_pinctrl.c +++ b/sys/arm64/rockchip/rk_pinctrl.c @@ -1,1570 +1,1570 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #include "syscon_if.h" #include "fdt_pinctrl_if.h" struct rk_pinctrl_pin_drive { uint32_t bank; uint32_t subbank; uint32_t offset; uint32_t value; uint32_t ma; }; struct rk_pinctrl_bank { uint32_t bank; uint32_t subbank; uint32_t offset; uint32_t nbits; }; struct rk_pinctrl_pin_fixup { uint32_t bank; uint32_t subbank; uint32_t pin; uint32_t reg; uint32_t bit; uint32_t mask; }; struct rk_pinctrl_gpio { uint32_t bank; char *gpio_name; device_t gpio_dev; }; struct rk_pinctrl_softc; struct rk_pinctrl_conf { struct rk_pinctrl_bank *iomux_conf; uint32_t iomux_nbanks; struct rk_pinctrl_pin_fixup *pin_fixup; uint32_t npin_fixup; struct rk_pinctrl_pin_drive *pin_drive; uint32_t npin_drive; struct rk_pinctrl_gpio *gpio_bank; uint32_t ngpio_bank; uint32_t (*get_pd_offset)(struct rk_pinctrl_softc *, uint32_t); struct syscon *(*get_syscon)(struct rk_pinctrl_softc *, uint32_t); int (*parse_bias)(phandle_t, int); int (*resolv_bias_value)(int, int); int (*get_bias_value)(int, int); }; struct rk_pinctrl_softc { struct simplebus_softc simplebus_sc; device_t dev; struct syscon *grf; struct syscon *pmu; struct rk_pinctrl_conf *conf; struct mtx mtx; }; #define RK_PINCTRL_LOCK(_sc) mtx_lock_spin(&(_sc)->mtx) #define RK_PINCTRL_UNLOCK(_sc) mtx_unlock_spin(&(_sc)->mtx) #define RK_PINCTRL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #define RK_IOMUX(_bank, _subbank, _offset, _nbits) \ { \ .bank = _bank, \ .subbank = _subbank, \ .offset = _offset, \ .nbits = _nbits, \ } #define RK_PINFIX(_bank, _pin, _reg, _bit, _mask) \ { \ .bank = _bank, \ .pin = _pin, \ .reg = _reg, \ .bit = _bit, \ .mask = _mask, \ } #define RK_PINDRIVE(_bank, _subbank, _offset, _value, _ma) \ { \ .bank = _bank, \ .subbank = _subbank, \ .offset = _offset, \ .value = _value, \ .ma = _ma, \ } #define RK_GPIO(_bank, _name) \ { \ .bank = _bank, \ .gpio_name = _name, \ } static struct rk_pinctrl_gpio rk3288_gpio_bank[] = { RK_GPIO(0, "gpio0"), RK_GPIO(1, "gpio1"), RK_GPIO(2, "gpio2"), RK_GPIO(3, "gpio3"), RK_GPIO(4, "gpio4"), RK_GPIO(5, "gpio5"), RK_GPIO(6, "gpio6"), RK_GPIO(7, "gpio7"), RK_GPIO(8, "gpio8"), }; static struct rk_pinctrl_bank rk3288_iomux_bank[] = { /* bank sub offs nbits */ /* PMU */ RK_IOMUX(0, 0, 0x0084, 2), RK_IOMUX(0, 1, 0x0088, 2), RK_IOMUX(0, 2, 0x008C, 2), /* GRF */ RK_IOMUX(1, 3, 0x000C, 2), RK_IOMUX(2, 0, 0x0010, 2), RK_IOMUX(2, 1, 0x0014, 2), RK_IOMUX(2, 2, 0x0018, 2), RK_IOMUX(2, 3, 0x001C, 2), RK_IOMUX(3, 0, 0x0020, 2), RK_IOMUX(3, 1, 0x0024, 2), RK_IOMUX(3, 2, 0x0028, 2), RK_IOMUX(3, 3, 0x002C, 4), RK_IOMUX(4, 0, 0x0034, 4), RK_IOMUX(4, 1, 0x003C, 4), RK_IOMUX(4, 2, 0x0044, 2), RK_IOMUX(4, 3,
0x0048, 2), /* 5,0 - Empty */ RK_IOMUX(5, 1, 0x0050, 2), RK_IOMUX(5, 2, 0x0054, 2), /* 5,3 - Empty */ RK_IOMUX(6, 0, 0x005C, 2), RK_IOMUX(6, 1, 0x0060, 2), RK_IOMUX(6, 2, 0x0064, 2), /* 6,3 - Empty */ RK_IOMUX(7, 0, 0x006C, 2), RK_IOMUX(7, 1, 0x0070, 2), RK_IOMUX(7, 2, 0x0074, 4), /* 7,3 - Empty */ RK_IOMUX(8, 0, 0x0080, 2), RK_IOMUX(8, 1, 0x0084, 2), /* 8,2 - Empty */ /* 8,3 - Empty */ }; static struct rk_pinctrl_pin_fixup rk3288_pin_fixup[] = { }; static struct rk_pinctrl_pin_drive rk3288_pin_drive[] = { /* bank sub offs val ma */ /* GPIO0A (PMU)*/ RK_PINDRIVE(0, 0, 0x070, 0, 2), RK_PINDRIVE(0, 0, 0x070, 1, 4), RK_PINDRIVE(0, 0, 0x070, 2, 8), RK_PINDRIVE(0, 0, 0x070, 3, 12), /* GPIO0B (PMU)*/ RK_PINDRIVE(0, 1, 0x074, 0, 2), RK_PINDRIVE(0, 1, 0x074, 1, 4), RK_PINDRIVE(0, 1, 0x074, 2, 8), RK_PINDRIVE(0, 1, 0x074, 3, 12), /* GPIO0C (PMU)*/ RK_PINDRIVE(0, 2, 0x078, 0, 2), RK_PINDRIVE(0, 2, 0x078, 1, 4), RK_PINDRIVE(0, 2, 0x078, 2, 8), RK_PINDRIVE(0, 2, 0x078, 3, 12), /* GPIO1D */ RK_PINDRIVE(1, 3, 0x1CC, 0, 2), RK_PINDRIVE(1, 3, 0x1CC, 1, 4), RK_PINDRIVE(1, 3, 0x1CC, 2, 8), RK_PINDRIVE(1, 3, 0x1CC, 3, 12), /* GPIO2A */ RK_PINDRIVE(2, 0, 0x1D0, 0, 2), RK_PINDRIVE(2, 0, 0x1D0, 1, 4), RK_PINDRIVE(2, 0, 0x1D0, 2, 8), RK_PINDRIVE(2, 0, 0x1D0, 3, 12), /* GPIO2B */ RK_PINDRIVE(2, 1, 0x1D4, 0, 2), RK_PINDRIVE(2, 1, 0x1D4, 1, 4), RK_PINDRIVE(2, 1, 0x1D4, 2, 8), RK_PINDRIVE(2, 1, 0x1D4, 3, 12), /* GPIO2C */ RK_PINDRIVE(2, 2, 0x1D8, 0, 2), RK_PINDRIVE(2, 2, 0x1D8, 1, 4), RK_PINDRIVE(2, 2, 0x1D8, 2, 8), RK_PINDRIVE(2, 2, 0x1D8, 3, 12), /* GPIO2D */ RK_PINDRIVE(2, 3, 0x1DC, 0, 2), RK_PINDRIVE(2, 3, 0x1DC, 1, 4), RK_PINDRIVE(2, 3, 0x1DC, 2, 8), RK_PINDRIVE(2, 3, 0x1DC, 3, 12), /* GPIO3A */ RK_PINDRIVE(3, 0, 0x1E0, 0, 2), RK_PINDRIVE(3, 0, 0x1E0, 1, 4), RK_PINDRIVE(3, 0, 0x1E0, 2, 8), RK_PINDRIVE(3, 0, 0x1E0, 3, 12), /* GPIO3B */ RK_PINDRIVE(3, 1, 0x1E4, 0, 2), RK_PINDRIVE(3, 1, 0x1E4, 1, 4), RK_PINDRIVE(3, 1, 0x1E4, 2, 8), RK_PINDRIVE(3, 1, 0x1E4, 3, 12), /* GPIO3C */ RK_PINDRIVE(3, 2, 0x1E8, 0, 2), RK_PINDRIVE(3, 2, 0x1E8, 1, 4), RK_PINDRIVE(3, 2, 0x1E8, 2, 8), RK_PINDRIVE(3, 2, 0x1E8, 3, 12), /* GPIO3D */ RK_PINDRIVE(3, 3, 0x1EC, 0, 2), RK_PINDRIVE(3, 3, 0x1EC, 1, 4), RK_PINDRIVE(3, 3, 0x1EC, 2, 8), RK_PINDRIVE(3, 3, 0x1EC, 3, 12), /* GPIO4A */ RK_PINDRIVE(4, 0, 0x1F0, 0, 2), RK_PINDRIVE(4, 0, 0x1F0, 1, 4), RK_PINDRIVE(4, 0, 0x1F0, 2, 8), RK_PINDRIVE(4, 0, 0x1F0, 3, 12), /* GPIO4B */ RK_PINDRIVE(4, 1, 0x1F4, 0, 2), RK_PINDRIVE(4, 1, 0x1F4, 1, 4), RK_PINDRIVE(4, 1, 0x1F4, 2, 8), RK_PINDRIVE(4, 1, 0x1F4, 3, 12), /* GPIO4C */ RK_PINDRIVE(4, 2, 0x1F8, 0, 2), RK_PINDRIVE(4, 2, 0x1F8, 1, 4), RK_PINDRIVE(4, 2, 0x1F8, 2, 8), RK_PINDRIVE(4, 2, 0x1F8, 3, 12), /* GPIO4D */ RK_PINDRIVE(4, 3, 0x1FC, 0, 2), RK_PINDRIVE(4, 3, 0x1FC, 1, 4), RK_PINDRIVE(4, 3, 0x1FC, 2, 8), RK_PINDRIVE(4, 3, 0x1FC, 3, 12), /* GPIO5B */ RK_PINDRIVE(5, 1, 0x204, 0, 2), RK_PINDRIVE(5, 1, 0x204, 1, 4), RK_PINDRIVE(5, 1, 0x204, 2, 8), RK_PINDRIVE(5, 1, 0x204, 3, 12), /* GPIO5C */ RK_PINDRIVE(5, 2, 0x208, 0, 2), RK_PINDRIVE(5, 2, 0x208, 1, 4), RK_PINDRIVE(5, 2, 0x208, 2, 8), RK_PINDRIVE(5, 2, 0x208, 3, 12), /* GPIO6A */ RK_PINDRIVE(6, 0, 0x210, 0, 2), RK_PINDRIVE(6, 0, 0x210, 1, 4), RK_PINDRIVE(6, 0, 0x210, 2, 8), RK_PINDRIVE(6, 0, 0x210, 3, 12), /* GPIO6B */ RK_PINDRIVE(6, 1, 0x214, 0, 2), RK_PINDRIVE(6, 1, 0x214, 1, 4), RK_PINDRIVE(6, 1, 0x214, 2, 8), RK_PINDRIVE(6, 1, 0x214, 3, 12), /* GPIO6C */ RK_PINDRIVE(6, 2, 0x218, 0, 2), RK_PINDRIVE(6, 2, 0x218, 1, 4), RK_PINDRIVE(6, 2, 0x218, 2, 8), RK_PINDRIVE(6, 2, 0x218, 3, 12), /* GPIO7A */ 
RK_PINDRIVE(7, 0, 0x220, 0, 2), RK_PINDRIVE(7, 0, 0x220, 1, 4), RK_PINDRIVE(7, 0, 0x220, 2, 8), RK_PINDRIVE(7, 0, 0x220, 3, 12), /* GPIO7B */ RK_PINDRIVE(7, 1, 0x224, 0, 2), RK_PINDRIVE(7, 1, 0x224, 1, 4), RK_PINDRIVE(7, 1, 0x224, 2, 8), RK_PINDRIVE(7, 1, 0x224, 3, 12), /* GPIO7C */ RK_PINDRIVE(7, 2, 0x228, 0, 2), RK_PINDRIVE(7, 2, 0x228, 1, 4), RK_PINDRIVE(7, 2, 0x228, 2, 8), RK_PINDRIVE(7, 2, 0x228, 3, 12), /* GPIO8A */ RK_PINDRIVE(8, 0, 0x230, 0, 2), RK_PINDRIVE(8, 0, 0x230, 1, 4), RK_PINDRIVE(8, 0, 0x230, 2, 8), RK_PINDRIVE(8, 0, 0x230, 3, 12), /* GPIO8B */ RK_PINDRIVE(8, 1, 0x234, 0, 2), RK_PINDRIVE(8, 1, 0x234, 1, 4), RK_PINDRIVE(8, 1, 0x234, 2, 8), RK_PINDRIVE(8, 1, 0x234, 3, 12), }; static uint32_t rk3288_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank == 0) return (0x064); /* PMU */ return (0x130); } static struct syscon * rk3288_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank == 0) return (sc->pmu); return (sc->grf); } static int rk3288_parse_bias(phandle_t node, int bank) { if (OF_hasprop(node, "bias-disable")) return (0); if (OF_hasprop(node, "bias-pull-up")) return (1); if (OF_hasprop(node, "bias-pull-down")) return (2); return (-1); } static int rk3288_resolv_bias_value(int bank, int bias) { int rv = 0; if (bias == 1) rv = GPIO_PIN_PULLUP; else if (bias == 2) rv = GPIO_PIN_PULLDOWN; return (rv); } static int rk3288_get_bias_value(int bank, int bias) { int rv = 0; if (bias & GPIO_PIN_PULLUP) rv = 1; else if (bias & GPIO_PIN_PULLDOWN) rv = 2; return (rv); } struct rk_pinctrl_conf rk3288_conf = { .iomux_conf = rk3288_iomux_bank, .iomux_nbanks = nitems(rk3288_iomux_bank), .pin_fixup = rk3288_pin_fixup, .npin_fixup = nitems(rk3288_pin_fixup), .pin_drive = rk3288_pin_drive, .npin_drive = nitems(rk3288_pin_drive), .gpio_bank = rk3288_gpio_bank, .ngpio_bank = nitems(rk3288_gpio_bank), .get_pd_offset = rk3288_get_pd_offset, .get_syscon = rk3288_get_syscon, .parse_bias = rk3288_parse_bias, .resolv_bias_value = rk3288_resolv_bias_value, .get_bias_value = rk3288_get_bias_value, }; static struct rk_pinctrl_gpio rk3328_gpio_bank[] = { RK_GPIO(0, "gpio0"), RK_GPIO(1, "gpio1"), RK_GPIO(2, "gpio2"), RK_GPIO(3, "gpio3"), }; static struct rk_pinctrl_bank rk3328_iomux_bank[] = { /* bank sub offs nbits */ RK_IOMUX(0, 0, 0x0000, 2), RK_IOMUX(0, 1, 0x0004, 2), RK_IOMUX(0, 2, 0x0008, 2), RK_IOMUX(0, 3, 0x000C, 2), RK_IOMUX(1, 0, 0x0010, 2), RK_IOMUX(1, 1, 0x0014, 2), RK_IOMUX(1, 2, 0x0018, 2), RK_IOMUX(1, 3, 0x001C, 2), RK_IOMUX(2, 0, 0x0020, 2), RK_IOMUX(2, 1, 0x0024, 3), RK_IOMUX(2, 2, 0x002c, 3), RK_IOMUX(2, 3, 0x0034, 2), RK_IOMUX(3, 0, 0x0038, 3), RK_IOMUX(3, 1, 0x0040, 3), RK_IOMUX(3, 2, 0x0048, 2), RK_IOMUX(3, 3, 0x004c, 2), }; static struct rk_pinctrl_pin_fixup rk3328_pin_fixup[] = { /* bank pin reg bit mask */ RK_PINFIX(2, 12, 0x24, 8, 0x300), RK_PINFIX(2, 15, 0x28, 0, 0x7), RK_PINFIX(2, 23, 0x30, 14, 0x6000), }; static struct rk_pinctrl_pin_drive rk3328_pin_drive[] = { /* bank sub offs val ma */ RK_PINDRIVE(0, 0, 0x200, 0, 2), RK_PINDRIVE(0, 0, 0x200, 1, 4), RK_PINDRIVE(0, 0, 0x200, 2, 8), RK_PINDRIVE(0, 0, 0x200, 3, 12), RK_PINDRIVE(0, 1, 0x204, 0, 2), RK_PINDRIVE(0, 1, 0x204, 1, 4), RK_PINDRIVE(0, 1, 0x204, 2, 8), RK_PINDRIVE(0, 1, 0x204, 3, 12), RK_PINDRIVE(0, 2, 0x208, 0, 2), RK_PINDRIVE(0, 2, 0x208, 1, 4), RK_PINDRIVE(0, 2, 0x208, 2, 8), RK_PINDRIVE(0, 2, 0x208, 3, 12), RK_PINDRIVE(0, 3, 0x20C, 0, 2), RK_PINDRIVE(0, 3, 0x20C, 1, 4), RK_PINDRIVE(0, 3, 0x20C, 2, 8), RK_PINDRIVE(0, 3, 0x20C, 3, 12), RK_PINDRIVE(1, 0, 0x210, 0, 2), RK_PINDRIVE(1, 0, 
0x210, 1, 4), RK_PINDRIVE(1, 0, 0x210, 2, 8), RK_PINDRIVE(1, 0, 0x210, 3, 12), RK_PINDRIVE(1, 1, 0x214, 0, 2), RK_PINDRIVE(1, 1, 0x214, 1, 4), RK_PINDRIVE(1, 1, 0x214, 2, 8), RK_PINDRIVE(1, 1, 0x214, 3, 12), RK_PINDRIVE(1, 2, 0x218, 0, 2), RK_PINDRIVE(1, 2, 0x218, 1, 4), RK_PINDRIVE(1, 2, 0x218, 2, 8), RK_PINDRIVE(1, 2, 0x218, 3, 12), RK_PINDRIVE(1, 3, 0x21C, 0, 2), RK_PINDRIVE(1, 3, 0x21C, 1, 4), RK_PINDRIVE(1, 3, 0x21C, 2, 8), RK_PINDRIVE(1, 3, 0x21C, 3, 12), RK_PINDRIVE(2, 0, 0x220, 0, 2), RK_PINDRIVE(2, 0, 0x220, 1, 4), RK_PINDRIVE(2, 0, 0x220, 2, 8), RK_PINDRIVE(2, 0, 0x220, 3, 12), RK_PINDRIVE(2, 1, 0x224, 0, 2), RK_PINDRIVE(2, 1, 0x224, 1, 4), RK_PINDRIVE(2, 1, 0x224, 2, 8), RK_PINDRIVE(2, 1, 0x224, 3, 12), RK_PINDRIVE(2, 2, 0x228, 0, 2), RK_PINDRIVE(2, 2, 0x228, 1, 4), RK_PINDRIVE(2, 2, 0x228, 2, 8), RK_PINDRIVE(2, 2, 0x228, 3, 12), RK_PINDRIVE(2, 3, 0x22C, 0, 2), RK_PINDRIVE(2, 3, 0x22C, 1, 4), RK_PINDRIVE(2, 3, 0x22C, 2, 8), RK_PINDRIVE(2, 3, 0x22C, 3, 12), RK_PINDRIVE(3, 0, 0x230, 0, 2), RK_PINDRIVE(3, 0, 0x230, 1, 4), RK_PINDRIVE(3, 0, 0x230, 2, 8), RK_PINDRIVE(3, 0, 0x230, 3, 12), RK_PINDRIVE(3, 1, 0x234, 0, 2), RK_PINDRIVE(3, 1, 0x234, 1, 4), RK_PINDRIVE(3, 1, 0x234, 2, 8), RK_PINDRIVE(3, 1, 0x234, 3, 12), RK_PINDRIVE(3, 2, 0x238, 0, 2), RK_PINDRIVE(3, 2, 0x238, 1, 4), RK_PINDRIVE(3, 2, 0x238, 2, 8), RK_PINDRIVE(3, 2, 0x238, 3, 12), RK_PINDRIVE(3, 3, 0x23C, 0, 2), RK_PINDRIVE(3, 3, 0x23C, 1, 4), RK_PINDRIVE(3, 3, 0x23C, 2, 8), RK_PINDRIVE(3, 3, 0x23C, 3, 12), }; static uint32_t rk3328_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank) { return (0x100); } static struct syscon * rk3328_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank) { return (sc->grf); } struct rk_pinctrl_conf rk3328_conf = { .iomux_conf = rk3328_iomux_bank, .iomux_nbanks = nitems(rk3328_iomux_bank), .pin_fixup = rk3328_pin_fixup, .npin_fixup = nitems(rk3328_pin_fixup), .pin_drive = rk3328_pin_drive, .npin_drive = nitems(rk3328_pin_drive), .gpio_bank = rk3328_gpio_bank, .ngpio_bank = nitems(rk3328_gpio_bank), .get_pd_offset = rk3328_get_pd_offset, .get_syscon = rk3328_get_syscon, .parse_bias = rk3288_parse_bias, .resolv_bias_value = rk3288_resolv_bias_value, .get_bias_value = rk3288_get_bias_value, }; static struct rk_pinctrl_gpio rk3399_gpio_bank[] = { RK_GPIO(0, "gpio0"), RK_GPIO(1, "gpio1"), RK_GPIO(2, "gpio2"), RK_GPIO(3, "gpio3"), RK_GPIO(4, "gpio4"), }; static struct rk_pinctrl_bank rk3399_iomux_bank[] = { /* bank sub offs nbits */ RK_IOMUX(0, 0, 0x0000, 2), RK_IOMUX(0, 1, 0x0004, 2), RK_IOMUX(0, 2, 0x0008, 2), RK_IOMUX(0, 3, 0x000C, 2), RK_IOMUX(1, 0, 0x0010, 2), RK_IOMUX(1, 1, 0x0014, 2), RK_IOMUX(1, 2, 0x0018, 2), RK_IOMUX(1, 3, 0x001C, 2), RK_IOMUX(2, 0, 0xE000, 2), RK_IOMUX(2, 1, 0xE004, 2), RK_IOMUX(2, 2, 0xE008, 2), RK_IOMUX(2, 3, 0xE00C, 2), RK_IOMUX(3, 0, 0xE010, 2), RK_IOMUX(3, 1, 0xE014, 2), RK_IOMUX(3, 2, 0xE018, 2), RK_IOMUX(3, 3, 0xE01C, 2), RK_IOMUX(4, 0, 0xE020, 2), RK_IOMUX(4, 1, 0xE024, 2), RK_IOMUX(4, 2, 0xE028, 2), RK_IOMUX(4, 3, 0xE02C, 2), }; static struct rk_pinctrl_pin_fixup rk3399_pin_fixup[] = {}; static struct rk_pinctrl_pin_drive rk3399_pin_drive[] = { /* bank sub offs val ma */ /* GPIO0A */ RK_PINDRIVE(0, 0, 0x80, 0, 5), RK_PINDRIVE(0, 0, 0x80, 1, 10), RK_PINDRIVE(0, 0, 0x80, 2, 15), RK_PINDRIVE(0, 0, 0x80, 3, 20), /* GPIO0B */ RK_PINDRIVE(0, 1, 0x88, 0, 5), RK_PINDRIVE(0, 1, 0x88, 1, 10), RK_PINDRIVE(0, 1, 0x88, 2, 15), RK_PINDRIVE(0, 1, 0x88, 3, 20), /* GPIO1A */ RK_PINDRIVE(1, 0, 0xA0, 0, 3), RK_PINDRIVE(1, 0, 0xA0, 1, 6), RK_PINDRIVE(1, 0, 0xA0, 2, 9),
RK_PINDRIVE(1, 0, 0xA0, 3, 12), /* GPIO1B */ RK_PINDRIVE(1, 1, 0xA8, 0, 3), RK_PINDRIVE(1, 1, 0xA8, 1, 6), RK_PINDRIVE(1, 1, 0xA8, 2, 9), RK_PINDRIVE(1, 1, 0xA8, 3, 12), /* GPIO1C */ RK_PINDRIVE(1, 2, 0xB0, 0, 3), RK_PINDRIVE(1, 2, 0xB0, 1, 6), RK_PINDRIVE(1, 2, 0xB0, 2, 9), RK_PINDRIVE(1, 2, 0xB0, 3, 12), /* GPIO1D */ RK_PINDRIVE(1, 3, 0xB8, 0, 3), RK_PINDRIVE(1, 3, 0xB8, 1, 6), RK_PINDRIVE(1, 3, 0xB8, 2, 9), RK_PINDRIVE(1, 3, 0xB8, 3, 12), }; static uint32_t rk3399_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank < 2) return (0x40); return (0xE040); } static struct syscon * rk3399_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank < 2) return (sc->pmu); return (sc->grf); } static int rk3399_parse_bias(phandle_t node, int bank) { int pullup, pulldown; if (OF_hasprop(node, "bias-disable")) return (0); switch (bank) { case 0: case 2: pullup = 3; pulldown = 1; break; case 1: case 3: case 4: pullup = 1; pulldown = 2; break; } if (OF_hasprop(node, "bias-pull-up")) return (pullup); if (OF_hasprop(node, "bias-pull-down")) return (pulldown); return (-1); } static int rk3399_resolv_bias_value(int bank, int bias) { int rv = 0; switch (bank) { case 0: case 2: if (bias == 3) rv = GPIO_PIN_PULLUP; else if (bias == 1) rv = GPIO_PIN_PULLDOWN; break; case 1: case 3: case 4: if (bias == 1) rv = GPIO_PIN_PULLUP; else if (bias == 2) rv = GPIO_PIN_PULLDOWN; break; } return (rv); } static int rk3399_get_bias_value(int bank, int bias) { int rv = 0; switch (bank) { case 0: case 2: if (bias & GPIO_PIN_PULLUP) rv = 3; else if (bias & GPIO_PIN_PULLDOWN) rv = 1; break; case 1: case 3: case 4: if (bias & GPIO_PIN_PULLUP) rv = 1; else if (bias & GPIO_PIN_PULLDOWN) rv = 2; break; } return (rv); } struct rk_pinctrl_conf rk3399_conf = { .iomux_conf = rk3399_iomux_bank, .iomux_nbanks = nitems(rk3399_iomux_bank), .pin_fixup = rk3399_pin_fixup, .npin_fixup = nitems(rk3399_pin_fixup), .pin_drive = rk3399_pin_drive, .npin_drive = nitems(rk3399_pin_drive), .gpio_bank = rk3399_gpio_bank, .ngpio_bank = nitems(rk3399_gpio_bank), .get_pd_offset = rk3399_get_pd_offset, .get_syscon = rk3399_get_syscon, .parse_bias = rk3399_parse_bias, .resolv_bias_value = rk3399_resolv_bias_value, .get_bias_value = rk3399_get_bias_value, }; #define GRF_IOFUNC_SEL0 0x0300 #define GMAC1_IOMUX_SEL_M0 0x01000000 #define GMAC1_IOMUX_SEL_M1 0x01000100 static struct rk_pinctrl_gpio rk3568_gpio_bank[] = { RK_GPIO(0, "gpio0"), RK_GPIO(1, "gpio1"), RK_GPIO(2, "gpio2"), RK_GPIO(3, "gpio3"), RK_GPIO(4, "gpio4"), }; static struct rk_pinctrl_bank rk3568_iomux_bank[] = { /* bank sub offs nbits */ RK_IOMUX(0, 0, 0x0000, 4), /* PMU_GRF */ RK_IOMUX(0, 1, 0x0008, 4), RK_IOMUX(0, 2, 0x0010, 4), RK_IOMUX(0, 3, 0x0018, 4), RK_IOMUX(1, 0, 0x0000, 4), /* SYS_GRF */ RK_IOMUX(1, 1, 0x0008, 4), RK_IOMUX(1, 2, 0x0010, 4), RK_IOMUX(1, 3, 0x0018, 4), RK_IOMUX(2, 0, 0x0020, 4), RK_IOMUX(2, 1, 0x0028, 4), RK_IOMUX(2, 2, 0x0030, 4), RK_IOMUX(2, 3, 0x0038, 4), RK_IOMUX(3, 0, 0x0040, 4), RK_IOMUX(3, 1, 0x0048, 4), RK_IOMUX(3, 2, 0x0050, 4), RK_IOMUX(3, 3, 0x0058, 4), RK_IOMUX(4, 0, 0x0060, 4), RK_IOMUX(4, 1, 0x0068, 4), RK_IOMUX(4, 2, 0x0070, 4), RK_IOMUX(4, 3, 0x0078, 4), }; static struct rk_pinctrl_pin_fixup rk3568_pin_fixup[] = {}; static struct rk_pinctrl_pin_drive rk3568_pin_drive[] = { /* bank sub offs val ma */ /* GPIO0A */ RK_PINDRIVE(0, 0, 0x0020, 0, 2), RK_PINDRIVE(0, 0, 0x0020, 1, 4), RK_PINDRIVE(0, 0, 0x0020, 2, 8), RK_PINDRIVE(0, 0, 0x0020, 3, 12), /* GPIO0B */ RK_PINDRIVE(0, 1, 0x0024, 0, 2), RK_PINDRIVE(0, 1, 0x0024, 1, 4), 
RK_PINDRIVE(0, 1, 0x0024, 2, 8), RK_PINDRIVE(0, 1, 0x0024, 3, 12), /* GPIO0C */ RK_PINDRIVE(0, 1, 0x0028, 0, 2), RK_PINDRIVE(0, 1, 0x0028, 1, 4), RK_PINDRIVE(0, 1, 0x0028, 2, 8), RK_PINDRIVE(0, 1, 0x0028, 3, 12), /* GPIO0D */ RK_PINDRIVE(0, 1, 0x002c, 0, 2), RK_PINDRIVE(0, 1, 0x002c, 1, 4), RK_PINDRIVE(0, 1, 0x002c, 2, 8), RK_PINDRIVE(0, 1, 0x002c, 3, 12), /* GPIO1A */ RK_PINDRIVE(1, 0, 0x0080, 0, 2), RK_PINDRIVE(1, 0, 0x0080, 1, 4), RK_PINDRIVE(1, 0, 0x0080, 2, 8), RK_PINDRIVE(1, 0, 0x0080, 3, 12), /* GPIO1B */ RK_PINDRIVE(1, 1, 0x0084, 0, 2), RK_PINDRIVE(1, 1, 0x0084, 1, 4), RK_PINDRIVE(1, 1, 0x0084, 2, 8), RK_PINDRIVE(1, 1, 0x0084, 3, 12), /* GPIO1C */ RK_PINDRIVE(1, 2, 0x0088, 0, 2), RK_PINDRIVE(1, 2, 0x0088, 1, 4), RK_PINDRIVE(1, 2, 0x0088, 2, 8), RK_PINDRIVE(1, 2, 0x0088, 3, 12), /* GPIO1D */ RK_PINDRIVE(1, 3, 0x008c, 0, 2), RK_PINDRIVE(1, 3, 0x008c, 1, 4), RK_PINDRIVE(1, 3, 0x008c, 2, 8), RK_PINDRIVE(1, 3, 0x008c, 3, 12), /* GPIO2A */ RK_PINDRIVE(2, 0, 0x0090, 0, 2), RK_PINDRIVE(2, 0, 0x0090, 1, 4), RK_PINDRIVE(2, 0, 0x0090, 2, 8), RK_PINDRIVE(2, 0, 0x0090, 3, 12), /* GPIO2B */ RK_PINDRIVE(2, 1, 0x0094, 0, 2), RK_PINDRIVE(2, 1, 0x0094, 1, 4), RK_PINDRIVE(2, 1, 0x0094, 2, 8), RK_PINDRIVE(2, 1, 0x0094, 3, 12), /* GPIO2C */ RK_PINDRIVE(2, 2, 0x0098, 0, 2), RK_PINDRIVE(2, 2, 0x0098, 1, 4), RK_PINDRIVE(2, 2, 0x0098, 2, 8), RK_PINDRIVE(2, 2, 0x0098, 3, 12), /* GPIO2D */ RK_PINDRIVE(2, 3, 0x009c, 0, 2), RK_PINDRIVE(2, 3, 0x009c, 1, 4), RK_PINDRIVE(2, 3, 0x009c, 2, 8), RK_PINDRIVE(2, 3, 0x009c, 3, 12), /* GPIO3A */ RK_PINDRIVE(3, 0, 0x00a0, 0, 2), RK_PINDRIVE(3, 0, 0x00a0, 1, 4), RK_PINDRIVE(3, 0, 0x00a0, 2, 8), RK_PINDRIVE(3, 0, 0x00a0, 3, 12), /* GPIO3B */ RK_PINDRIVE(3, 1, 0x00a4, 0, 2), RK_PINDRIVE(3, 1, 0x00a4, 1, 4), RK_PINDRIVE(3, 1, 0x00a4, 2, 8), RK_PINDRIVE(3, 1, 0x00a4, 3, 12), /* GPIO3C */ RK_PINDRIVE(3, 2, 0x00a8, 0, 2), RK_PINDRIVE(3, 2, 0x00a8, 1, 4), RK_PINDRIVE(3, 2, 0x00a8, 2, 8), RK_PINDRIVE(3, 2, 0x00a8, 3, 12), /* GPIO3D */ RK_PINDRIVE(3, 3, 0x00ac, 0, 2), RK_PINDRIVE(3, 3, 0x00ac, 1, 4), RK_PINDRIVE(3, 3, 0x00ac, 2, 8), RK_PINDRIVE(3, 3, 0x00ac, 3, 12), /* GPIO4A */ RK_PINDRIVE(4, 0, 0x00b0, 0, 2), RK_PINDRIVE(4, 0, 0x00b0, 1, 4), RK_PINDRIVE(4, 0, 0x00b0, 2, 8), RK_PINDRIVE(4, 0, 0x00b0, 3, 12), /* GPIO4B */ RK_PINDRIVE(4, 1, 0x00b4, 0, 2), RK_PINDRIVE(4, 1, 0x00b4, 1, 4), RK_PINDRIVE(4, 1, 0x00b4, 2, 8), RK_PINDRIVE(4, 1, 0x00b4, 3, 12), /* GPIO4C */ RK_PINDRIVE(4, 2, 0x00b8, 0, 2), RK_PINDRIVE(4, 2, 0x00b8, 1, 4), RK_PINDRIVE(4, 2, 0x00b8, 2, 8), RK_PINDRIVE(4, 2, 0x00b8, 3, 12), /* GPIO4D */ RK_PINDRIVE(4, 3, 0x00bc, 0, 2), RK_PINDRIVE(4, 3, 0x00bc, 1, 4), RK_PINDRIVE(4, 3, 0x00bc, 2, 8), RK_PINDRIVE(4, 3, 0x00bc, 3, 12), }; static uint32_t rk3568_get_pd_offset(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank == 0) return (0x20); /* * Registers start at 0x80, but bank index starts at 1. Return 0x70 * so later calculations get the correct offset. 
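 * The pull-up/down register is later computed as reg = pd_offset +
 * bank * 0x10 + (pin / 8) * 0x4, so e.g. bank 1, pin 0 lands on
 * 0x70 + 0x10 = 0x80 as intended.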
*/ return (0x70); } static struct syscon * rk3568_get_syscon(struct rk_pinctrl_softc *sc, uint32_t bank) { if (bank) return (sc->grf); else return (sc->pmu); } static int rk3568_parse_bias(phandle_t node, int bank) { if (OF_hasprop(node, "bias-disable")) return (0); if (OF_hasprop(node, "bias-pull-up")) return (1); if (OF_hasprop(node, "bias-pull-down")) return (2); return (-1); } static int rk3568_resolv_bias_value(int bank, int bias) { if (bias == 1) return (GPIO_PIN_PULLUP); if (bias == 2) return (GPIO_PIN_PULLDOWN); return (0); } static int rk3568_get_bias_value(int bank, int bias) { if (bias & GPIO_PIN_PULLUP) return (1); if (bias & GPIO_PIN_PULLDOWN) return (2); return (0); } struct rk_pinctrl_conf rk3568_conf = { .iomux_conf = rk3568_iomux_bank, .iomux_nbanks = nitems(rk3568_iomux_bank), .pin_fixup = rk3568_pin_fixup, .npin_fixup = nitems(rk3568_pin_fixup), .pin_drive = rk3568_pin_drive, .npin_drive = nitems(rk3568_pin_drive), .gpio_bank = rk3568_gpio_bank, .ngpio_bank = nitems(rk3568_gpio_bank), .get_pd_offset = rk3568_get_pd_offset, .get_syscon = rk3568_get_syscon, .parse_bias = rk3568_parse_bias, .resolv_bias_value = rk3568_resolv_bias_value, .get_bias_value = rk3568_get_bias_value, }; static struct ofw_compat_data compat_data[] = { {"rockchip,rk3288-pinctrl", (uintptr_t)&rk3288_conf}, {"rockchip,rk3328-pinctrl", (uintptr_t)&rk3328_conf}, {"rockchip,rk3399-pinctrl", (uintptr_t)&rk3399_conf}, {"rockchip,rk3568-pinctrl", (uintptr_t)&rk3568_conf}, {NULL, 0} }; static int rk_pinctrl_parse_drive(struct rk_pinctrl_softc *sc, phandle_t node, uint32_t bank, uint32_t subbank, uint32_t *drive, uint32_t *offset) { uint32_t value; int i; if (OF_getencprop(node, "drive-strength", &value, sizeof(value)) != 0) return (-1); /* Map to the correct drive value */ for (i = 0; i < sc->conf->npin_drive; i++) { if (sc->conf->pin_drive[i].bank != bank && sc->conf->pin_drive[i].subbank != subbank) continue; if (sc->conf->pin_drive[i].ma == value) { *drive = sc->conf->pin_drive[i].value; return (0); } } return (-1); } static void rk_pinctrl_get_fixup(struct rk_pinctrl_softc *sc, uint32_t bank, uint32_t pin, uint32_t *reg, uint32_t *mask, uint32_t *bit) { int i; for (i = 0; i < sc->conf->npin_fixup; i++) if (sc->conf->pin_fixup[i].bank == bank && sc->conf->pin_fixup[i].pin == pin) { *reg = sc->conf->pin_fixup[i].reg; *mask = sc->conf->pin_fixup[i].mask; *bit = sc->conf->pin_fixup[i].bit; return; } } static int rk_pinctrl_handle_io(struct rk_pinctrl_softc *sc, phandle_t node, uint32_t bank, uint32_t pin) { bool have_cfg, have_direction, have_value; uint32_t direction_value, pin_value; struct rk_pinctrl_gpio *gpio; int i, rv; have_cfg = false; have_direction = false; have_value = false; /* Get (subset of) GPIO pin properties. 
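 * Handled here: output-disable, output-enable, output-low and output-high;
 * if several are present, the last one checked wins.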
*/ if (OF_hasprop(node, "output-disable")) { have_cfg = true; have_direction = true; direction_value = GPIO_PIN_INPUT; } if (OF_hasprop(node, "output-enable")) { have_cfg = true; have_direction = true; direction_value = GPIO_PIN_OUTPUT; } if (OF_hasprop(node, "output-low")) { have_cfg = true; have_direction = true; direction_value = GPIO_PIN_OUTPUT; have_value = true; pin_value = 0; } if (OF_hasprop(node, "output-high")) { have_cfg = true; have_direction = true; direction_value = GPIO_PIN_OUTPUT; have_value = true; pin_value = 1; } if (!have_cfg) return (0); /* Find gpio */ gpio = NULL; for (i = 0; i < sc->conf->ngpio_bank; i++) { if (bank == sc->conf->gpio_bank[i].bank) { gpio = sc->conf->gpio_bank + i; break; } } if (gpio == NULL) { device_printf(sc->dev, "Cannot find GPIO bank %d\n", bank); return (ENXIO); } if (gpio->gpio_dev == NULL) { device_printf(sc->dev, "No GPIO subdevice found for bank %d\n", bank); return (ENXIO); } rv = 0; if (have_value) { rv = GPIO_PIN_SET(gpio->gpio_dev, pin, pin_value); if (rv != 0) { device_printf(sc->dev, "Cannot set GPIO value: %d\n", rv); return (rv); } } if (have_direction) { rv = GPIO_PIN_SETFLAGS(gpio->gpio_dev, pin, direction_value); if (rv != 0) { device_printf(sc->dev, "Cannot set GPIO direction: %d\n", rv); return (rv); } } return (0); } static void rk_pinctrl_configure_pin(struct rk_pinctrl_softc *sc, uint32_t *pindata) { phandle_t pin_conf; struct syscon *syscon; uint32_t bank, subbank, pin, function; uint32_t bit, mask, reg, drive; int i, rv, bias; bank = pindata[0]; pin = pindata[1]; function = pindata[2]; pin_conf = OF_node_from_xref(pindata[3]); subbank = pin / 8; for (i = 0; i < sc->conf->iomux_nbanks; i++) if (sc->conf->iomux_conf[i].bank == bank && sc->conf->iomux_conf[i].subbank == subbank) break; if (i == sc->conf->iomux_nbanks) { device_printf(sc->dev, "Unknown pin %d in bank %d\n", pin, bank); return; } /* Find syscon */ syscon = sc->conf->get_syscon(sc, bank); /* Set up GPIO properties first */ rv = rk_pinctrl_handle_io(sc, pin_conf, bank, pin); /* Then pin pull-up/down */ bias = sc->conf->parse_bias(pin_conf, bank); if (bias >= 0) { reg = sc->conf->get_pd_offset(sc, bank); reg += bank * 0x10 + ((pin / 8) * 0x4); bit = (pin % 8) * 2; mask = (0x3 << bit); SYSCON_MODIFY_4(syscon, reg, mask, bias << bit | (mask << 16)); } /* Then drive strength */ if (ofw_bus_node_is_compatible(ofw_bus_get_node(sc->dev), "rockchip,rk3568-pinctrl")) { uint32_t value; if (OF_getencprop(pin_conf, "drive-strength", &value, sizeof(value)) == 0) { if (bank) reg = 0x01c0 + (bank * 0x40) + (pin / 2 * 4); else reg = 0x0070 + (pin / 2 * 4); drive = ((1 << (value + 1)) - 1) << (pin % 2); mask = 0x3f << (pin % 2); SYSCON_WRITE_4(syscon, reg, drive | (mask << 16)); } } else { rv = rk_pinctrl_parse_drive(sc, pin_conf, bank, subbank, &drive, &reg); if (rv == 0) { bit = (pin % 8) * 2; mask = (0x3 << bit); SYSCON_MODIFY_4(syscon, reg, mask, drive << bit | (mask << 16)); } } /* Finally set the pin function */ reg = sc->conf->iomux_conf[i].offset; switch (sc->conf->iomux_conf[i].nbits) { case 4: if ((pin % 8) >= 4) reg += 0x4; bit = (pin % 4) * 4; mask = (0xF << bit); break; case 3: if ((pin % 8) >= 5) reg += 4; bit = (pin % 8 % 5) * 3; mask = (0x7 << bit); break; case 2: bit = (pin % 8) * 2; mask = (0x3 << bit); break; default: device_printf(sc->dev, "Unknown pin stride width %d in bank %d\n", sc->conf->iomux_conf[i].nbits, bank); return; } rk_pinctrl_get_fixup(sc, bank, pin, &reg, &mask, &bit); /* * NOTE: not all syscon registers use the hi-word write mask, thus the * register
modify method should be used. * XXXX We should not pass write mask to syscon register * without hi-word write mask. */ SYSCON_MODIFY_4(syscon, reg, mask, function << bit | (mask << 16)); /* RK3568 specific pin mux for various functionalities */ if (ofw_bus_node_is_compatible(ofw_bus_get_node(sc->dev), "rockchip,rk3568-pinctrl")) { if (bank == 3 && pin == 9 && function == 3) SYSCON_WRITE_4(sc->grf, GRF_IOFUNC_SEL0, GMAC1_IOMUX_SEL_M0); if (bank == 4 && pin == 7 && function == 3) SYSCON_WRITE_4(sc->grf, GRF_IOFUNC_SEL0, GMAC1_IOMUX_SEL_M1); } } static int rk_pinctrl_configure_pins(device_t dev, phandle_t cfgxref) { struct rk_pinctrl_softc *sc; phandle_t node; uint32_t *pins; int i, npins; sc = device_get_softc(dev); node = OF_node_from_xref(cfgxref); npins = OF_getencprop_alloc_multi(node, "rockchip,pins", sizeof(*pins), (void **)&pins); if (npins <= 0) return (ENOENT); for (i = 0; i != npins; i += 4) rk_pinctrl_configure_pin(sc, pins + i); return (0); } static int rk_pinctrl_is_gpio_locked(struct rk_pinctrl_softc *sc, struct syscon *syscon, int bank, uint32_t pin, bool *is_gpio) { uint32_t subbank, bit, mask, reg; uint32_t pinfunc; int i; RK_PINCTRL_LOCK_ASSERT(sc); subbank = pin / 8; *is_gpio = false; for (i = 0; i < sc->conf->iomux_nbanks; i++) if (sc->conf->iomux_conf[i].bank == bank && sc->conf->iomux_conf[i].subbank == subbank) break; if (i == sc->conf->iomux_nbanks) { device_printf(sc->dev, "Unknown pin %d in bank %d\n", pin, bank); return (EINVAL); } syscon = sc->conf->get_syscon(sc, bank); /* Parse pin function */ reg = sc->conf->iomux_conf[i].offset; switch (sc->conf->iomux_conf[i].nbits) { case 4: if ((pin % 8) >= 4) reg += 0x4; bit = (pin % 4) * 4; mask = (0xF << bit); break; case 3: if ((pin % 8) >= 5) reg += 4; bit = (pin % 8 % 5) * 3; mask = (0x7 << bit); break; case 2: bit = (pin % 8) * 2; mask = (0x3 << bit); break; default: device_printf(sc->dev, "Unknown pin stride width %d in bank %d\n", sc->conf->iomux_conf[i].nbits, bank); return (EINVAL); } rk_pinctrl_get_fixup(sc, bank, pin, ®, &mask, &bit); reg = SYSCON_READ_4(syscon, reg); pinfunc = (reg & mask) >> bit; /* Test if the pin is in gpio mode */ if (pinfunc == 0) *is_gpio = true; return (0); } static int rk_pinctrl_get_bank(struct rk_pinctrl_softc *sc, device_t gpio, int *bank) { int i; for (i = 0; i < sc->conf->ngpio_bank; i++) { if (sc->conf->gpio_bank[i].gpio_dev == gpio) break; } if (i == sc->conf->ngpio_bank) return (EINVAL); *bank = i; return (0); } static int rk_pinctrl_is_gpio(device_t pinctrl, device_t gpio, uint32_t pin, bool *is_gpio) { struct rk_pinctrl_softc *sc; struct syscon *syscon; int bank; int rv; sc = device_get_softc(pinctrl); RK_PINCTRL_LOCK(sc); rv = rk_pinctrl_get_bank(sc, gpio, &bank); if (rv != 0) goto done; syscon = sc->conf->get_syscon(sc, bank); rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, is_gpio); done: RK_PINCTRL_UNLOCK(sc); return (rv); } static int rk_pinctrl_get_flags(device_t pinctrl, device_t gpio, uint32_t pin, uint32_t *flags) { struct rk_pinctrl_softc *sc; struct syscon *syscon; uint32_t reg, bit; uint32_t bias; int bank; int rv = 0; bool is_gpio; sc = device_get_softc(pinctrl); RK_PINCTRL_LOCK(sc); rv = rk_pinctrl_get_bank(sc, gpio, &bank); if (rv != 0) goto done; syscon = sc->conf->get_syscon(sc, bank); rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, &is_gpio); if (rv != 0) goto done; if (!is_gpio) { rv = EINVAL; goto done; } /* Get the pullup/pulldown configuration */ reg = sc->conf->get_pd_offset(sc, bank); reg += bank * 0x10 + ((pin / 8) * 0x4); bit = (pin % 8) 
* 2; reg = SYSCON_READ_4(syscon, reg); reg = (reg >> bit) & 0x3; bias = sc->conf->resolv_bias_value(bank, reg); *flags = bias; done: RK_PINCTRL_UNLOCK(sc); return (rv); } static int rk_pinctrl_set_flags(device_t pinctrl, device_t gpio, uint32_t pin, uint32_t flags) { struct rk_pinctrl_softc *sc; struct syscon *syscon; uint32_t bit, mask, reg; uint32_t bias; int bank; int rv = 0; bool is_gpio; sc = device_get_softc(pinctrl); RK_PINCTRL_LOCK(sc); rv = rk_pinctrl_get_bank(sc, gpio, &bank); if (rv != 0) goto done; syscon = sc->conf->get_syscon(sc, bank); rv = rk_pinctrl_is_gpio_locked(sc, syscon, bank, pin, &is_gpio); if (rv != 0) goto done; if (!is_gpio) { rv = EINVAL; goto done; } /* Get the pullup/pulldown configuration */ reg = sc->conf->get_pd_offset(sc, bank); reg += bank * 0x10 + ((pin / 8) * 0x4); bit = (pin % 8) * 2; mask = (0x3 << bit); bias = sc->conf->get_bias_value(bank, flags); SYSCON_MODIFY_4(syscon, reg, mask, bias << bit | (mask << 16)); done: RK_PINCTRL_UNLOCK(sc); return (rv); } static int rk_pinctrl_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RockChip Pinctrl controller"); return (BUS_PROBE_DEFAULT); } static int rk_pinctrl_attach(device_t dev) { struct rk_pinctrl_softc *sc; phandle_t node; device_t cdev; int rv, gpio_unit; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (OF_hasprop(node, "rockchip,grf") && syscon_get_by_ofw_property(dev, node, "rockchip,grf", &sc->grf) != 0) { device_printf(dev, "cannot get grf driver handle\n"); return (ENXIO); } /* RK3568,RK3399,RK3288 have banks in PMU. RK3328 doesn't have a PMU. */ if (ofw_bus_node_is_compatible(node, "rockchip,rk3568-pinctrl") || ofw_bus_node_is_compatible(node, "rockchip,rk3399-pinctrl") || ofw_bus_node_is_compatible(node, "rockchip,rk3288-pinctrl")) { if (OF_hasprop(node, "rockchip,pmu") && syscon_get_by_ofw_property(dev, node, "rockchip,pmu", &sc->pmu) != 0) { device_printf(dev, "cannot get pmu driver handle\n"); return (ENXIO); } } mtx_init(&sc->mtx, "rk pinctrl", "pinctrl", MTX_SPIN); sc->conf = (struct rk_pinctrl_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; fdt_pinctrl_register(dev, "rockchip,pins"); simplebus_init(dev, node); - bus_generic_probe(dev); + bus_identify_children(dev); /* Attach child devices */ for (node = OF_child(node), gpio_unit = 0; node > 0; node = OF_peer(node)) { if (!ofw_bus_node_is_compatible(node, "rockchip,gpio-bank")) continue; cdev = simplebus_add_device(dev, node, 0, NULL, -1, NULL); if (cdev == NULL) { device_printf(dev, " Cannot add GPIO subdevice\n"); gpio_unit += 1; continue; } rv = device_probe_and_attach(cdev); if (rv != 0) { device_printf(sc->dev, "Cannot attach GPIO subdevice\n"); gpio_unit += 1; continue; } sc->conf->gpio_bank[gpio_unit].gpio_dev = cdev; gpio_unit += 1; } fdt_pinctrl_configure_tree(dev); return (bus_generic_attach(dev)); } static int rk_pinctrl_detach(device_t dev) { return (EBUSY); } static device_method_t rk_pinctrl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rk_pinctrl_probe), DEVMETHOD(device_attach, rk_pinctrl_attach), DEVMETHOD(device_detach, rk_pinctrl_detach), /* fdt_pinctrl interface */ DEVMETHOD(fdt_pinctrl_configure, rk_pinctrl_configure_pins), DEVMETHOD(fdt_pinctrl_is_gpio, rk_pinctrl_is_gpio), DEVMETHOD(fdt_pinctrl_get_flags, rk_pinctrl_get_flags), DEVMETHOD(fdt_pinctrl_set_flags, rk_pinctrl_set_flags), DEVMETHOD_END }; DEFINE_CLASS_1(rk_pinctrl, 
rk_pinctrl_driver, rk_pinctrl_methods, sizeof(struct rk_pinctrl_softc), simplebus_driver); EARLY_DRIVER_MODULE(rk_pinctrl, simplebus, rk_pinctrl_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(rk_pinctrl, 1); diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c index 5b47377735f1..1a1f18d825bf 100644 --- a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c +++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c @@ -1,239 +1,239 @@ /*- * Copyright (c) 2012-2015 Oleksandr Tymoshenko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "vchiq_arm.h" #include "vchiq_2835.h" #define VCHIQ_LOCK do { \ mtx_lock(&bcm_vchiq_sc->lock); \ } while(0) #define VCHIQ_UNLOCK do { \ mtx_unlock(&bcm_vchiq_sc->lock); \ } while(0) #ifdef DEBUG #define dprintf(fmt, args...) printf(fmt, ##args) #else #define dprintf(fmt, args...) #endif struct bcm_vchiq_softc { struct mtx lock; struct resource * mem_res; struct resource * irq_res; void* intr_hl; bus_space_tag_t bst; bus_space_handle_t bsh; int regs_offset; }; static struct bcm_vchiq_softc *bcm_vchiq_sc = NULL; #define BSD_DTB 1 #define UPSTREAM_DTB 2 static struct ofw_compat_data compat_data[] = { {"broadcom,bcm2835-vchiq", BSD_DTB}, {"brcm,bcm2835-vchiq", UPSTREAM_DTB}, {"brcm,bcm2711-vchiq", UPSTREAM_DTB}, {NULL, 0} }; #define vchiq_read_4(reg) \ bus_space_read_4(bcm_vchiq_sc->bst, bcm_vchiq_sc->bsh, (reg) + \ bcm_vchiq_sc->regs_offset) #define vchiq_write_4(reg, val) \ bus_space_write_4(bcm_vchiq_sc->bst, bcm_vchiq_sc->bsh, (reg) + \ bcm_vchiq_sc->regs_offset, val) /* * Extern functions */ void vchiq_exit(void); int vchiq_init(void); extern VCHIQ_STATE_T g_state; extern int g_cache_line_size; static void bcm_vchiq_intr(void *arg) { VCHIQ_STATE_T *state = &g_state; unsigned int status; /* Read (and clear) the doorbell */ status = vchiq_read_4(0x40); if (status & 0x4) { /* Was the doorbell rung? 
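* Bit 2 (0x4) of the status register indicates that the VideoCore
* side rang the doorbell; the read above has already cleared it.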
*/ remote_event_pollall(state); } } void remote_event_signal(REMOTE_EVENT_T *event) { event->fired = 1; /* The test on the next line also ensures the write on the previous line has completed */ if (event->armed) { /* trigger vc interrupt */ dsb(); vchiq_write_4(0x48, 0); } } static int bcm_vchiq_probe(device_t dev) { if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "BCM2835 VCHIQ"); return (BUS_PROBE_DEFAULT); } static int bcm_vchiq_attach(device_t dev) { struct bcm_vchiq_softc *sc = device_get_softc(dev); phandle_t node; pcell_t cell; int rid = 0; if (bcm_vchiq_sc != NULL) return (EINVAL); sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->mem_res); sc->bsh = rman_get_bushandle(sc->mem_res); rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); return (ENXIO); } if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == UPSTREAM_DTB) sc->regs_offset = -0x40; node = ofw_bus_get_node(dev); if ((OF_getencprop(node, "cache-line-size", &cell, sizeof(cell))) > 0) g_cache_line_size = cell; vchiq_core_initialize(); /* Set up and enable the doorbell interrupt. */ if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_vchiq_intr, sc, &sc->intr_hl) != 0) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->irq_res); device_printf(dev, "Unable to set up the doorbell irq handler.\n"); return (ENXIO); } mtx_init(&sc->lock, "vchiq", 0, MTX_DEF); bcm_vchiq_sc = sc; vchiq_init(); - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int bcm_vchiq_detach(device_t dev) { struct bcm_vchiq_softc *sc = device_get_softc(dev); vchiq_exit(); if (sc->intr_hl) bus_teardown_intr(dev, sc->irq_res, sc->intr_hl); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res); mtx_destroy(&sc->lock); return (0); } static device_method_t bcm_vchiq_methods[] = { DEVMETHOD(device_probe, bcm_vchiq_probe), DEVMETHOD(device_attach, bcm_vchiq_attach), DEVMETHOD(device_detach, bcm_vchiq_detach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), { 0, 0 } }; static driver_t bcm_vchiq_driver = { "vchiq", bcm_vchiq_methods, sizeof(struct bcm_vchiq_softc), }; DRIVER_MODULE(vchiq, simplebus, bcm_vchiq_driver, 0, 0); MODULE_VERSION(vchiq, 1); diff --git a/sys/dev/acpi_support/acpi_wmi.c b/sys/dev/acpi_support/acpi_wmi.c index 6601db4317cb..d4b90ad9a508 100644 --- a/sys/dev/acpi_support/acpi_wmi.c +++ b/sys/dev/acpi_support/acpi_wmi.c @@ -1,1045 +1,1045 @@ /*- * Copyright (c) 2009 Michael Gmelin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Driver for acpi-wmi mapping, provides an interface for vendor-specific * implementations (e.g. HP and Acer laptops). * Inspired by the ACPI-WMI mapping driver (c) 2008-2008 Carlos Corbacho which * implements this functionality for Linux. * * WMI and ACPI: http://www.microsoft.com/whdc/system/pnppwr/wmi/wmi-acpi.mspx * acpi-wmi for Linux: http://www.kernel.org */ #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_wmi_if.h" static MALLOC_DEFINE(M_ACPIWMI, "acpiwmi", "ACPI-WMI mapping"); #define _COMPONENT ACPI_OEM ACPI_MODULE_NAME("ACPI_WMI"); #define ACPI_WMI_REGFLAG_EXPENSIVE 0x1 /* GUID flag: Expensive operation */ #define ACPI_WMI_REGFLAG_METHOD 0x2 /* GUID flag: Method call */ #define ACPI_WMI_REGFLAG_STRING 0x4 /* GUID flag: String */ #define ACPI_WMI_REGFLAG_EVENT 0x8 /* GUID flag: Event */ #define ACPI_WMI_BMOF_UUID "05901221-D566-11D1-B2F0-00A0C9062910" /* * acpi_wmi driver private structure */ struct acpi_wmi_softc { device_t wmi_dev; /* wmi device id */ ACPI_HANDLE wmi_handle; /* handle of the PNP0C14 node */ device_t ec_dev; /* acpi_ec0 */ struct cdev *wmistat_dev_t; /* wmistat device handle */ struct sbuf wmistat_sbuf; /* sbuf for /dev/wmistat output */ pid_t wmistat_open_pid; /* pid operating on /dev/wmistat */ int wmistat_bufptr; /* /dev/wmistat ptr to buffer position */ char *mofbuf; TAILQ_HEAD(wmi_info_list_head, wmi_info) wmi_info_list; }; /* * Struct that holds information * about a single GUID entry in _WDG */ struct guid_info { char guid[16]; /* 16 byte non-human-readable GUID */ char oid[2]; /* object id or event notify id (first byte) */ UINT8 max_instance; /* highest instance known for this GUID */ UINT8 flags; /* ACPI_WMI_REGFLAG_%s */ }; /* WExx event generation state (on/off) */ enum event_generation_state { EVENT_GENERATION_ON = 1, EVENT_GENERATION_OFF = 0 }; /* * Information about one entry in _WDG. * A list of these is used to look up information by GUID.
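* The list hangs off the softc and is populated from _WDG at attach
* time (see acpi_wmi_read_wdg_blocks()).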
*/ struct wmi_info { TAILQ_ENTRY(wmi_info) wmi_list; struct guid_info ginfo; /* information on guid */ ACPI_NOTIFY_HANDLER event_handler;/* client provided event handler */ void *event_handler_user_data; /* ev handler cookie */ }; ACPI_SERIAL_DECL(acpi_wmi, "ACPI-WMI Mapping"); /* public interface - declaration */ /* standard device interface*/ static int acpi_wmi_probe(device_t dev); static int acpi_wmi_attach(device_t dev); static int acpi_wmi_detach(device_t dev); /* see acpi_wmi_if.m */ static int acpi_wmi_provides_guid_string_method(device_t dev, const char *guid_string); static ACPI_STATUS acpi_wmi_evaluate_call_method(device_t dev, const char *guid_string, UINT8 instance, UINT32 method_id, const ACPI_BUFFER *in, ACPI_BUFFER *out); static ACPI_STATUS acpi_wmi_install_event_handler_method(device_t dev, const char *guid_string, ACPI_NOTIFY_HANDLER handler, void *data); static ACPI_STATUS acpi_wmi_remove_event_handler_method(device_t dev, const char *guid_string); static ACPI_STATUS acpi_wmi_get_event_data_method(device_t dev, UINT32 event_id, ACPI_BUFFER *out); static ACPI_STATUS acpi_wmi_get_block_method(device_t dev, const char *guid_string, UINT8 instance, ACPI_BUFFER *out); static ACPI_STATUS acpi_wmi_set_block_method(device_t dev, const char *guid_string, UINT8 instance, const ACPI_BUFFER *in); /* private interface - declaration */ /* callbacks */ static void acpi_wmi_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context); static ACPI_STATUS acpi_wmi_ec_handler(UINT32 function, ACPI_PHYSICAL_ADDRESS address, UINT32 width, UINT64 *value, void *context, void *region_context); /* helpers */ static ACPI_STATUS acpi_wmi_read_wdg_blocks(struct acpi_wmi_softc *sc, ACPI_HANDLE h); static ACPI_STATUS acpi_wmi_toggle_we_event_generation(device_t dev, struct wmi_info *winfo, enum event_generation_state state); static int acpi_wmi_guid_string_to_guid(const UINT8 *guid_string, UINT8 *guid); static struct wmi_info* acpi_wmi_lookup_wmi_info_by_guid_string(struct acpi_wmi_softc *sc, const char *guid_string); static d_open_t acpi_wmi_wmistat_open; static d_close_t acpi_wmi_wmistat_close; static d_read_t acpi_wmi_wmistat_read; /* handler /dev/wmistat device */ static struct cdevsw wmistat_cdevsw = { .d_version = D_VERSION, .d_open = acpi_wmi_wmistat_open, .d_close = acpi_wmi_wmistat_close, .d_read = acpi_wmi_wmistat_read, .d_name = "wmistat", }; static device_method_t acpi_wmi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_wmi_probe), DEVMETHOD(device_attach, acpi_wmi_attach), DEVMETHOD(device_detach, acpi_wmi_detach), /* bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), /* acpi_wmi interface */ DEVMETHOD(acpi_wmi_provides_guid_string, acpi_wmi_provides_guid_string_method), DEVMETHOD(acpi_wmi_evaluate_call, acpi_wmi_evaluate_call_method), DEVMETHOD(acpi_wmi_install_event_handler, acpi_wmi_install_event_handler_method), DEVMETHOD(acpi_wmi_remove_event_handler, acpi_wmi_remove_event_handler_method), DEVMETHOD(acpi_wmi_get_event_data, acpi_wmi_get_event_data_method), DEVMETHOD(acpi_wmi_get_block, acpi_wmi_get_block_method), DEVMETHOD(acpi_wmi_set_block, acpi_wmi_set_block_method), DEVMETHOD_END }; static driver_t acpi_wmi_driver = { "acpi_wmi", acpi_wmi_methods, sizeof(struct acpi_wmi_softc), }; DRIVER_MODULE(acpi_wmi, acpi, acpi_wmi_driver, 0, 0); MODULE_VERSION(acpi_wmi, 1); MODULE_DEPEND(acpi_wmi, acpi, 1, 1, 1); static char *wmi_ids[] = {"PNP0C14", NULL}; ACPI_PNP_INFO(wmi_ids); /* * Probe for the PNP0C14 ACPI node */ static int acpi_wmi_probe(device_t dev) 
{ int rv; if (acpi_disabled("wmi")) return (ENXIO); rv = ACPI_ID_PROBE(device_get_parent(dev), dev, wmi_ids, NULL); if (rv <= 0) device_set_desc(dev, "ACPI-WMI mapping"); return (rv); } /* * Attach the device by: * - Looking for the first ACPI EC device * - Install the notify handler * - Install the EC address space handler * - Look for the _WDG node and read GUID information blocks */ static int acpi_wmi_attach(device_t dev) { struct acpi_wmi_softc *sc; int ret; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); ret = ENXIO; ACPI_SERIAL_BEGIN(acpi_wmi); sc->wmi_dev = dev; sc->wmi_handle = acpi_get_handle(dev); TAILQ_INIT(&sc->wmi_info_list); /* XXX Only works with one EC, but nearly all systems only have one. */ if ((sc->ec_dev = devclass_get_device(devclass_find("acpi_ec"), 0)) == NULL) device_printf(dev, "cannot find EC device\n"); if (ACPI_FAILURE((status = AcpiInstallNotifyHandler(sc->wmi_handle, ACPI_DEVICE_NOTIFY, acpi_wmi_notify_handler, sc)))) device_printf(sc->wmi_dev, "couldn't install notify handler - %s\n", AcpiFormatException(status)); else if (ACPI_FAILURE((status = AcpiInstallAddressSpaceHandler( sc->wmi_handle, ACPI_ADR_SPACE_EC, acpi_wmi_ec_handler, NULL, sc)))) { device_printf(sc->wmi_dev, "couldn't install EC handler - %s\n", AcpiFormatException(status)); AcpiRemoveNotifyHandler(sc->wmi_handle, ACPI_DEVICE_NOTIFY, acpi_wmi_notify_handler); } else if (ACPI_FAILURE((status = acpi_wmi_read_wdg_blocks(sc, sc->wmi_handle)))) { device_printf(sc->wmi_dev, "couldn't parse _WDG - %s\n", AcpiFormatException(status)); AcpiRemoveNotifyHandler(sc->wmi_handle, ACPI_DEVICE_NOTIFY, acpi_wmi_notify_handler); AcpiRemoveAddressSpaceHandler(sc->wmi_handle, ACPI_ADR_SPACE_EC, acpi_wmi_ec_handler); } else { sc->wmistat_dev_t = make_dev(&wmistat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644, "wmistat%d", device_get_unit(dev)); sc->wmistat_dev_t->si_drv1 = sc; sc->wmistat_open_pid = 0; sc->wmistat_bufptr = -1; ret = 0; } ACPI_SERIAL_END(acpi_wmi); if (acpi_wmi_provides_guid_string_method(dev, ACPI_WMI_BMOF_UUID)) { ACPI_BUFFER out = { ACPI_ALLOCATE_BUFFER, NULL }; ACPI_OBJECT *obj; device_printf(dev, "Embedded MOF found\n"); status = acpi_wmi_get_block_method(dev, ACPI_WMI_BMOF_UUID, 0, &out); if (ACPI_SUCCESS(status)) { obj = out.Pointer; if (obj && obj->Type == ACPI_TYPE_BUFFER) { SYSCTL_ADD_OPAQUE(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN( device_get_sysctl_tree(dev)), OID_AUTO, "bmof", CTLFLAG_RD | CTLFLAG_MPSAFE, obj->Buffer.Pointer, obj->Buffer.Length, "A", "MOF Blob"); } } sc->mofbuf = out.Pointer; } if (ret == 0) { - bus_generic_probe(dev); + bus_identify_children(dev); ret = bus_generic_attach(dev); } return (ret); } /* * Detach the driver by: * - Removing notification handler * - Removing address space handler * - Turning off event generation for all WExx event activated by * child drivers */ static int acpi_wmi_detach(device_t dev) { struct wmi_info *winfo, *tmp; struct acpi_wmi_softc *sc; int ret; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); ACPI_SERIAL_BEGIN(acpi_wmi); if (sc->wmistat_open_pid != 0) { ret = EBUSY; } else { AcpiRemoveNotifyHandler(sc->wmi_handle, ACPI_DEVICE_NOTIFY, acpi_wmi_notify_handler); AcpiRemoveAddressSpaceHandler(sc->wmi_handle, ACPI_ADR_SPACE_EC, acpi_wmi_ec_handler); TAILQ_FOREACH_SAFE(winfo, &sc->wmi_info_list, wmi_list, tmp) { if (winfo->event_handler) acpi_wmi_toggle_we_event_generation(dev, winfo, EVENT_GENERATION_OFF); TAILQ_REMOVE(&sc->wmi_info_list, winfo, wmi_list); 
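/* The entry is off the list; release its memory. */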
free(winfo, M_ACPIWMI); } if (sc->wmistat_bufptr != -1) { sbuf_delete(&sc->wmistat_sbuf); sc->wmistat_bufptr = -1; } sc->wmistat_open_pid = 0; destroy_dev(sc->wmistat_dev_t); ret = 0; AcpiOsFree(sc->mofbuf); } ACPI_SERIAL_END(acpi_wmi); return (ret); } /* * Check if the given GUID string (human readable format * AABBCCDD-EEFF-GGHH-IIJJ-KKLLMMNNOOPP) * exists within _WDG */ static int acpi_wmi_provides_guid_string_method(device_t dev, const char *guid_string) { struct acpi_wmi_softc *sc; struct wmi_info *winfo; int ret; sc = device_get_softc(dev); ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_BEGIN(acpi_wmi); winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string); ret = (winfo == NULL)?0:winfo->ginfo.max_instance+1; ACPI_SERIAL_END(acpi_wmi); return (ret); } /* * Call a method "method_id" on the given GUID block * write result into user provided output buffer */ static ACPI_STATUS acpi_wmi_evaluate_call_method(device_t dev, const char *guid_string, UINT8 instance, UINT32 method_id, const ACPI_BUFFER *in, ACPI_BUFFER *out) { ACPI_OBJECT params[3]; ACPI_OBJECT_LIST input; char method[5] = "WMxx"; struct wmi_info *winfo; struct acpi_wmi_softc *sc; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); ACPI_SERIAL_BEGIN(acpi_wmi); if ((winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string)) == NULL) status = AE_NOT_FOUND; else if (!(winfo->ginfo.flags & ACPI_WMI_REGFLAG_METHOD)) status = AE_BAD_DATA; else if (instance > winfo->ginfo.max_instance) status = AE_BAD_PARAMETER; else { params[0].Type = ACPI_TYPE_INTEGER; params[0].Integer.Value = instance; params[1].Type = ACPI_TYPE_INTEGER; params[1].Integer.Value = method_id; input.Pointer = params; input.Count = 2; if (in) { params[2].Type = (winfo->ginfo.flags & ACPI_WMI_REGFLAG_STRING) ?ACPI_TYPE_STRING:ACPI_TYPE_BUFFER; params[2].Buffer.Length = in->Length; params[2].Buffer.Pointer = in->Pointer; input.Count = 3; } method[2] = winfo->ginfo.oid[0]; method[3] = winfo->ginfo.oid[1]; status = AcpiEvaluateObject(sc->wmi_handle, method, &input, out); } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Install a user provided event_handler on the given GUID * provided *data will be passed on callback * If there is already an existing event handler registered it will be silently * discarded */ static ACPI_STATUS acpi_wmi_install_event_handler_method(device_t dev, const char *guid_string, ACPI_NOTIFY_HANDLER event_handler, void *data) { struct acpi_wmi_softc *sc = device_get_softc(dev); struct wmi_info *winfo; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); status = AE_OK; ACPI_SERIAL_BEGIN(acpi_wmi); if (guid_string == NULL || event_handler == NULL) status = AE_BAD_PARAMETER; else if ((winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string)) == NULL) status = AE_NOT_EXIST; else if (winfo->event_handler != NULL || (status = acpi_wmi_toggle_we_event_generation(dev, winfo, EVENT_GENERATION_ON)) == AE_OK) { winfo->event_handler = event_handler; winfo->event_handler_user_data = data; } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Remove a previously installed event handler from the given GUID * If there was none installed, this call is silently discarded and * reported as AE_OK */ static ACPI_STATUS acpi_wmi_remove_event_handler_method(device_t dev, const char *guid_string) { struct acpi_wmi_softc *sc = device_get_softc(dev); struct wmi_info *winfo; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); status = AE_OK; 
ACPI_SERIAL_BEGIN(acpi_wmi); if (guid_string && (winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string)) != NULL && winfo->event_handler) { status = acpi_wmi_toggle_we_event_generation(dev, winfo, EVENT_GENERATION_OFF); winfo->event_handler = NULL; winfo->event_handler_user_data = NULL; } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Get details on an event received through a callback registered * through ACPI_WMI_INSTALL_EVENT_HANDLER into a user provided output buffer. * (event_id equals "notify" passed in the callback) */ static ACPI_STATUS acpi_wmi_get_event_data_method(device_t dev, UINT32 event_id, ACPI_BUFFER *out) { ACPI_OBJECT_LIST input; ACPI_OBJECT params[1]; struct acpi_wmi_softc *sc; struct wmi_info *winfo; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); status = AE_NOT_FOUND; ACPI_SERIAL_BEGIN(acpi_wmi); params[0].Type = ACPI_TYPE_INTEGER; params[0].Integer.Value = event_id; input.Pointer = params; input.Count = 1; TAILQ_FOREACH(winfo, &sc->wmi_info_list, wmi_list) { if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EVENT) && ((UINT8) winfo->ginfo.oid[0] == event_id)) { status = AcpiEvaluateObject(sc->wmi_handle, "_WED", &input, out); break; } } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Read a block of data from the given GUID (using WQxx (query)) * Will be returned in a user provided buffer (out). * If the method is marked as expensive (ACPI_WMI_REGFLAG_EXPENSIVE) * we will first call the WCxx control method to lock the node * for data collection and release it afterwards. * (Failed WCxx calls are ignored to "support" broken implementations) */ static ACPI_STATUS acpi_wmi_get_block_method(device_t dev, const char *guid_string, UINT8 instance, ACPI_BUFFER *out) { char wc_method[5] = "WCxx"; char wq_method[5] = "WQxx"; ACPI_OBJECT_LIST wc_input; ACPI_OBJECT_LIST wq_input; ACPI_OBJECT wc_params[1]; ACPI_OBJECT wq_params[1]; ACPI_HANDLE wc_handle; struct acpi_wmi_softc *sc; struct wmi_info *winfo; ACPI_STATUS status; ACPI_STATUS wc_status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); wc_status = AE_ERROR; ACPI_SERIAL_BEGIN(acpi_wmi); if (guid_string == NULL || out == NULL) status = AE_BAD_PARAMETER; else if ((winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string)) == NULL) status = AE_ERROR; else if (instance > winfo->ginfo.max_instance) status = AE_BAD_PARAMETER; else if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EVENT) || (winfo->ginfo.flags & ACPI_WMI_REGFLAG_METHOD)) status = AE_ERROR; else { wq_params[0].Type = ACPI_TYPE_INTEGER; wq_params[0].Integer.Value = instance; wq_input.Pointer = wq_params; wq_input.Count = 1; if (winfo->ginfo.flags & ACPI_WMI_REGFLAG_EXPENSIVE) { wc_params[0].Type = ACPI_TYPE_INTEGER; wc_params[0].Integer.Value = 1; wc_input.Pointer = wc_params; wc_input.Count = 1; wc_method[2] = winfo->ginfo.oid[0]; wc_method[3] = winfo->ginfo.oid[1]; wc_status = AcpiGetHandle(sc->wmi_handle, wc_method, &wc_handle); if (ACPI_SUCCESS(wc_status)) wc_status = AcpiEvaluateObject(wc_handle, wc_method, &wc_input, NULL); } wq_method[2] = winfo->ginfo.oid[0]; wq_method[3] = winfo->ginfo.oid[1]; status = AcpiEvaluateObject(sc->wmi_handle, wq_method, &wq_input, out); if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { wc_params[0].Integer.Value = 0; status = AcpiEvaluateObject(wc_handle, wc_method, &wc_input, NULL); /* XXX this might be the wrong status to return?
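* (it is the status of the WCxx release call, which overwrites the
* status of the WQxx query evaluated just above)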
*/ } } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Write a block of data to the given GUID (using WSxx) */ static ACPI_STATUS acpi_wmi_set_block_method(device_t dev, const char *guid_string, UINT8 instance, const ACPI_BUFFER *in) { char method[5] = "WSxx"; ACPI_OBJECT_LIST input; ACPI_OBJECT params[2]; struct wmi_info *winfo; struct acpi_wmi_softc *sc; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); ACPI_SERIAL_BEGIN(acpi_wmi); if (guid_string == NULL || in == NULL) status = AE_BAD_DATA; else if ((winfo = acpi_wmi_lookup_wmi_info_by_guid_string(sc, guid_string)) == NULL) status = AE_ERROR; else if (instance > winfo->ginfo.max_instance) status = AE_BAD_PARAMETER; else if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EVENT) || (winfo->ginfo.flags & ACPI_WMI_REGFLAG_METHOD)) status = AE_ERROR; else { params[0].Type = ACPI_TYPE_INTEGER; params[0].Integer.Value = instance; input.Pointer = params; input.Count = 2; params[1].Type = (winfo->ginfo.flags & ACPI_WMI_REGFLAG_STRING) ?ACPI_TYPE_STRING:ACPI_TYPE_BUFFER; params[1].Buffer.Length = in->Length; params[1].Buffer.Pointer = in->Pointer; method[2] = winfo->ginfo.oid[0]; method[3] = winfo->ginfo.oid[1]; status = AcpiEvaluateObject(sc->wmi_handle, method, &input, NULL); } ACPI_SERIAL_END(acpi_wmi); return (status); } /* * Handle events received and dispatch them to * stakeholders that registered through ACPI_WMI_INSTALL_EVENT_HANDLER */ static void acpi_wmi_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_wmi_softc *sc = context; ACPI_NOTIFY_HANDLER handler; void *handler_data; struct wmi_info *winfo; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify); handler = NULL; handler_data = NULL; ACPI_SERIAL_BEGIN(acpi_wmi); TAILQ_FOREACH(winfo, &sc->wmi_info_list, wmi_list) { if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EVENT) && ((UINT8) winfo->ginfo.oid[0] == notify)) { if (winfo->event_handler) { handler = winfo->event_handler; handler_data = winfo->event_handler_user_data; break; } } } ACPI_SERIAL_END(acpi_wmi); if (handler) { handler(h, notify, handler_data); } } /* * Handle EC address space notifications received on the WDG node * (this mimics EcAddressSpaceHandler in acpi_ec.c) */ static ACPI_STATUS acpi_wmi_ec_handler(UINT32 function, ACPI_PHYSICAL_ADDRESS address, UINT32 width, UINT64 *value, void *context, void *region_context) { struct acpi_wmi_softc *sc; int i; UINT64 ec_data; UINT8 ec_addr; ACPI_STATUS status; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)address); sc = (struct acpi_wmi_softc *)context; if (width % 8 != 0 || value == NULL || context == NULL) return (AE_BAD_PARAMETER); if (address + (width / 8) - 1 > 0xFF) return (AE_BAD_ADDRESS); if (sc->ec_dev == NULL) return (AE_NOT_FOUND); if (function == ACPI_READ) *value = 0; ec_addr = address; status = AE_ERROR; for (i = 0; i < width; i += 8, ++ec_addr) { switch (function) { case ACPI_READ: status = ACPI_EC_READ(sc->ec_dev, ec_addr, &ec_data, 1); if (ACPI_SUCCESS(status)) *value |= ((UINT64)ec_data) << i; break; case ACPI_WRITE: ec_data = (UINT8)((*value) >> i); status = ACPI_EC_WRITE(sc->ec_dev, ec_addr, ec_data, 1); break; default: device_printf(sc->wmi_dev, "invalid acpi_wmi_ec_handler function %d\n", function); status = AE_BAD_PARAMETER; break; } if (ACPI_FAILURE(status)) break; } return (status); } /* * Read GUID blocks from the _WDG node * into wmi_info_list.
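* _WDG evaluates to a buffer packed with struct guid_info records;
* one wmi_info list entry is allocated per record.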
static ACPI_STATUS acpi_wmi_read_wdg_blocks(struct acpi_wmi_softc *sc, ACPI_HANDLE h) { ACPI_BUFFER out = {ACPI_ALLOCATE_BUFFER, NULL}; struct guid_info *ginfo; ACPI_OBJECT *obj; struct wmi_info *winfo; UINT32 i; UINT32 wdg_block_count; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(acpi_wmi); if (ACPI_FAILURE(status = AcpiEvaluateObject(h, "_WDG", NULL, &out))) return (status); obj = (ACPI_OBJECT*) out.Pointer; wdg_block_count = obj->Buffer.Length / sizeof(struct guid_info); if ((ginfo = malloc(obj->Buffer.Length, M_ACPIWMI, M_NOWAIT)) == NULL) { AcpiOsFree(out.Pointer); return (AE_NO_MEMORY); } memcpy(ginfo, obj->Buffer.Pointer, obj->Buffer.Length); for (i = 0; i < wdg_block_count; ++i) { if ((winfo = malloc(sizeof(struct wmi_info), M_ACPIWMI, M_NOWAIT | M_ZERO)) == NULL) { AcpiOsFree(out.Pointer); free(ginfo, M_ACPIWMI); return (AE_NO_MEMORY); } winfo->ginfo = ginfo[i]; TAILQ_INSERT_TAIL(&sc->wmi_info_list, winfo, wmi_list); } AcpiOsFree(out.Pointer); free(ginfo, M_ACPIWMI); return (status); } /* * Toggle event generation for the given GUID (passed by winfo) * Turn on to get notified (through acpi_wmi_notify_handler) if events happen * on the given GUID. */ static ACPI_STATUS acpi_wmi_toggle_we_event_generation(device_t dev, struct wmi_info *winfo, enum event_generation_state state) { char method[5] = "WExx"; ACPI_OBJECT_LIST input; ACPI_OBJECT params[1]; struct acpi_wmi_softc *sc; ACPI_STATUS status; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); ACPI_SERIAL_ASSERT(acpi_wmi); params[0].Type = ACPI_TYPE_INTEGER; params[0].Integer.Value = state==EVENT_GENERATION_ON?1:0; input.Pointer = params; input.Count = 1; UINT8 hi = ((UINT8) winfo->ginfo.oid[0]) >> 4; UINT8 lo = ((UINT8) winfo->ginfo.oid[0]) & 0xf; method[2] = (hi > 9 ? hi + 55: hi + 48); method[3] = (lo > 9 ? lo + 55: lo + 48); status = AcpiEvaluateObject(sc->wmi_handle, method, &input, NULL); if (status == AE_NOT_FOUND) status = AE_OK; return (status); } /* * Convert given two digit hex string (hexin) to a UINT8 referenced * by byteout. * Return != 0 if there was a problem (invalid input) */ static __inline int acpi_wmi_hex_to_int(const UINT8 *hexin, UINT8 *byteout) { unsigned int hi; unsigned int lo; hi = hexin[0]; lo = hexin[1]; if ('0' <= hi && hi <= '9') hi -= '0'; else if ('A' <= hi && hi <= 'F') hi -= ('A' - 10); else if ('a' <= hi && hi <= 'f') hi -= ('a' - 10); else return (1); if ('0' <= lo && lo <= '9') lo -= '0'; else if ('A' <= lo && lo <= 'F') lo -= ('A' - 10); else if ('a' <= lo && lo <= 'f') lo -= ('a' - 10); else return (1); *byteout = (hi << 4) + lo; return (0); } /* * Convert a human readable 36 character GUID into a 16 byte * machine readable one. * The basic algorithm looks as follows: * Input: AABBCCDD-EEFF-GGHH-IIJJ-KKLLMMNNOOPP * Output: DCBAFEHGIJKLMNOP * (AA BB CC etc.
represent two digit hex numbers == bytes) * Return != 0 if passed guid string is invalid */ static int acpi_wmi_guid_string_to_guid(const UINT8 *guid_string, UINT8 *guid) { static const int mapping[20] = {3, 2, 1, 0, -1, 5, 4, -1, 7, 6, -1, 8, 9, -1, 10, 11, 12, 13, 14, 15}; int i; for (i = 0; i < 20; ++i, ++guid_string) { if (mapping[i] >= 0) { if (acpi_wmi_hex_to_int(guid_string, &guid[mapping[i]])) return (-1); ++guid_string; } else if (*guid_string != '-') return (-1); } return (0); } /* * Lookup a wmi_info structure in wmi_list based on a * human readable GUID * Return NULL if the GUID is unknown in the _WDG */ static struct wmi_info* acpi_wmi_lookup_wmi_info_by_guid_string(struct acpi_wmi_softc *sc, const char *guid_string) { char guid[16]; struct wmi_info *winfo; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); ACPI_SERIAL_ASSERT(acpi_wmi); if (!acpi_wmi_guid_string_to_guid(guid_string, guid)) { TAILQ_FOREACH(winfo, &sc->wmi_info_list, wmi_list) { if (!memcmp(winfo->ginfo.guid, guid, 16)) { return (winfo); } } } return (NULL); } /* * open wmistat device */ static int acpi_wmi_wmistat_open(struct cdev* dev, int flags, int mode, struct thread *td) { struct acpi_wmi_softc *sc; int ret; if (dev == NULL || dev->si_drv1 == NULL) return (EBADF); sc = dev->si_drv1; ACPI_SERIAL_BEGIN(acpi_wmi); if (sc->wmistat_open_pid != 0) { ret = EBUSY; } else { if (sbuf_new(&sc->wmistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) { ret = ENXIO; } else { sc->wmistat_open_pid = td->td_proc->p_pid; sc->wmistat_bufptr = 0; ret = 0; } } ACPI_SERIAL_END(acpi_wmi); return (ret); } /* * close wmistat device */ static int acpi_wmi_wmistat_close(struct cdev* dev, int flags, int mode, struct thread *td) { struct acpi_wmi_softc *sc; int ret; if (dev == NULL || dev->si_drv1 == NULL) return (EBADF); sc = dev->si_drv1; ACPI_SERIAL_BEGIN(acpi_wmi); if (sc->wmistat_open_pid == 0) { ret = EBADF; } else { if (sc->wmistat_bufptr != -1) { sbuf_delete(&sc->wmistat_sbuf); sc->wmistat_bufptr = -1; } sc->wmistat_open_pid = 0; ret = 0; } ACPI_SERIAL_END(acpi_wmi); return (ret); } /* * Read from wmistat guid information */ static int acpi_wmi_wmistat_read(struct cdev *dev, struct uio *buf, int flag) { struct acpi_wmi_softc *sc; struct wmi_info *winfo; int l; int ret; UINT8* guid; if (dev == NULL || dev->si_drv1 == NULL) return (EBADF); sc = dev->si_drv1; ACPI_SERIAL_BEGIN(acpi_wmi); if (sc->wmistat_bufptr == -1) { ret = EBADF; } else { if (!sbuf_done(&sc->wmistat_sbuf)) { sbuf_printf(&sc->wmistat_sbuf, "GUID " " INST EXPE METH STR " "EVENT OID\n"); TAILQ_FOREACH(winfo, &sc->wmi_info_list, wmi_list) { guid = (UINT8*)winfo->ginfo.guid; sbuf_printf(&sc->wmistat_sbuf, "{%02X%02X%02X%02X-%02X%02X-" "%02X%02X-%02X%02X-%02X%02X" "%02X%02X%02X%02X} %3d %-5s", guid[3], guid[2], guid[1], guid[0], guid[5], guid[4], guid[7], guid[6], guid[8], guid[9], guid[10], guid[11], guid[12], guid[13], guid[14], guid[15], winfo->ginfo.max_instance, (winfo->ginfo.flags& ACPI_WMI_REGFLAG_EXPENSIVE)? "YES":"NO" ); if (winfo->ginfo.flags&ACPI_WMI_REGFLAG_METHOD) sbuf_printf(&sc->wmistat_sbuf, "WM%c%c ", winfo->ginfo.oid[0], winfo->ginfo.oid[1]); else sbuf_printf(&sc->wmistat_sbuf, "NO "); sbuf_printf(&sc->wmistat_sbuf, "%-4s", (winfo->ginfo.flags& ACPI_WMI_REGFLAG_STRING)?"YES":"NO" ); if (winfo->ginfo.flags&ACPI_WMI_REGFLAG_EVENT) sbuf_printf(&sc->wmistat_sbuf, "0x%02X%s -\n", (UINT8)winfo->ginfo.oid[0], winfo->event_handler==NULL? 
" ":"+"); else sbuf_printf(&sc->wmistat_sbuf, "NO %c%c\n", winfo->ginfo.oid[0], winfo->ginfo.oid[1]); } sbuf_finish(&sc->wmistat_sbuf); } if (sbuf_len(&sc->wmistat_sbuf) <= 0) { sbuf_delete(&sc->wmistat_sbuf); sc->wmistat_bufptr = -1; sc->wmistat_open_pid = 0; ret = ENOMEM; } else { l = min(buf->uio_resid, sbuf_len(&sc->wmistat_sbuf) - sc->wmistat_bufptr); ret = (l > 0)?uiomove(sbuf_data(&sc->wmistat_sbuf) + sc->wmistat_bufptr, l, buf) : 0; sc->wmistat_bufptr += l; } } ACPI_SERIAL_END(acpi_wmi); return (ret); } diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index a7ff4e302bed..a16719dd0941 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4679 +1,4679 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. 
*/ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static device_probe_t acpi_probe; static device_attach_t acpi_attach; static device_suspend_t acpi_suspend; static device_resume_t acpi_resume; static device_shutdown_t acpi_shutdown; static bus_add_child_t acpi_add_child; static bus_print_child_t acpi_print_child; static bus_probe_nomatch_t acpi_probe_nomatch; static bus_driver_added_t acpi_driver_added; static bus_child_deleted_t acpi_child_deleted; static bus_read_ivar_t acpi_read_ivar; static bus_write_ivar_t acpi_write_ivar; static bus_get_resource_list_t acpi_get_rlist; static bus_get_rman_t acpi_get_rman; static bus_set_resource_t acpi_set_resource; static bus_alloc_resource_t acpi_alloc_resource; static bus_adjust_resource_t acpi_adjust_resource; static bus_release_resource_t acpi_release_resource; static bus_delete_resource_t acpi_delete_resource; static bus_activate_resource_t acpi_activate_resource; static bus_deactivate_resource_t acpi_deactivate_resource; static bus_map_resource_t acpi_map_resource; static bus_unmap_resource_t acpi_unmap_resource; static bus_child_pnpinfo_t acpi_child_pnpinfo_method; static bus_child_location_t acpi_child_location_method; static bus_hint_device_unit_t acpi_hint_device_unit; static bus_get_property_t acpi_bus_get_prop; static bus_get_device_path_t acpi_get_device_path; static bus_get_domain_t acpi_get_domain_method; static acpi_id_probe_t acpi_device_id_probe; static acpi_evaluate_object_t acpi_device_eval_obj; static acpi_get_property_t acpi_device_get_prop; static acpi_scan_children_t acpi_device_scan_children; static isa_pnp_probe_t acpi_isa_pnp_probe; static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad); static void acpi_platform_osc(device_t dev); static void acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); static void acpi_enable_pcie(void); static void 
acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_get_rman, acpi_get_rman), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), DEVMETHOD(bus_activate_resource, acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_deactivate_resource), DEVMETHOD(bus_map_resource, acpi_map_resource), DEVMETHOD(bus_unmap_resource, acpi_unmap_resource), DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain_method), DEVMETHOD(bus_get_property, acpi_bus_get_prop), DEVMETHOD(bus_get_device_path, acpi_get_device_path), /* ACPI bus */ DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_get_property, acpi_device_get_prop), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. */ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. 
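* (controlled by the debug.acpi.enable_debug_objects tunable and
* sysctl defined below)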
*/ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_MPSAFE, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); #if defined(__amd64__) || defined(__i386__) int acpi_override_isa_irq_polarity; #endif /* * ACPI standard UUID for Device Specific Data Package * "Device Properties UUID for _DSD" Rev. 2.0 */ static const struct uuid acpi_dsd_uuid = { 0xdaffd814, 0x6eba, 0x4d8c, 0x8a, 0x91, { 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01 } }; /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. */ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. */ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. 
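* (i.e. hint.acpi.0.disabled="1" set at the loader prompt or in
* loader.conf)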
*/ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); resource_list_init(&sc->sysres_rl); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. */ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. 
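* Both passes are skipped by setting ACPI_NO_DEVICE_INIT and
* ACPI_NO_OBJECT_INIT when debug.acpi.avoid is set (see just below).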
*/ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Set up our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); #if defined(__amd64__) || defined(__i386__) /* * Enable workaround for incorrect ISA IRQ polarity by default on * systems with Intel CPUs. */ if (cpu_vendor_id == CPU_VENDOR_INTEL) acpi_override_isa_irq_polarity = 1; SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "override_isa_irq_polarity", CTLFLAG_RDTUN, &acpi_override_isa_irq_polarity, 0, "Force active-hi polarity for edge-triggered ISA IRQs"); #endif /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children.
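 *
 * (Namespace scanning can be suppressed for debugging with the loader
 * tunable debug.acpi.disabled="children"; an illustrative knob, see
 * acpi(4).)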
*/ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST + 150); /* * Register our acpi event handlers. * XXX should be configurable, e.g. via a userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states. */ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children that aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; bus_topo_assert(); error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { bus_topo_assert(); acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { bus_topo_assert(); /* Allow children to shut down first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most).
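 *
 * acpi_wake_prep_walk() walks the namespace and, for each device with
 * _PRW wake data, arms or disarms its wake GPE as appropriate for S5.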
*/ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); ad->ad_domain = ACPI_DEV_DOMAIN_UNKNOWN; resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. * * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. */ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); int pxm; if (dinfo->ad_handle) { sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { sbuf_printf(sb, " _PXM=%d", pxm); } } return (0); } /* PnP information for devctl(8) */ int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { sbuf_printf(sb, "unknown"); return (0); } sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Note: the check for ACPI locator may be redundant. 
However, this routine is * suitable both for busses whose only locator is ACPI and as a building block * for busses that have multiple locators to cope with. */ int acpi_get_acpi_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) { ACPI_HANDLE *handle = acpi_get_handle(child); if (handle != NULL) sbuf_printf(sb, "%s", acpi_name(handle)); return (0); } return (bus_generic_get_device_path(bus, child, locator, sb)); } static int acpi_get_device_path(device_t bus, device_t child, const char *locator, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); if (strcmp(locator, BUS_LOCATOR_ACPI) == 0) return (acpi_get_acpi_device_path(bus, child, locator, sb)); if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) { ACPI_DEVICE_INFO *adinfo; if (!ACPI_FAILURE(AcpiGetObjectInfo(dinfo->ad_handle, &adinfo)) && dinfo->ad_handle != 0 && (adinfo->Valid & ACPI_VALID_HID)) { const char *hid = adinfo->HardwareId.String; u_long uid = (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL; u_long hidval; /* * In UEFI Standard Version 2.6, Section 9.6.1.6 Text * Device Node Reference, there's an insanely long Table * 98. This implements the relevant bits from that * table. Newer versions appear not to have required * anything new. The EDK2 firmware presents both PciRoot * and PcieRoot as PciRoot. Follow the EDK2 standard. */ if (strncmp("PNP", hid, 3) != 0) goto nomatch; hidval = strtoul(hid + 3, NULL, 16); switch (hidval) { case 0x0301: sbuf_printf(sb, "Keyboard(0x%lx)", uid); break; case 0x0401: sbuf_printf(sb, "ParallelPort(0x%lx)", uid); break; case 0x0501: sbuf_printf(sb, "Serial(0x%lx)", uid); break; case 0x0604: sbuf_printf(sb, "Floppy(0x%lx)", uid); break; case 0x0a03: case 0x0a08: sbuf_printf(sb, "PciRoot(0x%lx)", uid); break; default: /* Everything else gets a generic encode */ nomatch: sbuf_printf(sb, "Acpi(%s,0x%lx)", hid, uid); break; } } /* Not handled: AcpiAdr... unsure how to know it's one */ } /* For the rest, punt to the default handler */ return (bus_generic_get_device_path(bus, child, locator, sb)); } /* * Handle device deletion.
*/ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ACPI_IVAR_DOMAIN: *(int *)result = ad->ad_domain; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; case ACPI_IVAR_DOMAIN: ad->ad_domain = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Does this device match because the resources match? */ static bool acpi_hint_device_matches_resources(device_t child, const char *name, int unit) { long value; bool matches; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. */ matches = false; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. 
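 *
 * An illustrative device.hints entry that this matching would accept
 * (the values are hypothetical):
 *
 *	hint.fdc.0.at="acpi0"
 *	hint.fdc.0.port="0x3F0"
 *	hint.fdc.0.irq="6"
 *	hint.fdc.0.drq="2"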
*/ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches = true; else return false; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches = true; else return false; } /* * If either the I/O address and/or the memory address matched, then * assumed this devices matches and that any mismatch in other resources * will be resolved by siltently ignoring those other resources. Otherwise * all further resources must match. */ if (matches) { return (true); } if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches = true; else return false; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches = true; else return false; } return matches; } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { device_location_cache_t *cache; const char *s; int line, unit; bool matches; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; cache = dev_wired_cache_init(); while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); matches = false; if (strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0) matches = acpi_hint_device_matches_resources(child, name, unit); else matches = dev_wired_cache_match(cache, child, s); if (matches) { /* We have a winner! */ *unitp = unit; break; } } dev_wired_cache_fini(cache); } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned. */ int acpi_pxm_parse(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_pxm_parse(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } static int acpi_get_domain_method(device_t dev, device_t child, int *domain) { int error; error = acpi_read_ivar(dev, child, ACPI_IVAR_DOMAIN, (uintptr_t *)domain); if (error == 0 && *domain != ACPI_DEV_DOMAIN_UNKNOWN) return (0); return (ENOENT); } static struct rman * acpi_get_rman(device_t bus, int type, u_int flags) { /* Only memory and IO resources are managed. 
*/ switch (type) { case SYS_RES_IOPORT: return (&acpi_rman_io); case SYS_RES_MEMORY: return (&acpi_rman_mem); default: return (NULL); } } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct acpi_softc *sc = device_get_softc(dev); struct resource *res; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. */ rm = acpi_get_rman(dev, rle->type, 0); if (rm == NULL) continue; /* Pre-allocate resource and add to our rman pool. */ res = bus_alloc_resource(dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for active devices found during the * namespace scan once the boot-time attach of devices has completed. * * Ideally reserving firmware-assigned resources would work in a * depth-first traversal of the device namespace, but this is * complicated. In particular, not all resources are enumerated by * ACPI (e.g. PCI bridges and devices enumerate their resources via * other means). Some systems also enumerate devices via ACPI behind * PCI bridges but without a matching PCI device_t enumerated via * PCI bus scanning; these device_t's end up as direct children of * acpi0. Doing this scan late is not ideal, but works for now. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; device_t *children; int child_count, i; if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource.
*/ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; rman_res_t end; #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. */ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct child * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved. */ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions.
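 *
 * ("start + count - 1 == end" below is true exactly when the caller
 * fully specified the range, leaving no freedom in placement.)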
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } static bool acpi_is_resource_managed(device_t bus, struct resource *r) { struct rman *rm; rm = acpi_get_rman(bus, rman_get_type(r), rman_get_flags(r)); if (rm == NULL) return (false); return (rman_is_region_manager(r, rm)); } static struct resource * acpi_managed_resource(device_t bus, struct resource *r) { struct acpi_softc *sc = device_get_softc(bus); struct resource_list_entry *rle; KASSERT(acpi_is_resource_managed(bus, r), ("resource %p is not suballocated", r)); STAILQ_FOREACH(rle, &sc->sysres_rl, link) { if (rle->type != rman_get_type(r) || rle->res == NULL) continue; if (rman_get_start(r) >= rman_get_start(rle->res) && rman_get_end(r) <= rman_get_end(rle->res)) return (rle->res); } return (NULL); } static int acpi_adjust_resource(device_t bus, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(bus, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, struct resource *r) { /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. */ if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_release_resource(bus, child, r)); return (bus_generic_rl_release_resource(bus, child, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } static int acpi_activate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_activate_resource(bus, child, r)); return (bus_generic_activate_resource(bus, child, r)); } static int acpi_deactivate_resource(device_t bus, device_t child, struct resource *r) { if (acpi_is_resource_managed(bus, r)) return (bus_generic_rman_deactivate_resource(bus, child, r)); return (bus_generic_deactivate_resource(bus, child, r)); } static int acpi_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; struct resource *sysres; rman_res_t length, start; int error; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_map_resource(bus, child, r, argsp, map)); /* Resources must be active to be mapped. 
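 *
 * A typical consumer defers the mapping like this (illustrative sketch;
 * the driver variables are hypothetical):
 *
 *	r = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE | RF_UNMAPPED);
 *	resource_init_map_request(&req);
 *	error = bus_map_resource(dev, r, &req, &map);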
*/ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); args.offset = start - rman_get_start(sysres); args.length = length; return (bus_map_resource(bus, sysres, &args, map)); } static int acpi_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { struct resource *sysres; if (!acpi_is_resource_managed(bus, r)) return (bus_generic_unmap_resource(bus, child, r, map)); sysres = acpi_managed_resource(bus, r); if (sysres == NULL) return (ENOENT); return (bus_unmap_resource(bus, sysres, map)); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. */ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. */ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? 
PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } static ACPI_STATUS acpi_device_get_prop(device_t bus, device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { const ACPI_OBJECT *pkg, *name, *val; struct acpi_device *ad; ACPI_STATUS status; int i; ad = device_get_ivars(dev); if (ad == NULL || propname == NULL) return (AE_BAD_PARAMETER); if (ad->dsd_pkg == NULL) { if (ad->dsd.Pointer == NULL) { status = acpi_find_dsd(ad); if (ACPI_FAILURE(status)) return (status); } else { return (AE_NOT_FOUND); } } for (i = 0; i < ad->dsd_pkg->Package.Count; i ++) { pkg = &ad->dsd_pkg->Package.Elements[i]; if (pkg->Type != ACPI_TYPE_PACKAGE || pkg->Package.Count != 2) continue; name = &pkg->Package.Elements[0]; val = &pkg->Package.Elements[1]; if (name->Type != ACPI_TYPE_STRING) continue; if (strncmp(propname, name->String.Pointer, name->String.Length) == 0) { if (value != NULL) *value = val; return (AE_OK); } } return (AE_NOT_FOUND); } static ACPI_STATUS acpi_find_dsd(struct acpi_device *ad) { const ACPI_OBJECT *dsd, *guid, *pkg; ACPI_STATUS status; ad->dsd.Length = ACPI_ALLOCATE_BUFFER; ad->dsd.Pointer = NULL; ad->dsd_pkg = NULL; status = AcpiEvaluateObject(ad->ad_handle, "_DSD", NULL, &ad->dsd); if (ACPI_FAILURE(status)) return (status); dsd = ad->dsd.Pointer; guid = &dsd->Package.Elements[0]; pkg = &dsd->Package.Elements[1]; if (guid->Type != ACPI_TYPE_BUFFER || pkg->Type != ACPI_TYPE_PACKAGE || guid->Buffer.Length != sizeof(acpi_dsd_uuid)) return (AE_NOT_FOUND); if (memcmp(guid->Buffer.Pointer, &acpi_dsd_uuid, sizeof(acpi_dsd_uuid)) == 0) { ad->dsd_pkg = pkg; return (AE_OK); } return (AE_NOT_FOUND); } static ssize_t acpi_bus_get_prop_handle(const ACPI_OBJECT *hobj, void *propvalue, size_t size) { ACPI_OBJECT *pobj; ACPI_HANDLE h; if 
(hobj->Type != ACPI_TYPE_PACKAGE) goto err; if (hobj->Package.Count != 1) goto err; pobj = &hobj->Package.Elements[0]; if (pobj == NULL) goto err; if (pobj->Type != ACPI_TYPE_LOCAL_REFERENCE) goto err; h = acpi_GetReference(NULL, pobj); if (h == NULL) goto err; if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) *(ACPI_HANDLE *)propvalue = h; return (sizeof(ACPI_HANDLE)); err: return (-1); } static ssize_t acpi_bus_get_prop(device_t bus, device_t child, const char *propname, void *propvalue, size_t size, device_property_type_t type) { ACPI_STATUS status; const ACPI_OBJECT *obj; status = acpi_device_get_prop(bus, child, __DECONST(char *, propname), &obj); if (ACPI_FAILURE(status)) return (-1); switch (type) { case DEVICE_PROP_ANY: case DEVICE_PROP_BUFFER: case DEVICE_PROP_UINT32: case DEVICE_PROP_UINT64: break; case DEVICE_PROP_HANDLE: return (acpi_bus_get_prop_handle(obj, propvalue, size)); default: return (-1); } switch (obj->Type) { case ACPI_TYPE_INTEGER: if (type == DEVICE_PROP_UINT32) { if (propvalue != NULL && size >= sizeof(uint32_t)) *((uint32_t *)propvalue) = obj->Integer.Value; return (sizeof(uint32_t)); } if (propvalue != NULL && size >= sizeof(uint64_t)) *((uint64_t *) propvalue) = obj->Integer.Value; return (sizeof(uint64_t)); case ACPI_TYPE_STRING: if (type != DEVICE_PROP_ANY && type != DEVICE_PROP_BUFFER) return (-1); if (propvalue != NULL && size > 0) memcpy(propvalue, obj->String.Pointer, MIN(size, obj->String.Length)); return (obj->String.Length); case ACPI_TYPE_BUFFER: if (propvalue != NULL && size > 0) memcpy(propvalue, obj->Buffer.Pointer, MIN(size, obj->Buffer.Length)); return (obj->Buffer.Length); case ACPI_TYPE_PACKAGE: if (propvalue != NULL && size >= sizeof(ACPI_OBJECT *)) { *((ACPI_OBJECT **) propvalue) = __DECONST(ACPI_OBJECT *, obj); } return (sizeof(ACPI_OBJECT *)); case ACPI_TYPE_LOCAL_REFERENCE: if (propvalue != NULL && size >= sizeof(ACPI_HANDLE)) { ACPI_HANDLE h; h = acpi_GetReference(NULL, __DECONST(ACPI_OBJECT *, obj)); memcpy(propvalue, h, sizeof(ACPI_HANDLE)); } return (sizeof(ACPI_HANDLE)); default: return (0); } } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. */ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. 
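 *
 * A user callback passed to acpi_device_scan_children() might look like
 * this (illustrative sketch; "my_scan_cb" is hypothetical):
 *
 *	static ACPI_STATUS
 *	my_scan_cb(ACPI_HANDLE h, device_t *dev, int level, void *arg)
 *	{
 *		if (*dev != NULL)
 *			device_printf(*dev, "visited %s\n", acpi_name(h));
 *		return (AE_OK);
 *	}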
*/ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. */ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. */ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for an MCFG table. If it is present, use the settings for * domain (segment) 0 to set up PCI config space access via the memory * map.
* * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. */ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { pcie_cfgregopen(alloc->Address, alloc->PciSegment, alloc->StartBusNumber, alloc->EndBusNumber); alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); - bus_generic_probe(bus); + bus_identify_children(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* * Reserve resources allocated to children but not yet allocated * by a driver. */ acpi_reserve_resources(bus); /* Attach wake sysctls. */ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. 
PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int d, order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, DEVICE_UNIT_ANY); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * The RTC device should be enabled for CMOS register space * unless the FADT indicates it is not present (this is * checked in the RTC probe routine). */ if (acpi_MatchHid(handle, "PNP0B00")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. */ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources.
*/ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } d = acpi_pxm_parse(child); if (d >= 0) ad->ad_domain = d; break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. */ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 for FreeBSD because * their _STA only returns non-zero for the OS string "Windows 2015", * and zero otherwise. Force them to always be treated as present. * Beta versions were worse: they always returned 0.
*/ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle. * Returns ACPI_MATCHHID_HID if _HID matches, * ACPI_MATCHHID_CID if _CID matches and _HID does not, and * ACPI_MATCHHID_NOMATCH (0) if neither matches. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, i.e. that of (parent) * or one of its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } ACPI_STATUS acpi_GetProperty(device_t dev, ACPI_STRING propname, const ACPI_OBJECT **value) { device_t bus = device_get_parent(dev); return (ACPI_GET_PROPERTY(bus, dev, propname, value)); } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer.
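 *
 * Illustrative call (the caller and its handle are hypothetical):
 *
 *	UINT32 sta;
 *
 *	if (ACPI_SUCCESS(acpi_GetInteger(handle, "_STA", &sta)) &&
 *	    ACPI_DEVICE_PRESENT(sta))
 *		; /* device is present */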
*/ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator.
* This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depends on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); }; AcpiOsFree(obj); return ret; } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object. 
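 *
 * An illustrative sketch (the UUID bytes and function index below are
 * hypothetical placeholders): query the function-0 support bitfield with
 * acpi_DSMQuery() before evaluating a specific _DSM function.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_dsm(ACPI_HANDLE h)
{
        static const uint8_t uuid[ACPI_UUID_LENGTH] = { 0 };	/* placeholder */
        ACPI_BUFFER buf;
        ACPI_OBJECT *obj;

        /* Bit N set in the query result => function index N is supported. */
        if ((acpi_DSMQuery(h, uuid, 1) & (1ULL << 2)) == 0)
                return (ENXIO);
        if (ACPI_FAILURE(acpi_EvaluateDSM(h, uuid, 1, 2, NULL, &buf)))
                return (ENXIO);
        obj = (ACPI_OBJECT *)buf.Pointer;
        /* Per the note above, check the returned type before using it. */
        if (obj->Type != ACPI_TYPE_BUFFER) {
                AcpiOsFree(buf.Pointer);
                return (ENXIO);
        }
        AcpiOsFree(buf.Pointer);
        return (0);
}
#endif
/*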
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
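 *
 * (The deprecated interface continues below. First, an illustrative sketch
 * of acpi_walk_subtables() from above; example_count_lapics and its
 * callback are hypothetical.)
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
        int *count = arg;

        if (entry->Type == ACPI_MADT_TYPE_LOCAL_APIC)
                (*count)++;
}

static int
example_count_lapics(ACPI_TABLE_MADT *madt)
{
        int count = 0;

        /* Subtables begin immediately after the fixed MADT header. */
        acpi_walk_subtables(madt + 1,
            (char *)madt + madt->Header.Length, example_madt_handler, &count);
        return (count);
}
#endif
/*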
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
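 *
 * An illustrative sketch of the request side (hypothetical; a real caller
 * might be a thermal zone driver holding the softc):
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_request_suspend(struct acpi_softc *sc)
{
        int error;

        /*
         * Request S3. Userland listeners then have 10 seconds (see
         * acpi_sleep_force() above) to acknowledge via acpi_AckSleepState()
         * before the sleep is forced.
         */
        error = acpi_ReqSleepState(sc, ACPI_STATE_S3);
        if (error != 0)
                device_printf(sc->acpi_dev,
                    "suspend request failed: %d\n", error);
}
#endif
/*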
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. 
*/ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Hold the bus topology lock across DEVICE_SUSPEND/RESUME. */ bus_topo_lock(); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX Note that a two-pass approach, with a 'veto' pass followed * by a "real thing" pass, would be better, but the current bus * interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be * restored by the ACPI platform (BIOS, firmware) to its pre-sleep * state. Unfortunately, some BIOSes fail to do so, which leads to * unexpected and serious consequences during wakeup, such as a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent misinterpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing the power button status after wakeup is mandated * by the ACPI specification in the section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* Call acpi_wakeup_machdep() again with interrupts enabled. */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process. 
This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using ACPI timecounter at this point. */ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; bus_topo_unlock(); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. 
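 *
 * (An illustrative sketch of the consumer side of the wake machinery;
 * example_enable_wake is hypothetical. This function's body follows.)
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_enable_wake(device_t dev)
{
        /*
         * Opt the device in; acpi_wake_sleep_prep() will then arm its
         * GPE and _PSW on the way into any sleep state that _PRW allows.
         */
        return (acpi_wake_set_enable(dev, 1));
}
#endif
/*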
*/ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. 
The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. */ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc = (struct acpi_softc *)context; #else (void)context; #endif ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); #if defined(__amd64__) || defined(__i386__) if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); #else shutdown_nice(RB_POWEROFF); #endif return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. 
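 * (The scan follows; below is an illustrative, hypothetical sketch of a
 * typical caller.)
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_probe(device_t dev)
{
        /* Honor debug.acpi.disabled="example" (or "all") from the loader. */
        if (acpi_disabled("example"))
                return (ENXIO);
        device_set_desc(dev, "Example ACPI device");
        return (BUS_PROBE_DEFAULT);
}
#endif
/*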
*/ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted for non-writable user. * Currently, other ioctls just fetch information. * Not changing system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls. 
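 * (The switch follows; below is an illustrative sketch of how a driver
 * hooks into this dispatcher. EXAMPLEIO_GET and struct example_softc are
 * hypothetical.)
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_ioctl(u_long cmd, caddr_t addr, void *arg)
{
        struct example_softc *sc = arg;

        *(int *)addr = sc->state;
        return (0);
}

static void
example_register(struct example_softc *sc)
{
        /* Dispatched from acpiioctl() for matching /dev/acpi requests. */
        acpi_register_ioctl(EXAMPLEIO_GET, example_ioctl, sc);
}
#endif
/*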
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
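 *
 * For example (illustrative values), in loader.conf(5):
 *
 *	debug.acpi.layer="ACPI_EC ACPI_BUS !ACPI_TABLES"
 *	debug.acpi.level="ACPI_LV_INFO ACPI_LV_INIT"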
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_MPSAFE, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_container.c b/sys/dev/acpica/acpi_container.c index 81a1ee8ce25c..ec4150ab186f 100644 --- a/sys/dev/acpica/acpi_container.c +++ b/sys/dev/acpica/acpi_container.c @@ -1,151 +1,151 @@ /*- * Copyright (c) 2017 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include "pcib_if.h" ACPI_MODULE_NAME("CONTAINER") static int acpi_syscont_probe(device_t); static int acpi_syscont_attach(device_t); static int acpi_syscont_alloc_msi(device_t, device_t, int count, int maxcount, int *irqs); static int acpi_syscont_release_msi(device_t bus, device_t dev, int count, int *irqs); static int acpi_syscont_alloc_msix(device_t bus, device_t dev, int *irq); static int acpi_syscont_release_msix(device_t bus, device_t dev, int irq); static int acpi_syscont_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data); static device_method_t acpi_syscont_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_syscont_probe), DEVMETHOD(device_attach, acpi_syscont_attach), DEVMETHOD(device_detach, bus_generic_detach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, bus_generic_get_cpus), /* pcib interface */ DEVMETHOD(pcib_alloc_msi, acpi_syscont_alloc_msi), DEVMETHOD(pcib_release_msi, acpi_syscont_release_msi), DEVMETHOD(pcib_alloc_msix, acpi_syscont_alloc_msix), DEVMETHOD(pcib_release_msix, acpi_syscont_release_msix), DEVMETHOD(pcib_map_msi, acpi_syscont_map_msi), DEVMETHOD_END }; static driver_t acpi_syscont_driver = { "acpi_syscontainer", acpi_syscont_methods, 0, }; DRIVER_MODULE(acpi_syscontainer, acpi, acpi_syscont_driver, NULL, NULL); MODULE_DEPEND(acpi_syscontainer, acpi, 1, 1, 1); static int acpi_syscont_probe(device_t dev) { static char *syscont_ids[] = { "ACPI0004", "PNP0A05", "PNP0A06", NULL }; int rv; if (acpi_disabled("syscontainer")) return (ENXIO); rv = ACPI_ID_PROBE(device_get_parent(dev), dev, syscont_ids, NULL); if (rv <= 0) device_set_desc(dev, "System Container"); return (rv); } static int acpi_syscont_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); } static int acpi_syscont_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs) { device_t parent = device_get_parent(bus); return (PCIB_ALLOC_MSI(device_get_parent(parent), dev, count, maxcount, irqs)); } static int acpi_syscont_release_msi(device_t bus, device_t dev, int count, int *irqs) { device_t parent = device_get_parent(bus); return (PCIB_RELEASE_MSI(device_get_parent(parent), dev, count, irqs)); } static int acpi_syscont_alloc_msix(device_t bus, device_t dev, int 
*irq) { device_t parent = device_get_parent(bus); return (PCIB_ALLOC_MSIX(device_get_parent(parent), dev, irq)); } static int acpi_syscont_release_msix(device_t bus, device_t dev, int irq) { device_t parent = device_get_parent(bus); return (PCIB_RELEASE_MSIX(device_get_parent(parent), dev, irq)); } static int acpi_syscont_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t parent = device_get_parent(bus); return (PCIB_MAP_MSI(device_get_parent(parent), dev, irq, addr, data)); } diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c index 63e17de1ff28..58be4b228507 100644 --- a/sys/dev/acpica/acpi_cpu.c +++ b/sys/dev/acpica/acpi_cpu.c @@ -1,1532 +1,1532 @@ /*- * Copyright (c) 2003-2005 Nate Lawson (SDG) * Copyright (c) 2001 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #include #endif #include #include #include #include /* * Support for ACPI Processor devices, including C[1-3] sleep states. */ /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_PROCESSOR ACPI_MODULE_NAME("PROCESSOR") struct acpi_cx { struct resource *p_lvlx; /* Register to read to enter state. */ uint32_t type; /* C1-3 (C4 and up treated as C3). */ uint32_t trans_lat; /* Transition latency (usec). */ uint32_t power; /* Power consumed (mW). */ int res_type; /* Resource type for p_lvlx. */ int res_rid; /* Resource ID for p_lvlx. */ bool do_mwait; uint32_t mwait_hint; bool mwait_hw_coord; bool mwait_bm_avoidance; }; #define MAX_CX_STATES 8 struct acpi_cpu_softc { device_t cpu_dev; ACPI_HANDLE cpu_handle; struct pcpu *cpu_pcpu; uint32_t cpu_acpi_id; /* ACPI processor id */ uint32_t cpu_p_blk; /* ACPI P_BLK location */ uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */ struct acpi_cx cpu_cx_states[MAX_CX_STATES]; int cpu_cx_count; /* Number of valid Cx states. */ int cpu_prev_sleep;/* Last idle sleep duration. */ int cpu_features; /* Child driver supported features. */ /* Runtime state. */ int cpu_non_c2; /* Index of lowest non-C2 state. */ int cpu_non_c3; /* Index of lowest non-C3 state. 
*/ u_int cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */ /* Values for sysctl. */ struct sysctl_ctx_list cpu_sysctl_ctx; struct sysctl_oid *cpu_sysctl_tree; int cpu_cx_lowest; int cpu_cx_lowest_lim; int cpu_disable_idle; /* Disable entry to idle function */ char cpu_cx_supported[64]; }; struct acpi_cpu_device { struct resource_list ad_rl; }; #define CPU_GET_REG(reg, width) \ (bus_space_read_ ## width(rman_get_bustag((reg)), \ rman_get_bushandle((reg)), 0)) #define CPU_SET_REG(reg, width, val) \ (bus_space_write_ ## width(rman_get_bustag((reg)), \ rman_get_bushandle((reg)), 0, (val))) #define ACPI_NOTIFY_CX_STATES 0x81 /* _CST changed. */ #define CPU_QUIRK_NO_C3 (1<<0) /* C3-type states are not usable. */ #define CPU_QUIRK_NO_BM_CTRL (1<<2) /* No bus mastering control. */ #define PCI_VENDOR_INTEL 0x8086 #define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */ #define PCI_REVISION_A_STEP 0 #define PCI_REVISION_B_STEP 1 #define PCI_REVISION_4E 2 #define PCI_REVISION_4M 3 #define PIIX4_DEVACTB_REG 0x58 #define PIIX4_BRLD_EN_IRQ0 (1<<0) #define PIIX4_BRLD_EN_IRQ (1<<1) #define PIIX4_BRLD_EN_IRQ8 (1<<5) #define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8) #define PIIX4_PCNTRL_BST_EN (1<<10) #define CST_FFH_VENDOR_INTEL 1 #define CST_FFH_VENDOR_AMD 2 #define CST_FFH_INTEL_CL_C1IO 1 #define CST_FFH_INTEL_CL_MWAIT 2 #define CST_FFH_MWAIT_HW_COORD 0x0001 #define CST_FFH_MWAIT_BM_AVOID 0x0002 #define CPUDEV_DEVICE_ID "ACPI0007" /* Knob to disable acpi_cpu devices */ bool acpi_cpu_disabled = false; /* Platform hardware resource information. */ static uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */ static uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */ static int cpu_quirks; /* Indicate any hardware bugs. */ /* Values for sysctl. 
*/ static struct sysctl_ctx_list cpu_sysctl_ctx; static struct sysctl_oid *cpu_sysctl_tree; static int cpu_cx_generic; static int cpu_cx_lowest_lim; #if defined(__i386__) || defined(__amd64__) static bool cppc_notify; #endif static struct acpi_cpu_softc **cpu_softc; ACPI_SERIAL_DECL(cpu, "ACPI CPU"); static int acpi_cpu_probe(device_t dev); static int acpi_cpu_attach(device_t dev); static int acpi_cpu_suspend(device_t dev); static int acpi_cpu_resume(device_t dev); static int acpi_pcpu_get_id(device_t dev, uint32_t acpi_id, u_int *cpu_id); static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child); static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit); static int acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int acpi_cpu_shutdown(device_t dev); static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc); static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc); static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc); static void acpi_cpu_startup(void *arg); static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc); static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc); #if defined(__i386__) || defined(__amd64__) static void acpi_cpu_idle(sbintime_t sbt); #endif static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); static void acpi_cpu_quirks(void); static void acpi_cpu_quirks_piix4(void); static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc); static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); #if defined(__i386__) || defined(__amd64__) static int acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS); #endif static device_method_t acpi_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_cpu_probe), DEVMETHOD(device_attach, acpi_cpu_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, acpi_cpu_shutdown), DEVMETHOD(device_suspend, acpi_cpu_suspend), DEVMETHOD(device_resume, acpi_cpu_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_cpu_add_child), DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar), DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t acpi_cpu_driver = { "cpu", acpi_cpu_methods, sizeof(struct acpi_cpu_softc), }; DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, 0, 0); MODULE_DEPEND(cpu, acpi, 1, 1, 1); static int acpi_cpu_probe(device_t dev) { static char *cpudev_ids[] = { CPUDEV_DEVICE_ID, NULL }; int acpi_id, cpu_id; ACPI_BUFFER buf; ACPI_HANDLE handle; ACPI_OBJECT *obj; ACPI_STATUS status; ACPI_OBJECT_TYPE type; if (acpi_disabled("cpu") || acpi_cpu_disabled) return (ENXIO); type = acpi_get_type(dev); if (type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_DEVICE) return (ENXIO); if (type == ACPI_TYPE_DEVICE && ACPI_ID_PROBE(device_get_parent(dev), dev, cpudev_ids, NULL) >= 0) return (ENXIO); handle = 
acpi_get_handle(dev); if (cpu_softc == NULL) cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) * (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO); if (type == ACPI_TYPE_PROCESSOR) { /* Get our Processor object. */ buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { device_printf(dev, "probe failed to get Processor obj - %s\n", AcpiFormatException(status)); return (ENXIO); } obj = (ACPI_OBJECT *)buf.Pointer; if (obj->Type != ACPI_TYPE_PROCESSOR) { device_printf(dev, "Processor object has bad type %d\n", obj->Type); AcpiOsFree(obj); return (ENXIO); } /* * Find the processor associated with our unit. We could use the * ProcId as a key, however, some boxes do not have the same values * in their Processor object as the ProcId values in the MADT. */ acpi_id = obj->Processor.ProcId; AcpiOsFree(obj); } else { status = acpi_GetInteger(handle, "_UID", &acpi_id); if (ACPI_FAILURE(status)) { device_printf(dev, "Device object has bad value - %s\n", AcpiFormatException(status)); return (ENXIO); } } if (acpi_pcpu_get_id(dev, acpi_id, &cpu_id) != 0) { if (bootverbose && (type != ACPI_TYPE_PROCESSOR || acpi_id != 255)) printf("ACPI: Processor %s (ACPI ID %u) ignored\n", acpi_name(acpi_get_handle(dev)), acpi_id); return (ENXIO); } if (device_set_unit(dev, cpu_id) != 0) return (ENXIO); device_set_desc(dev, "ACPI CPU"); if (!bootverbose && device_get_unit(dev) != 0) { device_quiet(dev); device_quiet_children(dev); } return (BUS_PROBE_DEFAULT); } static int acpi_cpu_attach(device_t dev) { ACPI_BUFFER buf; ACPI_OBJECT arg, *obj; ACPI_OBJECT_LIST arglist; struct pcpu *pcpu_data; struct acpi_cpu_softc *sc; struct acpi_softc *acpi_sc; ACPI_STATUS status; u_int features; int cpu_id, drv_count, i; driver_t **drivers; uint32_t cap_set[3]; /* UUID needed by _OSC evaluation */ static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53 }; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->cpu_dev = dev; sc->cpu_handle = acpi_get_handle(dev); cpu_id = device_get_unit(dev); cpu_softc[cpu_id] = sc; pcpu_data = pcpu_find(cpu_id); pcpu_data->pc_device = dev; sc->cpu_pcpu = pcpu_data; cpu_smi_cmd = AcpiGbl_FADT.SmiCommand; cpu_cst_cnt = AcpiGbl_FADT.CstControl; if (acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) { buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { device_printf(dev, "attach failed to get Processor obj - %s\n", AcpiFormatException(status)); return (ENXIO); } obj = (ACPI_OBJECT *)buf.Pointer; sc->cpu_p_blk = obj->Processor.PblkAddress; sc->cpu_p_blk_len = obj->Processor.PblkLength; sc->cpu_acpi_id = obj->Processor.ProcId; AcpiOsFree(obj); } else { KASSERT(acpi_get_type(dev) == ACPI_TYPE_DEVICE, ("Unexpected ACPI object")); status = acpi_GetInteger(sc->cpu_handle, "_UID", &sc->cpu_acpi_id); if (ACPI_FAILURE(status)) { device_printf(dev, "Device object has bad value - %s\n", AcpiFormatException(status)); return (ENXIO); } sc->cpu_p_blk = 0; sc->cpu_p_blk_len = 0; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n", device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len)); /* * If this is the first cpu we attach, create and initialize the generic * resources that will be used by all acpi cpu devices. 
*/ if (device_get_unit(dev) == 0) { /* Assume we won't be using generic Cx mode by default */ cpu_cx_generic = FALSE; /* Install hw.acpi.cpu sysctl tree */ acpi_sc = acpi_device_get_parent_softc(dev); sysctl_ctx_init(&cpu_sysctl_ctx); cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "node for CPU children"); #if defined(__i386__) || defined(__amd64__) /* Add sysctl handler to control registering for CPPC notifications */ cppc_notify = 1; SYSCTL_ADD_BOOL(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree), OID_AUTO, "cppc_notify", CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &cppc_notify, 0, "Register for CPPC Notifications"); #endif } /* * Before calling any CPU methods, collect child driver feature hints * and notify ACPI of them. We support unified SMP power control * so advertise this ourselves. Note this is not the same as independent * SMP control where each CPU can have different settings. */ sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 | ACPI_CAP_C1_IO_HALT; #if defined(__i386__) || defined(__amd64__) /* * Ask for MWAIT modes if not disabled and interrupts work * reasonably with MWAIT. */ if (!acpi_disabled("mwait") && cpu_mwait_usable()) sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE; /* * Work around a lingering SMM bug which leads to freezes when handling * CPPC notifications. Tell the SMM we will handle any CPPC notifications. */ if ((cpu_power_eax & CPUTPM1_HWP_NOTIFICATION) && cppc_notify) sc->cpu_features |= ACPI_CAP_INTR_CPPC; #endif if (devclass_get_drivers(device_get_devclass(dev), &drivers, &drv_count) == 0) { for (i = 0; i < drv_count; i++) { if (ACPI_GET_FEATURES(drivers[i], &features) == 0) sc->cpu_features |= features; } free(drivers, M_TEMP); } /* * CPU capabilities are specified in * Intel Processor Vendor-Specific ACPI Interface Specification. */ if (sc->cpu_features) { cap_set[1] = sc->cpu_features; status = acpi_EvaluateOSC(sc->cpu_handle, cpu_oscuuid, 1, 2, cap_set, cap_set, false); if (ACPI_SUCCESS(status)) { if (cap_set[0] != 0) device_printf(dev, "_OSC returned status %#x\n", cap_set[0]); } else { arglist.Pointer = &arg; arglist.Count = 1; arg.Type = ACPI_TYPE_BUFFER; arg.Buffer.Length = sizeof(cap_set); arg.Buffer.Pointer = (uint8_t *)cap_set; cap_set[0] = 1; /* revision */ cap_set[1] = 1; /* number of capabilities integers */ cap_set[2] = sc->cpu_features; AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL); } } /* Probe for Cx state support. */ acpi_cpu_cx_probe(sc); return (0); } static void acpi_cpu_postattach(void *unused __unused) { struct acpi_cpu_softc *sc; int attached = 0, i; if (cpu_softc == NULL) return; bus_topo_lock(); CPU_FOREACH(i) { if ((sc = cpu_softc[i]) != NULL) - bus_generic_probe(sc->cpu_dev); + bus_identify_children(sc->cpu_dev); } CPU_FOREACH(i) { if ((sc = cpu_softc[i]) != NULL) { bus_generic_attach(sc->cpu_dev); attached = 1; } } bus_topo_unlock(); if (attached) { #ifdef EARLY_AP_STARTUP acpi_cpu_startup(NULL); #else /* Queue post cpu-probing task handler */ AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL); #endif } } SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, acpi_cpu_postattach, NULL); static void disable_idle(struct acpi_cpu_softc *sc) { cpuset_t cpuset; CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset); sc->cpu_disable_idle = TRUE; /* * Ensure that the CPU is not in idle state or in acpi_cpu_idle.
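/*
 * On bus_identify_children() above: it is the newer name for what
 * bus_generic_probe() actually did -- invoke every registered driver's
 * DEVICE_IDENTIFY method so drivers can add their own children before the
 * attach pass.  A minimal identify method follows this shape (sketch; the
 * "foo" names are hypothetical):
 *
 *     static void
 *     foo_identify(driver_t *driver, device_t parent)
 *     {
 *         if (device_find_child(parent, "foo", -1) == NULL)
 *             BUS_ADD_CHILD(parent, 0, "foo", -1);
 *     }
 */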
* Note that this code depends on the fact that the rendezvous IPI * can not penetrate context where interrupts are disabled and acpi_cpu_idle * is called and executed in such a context with interrupts being re-enabled * right before return. */ smp_rendezvous_cpus(cpuset, smp_no_rendezvous_barrier, NULL, smp_no_rendezvous_barrier, NULL); } static void enable_idle(struct acpi_cpu_softc *sc) { if (sc->cpu_cx_count > sc->cpu_non_c3 + 1 && (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); sc->cpu_disable_idle = FALSE; } #if defined(__i386__) || defined(__amd64__) static int is_idle_disabled(struct acpi_cpu_softc *sc) { return (sc->cpu_disable_idle); } #endif /* * Disable any entry to the idle function during suspend and re-enable it * during resume. */ static int acpi_cpu_suspend(device_t dev) { int error; error = bus_generic_suspend(dev); if (error) return (error); disable_idle(device_get_softc(dev)); return (0); } static int acpi_cpu_resume(device_t dev) { enable_idle(device_get_softc(dev)); return (bus_generic_resume(dev)); } /* * Find the processor associated with a given ACPI ID. */ static int acpi_pcpu_get_id(device_t dev, uint32_t acpi_id, u_int *cpu_id) { struct pcpu *pc; u_int i; CPU_FOREACH(i) { pc = pcpu_find(i); if (pc->pc_acpi_id == acpi_id) { *cpu_id = pc->pc_cpuid; return (0); } } /* * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC * UP box) use the ACPI ID from the first processor we find. */ if (mp_ncpus == 1) { pc = pcpu_find(0); if (pc->pc_acpi_id == 0xffffffff) pc->pc_acpi_id = acpi_id; *cpu_id = 0; return (0); } return (ESRCH); } static struct resource_list * acpi_cpu_get_rlist(device_t dev, device_t child) { struct acpi_cpu_device *ad; ad = device_get_ivars(child); if (ad == NULL) return (NULL); return (&ad->ad_rl); } static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit) { struct acpi_cpu_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(dev, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_TEMP); return (child); } static int acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_cpu_softc *sc; sc = device_get_softc(dev); switch (index) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->cpu_handle; break; case CPU_IVAR_PCPU: *result = (uintptr_t)sc->cpu_pcpu; break; #if defined(__amd64__) || defined(__i386__) case CPU_IVAR_NOMINAL_MHZ: if (tsc_is_invariant) { *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000); break; } /* FALLTHROUGH */ #endif default: return (ENOENT); } return (0); } static int acpi_cpu_shutdown(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Disable any entry to the idle function. */ disable_idle(device_get_softc(dev)); /* * CPU devices are not truly detached and remain referenced, * so their resources are not freed. */ return_VALUE (0); } static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Use initial sleep value of 1 sec. to start with lowest idle state. */ sc->cpu_prev_sleep = 1000000; sc->cpu_cx_lowest = 0; sc->cpu_cx_lowest_lim = 0; /* * Check for the ACPI 2.0 _CST sleep states object. If we can't find * any, we'll revert to generic FADT/P_BLK Cx control method which will * be handled by acpi_cpu_startup. 
We need to defer to after having * probed all the cpus in the system before probing for generic Cx * states as we may already have found cpus with valid _CST packages */ if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) { /* * We were unable to find a _CST package for this cpu or there * was an error parsing it. Switch back to generic mode. */ cpu_cx_generic = TRUE; if (bootverbose) device_printf(sc->cpu_dev, "switching to generic Cx mode\n"); } /* * TODO: _CSD Package should be checked here. */ } static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc) { ACPI_GENERIC_ADDRESS gas; struct acpi_cx *cx_ptr; sc->cpu_cx_count = 0; cx_ptr = sc->cpu_cx_states; /* Use initial sleep value of 1 sec. to start with lowest idle state. */ sc->cpu_prev_sleep = 1000000; /* C1 has been required since just after ACPI 1.0 */ cx_ptr->type = ACPI_STATE_C1; cx_ptr->trans_lat = 0; cx_ptr++; sc->cpu_non_c2 = sc->cpu_cx_count; sc->cpu_non_c3 = sc->cpu_cx_count; sc->cpu_cx_count++; /* * The spec says P_BLK must be 6 bytes long. However, some systems * use it to indicate a fractional set of features present so we * take 5 as C2. Some may also have a value of 7 to indicate * another C3 but most use _CST for this (as required) and having * "only" C1-C3 is not a hardship. */ if (sc->cpu_p_blk_len < 5) return; /* Validate and allocate resources for C2 (P_LVL2). */ gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO; gas.BitWidth = 8; if (AcpiGbl_FADT.C2Latency <= 100) { gas.Address = sc->cpu_p_blk + 4; cx_ptr->res_rid = 0; acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid, &gas, &cx_ptr->p_lvlx, RF_SHAREABLE); if (cx_ptr->p_lvlx != NULL) { cx_ptr->type = ACPI_STATE_C2; cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency; cx_ptr++; sc->cpu_non_c3 = sc->cpu_cx_count; sc->cpu_cx_count++; } } if (sc->cpu_p_blk_len < 6) return; /* Validate and allocate resources for C3 (P_LVL3). */ if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) { gas.Address = sc->cpu_p_blk + 5; cx_ptr->res_rid = 1; acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid, &gas, &cx_ptr->p_lvlx, RF_SHAREABLE); if (cx_ptr->p_lvlx != NULL) { cx_ptr->type = ACPI_STATE_C3; cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; cx_ptr++; sc->cpu_cx_count++; } } } #if defined(__i386__) || defined(__amd64__) static void acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize) { cx_ptr->do_mwait = true; cx_ptr->mwait_hint = address & 0xffffffff; cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0; cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0; } #endif static void acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr) { if (cx_ptr->p_lvlx == NULL) return; bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid, cx_ptr->p_lvlx); cx_ptr->p_lvlx = NULL; } /* * Parse a _CST package and set up its Cx states. Since the _CST object * can change dynamically, our notify handler may call this function * to clean up and probe the new _CST package. 
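/*
 * The +4/+5 offsets above follow the fixed P_BLK layout from the ACPI
 * specification (the struct below is illustrative only; the driver works
 * with raw offsets):
 *
 *     struct p_blk {           // 6 bytes at the Processor object's PblkAddress
 *         uint32_t p_cnt;      // +0: P_CNT, throttling control
 *         uint8_t  p_lvl2;     // +4: P_LVL2, a read enters C2
 *         uint8_t  p_lvl3;     // +5: P_LVL3, a read enters C3
 *     };
 *
 * which is why a declared length of 5 is read as "C2 only" and 6 adds C3.
 */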
*/ static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc) { struct acpi_cx *cx_ptr; ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT *top; ACPI_OBJECT *pkg; uint32_t count; int i; #if defined(__i386__) || defined(__amd64__) uint64_t address; int vendor, class, accsize; #endif ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf); if (ACPI_FAILURE(status)) return (ENXIO); /* _CST is a package with a count and at least one Cx package. */ top = (ACPI_OBJECT *)buf.Pointer; if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { device_printf(sc->cpu_dev, "invalid _CST package\n"); AcpiOsFree(buf.Pointer); return (ENXIO); } if (count != top->Package.Count - 1) { device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n", count, top->Package.Count - 1); count = top->Package.Count - 1; } if (count > MAX_CX_STATES) { device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count); count = MAX_CX_STATES; } sc->cpu_non_c2 = 0; sc->cpu_non_c3 = 0; sc->cpu_cx_count = 0; cx_ptr = sc->cpu_cx_states; /* * C1 has been required since just after ACPI 1.0. * Reserve the first slot for it. */ cx_ptr->type = ACPI_STATE_C0; cx_ptr++; sc->cpu_cx_count++; /* Set up all valid states. */ for (i = 0; i < count; i++) { pkg = &top->Package.Elements[i + 1]; if (!ACPI_PKG_VALID(pkg, 4) || acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { device_printf(sc->cpu_dev, "skipping invalid Cx state package\n"); continue; } /* Validate the state to see if we should use it. */ switch (cx_ptr->type) { case ACPI_STATE_C1: acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr); #if defined(__i386__) || defined(__amd64__) if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address, &accsize) == 0 && (vendor == CST_FFH_VENDOR_INTEL || vendor == CST_FFH_VENDOR_AMD)) { if (class == CST_FFH_INTEL_CL_C1IO) { /* C1 I/O then Halt */ cx_ptr->res_rid = sc->cpu_cx_count; bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, cx_ptr->res_rid, address, 1); cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev, SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE | RF_SHAREABLE); if (cx_ptr->p_lvlx == NULL) { bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT, cx_ptr->res_rid); device_printf(sc->cpu_dev, "C1 I/O failed to allocate port %d, " "degrading to C1 Halt", (int)address); } } else if (class == CST_FFH_INTEL_CL_MWAIT) { if (vendor == CST_FFH_VENDOR_INTEL || (vendor == CST_FFH_VENDOR_AMD && cpu_mon_mwait_edx != 0)) acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize); } } #endif if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) { /* This is the first C1 state. Use the reserved slot. */ sc->cpu_cx_states[0] = *cx_ptr; } else { sc->cpu_non_c2 = sc->cpu_cx_count; sc->cpu_non_c3 = sc->cpu_cx_count; cx_ptr++; sc->cpu_cx_count++; } continue; case ACPI_STATE_C2: sc->cpu_non_c3 = sc->cpu_cx_count; break; case ACPI_STATE_C3: default: if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: C3[%d] not available.\n", device_get_unit(sc->cpu_dev), i)); continue; } break; } /* Free up any previous register. */ acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr); /* Allocate the control register for C2 or C3. 
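/*
 * For orientation while reading the parser above: per the ACPI spec, _CST
 * evaluates to a count followed by one four-element package per state,
 * { Register (GAS buffer), Type, Latency, Power } -- hence the
 * acpi_PkgInt32(pkg, 1..3, ...) indices.  An illustrative two-state _CST
 * would decode as, in effect:
 *
 *     { 2,
 *       { <FFixedHW register>, 1, 0,  0   },   // C1, 0 us latency
 *       { <SystemIO register>, 3, 96, 500 } }  // C3, 96 us, 500 mW
 */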
*/ #if defined(__i386__) || defined(__amd64__) if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address, &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL && class == CST_FFH_INTEL_CL_MWAIT) { /* Native C State Instruction use (mwait) */ acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: Got C%d/mwait - %d latency\n", device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat)); cx_ptr++; sc->cpu_cx_count++; } else #endif { cx_ptr->res_rid = sc->cpu_cx_count; acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE); if (cx_ptr->p_lvlx) { cx_ptr->do_mwait = false; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: Got C%d - %d latency\n", device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat)); cx_ptr++; sc->cpu_cx_count++; } } } AcpiOsFree(buf.Pointer); /* If C1 state was not found, we need one now. */ cx_ptr = sc->cpu_cx_states; if (cx_ptr->type == ACPI_STATE_C0) { cx_ptr->type = ACPI_STATE_C1; cx_ptr->trans_lat = 0; } return (0); } /* * Call this *after* all CPUs have been attached. */ static void acpi_cpu_startup(void *arg) { struct acpi_cpu_softc *sc; int i; /* * Set up any quirks that might be necessary now that we have probed * all the CPUs. */ acpi_cpu_quirks(); if (cpu_cx_generic) { /* * We are using generic Cx mode, so probe for available Cx states * for all processors. */ CPU_FOREACH(i) { if ((sc = cpu_softc[i]) != NULL) acpi_cpu_generic_cx_probe(sc); } } else { /* * We are using _CST mode, so remove the C3 state if necessary. * As we now know for sure that we will be using _CST mode, * install our notify handler. */ CPU_FOREACH(i) { if ((sc = cpu_softc[i]) == NULL) continue; if (cpu_quirks & CPU_QUIRK_NO_C3) { sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1); } AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY, acpi_cpu_notify, sc); } } /* Perform Cx final initialization. */ CPU_FOREACH(i) { if ((sc = cpu_softc[i]) != NULL) acpi_cpu_startup_cx(sc); } /* Add a sysctl handler to handle the global Cx lowest setting */ SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree), OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A", "Global lowest Cx sleep state to use"); /* Take over idling from cpu_idle_default().
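/*
 * The global handler just added, together with the per-device handlers
 * registered by acpi_cpu_startup_cx() below, surfaces as sysctls like
 * (values illustrative):
 *
 *     dev.cpu.0.cx_supported: C1/1/0 C2/2/41 C3/3/96   # name/type/latency-us
 *     dev.cpu.0.cx_lowest: C3
 *     hw.acpi.cpu.cx_lowest: C3     # global; fans out to every CPU
 *
 * matching the "C%d/%d/%d " format emitted by acpi_cpu_cx_list().
 */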
*/ cpu_cx_lowest_lim = 0; CPU_FOREACH(i) { if ((sc = cpu_softc[i]) != NULL) enable_idle(sc); } #if defined(__i386__) || defined(__amd64__) cpu_idle_hook = acpi_cpu_idle; #endif } static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc) { struct sbuf sb; int i; /* * Set up the list of Cx states */ sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported), SBUF_FIXEDLEN); for (i = 0; i < sc->cpu_cx_count; i++) sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type, sc->cpu_cx_states[i].trans_lat); sbuf_trim(&sb); sbuf_finish(&sb); } static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc) { acpi_cpu_cx_list(sc); SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO, "cx_supported", CTLFLAG_RD, sc->cpu_cx_supported, 0, "Cx/microsecond values for supported Cx states"); SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A", "lowest Cx sleep state to use"); SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)sc, 0, acpi_cpu_usage_sysctl, "A", "percent usage for each Cx state"); SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A", "Cx sleep state counters"); #if defined(__i386__) || defined(__amd64__) SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, (void *)sc, 0, acpi_cpu_method_sysctl, "A", "Cx entrance methods"); #endif /* Signal platform that we can handle _CST notification. */ if (!cpu_cx_generic && cpu_cst_cnt != 0) { ACPI_LOCK(acpi); AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8); ACPI_UNLOCK(acpi); } } #if defined(__i386__) || defined(__amd64__) /* * Idle the CPU in the lowest state possible. This function is called with * interrupts disabled. Note that once it re-enables interrupts, a task * switch can occur so do not access shared data (i.e. the softc) after * interrupts are re-enabled. */ static void acpi_cpu_idle(sbintime_t sbt) { struct acpi_cpu_softc *sc; struct acpi_cx *cx_next; uint64_t start_ticks, end_ticks; uint32_t start_time, end_time; ACPI_STATUS status; int bm_active, cx_next_idx, i, us; /* * Look up our CPU id to get our softc. If it's NULL, we'll use C1 * since there is no ACPI processor object for this CPU. This occurs * for logical CPUs in the HTT case. */ sc = cpu_softc[PCPU_GET(cpuid)]; if (sc == NULL) { acpi_cpu_c1(); return; } /* If disabled, take the safe path. */ if (is_idle_disabled(sc)) { acpi_cpu_c1(); return; } /* Find the lowest state that has small enough latency. */ us = sc->cpu_prev_sleep; if (sbt >= 0 && us > (sbt >> 12)) us = (sbt >> 12); cx_next_idx = 0; if (cpu_disable_c2_sleep) i = min(sc->cpu_cx_lowest, sc->cpu_non_c2); else if (cpu_disable_c3_sleep) i = min(sc->cpu_cx_lowest, sc->cpu_non_c3); else i = sc->cpu_cx_lowest; for (; i >= 0; i--) { if (sc->cpu_cx_states[i].trans_lat * 3 <= us) { cx_next_idx = i; break; } } /* * Check for bus master activity. If there was activity, clear * the bit and use the lowest non-C3 state. Note that the USB * driver polling for new devices keeps this bit set all the * time if USB is loaded. 
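/*
 * Notes on the arithmetic above: sbintime_t carries seconds in the upper 32
 * bits and a binary fraction in the lower 32, so (sbt >> 12) is a cheap
 * microsecond approximation (one unit = 2^12 / 2^32 s, about 0.95 us), and
 * the "trans_lat * 3 <= us" test admits a state only when its transition
 * latency is at most a third of the expected sleep interval.
 */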
*/ cx_next = &sc->cpu_cx_states[cx_next_idx]; if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 && cx_next_idx > sc->cpu_non_c3 && (!cx_next->do_mwait || cx_next->mwait_bm_avoidance)) { status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); if (ACPI_SUCCESS(status) && bm_active != 0) { AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); cx_next_idx = sc->cpu_non_c3; cx_next = &sc->cpu_cx_states[cx_next_idx]; } } /* Select the next state and update statistics. */ sc->cpu_cx_stats[cx_next_idx]++; KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep")); /* * Execute HLT (or equivalent) and wait for an interrupt. We can't * precisely calculate the time spent in C1 since the place we wake up * is an ISR. Assume we slept no more than half of a quantum, unless * we are called inside a critical section, delaying the context switch. */ if (cx_next->type == ACPI_STATE_C1) { start_ticks = cpu_ticks(); if (cx_next->p_lvlx != NULL) { /* C1 I/O then Halt */ CPU_GET_REG(cx_next->p_lvlx, 1); } if (cx_next->do_mwait) acpi_cpu_idle_mwait(cx_next->mwait_hint); else acpi_cpu_c1(); end_ticks = cpu_ticks(); /* acpi_cpu_c1() returns with interrupts enabled. */ if (cx_next->do_mwait) ACPI_ENABLE_IRQS(); end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate(); if (!cx_next->do_mwait && curthread->td_critnest == 0) end_time = min(end_time, 500000 / hz); sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4; return; } /* * For C3, disable bus master arbitration if BM control is available. * The CPU may have to wake up to handle it. Otherwise flush the CPU cache. */ if (cx_next->type == ACPI_STATE_C3) { if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); else ACPI_FLUSH_CPU_CACHE(); } /* * Read from P_LVLx to enter C2(+), checking time spent asleep. * Use the ACPI timer for measuring sleep time. Since we need to * get the time very close to the CPU start/stop clock logic, this * is the only reliable time source. */ if (cx_next->type == ACPI_STATE_C3) { AcpiGetTimer(&start_time); start_ticks = 0; } else { start_time = 0; start_ticks = cpu_ticks(); } if (cx_next->do_mwait) { acpi_cpu_idle_mwait(cx_next->mwait_hint); } else { CPU_GET_REG(cx_next->p_lvlx, 1); /* * Read the end time twice. Since it may take an arbitrary time * to enter the idle state, the first read may be executed before * the processor has stopped. Doing it again provides enough * margin that we are certain to have a correct value. */ AcpiGetTimer(&end_time); } if (cx_next->type == ACPI_STATE_C3) AcpiGetTimer(&end_time); else end_ticks = cpu_ticks(); /* Enable bus master arbitration. */ if (cx_next->type == ACPI_STATE_C3 && (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); ACPI_ENABLE_IRQS(); if (cx_next->type == ACPI_STATE_C3) AcpiGetTimerDuration(start_time, end_time, &end_time); else end_time = ((end_ticks - start_ticks) << 20) / cpu_tickrate(); sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4; } #endif /* * Re-evaluate the _CST object when we are notified that it changed. */ static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context) { struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context; if (notify != ACPI_NOTIFY_CX_STATES) return; /* * C-state data for the target CPU is going to be in flux while we execute * acpi_cpu_cx_cst, so disable entering acpi_cpu_idle. * Also, multiple ACPI taskqueues may concurrently * execute notifications for the same CPU.
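/*
 * Two details worth noting in the timing code above: cpu_prev_sleep is an
 * exponential moving average, prev' = (3 * prev + measured) / 4, so a single
 * outlier only moves the estimate a quarter of the way; and C3 residency is
 * measured with the ACPI PM timer (a fixed 3.579545 MHz chipset counter)
 * rather than cpu_ticks(), since the TSC may stop in deep sleep on older
 * parts.  AcpiGetTimerDuration() also hides the timer's 24- vs 32-bit wrap.
 */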
ACPI_SERIAL is used to * protect against that. */ ACPI_SERIAL_BEGIN(cpu); disable_idle(sc); /* Update the list of Cx states. */ acpi_cpu_cx_cst(sc); acpi_cpu_cx_list(sc); acpi_cpu_set_cx_lowest(sc); enable_idle(sc); ACPI_SERIAL_END(cpu); acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify); } static void acpi_cpu_quirks(void) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Bus mastering arbitration control is needed to keep caches coherent * while sleeping in C3. If it's not present but a working flush cache * instruction is present, flush the caches before entering C3 instead. * Otherwise, just disable C3 completely. */ if (AcpiGbl_FADT.Pm2ControlBlock == 0 || AcpiGbl_FADT.Pm2ControlLength == 0) { if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: no BM control, using flush cache method\n")); } else { cpu_quirks |= CPU_QUIRK_NO_C3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: no BM control, C3 not available\n")); } } /* * If we are using generic Cx mode, C3 on multiple CPUs requires using * the expensive flush cache instruction. */ if (cpu_cx_generic && mp_ncpus > 1) { cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: SMP, using flush cache mode for C3\n")); } /* Look for various quirks of the PIIX4 part. */ acpi_cpu_quirks_piix4(); } static void acpi_cpu_quirks_piix4(void) { #ifdef __i386__ device_t acpi_dev; uint32_t val; ACPI_STATUS status; acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); if (acpi_dev != NULL) { switch (pci_get_revid(acpi_dev)) { /* * Disable C3 support for all PIIX4 chipsets. Some of these parts * do not report the BMIDE status to the BM status register and * others have a livelock bug if Type-F DMA is enabled. Linux * works around the BMIDE bug by reading the BM status directly * but we take the simpler approach of disabling C3 for these * parts. * * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA * Livelock") from the January 2002 PIIX4 specification update. * Applies to all PIIX4 models. * * Also, make sure that all interrupts cause a "Stop Break" * event to exit from C2 state. * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak) * should be set to zero, otherwise it causes C2 to short-sleep. * PIIX4 doesn't properly support C3 and bus master activity * need not break out of C2. 
*/ case PCI_REVISION_A_STEP: case PCI_REVISION_B_STEP: case PCI_REVISION_4E: case PCI_REVISION_4M: cpu_quirks |= CPU_QUIRK_NO_C3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: working around PIIX4 bug, disabling C3\n")); val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4); if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n")); val |= PIIX4_STOP_BREAK_MASK; pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4); } status = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val); if (ACPI_SUCCESS(status) && val != 0) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: PIIX4: reset BRLD_EN_BM\n")); AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); } break; default: break; } } #endif } static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1; struct sbuf sb; char buf[128]; int error, i; uintmax_t fract, sum, whole; sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req); sum = 0; for (i = 0; i < sc->cpu_cx_count; i++) sum += sc->cpu_cx_stats[i]; for (i = 0; i < sc->cpu_cx_count; i++) { if (sum > 0) { whole = (uintmax_t)sc->cpu_cx_stats[i] * 100; fract = (whole % sum) * 100; sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum), (u_int)(fract / sum)); } else sbuf_printf(&sb, "0.00%% "); } sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep); error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } /* * XXX TODO: actually add support to count each entry/exit * from the Cx states. */ static int acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1; struct sbuf sb; char buf[128]; int error, i; sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req); for (i = 0; i < sc->cpu_cx_count; i++) { if (i > 0) sbuf_putc(&sb, ' '); sbuf_printf(&sb, "%u", sc->cpu_cx_stats[i]); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } #if defined(__i386__) || defined(__amd64__) static int acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)arg1; struct acpi_cx *cx; struct sbuf sb; char buf[128]; int error, i; sbuf_new_for_sysctl(&sb, buf, sizeof(buf), req); for (i = 0; i < sc->cpu_cx_count; i++) { cx = &sc->cpu_cx_states[i]; if (i > 0) sbuf_putc(&sb, ' '); sbuf_printf(&sb, "C%d/", i + 1); if (cx->do_mwait) { sbuf_cat(&sb, "mwait"); if (cx->mwait_hw_coord) sbuf_cat(&sb, "/hwc"); if (cx->mwait_bm_avoidance) sbuf_cat(&sb, "/bma"); } else if (cx->type == ACPI_STATE_C1) { sbuf_cat(&sb, "hlt"); } else { sbuf_cat(&sb, "io"); } if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL) sbuf_cat(&sb, "/iohlt"); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } #endif static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc) { int i; ACPI_SERIAL_ASSERT(cpu); sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1); /* If not disabling, cache the new lowest non-C3 state. */ sc->cpu_non_c3 = 0; for (i = sc->cpu_cx_lowest; i >= 0; i--) { if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) { sc->cpu_non_c3 = i; break; } } /* Reset the statistics counters. 
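/*
 * The fixed-point arithmetic above yields two decimal places with integer
 * math only: whole / sum is the integer percent and ((whole % sum) * 100) /
 * sum the fraction.  Worked example with cpu_cx_stats = {1, 2}, sum = 3:
 *
 *     state 0: whole = 100 -> 100/3 = 33, (100 % 3) * 100 / 3 = 33 -> "33.33%"
 *     state 1: whole = 200 -> 200/3 = 66, (200 % 3) * 100 / 3 = 66 -> "66.66%"
 */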
*/ bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats)); return (0); } static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cpu_softc *sc; char state[8]; int val, error; sc = (struct acpi_cpu_softc *) arg1; snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1); error = sysctl_handle_string(oidp, state, sizeof(state), req); if (error != 0 || req->newptr == NULL) return (error); if (strlen(state) < 2 || toupper(state[0]) != 'C') return (EINVAL); if (strcasecmp(state, "Cmax") == 0) val = MAX_CX_STATES; else { val = (int) strtol(state + 1, NULL, 10); if (val < 1 || val > MAX_CX_STATES) return (EINVAL); } ACPI_SERIAL_BEGIN(cpu); sc->cpu_cx_lowest_lim = val - 1; acpi_cpu_set_cx_lowest(sc); ACPI_SERIAL_END(cpu); return (0); } static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cpu_softc *sc; char state[8]; int val, error, i; snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1); error = sysctl_handle_string(oidp, state, sizeof(state), req); if (error != 0 || req->newptr == NULL) return (error); if (strlen(state) < 2 || toupper(state[0]) != 'C') return (EINVAL); if (strcasecmp(state, "Cmax") == 0) val = MAX_CX_STATES; else { val = (int) strtol(state + 1, NULL, 10); if (val < 1 || val > MAX_CX_STATES) return (EINVAL); } /* Update the new lowest useable Cx state for all CPUs. */ ACPI_SERIAL_BEGIN(cpu); cpu_cx_lowest_lim = val - 1; CPU_FOREACH(i) { if ((sc = cpu_softc[i]) == NULL) continue; sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim; acpi_cpu_set_cx_lowest(sc); } ACPI_SERIAL_END(cpu); return (0); } diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c index ff211102cc3c..a268181ff2d6 100644 --- a/sys/dev/acpica/acpi_pcib_acpi.c +++ b/sys/dev/acpica/acpi_pcib_acpi.c @@ -1,772 +1,772 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_acpi.h" #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* Hooks for the ACPI CA debugging infrastructure. 
*/ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI_ACPI") struct acpi_hpcib_softc { device_t ap_dev; ACPI_HANDLE ap_handle; bus_dma_tag_t ap_dma_tag; int ap_flags; uint32_t ap_osc_ctl; int ap_segment; /* PCI domain */ int ap_bus; /* bios-assigned bus number */ int ap_addr; /* device/func of PCI-Host bridge */ ACPI_BUFFER ap_prt; /* interrupt routing table */ struct pcib_host_resources ap_host_res; }; static int acpi_pcib_acpi_probe(device_t bus); static int acpi_pcib_acpi_attach(device_t bus); static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes); static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes); static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin); static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs); static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq); static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end); static int acpi_pcib_acpi_release_resource(device_t dev, device_t child, struct resource *r); static int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, struct resource *r); static int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, struct resource *r); static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature); static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pcib_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pcib_acpi_probe), DEVMETHOD(device_attach, acpi_pcib_acpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_pcib_acpi_release_resource), DEVMETHOD(bus_activate_resource, acpi_pcib_acpi_activate_resource), DEVMETHOD(bus_deactivate_resource, acpi_pcib_acpi_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, acpi_pcib_read_config), DEVMETHOD(pcib_write_config, acpi_pcib_write_config), DEVMETHOD(pcib_route_interrupt, acpi_pcib_acpi_route_interrupt), DEVMETHOD(pcib_alloc_msi, acpi_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, acpi_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, acpi_pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, acpi_pcib_power_for_sleep), DEVMETHOD(pcib_request_feature, 
acpi_pcib_request_feature), DEVMETHOD_END }; DEFINE_CLASS_0(pcib, acpi_pcib_acpi_driver, acpi_pcib_acpi_methods, sizeof(struct acpi_hpcib_softc)); DRIVER_MODULE(acpi_pcib, acpi, acpi_pcib_acpi_driver, 0, 0); MODULE_DEPEND(acpi_pcib, acpi, 1, 1, 1); static int acpi_pcib_acpi_probe(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; int root; if (acpi_disabled("pcib") || (h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ENXIO); root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0; AcpiOsFree(devinfo); if (!root || pci_cfgregopen() == 0) return (ENXIO); device_set_desc(dev, "ACPI Host-PCI bridge"); return (0); } static ACPI_STATUS acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context) { struct acpi_hpcib_softc *sc; UINT64 length, min, max; u_int flags; int error, type; sc = context; switch (res->Type) { case ACPI_RESOURCE_TYPE_START_DEPENDENT: case ACPI_RESOURCE_TYPE_END_DEPENDENT: panic("host bridge has dependent resources"); case ACPI_RESOURCE_TYPE_ADDRESS16: case ACPI_RESOURCE_TYPE_ADDRESS32: case ACPI_RESOURCE_TYPE_ADDRESS64: case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: if (res->Data.Address.ProducerConsumer != ACPI_PRODUCER) break; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS16: min = res->Data.Address16.Address.Minimum; max = res->Data.Address16.Address.Maximum; length = res->Data.Address16.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS32: min = res->Data.Address32.Address.Minimum; max = res->Data.Address32.Address.Maximum; length = res->Data.Address32.Address.AddressLength; break; case ACPI_RESOURCE_TYPE_ADDRESS64: min = res->Data.Address64.Address.Minimum; max = res->Data.Address64.Address.Maximum; length = res->Data.Address64.Address.AddressLength; break; default: KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64, ("should never happen")); min = res->Data.ExtAddress64.Address.Minimum; max = res->Data.ExtAddress64.Address.Maximum; length = res->Data.ExtAddress64.Address.AddressLength; break; } if (length == 0) break; if (min + length - 1 != max && (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED || res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED)) break; flags = 0; switch (res->Data.Address.ResourceType) { case ACPI_MEMORY_RANGE: type = SYS_RES_MEMORY; if (res->Type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) { if (res->Data.Address.Info.Mem.Caching == ACPI_PREFETCHABLE_MEMORY) flags |= RF_PREFETCHABLE; } else { /* * XXX: Parse prefetch flag out of * TypeSpecific.
*/ } break; case ACPI_IO_RANGE: type = SYS_RES_IOPORT; break; case ACPI_BUS_NUMBER_RANGE: type = PCI_RES_BUS; break; default: return (AE_OK); } if (min + length - 1 != max) device_printf(sc->ap_dev, "Length mismatch for %d range: %jx vs %jx\n", type, (uintmax_t)(max - min + 1), (uintmax_t)length); #ifdef __i386__ if (min > ULONG_MAX) { device_printf(sc->ap_dev, "Ignoring %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); break; } if (max > ULONG_MAX) { device_printf(sc->ap_dev, "Truncating end of %d range above 4GB (%#jx-%#jx)\n", type, (uintmax_t)min, (uintmax_t)max); max = ULONG_MAX; } #endif error = pcib_host_res_decodes(&sc->ap_host_res, type, min, max, flags); if (error) panic("Failed to manage %d range (%#jx-%#jx): %d", type, (uintmax_t)min, (uintmax_t)max, error); break; default: break; } return (AE_OK); } static bool get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp, rman_res_t *endp) { struct resource_list_entry *rle; rle = resource_list_find(&sc->ap_host_res.hr_rl, PCI_RES_BUS, 0); if (rle == NULL) return (false); *startp = rle->start; *endp = rle->end; return (true); } static int acpi_pcib_osc(struct acpi_hpcib_softc *sc, uint32_t osc_ctl) { ACPI_STATUS status; uint32_t cap_set[3]; static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = { 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40, 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66 }; /* * Don't invoke _OSC if a control is already granted. * However, always invoke _OSC during attach when 0 is passed. */ if (osc_ctl != 0 && (sc->ap_osc_ctl & osc_ctl) == osc_ctl) return (0); /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */ cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF | PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI; /* Active State Power Management, Clock Power Management Capability */ if (pci_enable_aspm) cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM | PCIM_OSC_SUPPORT_CPMC; /* Control Field */ cap_set[PCI_OSC_CTL] = sc->ap_osc_ctl | osc_ctl; status = acpi_EvaluateOSC(sc->ap_handle, pci_host_bridge_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { sc->ap_osc_ctl |= osc_ctl; return (0); } device_printf(sc->ap_dev, "_OSC failed: %s\n", AcpiFormatException(status)); return (EIO); } /* * _OSC may return an error in the status word, but will * update the control mask always. _OSC should not revoke * previously-granted controls. */ if ((cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) != sc->ap_osc_ctl) device_printf(sc->ap_dev, "_OSC revoked %#x\n", (cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) ^ sc->ap_osc_ctl); sc->ap_osc_ctl = cap_set[PCI_OSC_CTL]; if ((sc->ap_osc_ctl & osc_ctl) != osc_ctl) return (EIO); return (0); } static int acpi_pcib_acpi_attach(device_t dev) { struct acpi_hpcib_softc *sc; ACPI_STATUS status; static int bus0_seen = 0; u_int slot, func, busok; struct resource *bus_res; rman_res_t end, start; int rid; int error, domain; uint8_t busno; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->ap_dev = dev; sc->ap_handle = acpi_get_handle(dev); /* * Don't attach if we're not really there. */ if (!acpi_DeviceIsPresent(dev)) return (ENXIO); acpi_pcib_osc(sc, 0); /* * Get our segment number by evaluating _SEG. * It's OK for this to not exist. 
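/*
 * Background on the walker above: host-bridge _CRS entries flagged
 * ResourceProducer describe address windows the bridge decodes on behalf
 * of its children, while ResourceConsumer entries are the bridge's own
 * registers; only the former reach pcib_host_res_decodes().  A typical
 * bridge might report (illustrative ranges):
 *
 *     WordBusNumber (ResourceProducer, ...) 0x00-0xFF           -> PCI_RES_BUS
 *     DWordIO       (ResourceProducer, ...) 0x1000-0xFFFF       -> SYS_RES_IOPORT
 *     QWordMemory   (ResourceProducer, ...) 0x80000000-0xFE...  -> SYS_RES_MEMORY
 */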
*/ status = acpi_GetInteger(sc->ap_handle, "_SEG", &sc->ap_segment); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _SEG - %s\n", AcpiFormatException(status)); return_VALUE (ENXIO); } /* If it's not found, assume 0. */ sc->ap_segment = 0; } /* * Get the address (device and function) of the associated * PCI-Host bridge device from _ADR. Assume we don't have one if * it doesn't exist. */ status = acpi_GetInteger(sc->ap_handle, "_ADR", &sc->ap_addr); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) device_printf(dev, "could not evaluate _ADR - %s\n", AcpiFormatException(status)); sc->ap_addr = -1; } /* * Determine which address ranges this bridge decodes and setup * resource managers for those ranges. */ if (pcib_host_res_init(sc->ap_dev, &sc->ap_host_res) != 0) panic("failed to init hostb resources"); if (!acpi_disabled("hostres")) { status = AcpiWalkResources(sc->ap_handle, "_CRS", acpi_pcib_producer_handler, sc); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) device_printf(sc->ap_dev, "failed to parse resources: %s\n", AcpiFormatException(status)); } /* * Get our base bus number by evaluating _BBN. * If this doesn't work, we assume we're bus number 0. * * XXX note that it may also not exist in the case where we are * meant to use a private configuration space mechanism for this bus, * so we should dig out our resources and check to see if we have * anything like that. How do we do this? * XXX If we have the requisite information, and if we don't think the * default PCI configuration space handlers can deal with this bus, * we should attach our own handler. * XXX invoke _REG on this for the PCI config space address space? * XXX It seems many BIOS's with multiple Host-PCI bridges do not set * _BBN correctly. They set _BBN to zero for all bridges. Thus, * if _BBN is zero and PCI bus 0 already exists, we try to read our * bus number from the configuration registers at address _ADR. * We only do this for domain/segment 0 in the hopes that this is * only needed for old single-domain machines. */ status = acpi_GetInteger(sc->ap_handle, "_BBN", &sc->ap_bus); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { device_printf(dev, "could not evaluate _BBN - %s\n", AcpiFormatException(status)); return (ENXIO); } else { /* If it's not found, assume 0. */ sc->ap_bus = 0; } } /* * If this is segment 0, the bus is zero, and PCI bus 0 already * exists, read the bus number via PCI config space. */ busok = 1; if (sc->ap_segment == 0 && sc->ap_bus == 0 && bus0_seen) { busok = 0; if (sc->ap_addr != -1) { /* XXX: We assume bus 0. */ slot = ACPI_ADR_PCI_SLOT(sc->ap_addr); func = ACPI_ADR_PCI_FUNC(sc->ap_addr); if (bootverbose) device_printf(dev, "reading config registers from 0:%d:%d\n", slot, func); if (host_pcib_get_busno(pci_cfgregread, 0, slot, func, &busno) == 0) device_printf(dev, "couldn't read bus number from cfg space\n"); else { sc->ap_bus = busno; busok = 1; } } } /* * If nothing else worked, hope that ACPI at least lays out the * Host-PCI bridges in order and that as a result the next free * bus number is our bus number. */ if (busok == 0) { /* * If we have a region of bus numbers, use the first * number for our bus. 
*/ if (get_decoded_bus_range(sc, &start, &end)) sc->ap_bus = start; else { rid = 0; bus_res = pci_domain_alloc_bus(sc->ap_segment, dev, &rid, 0, PCI_BUSMAX, 1, 0); if (bus_res == NULL) { device_printf(dev, "could not allocate bus number\n"); pcib_host_res_free(dev, &sc->ap_host_res); return (ENXIO); } sc->ap_bus = rman_get_start(bus_res); pci_domain_release_bus(sc->ap_segment, dev, bus_res); } } else { /* * If there is a decoded bus range, assume the bus number is * the first value in the range. Warn if _BBN doesn't match. */ if (get_decoded_bus_range(sc, &start, &end)) { if (sc->ap_bus != start) { device_printf(dev, "WARNING: BIOS configured bus number (%d) is " "not within decoded bus number range " "(%ju - %ju).\n", sc->ap_bus, (uintmax_t)start, (uintmax_t)end); device_printf(dev, "Using range start (%ju) as bus number.\n", (uintmax_t)start); sc->ap_bus = start; } } } /* If this is bus 0 on segment 0, note that it has been seen already. */ if (sc->ap_segment == 0 && sc->ap_bus == 0) bus0_seen = 1; acpi_pcib_fetch_prt(dev, &sc->ap_prt); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag); if (error != 0) goto errout; error = bus_get_domain(dev, &domain); if (error == 0) error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain); /* Don't fail to attach if the domain can't be queried or set. */ error = 0; - bus_generic_probe(dev); + bus_identify_children(dev); if (device_add_child(dev, "pci", -1) == NULL) { bus_dma_tag_destroy(sc->ap_dma_tag); sc->ap_dma_tag = NULL; error = ENXIO; goto errout; } return (bus_generic_attach(dev)); errout: device_printf(device_get_parent(dev), "couldn't attach pci bus\n"); pcib_host_res_free(dev, &sc->ap_host_res); return (error); } /* * Support for standard PCI bridge ivars. 
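/*
 * Summarizing the fallback chain above for the bridge's bus number: use
 * _BBN when present (else assume 0); if that claims bus 0 on segment 0 but
 * bus 0 was already seen, re-read the number from config space at _ADR;
 * failing that, take the start of the decoded PCI_RES_BUS range or allocate
 * a fresh number via pci_domain_alloc_bus().  Even a successful _BBN is
 * overridden by the decoded range's start when the two disagree.
 */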
*/ static int acpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->ap_segment; return (0); case PCIB_IVAR_BUS: *result = sc->ap_bus; return (0); case ACPI_IVAR_HANDLE: *result = (uintptr_t)sc->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)sc->ap_flags; return (0); } return (ENOENT); } static int acpi_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_hpcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->ap_bus = value; return (0); case ACPI_IVAR_HANDLE: sc->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: sc->ap_flags = (int)value; return (0); } return (ENOENT); } static uint32_t acpi_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); return (pci_cfgregread(sc->ap_segment, bus, slot, func, reg, bytes)); } static void acpi_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct acpi_hpcib_softc *sc = device_get_softc(dev); pci_cfgregwrite(sc->ap_segment, bus, slot, func, reg, data, bytes); } static int acpi_pcib_acpi_route_interrupt(device_t pcib, device_t dev, int pin) { struct acpi_hpcib_softc *sc = device_get_softc(pcib); return (acpi_pcib_route_interrupt(pcib, dev, pin, &sc->ap_prt)); } static int acpi_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int acpi_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int acpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { struct acpi_hpcib_softc *sc; device_t bus, hostb; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); sc = device_get_softc(pcib); if (sc->ap_addr == -1) return (0); /* XXX: Assumes all bridges are on bus 0. */ hostb = pci_find_dbsf(sc->ap_segment, 0, ACPI_ADR_PCI_SLOT(sc->ap_addr), ACPI_ADR_PCI_FUNC(sc->ap_addr)); if (hostb != NULL) pci_ht_map_msi(hostb, *addr); return (0); } struct resource * acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct acpi_hpcib_softc *sc; struct resource *res; #if defined(__i386__) || defined(__amd64__) start = hostb_alloc_start(type, start, end, count); #endif sc = device_get_softc(dev); if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end, count, flags)); res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end, count, flags); /* * XXX: If this is a request for a specific range, assume it is * correct and pass it up to the parent. What we probably want to * do long-term is explicitly trust any firmware-configured * resources during the initial bus scan on boot and then disable * this after that. 
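/*
 * Caller-side sketch of the request_feature path implemented further below
 * (PCIB_REQUEST_FEATURE is the kobj-generated method macro; the usage is
 * assumed, not taken from this file): a driver wanting native PCIe hotplug
 * asks its parent bridge, which funnels into acpi_pcib_request_feature()
 * and thus an _OSC control request:
 *
 *     if (PCIB_REQUEST_FEATURE(pcib, dev, PCI_FEATURE_HP) != 0) {
 *         // firmware kept ownership; stay with non-native hotplug
 *     }
 */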
*/ if (res == NULL && start + count - 1 == end) res = bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); return (res); } int acpi_pcib_acpi_adjust_resource(device_t dev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_adjust_bus(sc->ap_segment, child, r, start, end)); return (pcib_host_res_adjust(&sc->ap_host_res, child, r, start, end)); } int acpi_pcib_acpi_release_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_release_bus(sc->ap_segment, child, r)); return (bus_generic_release_resource(dev, child, r)); } int acpi_pcib_acpi_activate_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_activate_bus(sc->ap_segment, child, r)); return (bus_generic_activate_resource(dev, child, r)); } int acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child, struct resource *r) { struct acpi_hpcib_softc *sc; sc = device_get_softc(dev); if (rman_get_type(r) == PCI_RES_BUS) return (pci_domain_deactivate_bus(sc->ap_segment, child, r)); return (bus_generic_deactivate_resource(dev, child, r)); } static int acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature) { uint32_t osc_ctl; struct acpi_hpcib_softc *sc; sc = device_get_softc(pcib); switch (feature) { case PCI_FEATURE_HP: osc_ctl = PCIM_OSC_CTL_PCIE_HP; break; case PCI_FEATURE_AER: osc_ctl = PCIM_OSC_CTL_PCIE_AER; break; default: return (EINVAL); } return (acpi_pcib_osc(sc, osc_ctl)); } static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child) { struct acpi_hpcib_softc *sc; sc = device_get_softc(bus); return (sc->ap_dma_tag); } diff --git a/sys/dev/atkbdc/atkbdc_isa.c b/sys/dev/atkbdc/atkbdc_isa.c index 2f7b9eceda94..f73589cdf2a2 100644 --- a/sys/dev/atkbdc/atkbdc_isa.c +++ b/sys/dev/atkbdc/atkbdc_isa.c @@ -1,321 +1,321 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include "opt_kbd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include static int atkbdc_isa_probe(device_t dev); static int atkbdc_isa_attach(device_t dev); static device_t atkbdc_isa_add_child(device_t bus, u_int order, const char *name, int unit); static struct resource *atkbdc_isa_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int atkbdc_isa_release_resource(device_t dev, device_t child, struct resource *r); static device_method_t atkbdc_isa_methods[] = { DEVMETHOD(device_probe, atkbdc_isa_probe), DEVMETHOD(device_attach, atkbdc_isa_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, atkbdc_isa_add_child), DEVMETHOD(bus_print_child, atkbdc_print_child), DEVMETHOD(bus_read_ivar, atkbdc_read_ivar), DEVMETHOD(bus_write_ivar, atkbdc_write_ivar), DEVMETHOD(bus_get_resource_list,atkbdc_get_resource_list), DEVMETHOD(bus_alloc_resource, atkbdc_isa_alloc_resource), DEVMETHOD(bus_release_resource, atkbdc_isa_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t atkbdc_isa_driver = { ATKBDC_DRIVER_NAME, atkbdc_isa_methods, sizeof(atkbdc_softc_t *), }; static struct isa_pnp_id atkbdc_ids[] = { { 0x0303d041, "Keyboard controller (i8042)" }, /* PNP0303 */ { 0x0b03d041, "Keyboard controller (i8042)" }, /* PNP030B */ { 0x2003d041, "Keyboard controller (i8042)" }, /* PNP0320 */ { 0 } }; static int atkbdc_isa_probe(device_t dev) { struct resource *port0; struct resource *port1; rman_res_t start; rman_res_t count; int error; int rid; #if defined(__i386__) || defined(__amd64__) bus_space_tag_t tag; bus_space_handle_t ioh1; volatile int i; register_t flags; #endif /* check PnP IDs */ if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO) return ENXIO; device_set_desc(dev, "Keyboard controller (i8042)"); /* * Adjust I/O port resources. * The AT keyboard controller uses two ports (a command/data port * 0x60 and a status port 0x64), which may be given to us in * one resource (0x60 through 0x64) or as two separate resources * (0x60 and 0x64). Some brain-damaged ACPI BIOS has reversed * command/data port and status port. Furthermore, /boot/device.hints * may contain just one port, 0x60. We shall adjust resource settings * so that these two ports are available as two separate resources * in correct order. 
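/*
 * Concrete values behind the port juggling described above and implemented
 * just below (IO_KBD and KBD_STATUS_PORT come from isareg.h and
 * atkbdcreg.h): IO_KBD is 0x60 and KBD_STATUS_PORT is 4, so the
 * "0x60 length 5", "0x64 length 1", and "0x60 length 1" hint variants all
 * normalize to two one-byte resources, rid 0 = 0x60 (data) and
 * rid 1 = 0x64 (status).
 */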
*/ device_quiet(dev); rid = 0; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0) return ENXIO; if (start == IO_KBD + KBD_STATUS_PORT) { start = IO_KBD; count++; } if (count > 1) /* adjust the count and/or start port */ bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1); port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port0 == NULL) return ENXIO; rid = 1; if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0) bus_set_resource(dev, SYS_RES_IOPORT, 1, start + KBD_STATUS_PORT, 1); port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); return ENXIO; } #if defined(__i386__) || defined(__amd64__) /* * Check if we really have AT keyboard controller. Poll status * register until we get "all clear" indication. If no such * indication comes, it probably means that there is no AT * keyboard controller present. Give up in such case. Check relies * on the fact that reading from non-existing in/out port returns * 0xff on i386. May or may not be true on other platforms. */ tag = rman_get_bustag(port0); ioh1 = rman_get_bushandle(port1); flags = intr_disable(); for (i = 0; i != 65535; i++) { if ((bus_space_read_1(tag, ioh1, 0) & 0x2) == 0) break; } intr_restore(flags); if (i == 65535) { bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); if (bootverbose) device_printf(dev, "AT keyboard controller not found\n"); return ENXIO; } #endif device_verbose(dev); error = atkbdc_probe_unit(device_get_unit(dev), port0, port1); bus_release_resource(dev, SYS_RES_IOPORT, 0, port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, port1); return error; } static int atkbdc_isa_attach(device_t dev) { atkbdc_softc_t *sc; int unit; int error; int rid; unit = device_get_unit(dev); sc = *(atkbdc_softc_t **)device_get_softc(dev); if (sc == NULL) { /* * We have to maintain two copies of the kbdc_softc struct, * as the low-level console needs to have access to the * keyboard controller before kbdc is probed and attached. * kbdc_soft[] contains the default entry for that purpose. * See atkbdc.c. XXX */ sc = atkbdc_get_softc(unit); if (sc == NULL) return ENOMEM; } rid = 0; sc->retry = 5000; sc->port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port0 == NULL) return ENXIO; rid = 1; sc->port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->port1 == NULL) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); return ENXIO; } /* * If the device is not created by the PnP BIOS or ACPI, then * the hint for the IRQ is on the child atkbd device, not the * keyboard controller, so this can fail. 
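/*
 * Context for the 0x2 poll above: on a real i8042, port 0x64 is the status
 * register and bit 1 (0x02) is the input-buffer-full flag, cleared once the
 * controller consumes a pending byte.  A floating ISA bus reads back 0xff,
 * so on hardware with no controller the bit never clears and the loop runs
 * out its 65535 iterations, making the probe fail cleanly.
 */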
*/ rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); error = atkbdc_attach_unit(unit, sc, sc->port0, sc->port1); if (error) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port0); bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->port1); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); return error; } *(atkbdc_softc_t **)device_get_softc(dev) = sc; - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return 0; } static device_t atkbdc_isa_add_child(device_t bus, u_int order, const char *name, int unit) { atkbdc_device_t *ivar; atkbdc_softc_t *sc; device_t child; int t; sc = *(atkbdc_softc_t **)device_get_softc(bus); ivar = malloc(sizeof(struct atkbdc_device), M_ATKBDDEV, M_NOWAIT | M_ZERO); if (!ivar) return NULL; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { free(ivar, M_ATKBDDEV); return child; } resource_list_init(&ivar->resources); ivar->rid = order; /* * If the device is not created by the PnP BIOS or ACPI, refer * to device hints for IRQ. We always populate the resource * list entry so we can use a standard bus_get_resource() * method. */ if (order == KBDC_RID_KBD) { if (sc->irq == NULL) { if (resource_int_value(name, unit, "irq", &t) != 0) t = -1; } else t = rman_get_start(sc->irq); if (t > 0) resource_list_add(&ivar->resources, SYS_RES_IRQ, ivar->rid, t, t, 1); } if (resource_disabled(name, unit)) device_disable(child); device_set_ivars(child, ivar); return child; } struct resource * atkbdc_isa_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { atkbdc_softc_t *sc; sc = *(atkbdc_softc_t **)device_get_softc(dev); if (type == SYS_RES_IRQ && *rid == KBDC_RID_KBD && sc->irq != NULL) return (sc->irq); return (bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags)); } static int atkbdc_isa_release_resource(device_t dev, device_t child, struct resource *r) { atkbdc_softc_t *sc; sc = *(atkbdc_softc_t **)device_get_softc(dev); if (r == sc->irq) return (0); return (bus_generic_rl_release_resource(dev, child, r)); } DRIVER_MODULE(atkbdc, isa, atkbdc_isa_driver, 0, 0); DRIVER_MODULE(atkbdc, acpi, atkbdc_isa_driver, 0, 0); ISA_PNP_INFO(atkbdc_ids); diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c index a7dd532cc077..2030b287a496 100644 --- a/sys/dev/cxgbe/t4_main.c +++ b/sys/dev/cxgbe/t4_main.c @@ -1,13332 +1,13328 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_kern_tls.h" #include "opt_ratelimit.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #ifdef KERN_TLS #include #endif #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #ifdef DDB #include #include #endif #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "cudbg/cudbg.h" #include "t4_clip.h" #include "t4_ioctl.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #include "t4_if.h" #include "t4_smt.h" /* T4 bus driver interface */ static int t4_probe(device_t); static int t4_attach(device_t); static int t4_detach(device_t); static int t4_child_location(device_t, device_t, struct sbuf *); static int t4_ready(device_t); static int t4_read_port_device(device_t, int, device_t *); static int t4_suspend(device_t); static int t4_resume(device_t); static int t4_reset_prepare(device_t, device_t); static int t4_reset_post(device_t, device_t); static device_method_t t4_methods[] = { DEVMETHOD(device_probe, t4_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t4_driver = { "t4nex", t4_methods, sizeof(struct adapter) }; /* T4 port (cxgbe) interface */ static int cxgbe_probe(device_t); static int cxgbe_attach(device_t); static int cxgbe_detach(device_t); device_method_t cxgbe_methods[] = { DEVMETHOD(device_probe, cxgbe_probe), DEVMETHOD(device_attach, cxgbe_attach), DEVMETHOD(device_detach, cxgbe_detach), { 0, 0 } }; static driver_t cxgbe_driver = { "cxgbe", cxgbe_methods, sizeof(struct port_info) }; /* T4 VI (vcxgbe) interface */ static int vcxgbe_probe(device_t); static int vcxgbe_attach(device_t); static int vcxgbe_detach(device_t); static device_method_t vcxgbe_methods[] = { DEVMETHOD(device_probe, vcxgbe_probe), DEVMETHOD(device_attach, vcxgbe_attach), DEVMETHOD(device_detach, vcxgbe_detach), { 0, 0 } }; static driver_t vcxgbe_driver = { "vcxgbe", vcxgbe_methods, sizeof(struct vi_info) }; static d_ioctl_t t4_ioctl; static struct cdevsw t4_cdevsw = { .d_version = D_VERSION, .d_ioctl = t4_ioctl, .d_name = "t4nex", }; /* T5 bus driver interface */ static int t5_probe(device_t); static device_method_t t5_methods[] = { DEVMETHOD(device_probe, t5_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, 
t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t5_driver = { "t5nex", t5_methods, sizeof(struct adapter) }; /* T5 port (cxl) interface */ static driver_t cxl_driver = { "cxl", cxgbe_methods, sizeof(struct port_info) }; /* T5 VI (vcxl) interface */ static driver_t vcxl_driver = { "vcxl", vcxgbe_methods, sizeof(struct vi_info) }; /* T6 bus driver interface */ static int t6_probe(device_t); static device_method_t t6_methods[] = { DEVMETHOD(device_probe, t6_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t6_driver = { "t6nex", t6_methods, sizeof(struct adapter) }; /* T6 port (cc) interface */ static driver_t cc_driver = { "cc", cxgbe_methods, sizeof(struct port_info) }; /* T6 VI (vcc) interface */ static driver_t vcc_driver = { "vcc", vcxgbe_methods, sizeof(struct vi_info) }; /* ifnet interface */ static void cxgbe_init(void *); static int cxgbe_ioctl(if_t, unsigned long, caddr_t); static int cxgbe_transmit(if_t, struct mbuf *); static void cxgbe_qflush(if_t); #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(if_t, union if_snd_tag_alloc_params *, struct m_snd_tag **); #endif MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); /* * Correct lock order when you need to acquire multiple locks is t4_list_lock, * then ADAPTER_LOCK, then t4_uld_list_lock. */ static struct sx t4_list_lock; SLIST_HEAD(, adapter) t4_list; #ifdef TCP_OFFLOAD static struct sx t4_uld_list_lock; struct uld_info *t4_uld_list[ULD_MAX + 1]; #endif /* * Tunables. See tweak_tunables() too. * * Each tunable is set to a default value here if it's known at compile-time. * Otherwise it is set to -n as an indication to tweak_tunables() that it should * provide a reasonable default (upto n) when the driver is loaded. * * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to * T5 are under hw.cxl. */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) parameters"); SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) T5+ parameters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE parameters"); /* * Number of queues for tx and rx, NIC and offload. 
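 *
 * For example, t4_ntxq below starts out as -NTXQ (-16); unless the
 * hw.cxgbe.ntxq tunable overrides it, tweak_tunables() is expected to
 * replace the negative value with a sensible positive default of at
 * most 16 when the driver is loaded.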
*/ #define NTXQ 16 int t4_ntxq = -NTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0, "Number of TX queues per port"); TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */ #define NRXQ 8 int t4_nrxq = -NRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0, "Number of RX queues per port"); TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */ #define NTXQ_VI 1 static int t4_ntxq_vi = -NTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0, "Number of TX queues per VI"); #define NRXQ_VI 1 static int t4_nrxq_vi = -NRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0, "Number of RX queues per VI"); static int t4_rsrv_noflowq = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq, 0, "Reserve TX queue 0 of each VI for non-flowid packets"); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) #define NOFLDTXQ 8 static int t4_nofldtxq = -NOFLDTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0, "Number of offload TX queues per port"); #define NOFLDRXQ 2 static int t4_nofldrxq = -NOFLDRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0, "Number of offload RX queues per port"); #define NOFLDTXQ_VI 1 static int t4_nofldtxq_vi = -NOFLDTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0, "Number of offload TX queues per VI"); #define NOFLDRXQ_VI 1 static int t4_nofldrxq_vi = -NOFLDRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0, "Number of offload RX queues per VI"); #define TMR_IDX_OFLD 1 int t4_tmr_idx_ofld = TMR_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN, &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues"); #define PKTC_IDX_OFLD (-1) int t4_pktc_idx_ofld = PKTC_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN, &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_idle = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN, &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_interval = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN, &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)"); /* 0 means chip/fw default, non-zero number is # of keepalives before abort */ static int t4_toe_keepalive_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN, &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_min = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN, &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_max = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN, &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is # of rexmt before abort */ static int t4_toe_rexmt_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN, &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort"); /* -1 means chip/fw 
default, other values are raw backoff values to use */ static int t4_toe_rexmt_backoff[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE retransmit backoff values"); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[0], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[1], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[2], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[3], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[4], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[5], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[6], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[7], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[8], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[9], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[10], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[11], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[12], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[13], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[14], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[15], 0, ""); int t4_ddp_rcvbuf_len = 256 * 1024; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_len, CTLFLAG_RWTUN, &t4_ddp_rcvbuf_len, 0, "length of each DDP RX buffer"); unsigned int t4_ddp_rcvbuf_cache = 4; SYSCTL_UINT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_cache, CTLFLAG_RWTUN, &t4_ddp_rcvbuf_cache, 0, "maximum number of free DDP RX buffers to cache per connection"); #endif #ifdef DEV_NETMAP #define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */ #define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */ static int t4_native_netmap = NN_EXTRA_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap, 0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs"); #define NNMTXQ 8 static int t4_nnmtxq = -NNMTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0, "Number of netmap TX queues"); #define NNMRXQ 8 static int t4_nnmrxq = -NNMRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0, "Number of netmap RX queues"); #define NNMTXQ_VI 2 static int t4_nnmtxq_vi = -NNMTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0, "Number of netmap TX queues per VI"); #define NNMRXQ_VI 2 static int t4_nnmrxq_vi = -NNMRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0, "Number of netmap RX queues per VI"); #endif /* * Holdoff parameters for ports. 
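 * (Roughly speaking, these select interrupt-coalescing behavior for the
 * ingress queues: the timer index picks one of the SGE holdoff timer
 * values and the packet counter index bounds how many packets may
 * accumulate before an interrupt fires; a counter index of -1 leaves
 * packet counting disabled.)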
*/ #define TMR_IDX 1 int t4_tmr_idx = TMR_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx, 0, "Holdoff timer index"); TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */ #define PKTC_IDX (-1) int t4_pktc_idx = PKTC_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx, 0, "Holdoff packet counter index"); TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */ /* * Size (# of entries) of each tx and rx queue. */ unsigned int t4_qsize_txq = TX_EQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0, "Number of descriptors in each TX queue"); unsigned int t4_qsize_rxq = RX_IQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0, "Number of descriptors in each RX queue"); /* * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). */ int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types, 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)"); /* * Configuration file. All the _CF names here are special. */ #define DEFAULT_CF "default" #define BUILTIN_CF "built-in" #define FLASH_CF "flash" #define UWIRE_CF "uwire" #define FPGA_CF "fpga" static char t4_cfg_file[32] = DEFAULT_CF; SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file, sizeof(t4_cfg_file), "Firmware configuration file"); /* * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively). * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water * mark or when signalled to do so, 0 to never emit PAUSE. * pause_autoneg = 1 means PAUSE will be negotiated if possible and the * negotiated settings will override rx_pause/tx_pause. * Otherwise rx_pause/tx_pause are applied forcibly. */ static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN, &t4_pause_settings, 0, "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); /* * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively). * -1 to run with the firmware default. Same as FEC_AUTO (bit 5) * 0 to disable FEC. */ static int t4_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0, "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); /* * Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it * issues to the firmware. If the firmware doesn't support FORCE_FEC then the * driver runs as if this is set to 0. * -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay. * 0 to never set FORCE_FEC. requested_fec = AUTO means use the hint from the * transceiver. Multiple FEC bits may not be okay but will be passed on to * the firmware anyway (may result in l1cfg errors with old firmwares). * 1 to always set FORCE_FEC. Multiple FEC bits are okay. requested_fec = AUTO * means set all FEC bits that are valid for the speed. */ static int t4_force_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0, "Controls the use of FORCE_FEC bit in L1 configuration."); /* * Link autonegotiation. * -1 to run with the firmware default. * 0 to disable. * 1 to enable. 
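 *
 * As an RDTUN tunable this is set from the loader, e.g. with a line
 * like the following in /boot/loader.conf:
 *
 *	hw.cxgbe.autoneg="0"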
*/ static int t4_autoneg = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0, "Link autonegotiation"); /* * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, * encouraged respectively). '-n' is the same as 'n' except the firmware * version used in the checks is read from the firmware bundled with the driver. */ static int t4_fw_install = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0, "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)"); /* * ASIC features that will be used. Disable the ones you don't want so that the * chip resources aren't wasted on features that will not be used. */ static int t4_nbmcaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN, &t4_nbmcaps_allowed, 0, "Default NBM capabilities"); static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN, &t4_linkcaps_allowed, 0, "Default link capabilities"); static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | FW_CAPS_CONFIG_SWITCH_EGRESS; SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN, &t4_switchcaps_allowed, 0, "Default switch capabilities"); #ifdef RATELIMIT static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD; #else static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER; #endif SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN, &t4_niccaps_allowed, 0, "Default NIC capabilities"); static int t4_toecaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN, &t4_toecaps_allowed, 0, "Default TCP offload capabilities"); static int t4_rdmacaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN, &t4_rdmacaps_allowed, 0, "Default RDMA capabilities"); static int t4_cryptocaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN, &t4_cryptocaps_allowed, 0, "Default crypto capabilities"); static int t4_iscsicaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN, &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities"); static int t4_fcoecaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN, &t4_fcoecaps_allowed, 0, "Default FCoE capabilities"); static int t5_write_combine = 0; SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine, 0, "Use WC instead of UC for BAR2"); /* From t4_sysctls: doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"} */ static int t4_doorbells_allowed = 0xf; SYSCTL_INT(_hw_cxgbe, OID_AUTO, doorbells_allowed, CTLFLAG_RDTUN, &t4_doorbells_allowed, 0, "Limit tx queues to these doorbells"); static int t4_num_vis = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0, "Number of VIs per port"); /* * PCIe Relaxed Ordering. * -1: driver should figure out a good value. * 0: disable RO. * 1: enable RO. * 2: leave RO alone. 
*/ static int pcie_relaxed_ordering = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN, &pcie_relaxed_ordering, 0, "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone"); static int t4_panic_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN, &t4_panic_on_fatal_err, 0, "panic on fatal errors"); static int t4_reset_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN, &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors"); static int t4_clock_gate_on_suspend = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN, &t4_clock_gate_on_suspend, 0, "gate the clock on suspend"); static int t4_tx_vm_wr = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0, "Use VM work requests to transmit packets."); /* * Set to non-zero to enable the attack filter. A packet that matches any of * these conditions will get dropped on ingress: * 1) IP && source address == destination address. * 2) TCP/IP && source address is not a unicast address. * 3) TCP/IP && destination address is not a unicast address. * 4) IP && source address is loopback (127.x.y.z). * 5) IP && destination address is loopback (127.x.y.z). * 6) IPv6 && source address == destination address. * 7) IPv6 && source address is not a unicast address. * 8) IPv6 && source address is loopback (::1/128). * 9) IPv6 && destination address is loopback (::1/128). * 10) IPv6 && source address is unspecified (::/128). * 11) IPv6 && destination address is unspecified (::/128). * 12) TCP/IPv6 && source address is multicast (ff00::/8). * 13) TCP/IPv6 && destination address is multicast (ff00::/8). */ static int t4_attack_filter = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN, &t4_attack_filter, 0, "Drop suspicious traffic"); static int t4_drop_ip_fragments = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN, &t4_drop_ip_fragments, 0, "Drop IP fragments"); static int t4_drop_pkts_with_l2_errors = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l2_errors, 0, "Drop all frames with Layer 2 length or checksum errors"); static int t4_drop_pkts_with_l3_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l3_errors, 0, "Drop all frames with IP version, length, or checksum errors"); static int t4_drop_pkts_with_l4_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l4_errors, 0, "Drop all frames with Layer 4 length, checksum, or other errors"); #ifdef TCP_OFFLOAD /* * TOE tunables. */ static int t4_cop_managed_offloading = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN, &t4_cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); #endif #ifdef KERN_TLS /* * This enables KERN_TLS for all adapters if set. 
*/ static int t4_kern_tls = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0, "Enable KERN_TLS mode for T6 adapters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) KERN_TLS parameters"); static int t4_tls_inline_keys = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN, &t4_tls_inline_keys, 0, "Always pass TLS keys in work requests (1) or attempt to store TLS keys " "in card memory."); static int t4_tls_combo_wrs = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs, 0, "Attempt to combine TCB field updates with TLS record work requests."); #endif /* Functions used by VIs to obtain unique MAC addresses for each VI. */ static int vi_mac_funcs[] = { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, }; struct intrs_and_queues { uint16_t intr_type; /* INTx, MSI, or MSI-X */ uint16_t num_vis; /* number of VIs for each port */ uint16_t nirq; /* Total # of vectors */ uint16_t ntxq; /* # of NIC txq's for each port */ uint16_t nrxq; /* # of NIC rxq's for each port */ uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */ uint16_t nofldrxq; /* # of TOE rxq's for each port */ uint16_t nnmtxq; /* # of netmap txq's */ uint16_t nnmrxq; /* # of netmap rxq's */ /* The vcxgbe/vcxl interfaces use these and not the ones above. */ uint16_t ntxq_vi; /* # of NIC txq's */ uint16_t nrxq_vi; /* # of NIC rxq's */ uint16_t nofldtxq_vi; /* # of TOE txq's */ uint16_t nofldrxq_vi; /* # of TOE rxq's */ uint16_t nnmtxq_vi; /* # of netmap txq's */ uint16_t nnmrxq_vi; /* # of netmap rxq's */ }; static void setup_memwin(struct adapter *); static void position_memwin(struct adapter *, int, uint32_t); static int validate_mem_range(struct adapter *, uint32_t, uint32_t); static int fwmtype_to_hwmtype(int); static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t, uint32_t *); static int fixup_devlog_params(struct adapter *); static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *); static int contact_firmware(struct adapter *); static int partition_resources(struct adapter *); static int get_params__pre_init(struct adapter *); static int set_params__pre_init(struct adapter *); static int get_params__post_init(struct adapter *); static int set_params__post_init(struct adapter *); static void t4_set_desc(struct adapter *); static bool fixed_ifmedia(struct port_info *); static void build_medialist(struct port_info *); static void init_link_config(struct port_info *); static int fixup_link_config(struct port_info *); static int apply_link_config(struct port_info *); static int cxgbe_init_synchronized(struct vi_info *); static int cxgbe_uninit_synchronized(struct vi_info *); static int adapter_full_init(struct adapter *); static void adapter_full_uninit(struct adapter *); static int vi_full_init(struct vi_info *); static void vi_full_uninit(struct vi_info *); static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *); static void quiesce_txq(struct sge_txq *); static void quiesce_wrq(struct sge_wrq *); static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *); static void quiesce_vi(struct vi_info *); static int t4_alloc_irq(struct adapter *, struct irq *, int rid, driver_intr_t *, void *, char *); static int t4_free_irq(struct adapter *, struct irq *); static void t4_init_atid_table(struct adapter *); static void t4_free_atid_table(struct adapter *); static 
void stop_atid_allocator(struct adapter *); static void restart_atid_allocator(struct adapter *); static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); static void vi_refresh_stats(struct vi_info *); static void cxgbe_refresh_stats(struct vi_info *); static void cxgbe_tick(void *); static void vi_tick(void *); static void cxgbe_sysctls(struct port_info *); static int sysctl_int_array(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS); static int sysctl_btphy(SYSCTL_HANDLER_ARGS); static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); static int sysctl_link_fec(SYSCTL_HANDLER_ARGS); static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS); static int sysctl_module_fec(SYSCTL_HANDLER_ARGS); static int sysctl_autoneg(SYSCTL_HANDLER_ARGS); static int sysctl_force_fec(SYSCTL_HANDLER_ARGS); static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); static int sysctl_temperature(SYSCTL_HANDLER_ARGS); static int sysctl_vdd(SYSCTL_HANDLER_ARGS); static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS); static int sysctl_loadavg(SYSCTL_HANDLER_ARGS); static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS); static int sysctl_devlog(SYSCTL_HANDLER_ARGS); static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tids(SYSCTL_HANDLER_ARGS); static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); static int sysctl_cpus(SYSCTL_HANDLER_ARGS); static int sysctl_reset(SYSCTL_HANDLER_ARGS); #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS); static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS); static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS); #endif static int get_sge_context(struct adapter *, struct t4_sge_context *); static int load_fw(struct adapter *, struct t4_data *); static 
int load_cfg(struct adapter *, struct t4_data *); static int load_boot(struct adapter *, struct t4_bootrom *); static int load_bootcfg(struct adapter *, struct t4_data *); static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *); static void free_offload_policy(struct t4_offload_policy *); static int set_offload_policy(struct adapter *, struct t4_offload_policy *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); static int read_i2c(struct adapter *, struct t4_i2c_data *); static int clear_stats(struct adapter *, u_int); static int hold_clip_addr(struct adapter *, struct t4_clip_addr *); static int release_clip_addr(struct adapter *, struct t4_clip_addr *); static inline int stop_adapter(struct adapter *); static inline void set_adapter_hwstatus(struct adapter *, const bool); static int stop_lld(struct adapter *); static inline int restart_adapter(struct adapter *); static int restart_lld(struct adapter *); #ifdef TCP_OFFLOAD static int deactivate_all_uld(struct adapter *); static void stop_all_uld(struct adapter *); static void restart_all_uld(struct adapter *); #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *, bool); #endif static int mod_event(module_t, int, void *); static int notify_siblings(device_t, int); static uint64_t vi_get_counter(if_t, ift_counter); static uint64_t cxgbe_get_counter(if_t, ift_counter); static void enable_vxlan_rx(struct adapter *); static void reset_adapter_task(void *, int); static void fatal_error_task(void *, int); static void dump_devlog(struct adapter *); static void dump_cim_regs(struct adapter *); static void dump_cimla(struct adapter *); struct { uint16_t device; char *desc; } t4_pciids[] = { {0xa000, "Chelsio Terminator 4 FPGA"}, {0x4400, "Chelsio T440-dbg"}, {0x4401, "Chelsio T420-CR"}, {0x4402, "Chelsio T422-CR"}, {0x4403, "Chelsio T440-CR"}, {0x4404, "Chelsio T420-BCH"}, {0x4405, "Chelsio T440-BCH"}, {0x4406, "Chelsio T440-CH"}, {0x4407, "Chelsio T420-SO"}, {0x4408, "Chelsio T420-CX"}, {0x4409, "Chelsio T420-BT"}, {0x440a, "Chelsio T404-BT"}, {0x440e, "Chelsio T440-LP-CR"}, }, t5_pciids[] = { {0xb000, "Chelsio Terminator 5 FPGA"}, {0x5400, "Chelsio T580-dbg"}, {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */ {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */ {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */ {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */ {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */ /* Custom */ {0x5483, "Custom T540-CR"}, {0x5484, "Custom T540-BT"}, }, t6_pciids[] = { {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */ {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */ {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */ {0x6405, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */ {0x6406, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */ {0x6407, "Chelsio 
T62100-LP-CR"}, /* 2 x 40/50/100G */ {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */ {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */ {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */ {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */ {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */ {0x6414, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */ {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */ /* Custom */ {0x6480, "Custom T6225-CR"}, {0x6481, "Custom T62100-CR"}, {0x6482, "Custom T6225-CR"}, {0x6483, "Custom T62100-CR"}, {0x6484, "Custom T64100-CR"}, {0x6485, "Custom T6240-SO"}, {0x6486, "Custom T6225-SO-CR"}, {0x6487, "Custom T6225-CR"}, }; #ifdef TCP_OFFLOAD /* * service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should * be exactly the same for both rxq and ofld_rxq. */ CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); #endif CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); static int t4_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xa000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t4_pciids); i++) { if (d == t4_pciids[i].device) { device_set_desc(dev, t4_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t5_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xb000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t5_pciids); i++) { if (d == t5_pciids[i].device) { device_set_desc(dev, t5_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t6_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); for (i = 0; i < nitems(t6_pciids); i++) { if (d == t6_pciids[i].device) { device_set_desc(dev, t6_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void t5_attribute_workaround(device_t dev) { device_t root_port; uint32_t v; /* * The T5 chips do not properly echo the No Snoop and Relaxed * Ordering attributes when replying to a TLP from a Root * Port. As a workaround, find the parent Root Port and * disable No Snoop and Relaxed Ordering. Note that this * affects all devices under this root port. 
*/ root_port = pci_find_pcie_root_port(dev); if (root_port == NULL) { device_printf(dev, "Unable to find parent root port\n"); return; } v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 0) device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", device_get_nameunit(root_port)); } static const struct devnames devnames[] = { { .nexus_name = "t4nex", .ifnet_name = "cxgbe", .vi_ifnet_name = "vcxgbe", .pf03_drv_name = "t4iov", .vf_nexus_name = "t4vf", .vf_ifnet_name = "cxgbev" }, { .nexus_name = "t5nex", .ifnet_name = "cxl", .vi_ifnet_name = "vcxl", .pf03_drv_name = "t5iov", .vf_nexus_name = "t5vf", .vf_ifnet_name = "cxlv" }, { .nexus_name = "t6nex", .ifnet_name = "cc", .vi_ifnet_name = "vcc", .pf03_drv_name = "t6iov", .vf_nexus_name = "t6vf", .vf_ifnet_name = "ccv" } }; void t4_init_devnames(struct adapter *sc) { int id; id = chip_id(sc); if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) sc->names = &devnames[id - CHELSIO_T4]; else { device_printf(sc->dev, "chip id %d is not supported.\n", id); sc->names = NULL; } } static int t4_ifnet_unit(struct adapter *sc, struct port_info *pi) { const char *parent, *name; long value; int line, unit; line = 0; parent = device_get_nameunit(sc->dev); name = sc->names->ifnet_name; while (resource_find_dev(&line, name, &unit, "at", parent) == 0) { if (resource_long_value(name, unit, "port", &value) == 0 && value == pi->port_id) return (unit); } return (-1); } static void t4_calibration(void *arg) { struct adapter *sc; struct clock_sync *cur, *nex; uint64_t hw; sbintime_t sbt; int next_up; sc = (struct adapter *)arg; KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration")); hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO); sbt = sbinuptime(); cur = &sc->cal_info[sc->cal_current]; next_up = (sc->cal_current + 1) % CNT_CAL_INFO; nex = &sc->cal_info[next_up]; if (__predict_false(sc->cal_count == 0)) { /* First time in, just get the values in */ cur->hw_cur = hw; cur->sbt_cur = sbt; sc->cal_count++; goto done; } if (cur->hw_cur == hw) { /* The clock is not advancing? */ sc->cal_count = 0; atomic_store_rel_int(&cur->gen, 0); goto done; } seqc_write_begin(&nex->gen); nex->hw_prev = cur->hw_cur; nex->sbt_prev = cur->sbt_cur; nex->hw_cur = hw; nex->sbt_cur = sbt; seqc_write_end(&nex->gen); sc->cal_current = next_up; done: callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration, sc, C_DIRECT_EXEC); } static void t4_calibration_start(struct adapter *sc) { /* * Here if we have not done a calibration * then do so otherwise start the appropriate * timer. 
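 *
 * t4_calibration() above publishes each sample pair under a seqc
 * generation, so a lock-free reader of cal_info would do something
 * like the following (hypothetical sketch):
 *
 *	do {
 *		gen = seqc_read(&cal->gen);
 *		... copy hw_cur/sbt_cur and hw_prev/sbt_prev ...
 *	} while (!seqc_consistent(&cal->gen, gen));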
*/ int i; for (i = 0; i < CNT_CAL_INFO; i++) { sc->cal_info[i].gen = 0; } sc->cal_current = 0; sc->cal_count = 0; sc->cal_gen = 0; t4_calibration(sc); } static int t4_attach(device_t dev) { struct adapter *sc; int rc = 0, i, j, rqidx, tqidx, nports; struct make_dev_args mda; struct intrs_and_queues iaq; struct sge *s; uint32_t *buf; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) int ofld_tqidx; #endif #ifdef TCP_OFFLOAD int ofld_rqidx; #endif #ifdef DEV_NETMAP int nm_rqidx, nm_tqidx; #endif int num_vis; sc = device_get_softc(dev); sc->dev = dev; sysctl_ctx_init(&sc->ctx); TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); if ((pci_get_device(dev) & 0xff00) == 0x5400) t5_attribute_workaround(dev); pci_enable_busmaster(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { uint32_t v; pci_set_max_read_req(dev, 4096); v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (pcie_relaxed_ordering == 0 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) { v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } else if (pcie_relaxed_ordering == 1 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) { v |= PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } } sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); sc->traceq = -1; mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", device_get_nameunit(dev)); snprintf(sc->lockname, sizeof(sc->lockname), "%s", device_get_nameunit(dev)); mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); t4_add_adapter(sc); mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); TAILQ_INIT(&sc->sfl); callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); sc->policy = NULL; rw_init(&sc->policy_lock, "connection offload policy"); callout_init(&sc->ktls_tick, 1); callout_init(&sc->cal_callout, 1); refcount_init(&sc->vxlan_refcount, 0); TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc); TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc); sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues"); sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue"); rc = t4_map_bars_0_and_4(sc); if (rc != 0) goto done; /* error message displayed already */ memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); /* Prepare the adapter for operation. */ buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); rc = -t4_prep_adapter(sc, buf); free(buf, M_CXGBE); if (rc != 0) { device_printf(dev, "failed to prepare adapter: %d.\n", rc); goto done; } /* * This is the real PF# to which we're attaching. Works from within PCI * passthrough environments too, where pci_get_function() could return a * different PF# depending on the passthrough configuration. We need to * use the real PF# in all our communication with the firmware. */ j = t4_read_reg(sc, A_PL_WHOAMI); sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); sc->mbox = sc->pf; t4_init_devnames(sc); if (sc->names == NULL) { rc = ENOTSUP; goto done; /* error message displayed already */ } /* * Do this really early, with the memory windows set up even before the * character device. 
The userland tool's register i/o and mem read * will work even in "recovery mode". */ setup_memwin(sc); if (t4_init_devlog_params(sc, 0) == 0) fixup_devlog_params(sc); make_dev_args_init(&mda); mda.mda_devsw = &t4_cdevsw; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); if (rc != 0) device_printf(dev, "failed to create nexus char device: %d.\n", rc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { device_printf(dev, "recovery mode.\n"); goto done; } #if defined(__i386__) if ((cpu_feature & CPUID_CX8) == 0) { device_printf(dev, "64 bit atomics not available.\n"); rc = ENOTSUP; goto done; } #endif /* Contact the firmware and try to become the master driver. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); rc = get_params__pre_init(sc); if (rc != 0) goto done; /* error message displayed already */ if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_map_bar_2(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_adj_doorbells(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_create_dma_tag(sc); if (rc != 0) goto done; /* error message displayed already */ /* * First pass over all the ports - allocate VIs and initialize some * basic parameters like mac address, port type, etc. */ for_each_port(sc, i) { struct port_info *pi; pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); sc->port[i] = pi; /* These must be set before t4_port_init */ pi->adapter = sc; pi->port_id = i; /* * XXX: vi[0] is special so we can't delay this allocation until * pi->nvi's final value is known. */ pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, M_ZERO | M_WAITOK); /* * Allocate the "main" VI and initialize parameters * like mac addr. */ rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { device_printf(dev, "unable to initialize port %d: %d\n", i, rc); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); sc->port[i] = NULL; goto done; } if (is_bt(pi->port_type)) setbit(&sc->bt_map, pi->tx_chan); else MPASS(!isset(&sc->bt_map, pi->tx_chan)); snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", device_get_nameunit(dev), i); mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); sc->chan_map[pi->tx_chan] = i; /* * The MPS counter for FCS errors doesn't work correctly on the * T6 so we use the MAC counter here. Which MAC is in use * depends on the link settings which will be known when the * link comes up. */ if (is_t6(sc)) pi->fcs_reg = -1; else { pi->fcs_reg = t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L); } pi->fcs_base = 0; /* All VIs on this port share this media. 
*/ ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, cxgbe_media_status); PORT_LOCK(pi); init_link_config(pi); fixup_link_config(pi); build_medialist(pi); if (fixed_ifmedia(pi)) pi->flags |= FIXED_IFMEDIA; PORT_UNLOCK(pi); pi->dev = device_add_child(dev, sc->names->ifnet_name, t4_ifnet_unit(sc, pi)); if (pi->dev == NULL) { device_printf(dev, "failed to add device for port %d.\n", i); rc = ENXIO; goto done; } pi->vi[0].dev = pi->dev; device_set_softc(pi->dev, pi); } /* * Interrupt type, # of interrupts, # of rx/tx queues, etc. */ nports = sc->params.nports; rc = cfg_itype_and_nqueues(sc, &iaq); if (rc != 0) goto done; /* error message displayed already */ num_vis = iaq.num_vis; sc->intr_type = iaq.intr_type; sc->intr_count = iaq.nirq; s = &sc->sge; s->nrxq = nports * iaq.nrxq; s->ntxq = nports * iaq.ntxq; if (num_vis > 1) { s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; } s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ s->neq += nports; /* ctrl queues: 1 per port */ s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { s->nofldtxq = nports * iaq.nofldtxq; if (num_vis > 1) s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; s->neq += s->nofldtxq; s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { s->nofldrxq = nports * iaq.nofldrxq; if (num_vis > 1) s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; s->neq += s->nofldrxq; /* free list */ s->niq += s->nofldrxq; s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef DEV_NETMAP s->nnmrxq = 0; s->nnmtxq = 0; if (t4_native_netmap & NN_MAIN_VI) { s->nnmrxq += nports * iaq.nnmrxq; s->nnmtxq += nports * iaq.nnmtxq; } if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) { s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi; s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi; } s->neq += s->nnmtxq + s->nnmrxq; s->niq += s->nnmrxq; s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), M_CXGBE, M_ZERO | M_WAITOK); #endif MPASS(s->niq <= s->iqmap_sz); MPASS(s->neq <= s->eqmap_sz); s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE, M_ZERO | M_WAITOK); s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, M_ZERO | M_WAITOK); s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE, M_ZERO | M_WAITOK); s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE, M_ZERO | M_WAITOK); sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, M_ZERO | M_WAITOK); t4_init_l2t(sc, M_WAITOK); t4_init_smt(sc, M_WAITOK); t4_init_tx_sched(sc); t4_init_atid_table(sc); #ifdef RATELIMIT t4_init_etid_table(sc); #endif #ifdef INET6 t4_init_clip_table(sc); #endif if (sc->vres.key.size != 0) sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); /* * Second pass over the ports. This time we know the number of rx and * tx queues that each port should get. 
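 *
 * For example (hypothetical numbers), with two ports, num_vis = 2,
 * nrxq = 8, and nrxq_vi = 1, the loop below carves the global rxq
 * array up as:
 *
 *	port 0, vi 0: rxq 0-7	port 0, vi 1: rxq 8
 *	port 1, vi 0: rxq 9-16	port 1, vi 1: rxq 17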
*/ rqidx = tqidx = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) ofld_tqidx = 0; #endif #ifdef TCP_OFFLOAD ofld_rqidx = 0; #endif #ifdef DEV_NETMAP nm_rqidx = nm_tqidx = 0; #endif for_each_port(sc, i) { struct port_info *pi = sc->port[i]; struct vi_info *vi; if (pi == NULL) continue; pi->nvi = num_vis; for_each_vi(pi, j, vi) { vi->pi = pi; vi->adapter = sc; vi->first_intr = -1; vi->qsize_rxq = t4_qsize_rxq; vi->qsize_txq = t4_qsize_txq; vi->first_rxq = rqidx; vi->first_txq = tqidx; vi->tmr_idx = t4_tmr_idx; vi->pktc_idx = t4_pktc_idx; vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; rqidx += vi->nrxq; tqidx += vi->ntxq; if (j == 0 && vi->ntxq > 1) vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0; else vi->rsrv_noflowq = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->first_ofld_txq = ofld_tqidx; vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; ofld_tqidx += vi->nofldtxq; #endif #ifdef TCP_OFFLOAD vi->ofld_tmr_idx = t4_tmr_idx_ofld; vi->ofld_pktc_idx = t4_pktc_idx_ofld; vi->first_ofld_rxq = ofld_rqidx; vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; ofld_rqidx += vi->nofldrxq; #endif #ifdef DEV_NETMAP vi->first_nm_rxq = nm_rqidx; vi->first_nm_txq = nm_tqidx; if (j == 0) { vi->nnmrxq = iaq.nnmrxq; vi->nnmtxq = iaq.nnmtxq; } else { vi->nnmrxq = iaq.nnmrxq_vi; vi->nnmtxq = iaq.nnmtxq_vi; } nm_rqidx += vi->nnmrxq; nm_tqidx += vi->nnmtxq; #endif } } rc = t4_setup_intr_handlers(sc); if (rc != 0) { device_printf(dev, "failed to setup interrupt handlers: %d\n", rc); goto done; } - rc = bus_generic_probe(dev); - if (rc != 0) { - device_printf(dev, "failed to probe child drivers: %d\n", rc); - goto done; - } + bus_identify_children(dev); /* * Ensure thread-safe mailbox access (in debug builds). * * So far this was the only thread accessing the mailbox but various * ifnets and sysctls are about to be created and their handlers/ioctls * will access the mailbox from different threads. */ sc->flags |= CHK_MBOX_ACCESS; rc = bus_generic_attach(dev); if (rc != 0) { device_printf(dev, "failed to attach all child ports: %d\n", rc); goto done; } t4_calibration_start(sc); device_printf(dev, "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", sc->params.pci.speed, sc->params.pci.width, sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); t4_set_desc(sc); notify_siblings(dev, 0); done: if (rc != 0 && sc->cdev) { /* cdev was created and so cxgbetool works; recover that way. 
*/ device_printf(dev, "error during attach, adapter is now in recovery mode.\n"); rc = 0; } if (rc != 0) t4_detach_common(dev); else t4_sysctls(sc); return (rc); } static int t4_child_location(device_t bus, device_t dev, struct sbuf *sb) { struct adapter *sc; struct port_info *pi; int i; sc = device_get_softc(bus); for_each_port(sc, i) { pi = sc->port[i]; if (pi != NULL && pi->dev == dev) { sbuf_printf(sb, "port=%d", pi->port_id); break; } } return (0); } static int t4_ready(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); if (sc->flags & FW_OK) return (0); return (ENXIO); } static int t4_read_port_device(device_t dev, int port, device_t *child) { struct adapter *sc; struct port_info *pi; sc = device_get_softc(dev); if (port < 0 || port >= MAX_NPORTS) return (EINVAL); pi = sc->port[port]; if (pi == NULL || pi->dev == NULL) return (ENXIO); *child = pi->dev; return (0); } static int notify_siblings(device_t dev, int detaching) { device_t sibling; int error, i; error = 0; for (i = 0; i < PCI_FUNCMAX; i++) { if (i == pci_get_function(dev)) continue; sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), i); if (sibling == NULL || !device_is_attached(sibling)) continue; if (detaching) error = T4_DETACH_CHILD(sibling); else (void)T4_ATTACH_CHILD(sibling); if (error) break; } return (error); } /* * Idempotent */ static int t4_detach(device_t dev) { int rc; rc = notify_siblings(dev, 1); if (rc) { device_printf(dev, "failed to detach sibling devices: %d\n", rc); return (rc); } return (t4_detach_common(dev)); } int t4_detach_common(device_t dev) { struct adapter *sc; struct port_info *pi; int i, rc; sc = device_get_softc(dev); #ifdef TCP_OFFLOAD rc = deactivate_all_uld(sc); if (rc) { device_printf(dev, "failed to detach upper layer drivers: %d\n", rc); return (rc); } #endif if (sc->cdev) { destroy_dev(sc->cdev); sc->cdev = NULL; } sx_xlock(&t4_list_lock); SLIST_REMOVE(&t4_list, sc, adapter, link); sx_xunlock(&t4_list_lock); sc->flags &= ~CHK_MBOX_ACCESS; if (sc->flags & FULL_INIT_DONE) { if (!(sc->flags & IS_VF)) t4_intr_disable(sc); } if (device_is_attached(dev)) { rc = bus_generic_detach(dev); if (rc) { device_printf(dev, "failed to detach child devices: %d\n", rc); return (rc); } } for (i = 0; i < sc->intr_count; i++) t4_free_irq(sc, &sc->irq[i]); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_free_tx_sched(sc); for (i = 0; i < MAX_NPORTS; i++) { pi = sc->port[i]; if (pi) { t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); if (pi->dev) device_delete_child(dev, pi->dev); mtx_destroy(&pi->pi_lock); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); } } callout_stop(&sc->cal_callout); callout_drain(&sc->cal_callout); device_delete_children(dev); sysctl_ctx_free(&sc->ctx); adapter_full_uninit(sc); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_fw_bye(sc, sc->mbox); if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) pci_release_msi(dev); if (sc->regs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); if (sc->udbs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->msix_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, sc->msix_res); if (sc->l2t) t4_free_l2t(sc); if (sc->smt) t4_free_smt(sc->smt); t4_free_atid_table(sc); #ifdef RATELIMIT t4_free_etid_table(sc); #endif if (sc->key_map) vmem_destroy(sc->key_map); #ifdef INET6 t4_destroy_clip_table(sc); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) free(sc->sge.ofld_txq, M_CXGBE); #endif #ifdef TCP_OFFLOAD free(sc->sge.ofld_rxq, 
M_CXGBE); #endif #ifdef DEV_NETMAP free(sc->sge.nm_rxq, M_CXGBE); free(sc->sge.nm_txq, M_CXGBE); #endif free(sc->irq, M_CXGBE); free(sc->sge.rxq, M_CXGBE); free(sc->sge.txq, M_CXGBE); free(sc->sge.ctrlq, M_CXGBE); free(sc->sge.iqmap, M_CXGBE); free(sc->sge.eqmap, M_CXGBE); free(sc->tids.ftid_tab, M_CXGBE); free(sc->tids.hpftid_tab, M_CXGBE); free_hftid_hash(&sc->tids); free(sc->tids.tid_tab, M_CXGBE); t4_destroy_dma_tag(sc); callout_drain(&sc->ktls_tick); callout_drain(&sc->sfl_callout); if (mtx_initialized(&sc->tids.ftid_lock)) { mtx_destroy(&sc->tids.ftid_lock); cv_destroy(&sc->tids.ftid_cv); } if (mtx_initialized(&sc->tids.atid_lock)) mtx_destroy(&sc->tids.atid_lock); if (mtx_initialized(&sc->ifp_lock)) mtx_destroy(&sc->ifp_lock); if (rw_initialized(&sc->policy_lock)) { rw_destroy(&sc->policy_lock); #ifdef TCP_OFFLOAD if (sc->policy != NULL) free_offload_policy(sc->policy); #endif } for (i = 0; i < NUM_MEMWIN; i++) { struct memwin *mw = &sc->memwin[i]; if (rw_initialized(&mw->mw_lock)) rw_destroy(&mw->mw_lock); } mtx_destroy(&sc->sfl_lock); mtx_destroy(&sc->reg_lock); mtx_destroy(&sc->sc_lock); bzero(sc, sizeof(*sc)); return (0); } static inline int stop_adapter(struct adapter *sc) { struct port_info *pi; int i; if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", __func__, curthread, sc->flags, sc->error_flags); return (EALREADY); } CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, sc->flags, sc->error_flags); t4_shutdown_adapter(sc); for_each_port(sc, i) { pi = sc->port[i]; PORT_LOCK(pi); if (pi->up_vis > 0 && pi->link_cfg.link_ok) { /* * t4_shutdown_adapter has already shut down all the * PHYs but it also disables interrupts and DMA so there * won't be a link interrupt. Update the state manually * if the link was up previously and inform the kernel. */ pi->link_cfg.link_ok = false; t4_os_link_changed(pi); } PORT_UNLOCK(pi); } return (0); } static inline int restart_adapter(struct adapter *sc) { uint32_t val; if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) { CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n", __func__, curthread, sc->flags, sc->error_flags); return (EALREADY); } CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread, sc->flags, sc->error_flags); MPASS(hw_off_limits(sc)); MPASS((sc->flags & FW_OK) == 0); MPASS((sc->flags & MASTER_PF) == 0); MPASS(sc->reset_thread == NULL); /* * The adapter is supposed to be back on PCIE with its config space and * BARs restored to their state before reset. Register access via * t4_read_reg BAR0 should just work. */ sc->reset_thread = curthread; val = t4_read_reg(sc, A_PL_WHOAMI); if (val == 0xffffffff || val == 0xeeeeeeee) { CH_ERR(sc, "%s: device registers not readable.\n", __func__); sc->reset_thread = NULL; atomic_set_int(&sc->error_flags, ADAP_STOPPED); return (ENXIO); } atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR); atomic_add_int(&sc->incarnation, 1); atomic_add_int(&sc->num_resets, 1); return (0); } static inline void set_adapter_hwstatus(struct adapter *sc, const bool usable) { if (usable) { /* Must be marked reusable by the designated thread. */ ASSERT_SYNCHRONIZED_OP(sc); MPASS(sc->reset_thread == curthread); mtx_lock(&sc->reg_lock); atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS); mtx_unlock(&sc->reg_lock); } else { /* Mark the adapter totally off limits. 
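 * Everything that touches the hardware is expected to test this state
 * first. A minimal sketch of the guard consumers rely on, assuming
 * hw_off_limits() boils down to a flag test (illustrative, not the
 * actual inline):
 *
 *	static inline bool
 *	hw_is_usable(struct adapter *sc)
 *	{
 *		return ((atomic_load_int(&sc->error_flags) &
 *		    HW_OFF_LIMITS) == 0);
 *	}
 *
 * Taking reg_lock around the flag update below is what lets register
 * accessors that run under the same lock observe a stable value.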
*/ begin_synchronized_op(sc, NULL, SLEEP_OK, "t4hwsts"); mtx_lock(&sc->reg_lock); atomic_set_int(&sc->error_flags, HW_OFF_LIMITS); mtx_unlock(&sc->reg_lock); sc->flags &= ~(FW_OK | MASTER_PF); sc->reset_thread = NULL; end_synchronized_op(sc, 0); } } static int stop_lld(struct adapter *sc) { struct port_info *pi; struct vi_info *vi; if_t ifp; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif int rc, i, j, k; /* * XXX: Can there be a synch_op in progress that will hang because * hardware has been stopped? We'll hang too and the solution will be * to use a version of begin_synch_op that wakes up existing synch_op * with errors. Maybe stop_adapter should do this wakeup? * * I don't think any synch_op could get stranded waiting for DMA or * interrupt so I think we're okay here. Remove this comment block * after testing. */ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4slld"); if (rc != 0) return (ENXIO); /* Quiesce all activity. */ for_each_port(sc, i) { pi = sc->port[i]; pi->vxlan_tcam_entry = false; for_each_vi(pi, j, vi) { vi->xact_addr_filt = -1; mtx_lock(&vi->tick_mtx); vi->flags |= VI_SKIP_STATS; mtx_unlock(&vi->tick_mtx); if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { mtx_lock(&vi->tick_mtx); callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); callout_drain(&vi->tick); } /* * Note that the HW is not available. */ for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED); TXQ_UNLOCK(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, k, ofld_txq) { TXQ_LOCK(&ofld_txq->wrq); ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED; TXQ_UNLOCK(&ofld_txq->wrq); } #endif for_each_rxq(vi, k, rxq) { rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #if defined(TCP_OFFLOAD) for_each_ofld_rxq(vi, k, ofld_rxq) { ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #endif quiesce_vi(vi); } if (sc->flags & FULL_INIT_DONE) { /* Control queue */ wrq = &sc->sge.ctrlq[i]; TXQ_LOCK(wrq); wrq->eq.flags &= ~EQ_HW_ALLOCATED; TXQ_UNLOCK(wrq); quiesce_wrq(wrq); } if (pi->flags & HAS_TRACEQ) { pi->flags &= ~HAS_TRACEQ; sc->traceq = -1; sc->tracer_valid = 0; sc->tracer_enabled = 0; } } if (sc->flags & FULL_INIT_DONE) { /* Firmware event queue */ sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED; quiesce_iq_fl(sc, &sc->sge.fwq, NULL); } /* Stop calibration */ callout_stop(&sc->cal_callout); callout_drain(&sc->cal_callout); if (t4_clock_gate_on_suspend) { t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN | F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN | F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0); } end_synchronized_op(sc, 0); stop_atid_allocator(sc); t4_stop_l2t(sc); return (rc); } int suspend_adapter(struct adapter *sc) { stop_adapter(sc); stop_lld(sc); #ifdef TCP_OFFLOAD stop_all_uld(sc); #endif set_adapter_hwstatus(sc, false); return (0); } static int t4_suspend(device_t dev) { struct adapter *sc = device_get_softc(dev); int rc; CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); rc = suspend_adapter(sc); CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread); return (rc); } struct adapter_pre_reset_state { u_int flags; uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; u_int cfcsum; char cfg_file[32]; struct adapter_params params; 
struct t4_virt_res vres; struct tid_info tids; struct sge sge; int rawf_base; int nrawf; }; static void save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { ASSERT_SYNCHRONIZED_OP(sc); o->flags = sc->flags; o->nbmcaps = sc->nbmcaps; o->linkcaps = sc->linkcaps; o->switchcaps = sc->switchcaps; o->niccaps = sc->niccaps; o->toecaps = sc->toecaps; o->rdmacaps = sc->rdmacaps; o->cryptocaps = sc->cryptocaps; o->iscsicaps = sc->iscsicaps; o->fcoecaps = sc->fcoecaps; o->cfcsum = sc->cfcsum; MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file)); memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file)); o->params = sc->params; o->vres = sc->vres; o->tids = sc->tids; o->sge = sc->sge; o->rawf_base = sc->rawf_base; o->nrawf = sc->nrawf; } static int compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { int rc = 0; ASSERT_SYNCHRONIZED_OP(sc); /* Capabilities */ #define COMPARE_CAPS(c) do { \ if (o->c##caps != sc->c##caps) { \ CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \ sc->c##caps); \ rc = EINVAL; \ } \ } while (0) COMPARE_CAPS(nbm); COMPARE_CAPS(link); COMPARE_CAPS(switch); COMPARE_CAPS(nic); COMPARE_CAPS(toe); COMPARE_CAPS(rdma); COMPARE_CAPS(crypto); COMPARE_CAPS(iscsi); COMPARE_CAPS(fcoe); #undef COMPARE_CAPS /* Firmware config file */ if (o->cfcsum != sc->cfcsum) { CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file, o->cfcsum, sc->cfg_file, sc->cfcsum); rc = EINVAL; } #define COMPARE_PARAM(p, name) do { \ if (o->p != sc->p) { \ CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \ rc = EINVAL; \ } \ } while (0) COMPARE_PARAM(sge.iq_start, iq_start); COMPARE_PARAM(sge.eq_start, eq_start); COMPARE_PARAM(tids.ftid_base, ftid_base); COMPARE_PARAM(tids.ftid_end, ftid_end); COMPARE_PARAM(tids.nftids, nftids); COMPARE_PARAM(vres.l2t.start, l2t_start); COMPARE_PARAM(vres.l2t.size, l2t_size); COMPARE_PARAM(sge.iqmap_sz, iqmap_sz); COMPARE_PARAM(sge.eqmap_sz, eqmap_sz); COMPARE_PARAM(tids.tid_base, tid_base); COMPARE_PARAM(tids.hpftid_base, hpftid_base); COMPARE_PARAM(tids.hpftid_end, hpftid_end); COMPARE_PARAM(tids.nhpftids, nhpftids); COMPARE_PARAM(rawf_base, rawf_base); COMPARE_PARAM(nrawf, nrawf); COMPARE_PARAM(params.mps_bg_map, mps_bg_map); COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support); COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl); COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support); COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr); COMPARE_PARAM(tids.ntids, ntids); COMPARE_PARAM(tids.etid_base, etid_base); COMPARE_PARAM(tids.etid_end, etid_end); COMPARE_PARAM(tids.netids, netids); COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred); COMPARE_PARAM(params.ethoffload, ethoffload); COMPARE_PARAM(tids.natids, natids); COMPARE_PARAM(tids.stid_base, stid_base); COMPARE_PARAM(vres.ddp.start, ddp_start); COMPARE_PARAM(vres.ddp.size, ddp_size); COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred); COMPARE_PARAM(vres.stag.start, stag_start); COMPARE_PARAM(vres.stag.size, stag_size); COMPARE_PARAM(vres.rq.start, rq_start); COMPARE_PARAM(vres.rq.size, rq_size); COMPARE_PARAM(vres.pbl.start, pbl_start); COMPARE_PARAM(vres.pbl.size, pbl_size); COMPARE_PARAM(vres.qp.start, qp_start); COMPARE_PARAM(vres.qp.size, qp_size); COMPARE_PARAM(vres.cq.start, cq_start); COMPARE_PARAM(vres.cq.size, cq_size); COMPARE_PARAM(vres.ocq.start, ocq_start); COMPARE_PARAM(vres.ocq.size, ocq_size); COMPARE_PARAM(vres.srq.start, srq_start); COMPARE_PARAM(vres.srq.size, srq_size); 
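/*
 * Each COMPARE_PARAM above and below is plain macro expansion; e.g.
 * COMPARE_PARAM(tids.ntids, ntids) becomes roughly:
 *
 *	if (o->tids.ntids != sc->tids.ntids) {
 *		CH_ERR(sc, "ntids %d -> %d\n", o->tids.ntids, sc->tids.ntids);
 *		rc = EINVAL;
 *	}
 *
 * Every mismatch is logged but the scan still runs to completion, so one
 * pass reports all of the parameters that changed across the reset.
 */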
COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp); COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter); COMPARE_PARAM(vres.iscsi.start, iscsi_start); COMPARE_PARAM(vres.iscsi.size, iscsi_size); COMPARE_PARAM(vres.key.start, key_start); COMPARE_PARAM(vres.key.size, key_size); #undef COMPARE_PARAM return (rc); } static int restart_lld(struct adapter *sc) { struct adapter_pre_reset_state *old_state = NULL; struct port_info *pi; struct vi_info *vi; if_t ifp; struct sge_txq *txq; int rc, i, j, k; rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rlld"); if (rc != 0) return (ENXIO); /* Restore memory window. */ setup_memwin(sc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { CH_ALERT(sc, "%s: recovery mode during restart.\n", __func__); rc = 0; set_adapter_hwstatus(sc, true); goto done; } old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK); save_caps_and_params(sc, old_state); /* Reestablish contact with firmware and become the primary PF. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = compare_caps_and_params(sc, old_state); if (rc != 0) goto done; /* error message displayed already */ for_each_port(sc, i) { pi = sc->port[i]; MPASS(pi != NULL); MPASS(pi->vi != NULL); MPASS(pi->vi[0].dev == pi->dev); rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { CH_ERR(sc, "failed to re-initialize port %d: %d\n", i, rc); goto done; } MPASS(sc->chan_map[pi->tx_chan] == i); PORT_LOCK(pi); fixup_link_config(pi); build_medialist(pi); PORT_UNLOCK(pi); for_each_vi(pi, j, vi) { if (IS_MAIN_VI(vi)) continue; rc = alloc_extra_vi(sc, pi, vi); if (rc != 0) { CH_ERR(vi, "failed to re-allocate extra VI: %d\n", rc); goto done; } } } /* * Interrupts and queues are about to be enabled and other threads will * want to access the hardware too. It is safe to do so. Note that * this thread is still in the middle of a synchronized_op. */ set_adapter_hwstatus(sc, true); if (sc->flags & FULL_INIT_DONE) { rc = adapter_full_init(sc); if (rc != 0) { CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc); goto done; } if (sc->vxlan_refcount > 0) enable_vxlan_rx(sc); for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { mtx_lock(&vi->tick_mtx); vi->flags &= ~VI_SKIP_STATS; mtx_unlock(&vi->tick_mtx); if (!(vi->flags & VI_INIT_DONE)) continue; rc = vi_full_init(vi); if (rc != 0) { CH_ERR(vi, "failed to re-initialize " "interface: %d\n", rc); goto done; } if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } ifp = vi->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) continue; /* * Note that we do not setup multicast addresses * in the first pass. This ensures that the * unicast DMACs for all VIs on all ports get an * MPS TCAM entry. 
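 * Sketched end to end, the two passes look like this (both calls appear
 * verbatim in this function):
 *
 *	for each port, for each vi:
 *		update_mac_settings(ifp, XGMAC_ALL & ~XGMAC_MCADDRS);
 *	...
 *	for each port, for each vi:
 *		update_mac_settings(ifp, XGMAC_MCADDRS);
 *
 * so a VI with many multicast groups cannot crowd another VI's unicast
 * DMAC out of the TCAM while everything is being reprogrammed.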
*/ rc = update_mac_settings(ifp, XGMAC_ALL & ~XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MAC: %d\n", rc); goto done; } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { CH_ERR(vi, "failed to re-enable VI: %d\n", rc); goto done; } for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); callout_schedule(&vi->tick, hz); mtx_unlock(&vi->tick_mtx); } PORT_LOCK(pi); if (pi->up_vis > 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); } PORT_UNLOCK(pi); } /* Now reprogram the L2 multicast addresses. */ for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) continue; rc = update_mac_settings(ifp, XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc); rc = 0; /* carry on */ } } } } /* Reset all calibration */ t4_calibration_start(sc); done: end_synchronized_op(sc, 0); free(old_state, M_CXGBE); restart_atid_allocator(sc); t4_restart_l2t(sc); return (rc); } int resume_adapter(struct adapter *sc) { restart_adapter(sc); restart_lld(sc); #ifdef TCP_OFFLOAD restart_all_uld(sc); #endif return (0); } static int t4_resume(device_t dev) { struct adapter *sc = device_get_softc(dev); int rc; CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); rc = resume_adapter(sc); CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread); return (rc); } static int t4_reset_prepare(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); return (0); } static int t4_reset_post(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread); return (0); } static int reset_adapter_with_pci_bus_reset(struct adapter *sc) { int rc; mtx_lock(&Giant); rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0); mtx_unlock(&Giant); return (rc); } static int reset_adapter_with_pl_rst(struct adapter *sc) { suspend_adapter(sc); /* This is a t4_write_reg without the hw_off_limits check. 
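 * The adapter was suspended just above, so hw_off_limits(sc) is true and
 * the checked accessor would reject the access; the raw bus_space write
 * that follows is the deliberate escape hatch. Conceptually the
 * relationship is (an illustration, not the real implementation):
 *
 *	void
 *	t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
 *	{
 *		KASSERT(!hw_off_limits(sc), ("%s: HW off limits", __func__));
 *		bus_space_write_4(sc->bt, sc->bh, reg, val);
 *	}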
*/ MPASS(sc->error_flags & HW_OFF_LIMITS); bus_space_write_4(sc->bt, sc->bh, A_PL_RST, F_PIORSTMODE | F_PIORST | F_AUTOPCIEPAUSE); pause("pl_rst", 1 * hz); /* Wait 1s for reset */ resume_adapter(sc); return (0); } static inline int reset_adapter(struct adapter *sc) { if (vm_guest == 0) return (reset_adapter_with_pci_bus_reset(sc)); else return (reset_adapter_with_pl_rst(sc)); } static void reset_adapter_task(void *arg, int pending) { struct adapter *sc = arg; const int flags = sc->flags; const int eflags = sc->error_flags; int rc; if (pending > 1) CH_ALERT(sc, "%s: pending %d\n", __func__, pending); rc = reset_adapter(sc); if (rc != 0) { CH_ERR(sc, "adapter did not reset properly, rc = %d, " "flags 0x%08x -> 0x%08x, err_flags 0x%08x -> 0x%08x.\n", rc, flags, sc->flags, eflags, sc->error_flags); } } static int cxgbe_probe(device_t dev) { struct port_info *pi = device_get_softc(dev); device_set_descf(dev, "port %d", pi->port_id); return (BUS_PROBE_DEFAULT); } #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \ IFCAP_HWRXTSTMP | IFCAP_MEXTPG) #define T4_CAP_ENABLE (T4_CAP) static void cxgbe_vi_attach(device_t dev, struct vi_info *vi) { if_t ifp; struct sbuf *sb; struct sysctl_ctx_list *ctx = &vi->ctx; struct sysctl_oid_list *children; struct pfil_head_args pa; struct adapter *sc = vi->adapter; sysctl_ctx_init(ctx); children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev)); vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues"); vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues"); #ifdef DEV_NETMAP vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues"); vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues"); #endif #ifdef TCP_OFFLOAD vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues"); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues"); #endif vi->xact_addr_filt = -1; mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF); callout_init_mtx(&vi->tick, &vi->tick_mtx, 0); if (sc->flags & IS_VF || t4_tx_vm_wr != 0) vi->flags |= TX_USES_VM_WR; /* Allocate an ifnet and set it up */ ifp = if_alloc_dev(IFT_ETHER, dev); vi->ifp = ifp; if_setsoftc(ifp, vi); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setinitfn(ifp, cxgbe_init); if_setioctlfn(ifp, cxgbe_ioctl); if_settransmitfn(ifp, cxgbe_transmit); if_setqflushfn(ifp, cxgbe_qflush); if (vi->pi->nvi > 1 || sc->flags & IS_VF) if_setgetcounterfn(ifp, vi_get_counter); else if_setgetcounterfn(ifp, cxgbe_get_counter); #if defined(KERN_TLS) || defined(RATELIMIT) if_setsndtagallocfn(ifp, cxgbe_snd_tag_alloc); #endif #ifdef RATELIMIT if_setratelimitqueryfn(ifp, cxgbe_ratelimit_query); #endif if_setcapabilities(ifp, T4_CAP); if_setcapenable(ifp, T4_CAP_ENABLE); if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (chip_id(sc) >= CHELSIO_T6) { if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0); 
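/*
 * The same three-step pattern recurs for each offload in this function:
 * advertise the capability, enable it by default, and set the matching
 * if_hwassist bits so the stack knows what work it may leave to the
 * driver. Generic sketch (IFCAP_FOO and CSUM_FOO are placeholders, not
 * real flags):
 *
 *	if_setcapabilitiesbit(ifp, IFCAP_FOO, 0);	driver can do it
 *	if_setcapenablebit(ifp, IFCAP_FOO, 0);		on by default
 *	if_sethwassistbits(ifp, CSUM_FOO, 0);		stack may defer work
 *
 * The VXLAN variant of all three steps is right here.
 */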
if_setcapenablebit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0); if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN, 0); } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) if_setcapabilitiesbit(ifp, IFCAP_TOE, 0); #endif #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) { if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0); if_setcapenablebit(ifp, IFCAP_TXRTLMT, 0); } #endif if_sethwtsomax(ifp, IP_MAXPACKET); if (vi->flags & TX_USES_VM_WR) if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_VM_TSO); else if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_TSO); #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_EO_TSO); #endif if_sethwtsomaxsegsize(ifp, 65536); #ifdef KERN_TLS if (is_ktls(sc)) { if_setcapabilitiesbit(ifp, IFCAP_TXTLS, 0); if (sc->flags & KERN_TLS_ON || !is_t6(sc)) if_setcapenablebit(ifp, IFCAP_TXTLS, 0); } #endif ether_ifattach(ifp, vi->hw_addr); #ifdef DEV_NETMAP if (vi->nnmrxq != 0) cxgbe_nm_attach(vi); #endif sb = sbuf_new_auto(); sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) switch (if_getcapabilities(ifp) & (IFCAP_TOE | IFCAP_TXRTLMT)) { case IFCAP_TOE: sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); break; case IFCAP_TOE | IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); break; case IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); break; } #endif #ifdef TCP_OFFLOAD if (if_getcapabilities(ifp) & IFCAP_TOE) sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); #endif #ifdef DEV_NETMAP if (if_getcapabilities(ifp) & IFCAP_NETMAP) sbuf_printf(sb, "; %d txq, %d rxq (netmap)", vi->nnmtxq, vi->nnmrxq); #endif sbuf_finish(sb); device_printf(dev, "%s\n", sbuf_data(sb)); sbuf_delete(sb); vi_sysctls(vi); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = if_name(ifp); vi->pfil = pfil_head_register(&pa); } static int cxgbe_attach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; struct vi_info *vi; int i; sysctl_ctx_init(&pi->ctx); cxgbe_vi_attach(dev, &pi->vi[0]); for_each_vi(pi, i, vi) { if (i == 0) continue; vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, DEVICE_UNIT_ANY); if (vi->dev == NULL) { device_printf(dev, "failed to add VI %d\n", i); continue; } device_set_softc(vi->dev, vi); } cxgbe_sysctls(pi); bus_generic_attach(dev); return (0); } static void cxgbe_vi_detach(struct vi_info *vi) { if_t ifp = vi->ifp; if (vi->pfil != NULL) { pfil_head_unregister(vi->pfil); vi->pfil = NULL; } ether_ifdetach(ifp); /* Let detach proceed even if these fail. */ #ifdef DEV_NETMAP if (if_getcapabilities(ifp) & IFCAP_NETMAP) cxgbe_nm_detach(vi); #endif cxgbe_uninit_synchronized(vi); callout_drain(&vi->tick); mtx_destroy(&vi->tick_mtx); sysctl_ctx_free(&vi->ctx); vi_full_uninit(vi); if_free(vi->ifp); vi->ifp = NULL; } static int cxgbe_detach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; int rc; /* Detach the extra VIs first. 
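 * bus_generic_detach() takes down the vcxgbe children while the main VI
 * and its port are still intact; only then is the main VI dismantled by
 * hand. The resulting order, matching the code below:
 *
 *	bus_generic_detach(dev);	extra VIs detach
 *	device_delete_children(dev);	their device_t's are reclaimed
 *	cxgbe_vi_detach(&pi->vi[0]);	the main VI goes last
 *
 * The extra VIs are layered on top of the port owned by vi[0], so they
 * must be gone before the port itself is torn down.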
*/ rc = bus_generic_detach(dev); if (rc) return (rc); device_delete_children(dev); sysctl_ctx_free(&pi->ctx); begin_vi_detach(sc, &pi->vi[0]); if (pi->flags & HAS_TRACEQ) { sc->traceq = -1; /* cloner should not create ifnet */ t4_tracer_port_detach(sc); } cxgbe_vi_detach(&pi->vi[0]); ifmedia_removeall(&pi->media); end_vi_detach(sc, &pi->vi[0]); return (0); } static void cxgbe_init(void *arg) { struct vi_info *vi = arg; struct adapter *sc = vi->adapter; if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) return; cxgbe_init_synchronized(vi); end_synchronized_op(sc, 0); } static int cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data) { int rc = 0, mtu, flags; struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifreq *ifr = (struct ifreq *)data; uint32_t mask; switch (cmd) { case SIOCSIFMTU: mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > MAX_MTU) return (EINVAL); rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); if (rc) return (rc); if_setmtu(ifp, mtu); if (vi->flags & VI_INIT_DONE) { t4_update_fl_bufsize(ifp); if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MTU); } end_synchronized_op(sc, 0); break; case SIOCSIFFLAGS: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto fail; } if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = vi->if_flags; if ((if_getflags(ifp) ^ flags) & (IFF_PROMISC | IFF_ALLMULTI)) { rc = update_mac_settings(ifp, XGMAC_PROMISC | XGMAC_ALLMULTI); } } else { rc = cxgbe_init_synchronized(vi); } vi->if_flags = if_getflags(ifp); } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { rc = cxgbe_uninit_synchronized(vi); } end_synchronized_op(sc, 0); break; case SIOCADDMULTI: case SIOCDELMULTI: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi"); if (rc) return (rc); if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MCADDRS); end_synchronized_op(sc, 0); break; case SIOCSIFCAP: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); if (rc) return (rc); mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & if_getcapenable(ifp) && !(IFCAP_TXCSUM & if_getcapenable(ifp))) { mask &= ~IFCAP_TSO4; if_setcapenablebit(ifp, 0, IFCAP_TSO4); if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & if_getcapenable(ifp) && !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { mask &= ~IFCAP_TSO6; if_setcapenablebit(ifp, 0, IFCAP_TSO6); if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) if_togglecapenable(ifp, IFCAP_RXCSUM); if (mask & IFCAP_RXCSUM_IPV6) if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); /* * Note that we leave CSUM_TSO alone (it is always set). The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. 
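 * The dependency is enforced in both directions: the TSO4/TSO6 handlers
 * below refuse to enable TSO unless the matching transmit checksum
 * offload is already on, and the txcsum handlers above drag TSO down
 * when checksum offload is turned off. From a shell (interface name
 * illustrative):
 *
 *	# ifconfig cc0 -txcsum		logs "tso4 disabled due to -txcsum."
 *	# ifconfig cc0 tso4		rejected (EAGAIN) until txcsum is back on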
*/ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & if_getcapenable(ifp)) && !(IFCAP_TXCSUM & if_getcapenable(ifp))) { if_printf(ifp, "enable txcsum first.\n"); rc = EAGAIN; goto fail; } if_togglecapenable(ifp, IFCAP_TSO4); } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & if_getcapenable(ifp)) && !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) { if_printf(ifp, "enable txcsum6 first.\n"); rc = EAGAIN; goto fail; } if_togglecapenable(ifp, IFCAP_TSO6); } if (mask & IFCAP_LRO) { #if defined(INET) || defined(INET6) int i; struct sge_rxq *rxq; if_togglecapenable(ifp, IFCAP_LRO); for_each_rxq(vi, i, rxq) { if (if_getcapenable(ifp) & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; else rxq->iq.flags &= ~IQ_LRO_ENABLED; } #endif } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE) { int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE; rc = toe_capability(vi, enable); if (rc != 0) goto fail; if_togglecapenable(ifp, mask); } #endif if (mask & IFCAP_VLAN_HWTAGGING) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_VLANEX); } if (mask & IFCAP_VLAN_MTU) { if_togglecapenable(ifp, IFCAP_VLAN_MTU); /* Need to find out how to disable auto-mtu-inflation */ } if (mask & IFCAP_VLAN_HWTSO) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if (mask & IFCAP_VLAN_HWCSUM) if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); #ifdef RATELIMIT if (mask & IFCAP_TXRTLMT) if_togglecapenable(ifp, IFCAP_TXRTLMT); #endif if (mask & IFCAP_HWRXTSTMP) { int i; struct sge_rxq *rxq; if_togglecapenable(ifp, IFCAP_HWRXTSTMP); for_each_rxq(vi, i, rxq) { if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) rxq->iq.flags |= IQ_RX_TIMESTAMP; else rxq->iq.flags &= ~IQ_RX_TIMESTAMP; } } if (mask & IFCAP_MEXTPG) if_togglecapenable(ifp, IFCAP_MEXTPG); #ifdef KERN_TLS if (mask & IFCAP_TXTLS) { int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TXTLS; rc = ktls_capability(sc, enable); if (rc != 0) goto fail; if_togglecapenable(ifp, mask & IFCAP_TXTLS); } #endif if (mask & IFCAP_VXLAN_HWCSUM) { if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM); if_togglehwassist(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP); } if (mask & IFCAP_VXLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO); if_togglehwassist(ifp, CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO); } #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif fail: end_synchronized_op(sc, 0); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd); break; case SIOCGI2C: { struct ifi2creq i2c; rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (rc != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { rc = EPERM; break; } if (i2c.len > sizeof(i2c.data)) { rc = EINVAL; break; } rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, i2c.offset, i2c.len, &i2c.data[0]); end_synchronized_op(sc, 0); if (rc == 0) rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } default: rc = ether_ioctl(ifp, cmd, data); } return (rc); } static int cxgbe_transmit(if_t ifp, struct mbuf *m) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc; struct sge_txq *txq; void *items[1]; int rc; M_ASSERTPKTHDR(m); MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ #if defined(KERN_TLS) || defined(RATELIMIT) if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 
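/*
 * A note on the txq selection that follows: the VI's txq 0 can be
 * reserved for traffic without a flowid (rsrv_noflowq), with flows
 * spread over the remaining queues. Worked example with ntxq = 8 and
 * rsrv_noflowq = 1 (illustrative numbers): a packet with flowid f is
 * sent on the VI's txq 1 + (f % 7), while all packets without a flowid
 * share txq 0 and so keep their relative order.
 */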
#endif if (__predict_false(pi->link_cfg.link_ok == false)) { m_freem(m); return (ENETDOWN); } rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); if (__predict_false(rc != 0)) { if (__predict_true(rc == EINPROGRESS)) { /* queued by parse_pkt */ MPASS(m != NULL); return (0); } MPASS(m == NULL); /* was freed already */ atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ return (rc); } /* Select a txq. */ sc = vi->adapter; txq = &sc->sge.txq[vi->first_txq]; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq); items[0] = m; rc = mp_ring_enqueue(txq->r, items, 1, 256); if (__predict_false(rc != 0)) m_freem(m); return (rc); } static void cxgbe_qflush(if_t ifp) { struct vi_info *vi = if_getsoftc(ifp); struct sge_txq *txq; int i; /* queues do not exist if !VI_INIT_DONE. */ if (vi->flags & VI_INIT_DONE) { for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_QFLUSH; TXQ_UNLOCK(txq); while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("qflush", 1); } TXQ_LOCK(txq); txq->eq.flags &= ~EQ_QFLUSH; TXQ_UNLOCK(txq); } } if_qflush(ifp); } static uint64_t vi_get_counter(if_t ifp, ift_counter c) { struct vi_info *vi = if_getsoftc(ifp); struct fw_vi_stats_vf *s = &vi->stats; mtx_lock(&vi->tick_mtx); vi_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_bcast_frames + s->rx_mcast_frames + s->rx_ucast_frames); case IFCOUNTER_IERRORS: return (s->rx_err_frames); case IFCOUNTER_OPACKETS: return (s->tx_bcast_frames + s->tx_mcast_frames + s->tx_ucast_frames + s->tx_offload_frames); case IFCOUNTER_OERRORS: return (s->tx_drop_frames); case IFCOUNTER_IBYTES: return (s->rx_bcast_bytes + s->rx_mcast_bytes + s->rx_ucast_bytes); case IFCOUNTER_OBYTES: return (s->tx_bcast_bytes + s->tx_mcast_bytes + s->tx_ucast_bytes + s->tx_offload_bytes); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = 0; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } static uint64_t cxgbe_get_counter(if_t ifp, ift_counter c) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct port_stats *s = &pi->stats; mtx_lock(&vi->tick_mtx); cxgbe_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_frames); case IFCOUNTER_IERRORS: return (s->rx_jabber + s->rx_runt + s->rx_too_long + s->rx_fcs_err + s->rx_len_err); case IFCOUNTER_OPACKETS: return (s->tx_frames); case IFCOUNTER_OERRORS: return (s->tx_error_frames); case IFCOUNTER_IBYTES: return (s->rx_octets); case IFCOUNTER_OBYTES: return (s->tx_octets); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_IQDROPS: return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + s->rx_trunc3 + pi->tnl_cong_drops); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = s->tx_drop; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params, struct 
m_snd_tag **pt) { int error; switch (params->hdr.type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: error = cxgbe_rate_tag_alloc(ifp, params, pt); break; #endif #ifdef KERN_TLS case IF_SND_TAG_TYPE_TLS: { struct vi_info *vi = if_getsoftc(ifp); if (is_t6(vi->pi->adapter)) error = t6_tls_tag_alloc(ifp, params, pt); else error = EOPNOTSUPP; break; } #endif default: error = EOPNOTSUPP; } return (error); } #endif /* * The kernel picks a media from the list we had provided but we still validate * the request. */ int cxgbe_media_change(if_t ifp) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct ifmedia *ifm = &pi->media; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); if (rc != 0) return (rc); PORT_LOCK(pi); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { /* ifconfig .. media autoselect */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; /* AN not supported by transceiver */ goto done; } lc->requested_aneg = AUTONEG_ENABLE; lc->requested_speed = 0; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->requested_aneg = AUTONEG_DISABLE; lc->requested_speed = ifmedia_baudrate(ifm->ifm_media) / 1000000; lc->requested_fc = 0; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) lc->requested_fc |= PAUSE_RX; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) lc->requested_fc |= PAUSE_TX; } if (pi->up_vis > 0 && !hw_off_limits(sc)) { fixup_link_config(pi); rc = apply_link_config(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } /* * Base media word (without ETHER, pause, link active, etc.) for the port at the * given speed. */ static int port_mword(struct port_info *pi, uint32_t speed) { MPASS(speed & M_FW_PORT_CAP32_SPEED); MPASS(powerof2(speed)); switch (pi->port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI: /* BaseT */ switch (speed) { case FW_PORT_CAP32_SPEED_100M: return (IFM_100_T); case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_T); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_T); } break; case FW_PORT_TYPE_KX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_KX4); break; case FW_PORT_TYPE_CX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_CX4); break; case FW_PORT_TYPE_KX: if (speed == FW_PORT_CAP32_SPEED_1G) return (IFM_1000_KX); break; case FW_PORT_TYPE_KR: case FW_PORT_TYPE_BP_AP: case FW_PORT_TYPE_BP4_AP: case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_KR_SFP28: case FW_PORT_TYPE_KR_XLAUI: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_KX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_KR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_KR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_KR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_KR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_KR4); } break; case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA: case FW_PORT_TYPE_QSFP: case FW_PORT_TYPE_CR4_QSFP: case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_CR2_QSFP: case FW_PORT_TYPE_SFP28: /* Pluggable transceiver */ switch (pi->mod_type) { case FW_PORT_MOD_TYPE_LR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_LX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_LR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_LR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_LR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_LR2); case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_LR4); } break; case FW_PORT_MOD_TYPE_SR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_SX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_SR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_SR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_SR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_SR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_SR4); } break; case FW_PORT_MOD_TYPE_ER: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_ER); break; case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_CX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_TWINAX); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_CR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_CR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_CR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_CR4); } break; case FW_PORT_MOD_TYPE_LRM: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_LRM); break; case FW_PORT_MOD_TYPE_NA: MPASS(0); /* Not pluggable? */ /* fall through */ case FW_PORT_MOD_TYPE_ERROR: case FW_PORT_MOD_TYPE_UNKNOWN: case FW_PORT_MOD_TYPE_NOTSUPPORTED: break; case FW_PORT_MOD_TYPE_NONE: return (IFM_NONE); } break; case FW_PORT_TYPE_NONE: return (IFM_NONE); } return (IFM_UNKNOWN); } void cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr) { struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4med") != 0) return; PORT_LOCK(pi); if (pi->up_vis == 0 && !hw_off_limits(sc)) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here * so that ifconfig displays accurate ifmedia at all times. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); build_medialist(pi); } /* ifm_status */ ifmr->ifm_status = IFM_AVALID; if (lc->link_ok == false) goto done; ifmr->ifm_status |= IFM_ACTIVE; /* ifm_active */ ifmr->ifm_active = IFM_ETHER | IFM_FDX; ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); if (lc->fc & PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (lc->fc & PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } static int vcxgbe_probe(device_t dev) { struct vi_info *vi = device_get_softc(dev); device_set_descf(dev, "port %d vi %td", vi->pi->port_id, vi - vi->pi->vi); return (BUS_PROBE_DEFAULT); } static int alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) { int func, index, rc; uint32_t param, val; ASSERT_SYNCHRONIZED_OP(sc); index = vi - pi->vi; MPASS(index > 0); /* This function deals with _extra_ VIs only */ KASSERT(index < nitems(vi_mac_funcs), ("%s: VI %s doesn't have a MAC func", __func__, device_get_nameunit(vi->dev))); func = vi_mac_funcs[index]; rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); if (rc < 0) { CH_ERR(vi, "failed to allocate virtual interface %d " "for port %d: %d\n", index, pi->port_id, -rc); return (-rc); } vi->viid = rc; if (vi->rss_size == 1) { /* * This VI didn't get a slice of the RSS table. Reduce the * number of VIs being created (hw.cxgbe.num_vis) or modify the * configuration file (nvi, rssnvi for this PF) if this is a * problem.
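 * For example, a /boot/loader.conf line like the following (value
 * illustrative) backs the driver off to one VI per port:
 *
 *	hw.cxgbe.num_vis="1"
 *
 * nvi and rssnvi are knobs in the firmware configuration file that is
 * selected at attach time.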
*/ device_printf(vi->dev, "RSS table not available.\n"); vi->rss_base = 0xffff; return (0); } param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | V_FW_PARAMS_PARAM_YZ(vi->viid); rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc) vi->rss_base = 0xffff; else { MPASS((val >> 16) == vi->rss_size); vi->rss_base = val & 0xffff; } return (0); } static int vcxgbe_attach(device_t dev) { struct vi_info *vi; struct port_info *pi; struct adapter *sc; int rc; vi = device_get_softc(dev); pi = vi->pi; sc = pi->adapter; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); if (rc) return (rc); rc = alloc_extra_vi(sc, pi, vi); end_synchronized_op(sc, 0); if (rc) return (rc); cxgbe_vi_attach(dev, vi); return (0); } static int vcxgbe_detach(device_t dev) { struct vi_info *vi; struct adapter *sc; vi = device_get_softc(dev); sc = vi->adapter; begin_vi_detach(sc, vi); cxgbe_vi_detach(vi); t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); end_vi_detach(sc, vi); return (0); } static struct callout fatal_callout; static struct taskqueue *reset_tq; static void delayed_panic(void *arg) { struct adapter *sc = arg; panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); } static void fatal_error_task(void *arg, int pending) { struct adapter *sc = arg; int rc; if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) { dump_cim_regs(sc); dump_cimla(sc); dump_devlog(sc); } if (t4_reset_on_fatal_err) { CH_ALERT(sc, "resetting adapter after fatal error.\n"); rc = reset_adapter(sc); if (rc == 0 && t4_panic_on_fatal_err) { CH_ALERT(sc, "reset was successful, " "system will NOT panic.\n"); return; } } if (t4_panic_on_fatal_err) { CH_ALERT(sc, "panicking on fatal error (after 30s).\n"); callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); } } void t4_fatal_err(struct adapter *sc, bool fw_error) { const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0; stop_adapter(sc); if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR))) return; if (fw_error) { /* * We are here because of a firmware error/timeout and not * because of a hardware interrupt. It is possible (although * not very likely) that an error interrupt was also raised but * this thread ran first and inhibited t4_intr_err. We walk the * main INT_CAUSE registers here to make sure we haven't missed * anything interesting. 
*/ t4_slow_intr_handler(sc, verbose); atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); } t4_report_fw_error(sc); log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n", device_get_nameunit(sc->dev), fw_error); taskqueue_enqueue(reset_tq, &sc->fatal_error_task); } void t4_add_adapter(struct adapter *sc) { sx_xlock(&t4_list_lock); SLIST_INSERT_HEAD(&t4_list, sc, link); sx_xunlock(&t4_list_lock); } int t4_map_bars_0_and_4(struct adapter *sc) { sc->regs_rid = PCIR_BAR(0); sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE); if (sc->regs_res == NULL) { device_printf(sc->dev, "cannot map registers.\n"); return (ENXIO); } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); setbit(&sc->doorbells, DOORBELL_KDB); sc->msix_rid = PCIR_BAR(4); sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->msix_rid, RF_ACTIVE); if (sc->msix_res == NULL) { device_printf(sc->dev, "cannot map MSI-X BAR.\n"); return (ENXIO); } return (0); } int t4_map_bar_2(struct adapter *sc) { /* * T4: only iWARP driver uses the userspace doorbells. There is no need * to map it if RDMA is disabled. */ if (is_t4(sc) && sc->rdmacaps == 0) return (0); sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE); if (sc->udbs_res == NULL) { device_printf(sc->dev, "cannot map doorbell BAR.\n"); return (ENXIO); } sc->udbs_base = rman_get_virtual(sc->udbs_res); if (chip_id(sc) >= CHELSIO_T5) { setbit(&sc->doorbells, DOORBELL_UDB); #if defined(__i386__) || defined(__amd64__) if (t5_write_combine) { int rc, mode; /* * Enable write combining on BAR2. This is the * userspace doorbell BAR and is split into 128B * (UDBS_SEG_SIZE) doorbell regions, each associated * with an egress queue. The first 64B has the doorbell * and the second 64B can be used to submit a tx work * request with an implicit doorbell. */ rc = pmap_change_attr((vm_offset_t)sc->udbs_base, rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); if (rc == 0) { clrbit(&sc->doorbells, DOORBELL_UDB); setbit(&sc->doorbells, DOORBELL_WCWR); setbit(&sc->doorbells, DOORBELL_UDBWC); } else { device_printf(sc->dev, "couldn't enable write combining: %d\n", rc); } mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); t4_write_reg(sc, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | mode); } #endif } sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; return (0); } int t4_adj_doorbells(struct adapter *sc) { if ((sc->doorbells & t4_doorbells_allowed) != 0) { sc->doorbells &= t4_doorbells_allowed; return (0); } CH_ERR(sc, "No usable doorbell (available = 0x%x, allowed = 0x%x).\n", sc->doorbells, t4_doorbells_allowed); return (EINVAL); } struct memwin_init { uint32_t base; uint32_t aperture; }; static const struct memwin_init t4_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } }; static const struct memwin_init t5_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, }; static void setup_memwin(struct adapter *sc) { const struct memwin_init *mw_init; struct memwin *mw; int i; uint32_t bar0; if (is_t4(sc)) { /* * Read low 32b of bar0 indirectly via the hardware backdoor * mechanism. Works from within PCI passthrough environments * too, where rman_get_start() can return a different value. 
We * need to program the T4 memory window decoders with the actual * addresses that will be coming across the PCIe link. */ bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; mw_init = &t4_memwin[0]; } else { /* T5+ use the relative offset inside the PCIe BAR */ bar0 = 0; mw_init = &t5_memwin[0]; } for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { if (!rw_initialized(&mw->mw_lock)) { rw_init(&mw->mw_lock, "memory window access"); mw->mw_base = mw_init->base; mw->mw_aperture = mw_init->aperture; mw->mw_curpos = 0; } t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), (mw->mw_base + bar0) | V_BIR(0) | V_WINDOW(ilog2(mw->mw_aperture) - 10)); rw_wlock(&mw->mw_lock); position_memwin(sc, i, mw->mw_curpos); rw_wunlock(&mw->mw_lock); } /* flush */ t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); } /* * Positions the memory window at the given address in the card's address space. * There are some alignment requirements and the actual position may be at an * address prior to the requested address. mw->mw_curpos always has the actual * position of the window. */ static void position_memwin(struct adapter *sc, int idx, uint32_t addr) { struct memwin *mw; uint32_t pf; uint32_t reg; MPASS(idx >= 0 && idx < NUM_MEMWIN); mw = &sc->memwin[idx]; rw_assert(&mw->mw_lock, RA_WLOCKED); if (is_t4(sc)) { pf = 0; mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ } reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); t4_write_reg(sc, reg, mw->mw_curpos | pf); t4_read_reg(sc, reg); /* flush */ } int rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len, int rw) { struct memwin *mw; uint32_t mw_end, v; MPASS(idx >= 0 && idx < NUM_MEMWIN); /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len <= 0) return (EINVAL); mw = &sc->memwin[idx]; while (len > 0) { rw_rlock(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; if (addr >= mw_end || addr < mw->mw_curpos) { /* Will need to reposition the window */ if (!rw_try_upgrade(&mw->mw_lock)) { rw_runlock(&mw->mw_lock); rw_wlock(&mw->mw_lock); } rw_assert(&mw->mw_lock, RA_WLOCKED); position_memwin(sc, idx, addr); rw_downgrade(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; } rw_assert(&mw->mw_lock, RA_RLOCKED); while (addr < mw_end && len > 0) { if (rw == 0) { v = t4_read_reg(sc, mw->mw_base + addr - mw->mw_curpos); *val++ = le32toh(v); } else { v = *val++; t4_write_reg(sc, mw->mw_base + addr - mw->mw_curpos, htole32(v)); } addr += 4; len -= 4; } rw_runlock(&mw->mw_lock); } return (0); } CTASSERT(M_TID_COOKIE == M_COOKIE); CTASSERT(MAX_ATIDS <= (M_TID_TID + 1)); static void t4_init_atid_table(struct adapter *sc) { struct tid_info *t; int i; t = &sc->tids; if (t->natids == 0) return; MPASS(t->atid_tab == NULL); t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, M_ZERO | M_WAITOK); mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); t->afree = t->atid_tab; t->atids_in_use = 0; t->atid_alloc_stopped = false; for (i = 1; i < t->natids; i++) t->atid_tab[i - 1].next = &t->atid_tab[i]; t->atid_tab[t->natids - 1].next = NULL; } static void t4_free_atid_table(struct adapter *sc) { struct tid_info *t; t = &sc->tids; KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); if (mtx_initialized(&t->atid_lock)) mtx_destroy(&t->atid_lock); free(t->atid_tab, 
M_CXGBE); t->atid_tab = NULL; } static void stop_atid_allocator(struct adapter *sc) { struct tid_info *t = &sc->tids; mtx_lock(&t->atid_lock); t->atid_alloc_stopped = true; mtx_unlock(&t->atid_lock); } static void restart_atid_allocator(struct adapter *sc) { struct tid_info *t = &sc->tids; mtx_lock(&t->atid_lock); KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); t->atid_alloc_stopped = false; mtx_unlock(&t->atid_lock); } int alloc_atid(struct adapter *sc, void *ctx) { struct tid_info *t = &sc->tids; int atid = -1; mtx_lock(&t->atid_lock); if (t->afree && !t->atid_alloc_stopped) { union aopen_entry *p = t->afree; atid = p - t->atid_tab; MPASS(atid <= M_TID_TID); t->afree = p->next; p->data = ctx; t->atids_in_use++; } mtx_unlock(&t->atid_lock); return (atid); } void * lookup_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; return (t->atid_tab[atid].data); } void free_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; union aopen_entry *p = &t->atid_tab[atid]; mtx_lock(&t->atid_lock); p->next = t->afree; t->afree = p; t->atids_in_use--; mtx_unlock(&t->atid_lock); } static void queue_tid_release(struct adapter *sc, int tid) { CXGBE_UNIMPLEMENTED("deferred tid release"); } void release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) { struct wrqe *wr; struct cpl_tid_release *req; wr = alloc_wrqe(sizeof(*req), ctrlq); if (wr == NULL) { queue_tid_release(sc, tid); /* defer */ return; } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); t4_wrq_tx(sc, wr); } static int t4_range_cmp(const void *a, const void *b) { return ((const struct t4_range *)a)->start - ((const struct t4_range *)b)->start; } /* * Verify that the memory range specified by the addr/len pair is valid within * the card's address space. */ static int validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len) { struct t4_range mem_ranges[4], *r, *next; uint32_t em, addr_len; int i, n, remaining; /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len == 0) return (EINVAL); /* Enabled memories */ em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); r = &mem_ranges[0]; n = 0; bzero(r, sizeof(mem_ranges)); if (em & F_EDRAM0_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); r->size = G_EDRAM0_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM0_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EDRAM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); r->size = G_EDRAM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EXT_MEM_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); r->size = G_EXT_MEM_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); r->size = G_EXT_MEM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } MPASS(n <= nitems(mem_ranges)); if (n > 1) { /* Sort and merge the ranges. */ qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); /* Start from index 0 and examine the next n - 1 entries. 
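 * Worked example, assuming two enabled memories that happen to abut
 * (illustrative sizes): EDC0 at [0x00000000, 0x04000000) and MC at
 * [0x04000000, 0x14000000). After the sort, r->start + r->size ==
 * next->start, so the two entries merge into [0x00000000, 0x14000000),
 * n drops from 2 to 1, and an addr/len pair that straddles the old
 * boundary now validates as one contiguous range.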
*/ r = &mem_ranges[0]; for (remaining = n - 1; remaining > 0; remaining--, r++) { MPASS(r->size > 0); /* r is a valid entry. */ next = r + 1; MPASS(next->size > 0); /* and so is the next one. */ while (r->start + r->size >= next->start) { /* Merge the next one into the current entry. */ r->size = max(r->start + r->size, next->start + next->size) - r->start; n--; /* One fewer entry in total. */ if (--remaining == 0) goto done; /* short circuit */ next++; } if (next != r + 1) { /* * Some entries were merged into r and next * points to the first valid entry that couldn't * be merged. */ MPASS(next->size > 0); /* must be valid */ memcpy(r + 1, next, remaining * sizeof(*r)); #ifdef INVARIANTS /* * This so that the foo->size assertion in the * next iteration of the loop do the right * thing for entries that were pulled up and are * no longer valid. */ MPASS(n < nitems(mem_ranges)); bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * sizeof(struct t4_range)); #endif } } done: /* Done merging the ranges. */ MPASS(n > 0); r = &mem_ranges[0]; for (i = 0; i < n; i++, r++) { if (addr >= r->start && addr + len <= r->start + r->size) return (0); } } return (EFAULT); } static int fwmtype_to_hwmtype(int mtype) { switch (mtype) { case FW_MEMTYPE_EDC0: return (MEM_EDC0); case FW_MEMTYPE_EDC1: return (MEM_EDC1); case FW_MEMTYPE_EXTMEM: return (MEM_MC0); case FW_MEMTYPE_EXTMEM1: return (MEM_MC1); default: panic("%s: cannot translate fw mtype %d.", __func__, mtype); } } /* * Verify that the memory range specified by the memtype/offset/len pair is * valid and lies entirely within the memtype specified. The global address of * the start of the range is returned in addr. */ static int validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len, uint32_t *addr) { uint32_t em, addr_len, maddr; /* Memory can only be accessed in naturally aligned 4 byte units */ if (off & 3 || len & 3 || len == 0) return (EINVAL); em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); switch (fwmtype_to_hwmtype(mtype)) { case MEM_EDC0: if (!(em & F_EDRAM0_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); maddr = G_EDRAM0_BASE(addr_len) << 20; break; case MEM_EDC1: if (!(em & F_EDRAM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); maddr = G_EDRAM1_BASE(addr_len) << 20; break; case MEM_MC: if (!(em & F_EXT_MEM_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); maddr = G_EXT_MEM_BASE(addr_len) << 20; break; case MEM_MC1: if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); maddr = G_EXT_MEM1_BASE(addr_len) << 20; break; default: return (EINVAL); } *addr = maddr + off; /* global address */ return (validate_mem_range(sc, *addr, len)); } static int fixup_devlog_params(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; int rc; rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, dparams->size, &dparams->addr); return (rc); } static void update_nirq(struct intrs_and_queues *iaq, int nports) { iaq->nirq = T4_EXTRA_INTR; iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); iaq->nirq += nports * iaq->nofldrxq; iaq->nirq += nports * (iaq->num_vis - 1) * max(iaq->nrxq_vi, iaq->nnmrxq_vi); iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; } /* * Adjust requirements to fit the number of interrupts available. 
*/ static void calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, int navail) { int old_nirq; const int nports = sc->params.nports; MPASS(nports > 0); MPASS(navail > 0); bzero(iaq, sizeof(*iaq)); iaq->intr_type = itype; iaq->num_vis = t4_num_vis; iaq->ntxq = t4_ntxq; iaq->ntxq_vi = t4_ntxq_vi; iaq->nrxq = t4_nrxq; iaq->nrxq_vi = t4_nrxq_vi; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { iaq->nofldtxq = t4_nofldtxq; iaq->nofldtxq_vi = t4_nofldtxq_vi; } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { iaq->nofldrxq = t4_nofldrxq; iaq->nofldrxq_vi = t4_nofldrxq_vi; } #endif #ifdef DEV_NETMAP if (t4_native_netmap & NN_MAIN_VI) { iaq->nnmtxq = t4_nnmtxq; iaq->nnmrxq = t4_nnmrxq; } if (t4_native_netmap & NN_EXTRA_VI) { iaq->nnmtxq_vi = t4_nnmtxq_vi; iaq->nnmrxq_vi = t4_nnmrxq_vi; } #endif update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { /* * This is the normal case -- there are enough interrupts for * everything. */ goto done; } /* * If extra VIs have been configured, try reducing their count and see if * that works. */ while (iaq->num_vis > 1) { iaq->num_vis--; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "virtual interfaces per port " "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " "itype %d, navail %u, nirq %d.\n", iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); goto done; } } /* * Extra VIs will not be created. Log a message if they were requested. */ MPASS(iaq->num_vis == 1); iaq->ntxq_vi = iaq->nrxq_vi = 0; iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; if (iaq->num_vis != t4_num_vis) { device_printf(sc->dev, "extra virtual interfaces disabled. " "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); } /* * Keep reducing the number of NIC rx queues to the next lower power of * 2 (for even RSS distribution) and halving the TOE rx queues, and see * if that works. */ do { if (iaq->nrxq > 1) { iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1); if (iaq->nnmrxq > iaq->nrxq) iaq->nnmrxq = iaq->nrxq; } if (iaq->nofldrxq > 1) iaq->nofldrxq >>= 1; old_nirq = iaq->nirq; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "running with reduced number of " "rx queues because of shortage of interrupts. " "nrxq=%u, nofldrxq=%u. " "itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, itype, navail, iaq->nirq); goto done; } } while (old_nirq != iaq->nirq); /* One interrupt for everything. Ugh. */ device_printf(sc->dev, "running with minimal number of queues. " "itype %d, navail %u.\n", itype, navail); iaq->nirq = 1; iaq->nrxq = 1; iaq->ntxq = 1; if (iaq->nofldrxq > 0) { iaq->nofldrxq = 1; iaq->nofldtxq = 1; } iaq->nnmtxq = 0; iaq->nnmrxq = 0; done: MPASS(iaq->num_vis > 0); if (iaq->num_vis > 1) { MPASS(iaq->nrxq_vi > 0); MPASS(iaq->ntxq_vi > 0); } MPASS(iaq->nirq > 0); MPASS(iaq->nrxq > 0); MPASS(iaq->ntxq > 0); if (itype == INTR_MSI) { MPASS(powerof2(iaq->nirq)); } } static int cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) { int rc, itype, navail, nalloc; for (itype = INTR_MSIX; itype; itype >>= 1) { if ((itype & t4_intr_types) == 0) continue; /* not allowed */ if (itype == INTR_MSIX) navail = pci_msix_count(sc->dev); else if (itype == INTR_MSI) navail = pci_msi_count(sc->dev); else navail = 1; restart: if (navail == 0) continue; calculate_iaq(sc, iaq, itype, navail); nalloc = iaq->nirq; rc = 0; if (itype == INTR_MSIX) rc = pci_alloc_msix(sc->dev, &nalloc); else if (itype == INTR_MSI) rc = pci_alloc_msi(sc->dev, &nalloc); if (rc == 0 && nalloc > 0) { if (nalloc == iaq->nirq) return (0); /* * Didn't get the number requested. Use whatever number * the kernel is willing to allocate. */ device_printf(sc->dev, "fewer vectors than requested, " "type=%d, req=%d, rcvd=%d; will downshift req.\n", itype, iaq->nirq, nalloc); pci_release_msi(sc->dev); navail = nalloc; goto restart; } device_printf(sc->dev, "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n", itype, rc, iaq->nirq, nalloc); } device_printf(sc->dev, "failed to find a usable interrupt type. " "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types, pci_msix_count(sc->dev), pci_msi_count(sc->dev)); return (ENXIO); } #define FW_VERSION(chip) ( \ V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) /* Just enough of fw_hdr to cover all version info. */ struct fw_h { __u8 ver; __u8 chip; __be16 len512; __be32 fw_ver; __be32 tp_microcode_ver; __u8 intfver_nic; __u8 intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; }; /* Spot check a couple of fields; struct fw_h must remain layout-identical to the leading part of struct fw_hdr, and the CTASSERTs below catch any drift at compile time.
*/ CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); struct fw_info { uint8_t chip; char *kld_name; char *fw_mod_name; struct fw_h fw_h; } fw_info[] = { { .chip = CHELSIO_T4, .kld_name = "t4fw_cfg", .fw_mod_name = "t4fw", .fw_h = { .chip = FW_HDR_CHIP_T4, .fw_ver = htobe32(FW_VERSION(T4)), .intfver_nic = FW_INTFVER(T4, NIC), .intfver_vnic = FW_INTFVER(T4, VNIC), .intfver_ofld = FW_INTFVER(T4, OFLD), .intfver_ri = FW_INTFVER(T4, RI), .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T4, ISCSI), .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), .intfver_fcoe = FW_INTFVER(T4, FCOE), }, }, { .chip = CHELSIO_T5, .kld_name = "t5fw_cfg", .fw_mod_name = "t5fw", .fw_h = { .chip = FW_HDR_CHIP_T5, .fw_ver = htobe32(FW_VERSION(T5)), .intfver_nic = FW_INTFVER(T5, NIC), .intfver_vnic = FW_INTFVER(T5, VNIC), .intfver_ofld = FW_INTFVER(T5, OFLD), .intfver_ri = FW_INTFVER(T5, RI), .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, }, { .chip = CHELSIO_T6, .kld_name = "t6fw_cfg", .fw_mod_name = "t6fw", .fw_h = { .chip = FW_HDR_CHIP_T6, .fw_ver = htobe32(FW_VERSION(T6)), .intfver_nic = FW_INTFVER(T6, NIC), .intfver_vnic = FW_INTFVER(T6, VNIC), .intfver_ofld = FW_INTFVER(T6, OFLD), .intfver_ri = FW_INTFVER(T6, RI), .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T6, ISCSI), .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), .intfver_fcoe = FW_INTFVER(T6, FCOE), }, } }; static struct fw_info * find_fw_info(int chip) { int i; for (i = 0; i < nitems(fw_info); i++) { if (fw_info[i].chip == chip) return (&fw_info[i]); } return (NULL); } /* * Is the given firmware API compatible with the one the driver was compiled * with? */ static int fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) { /* short circuit if it's the exact same firmware version */ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) return (1); /* * XXX: Is this too conservative? Perhaps I should limit this to the * features that are supported in the driver. */ #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) return (1); #undef SAME_INTF return (0); } static int load_fw_module(struct adapter *sc, const struct firmware **dcfg, const struct firmware **fw) { struct fw_info *fw_info; *dcfg = NULL; if (fw != NULL) *fw = NULL; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } *dcfg = firmware_get(fw_info->kld_name); if (*dcfg != NULL) { if (fw != NULL) *fw = firmware_get(fw_info->fw_mod_name); return (0); } return (ENOENT); } static void unload_fw_module(struct adapter *sc, const struct firmware *dcfg, const struct firmware *fw) { if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); if (dcfg != NULL) firmware_put(dcfg, FIRMWARE_UNLOAD); } /* * Return values: * 0 means no firmware install attempted. * ERESTART means a firmware install was attempted and was successful. * +ve errno means a firmware install was attempted but failed. 
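 *
 * Informal summary of how the hw.cxgbe.fw_install tunable steers the
 * decision (the code below is authoritative):
 *	 0	never write the card's flash.
 *	 1	install only if the firmware on the card is missing,
 *		incompatible, or older than the version bundled here.
 *	 2	also install when the version merely differs.
 *	<0	like the absolute value, but compare against the firmware
 *		in the KLD instead of the compiled-in version.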
*/ static int install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, const struct fw_h *drv_fw, const char *reason, int *already) { const struct firmware *cfg, *fw; const uint32_t c = be32toh(card_fw->fw_ver); uint32_t d, k; int rc, fw_install; struct fw_h bundled_fw; bool load_attempted; cfg = fw = NULL; load_attempted = false; fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw)); if (t4_fw_install < 0) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p;" " will use compiled-in firmware version for " "hw.cxgbe.fw_install checks.\n", rc, cfg, fw); } else { memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); } load_attempted = true; } d = be32toh(bundled_fw.fw_ver); if (reason != NULL) goto install; if ((sc->flags & FW_OK) == 0) { if (c == 0xffffffff) { reason = "missing"; goto install; } rc = 0; goto done; } if (!fw_compatible(card_fw, &bundled_fw)) { reason = "incompatible or unusable"; goto install; } if (d > c) { reason = "older than the version bundled with this driver"; goto install; } if (fw_install == 2 && d != c) { reason = "different than the version bundled with this driver"; goto install; } /* No reason to do anything to the firmware already on the card. */ rc = 0; goto done; install: rc = 0; if ((*already)++) goto done; if (fw_install == 0) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver is prohibited from installing a firmware " "on the card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); goto done; } /* * We'll attempt to install a firmware. Load the module first (if it * hasn't been loaded already). */ if (!load_attempted) { rc = load_fw_module(sc, &cfg, &fw); if (rc != 0 || fw == NULL) { device_printf(sc->dev, "failed to load firmware module: %d. cfg %p, fw %p\n", rc, cfg, fw); /* carry on */ } } if (fw == NULL) { device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "but the driver cannot take corrective action because it " "is unable to load the firmware module.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); rc = sc->flags & FW_OK ? 0 : ENOENT; goto done; } k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); if (k != d) { MPASS(t4_fw_install > 0); device_printf(sc->dev, "firmware in KLD (%u.%u.%u.%u) is not what the driver was " "expecting (%u.%u.%u.%u) and will not be used.\n", G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = sc->flags & FW_OK ? 0 : EINVAL; goto done; } device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); if (rc != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", rc); } else { /* Installed successfully, update the cached header too.
*/ rc = ERESTART; memcpy(card_fw, fw->data, sizeof(*card_fw)); } done: unload_fw_module(sc, cfg, fw); return (rc); } /* * Establish contact with the firmware and attempt to become the master driver. * * A firmware will be installed to the card if needed (if the driver is allowed * to do so). */ static int contact_firmware(struct adapter *sc) { int rc, already = 0; enum dev_state state; struct fw_info *fw_info; struct fw_hdr *card_fw; /* fw on the card */ const struct fw_h *drv_fw; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } drv_fw = &fw_info->fw_h; /* Read the header of the firmware on the card */ card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); restart: rc = -t4_get_fw_hdr(sc, card_fw); if (rc != 0) { device_printf(sc->dev, "unable to read firmware header from card's flash: %d\n", rc); goto done; } rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) goto restart; if (rc != 0) goto done; rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); if (rc < 0 || state == DEV_STATE_ERR) { rc = -rc; device_printf(sc->dev, "failed to connect to the firmware: %d, %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); #if 0 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, "not responding properly to HELLO", &already) == ERESTART) goto restart; #endif goto done; } MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ if (rc == sc->pf) { sc->flags |= MASTER_PF; rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) rc = 0; else if (rc != 0) goto done; } else if (state == DEV_STATE_UNINIT) { /* * We didn't get to be the master so we definitely won't be * configuring the chip. It's a bug if someone else hasn't * configured it already. */ device_printf(sc->dev, "couldn't be master(%d), " "device not already initialized either(%d). " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); rc = EPROTO; goto done; } else { /* * Some other PF is the master and has configured the chip. * This is allowed but untested. */ device_printf(sc->dev, "PF%d is master, device state %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); sc->cfcsum = 0; rc = 0; } done: if (rc != 0 && sc->flags & FW_OK) { t4_fw_bye(sc, sc->mbox); sc->flags &= ~FW_OK; } free(card_fw, M_CXGBE); return (rc); } static int copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, uint32_t mtype, uint32_t moff) { struct fw_info *fw_info; const struct firmware *dcfg, *rcfg = NULL; const uint32_t *cfdata; uint32_t cflen, addr; int rc; load_fw_module(sc, &dcfg, NULL); /* Card specific interpretation of "default". 
*/ if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (pci_get_device(sc->dev) == 0x440a) snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF); if (is_fpga(sc)) snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF); } if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (dcfg == NULL) { device_printf(sc->dev, "KLD with default config is not available.\n"); rc = ENOENT; goto done; } cfdata = dcfg->data; cflen = dcfg->datasize & ~3; } else { char s[32]; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); rc = EINVAL; goto done; } snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); rcfg = firmware_get(s); if (rcfg == NULL) { device_printf(sc->dev, "unable to load module \"%s\" for configuration " "profile \"%s\".\n", s, cfg_file); rc = ENOENT; goto done; } cfdata = rcfg->data; cflen = rcfg->datasize & ~3; } if (cflen > FLASH_CFG_MAX_SIZE) { device_printf(sc->dev, "config file too long (%d, max allowed is %d).\n", cflen, FLASH_CFG_MAX_SIZE); rc = EINVAL; goto done; } rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); if (rc != 0) { device_printf(sc->dev, "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", __func__, mtype, moff, cflen, rc); rc = EINVAL; goto done; } write_via_memwin(sc, 2, addr, cfdata, cflen); done: if (rcfg != NULL) firmware_put(rcfg, FIRMWARE_UNLOAD); unload_fw_module(sc, dcfg, NULL); return (rc); } struct caps_allowed { uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; }; #define FW_PARAM_DEV(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) /* * Provide a configuration profile to the firmware and have it initialize the * chip accordingly. This may involve uploading a configuration file to the * card. */ static int apply_cfg_and_initialize(struct adapter *sc, char *cfg_file, const struct caps_allowed *caps_allowed) { int rc; struct fw_caps_config_cmd caps; uint32_t mtype, moff, finicsum, cfcsum, param, val; rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); if (rc != 0) { device_printf(sc->dev, "firmware reset failed: %d.\n", rc); return (rc); } bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) { mtype = 0; moff = 0; caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) { mtype = FW_MEMTYPE_FLASH; moff = t4_flash_cfg_addr(sc); caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); } else { /* * Ask the firmware where it wants us to upload the config file. */ param = FW_PARAM_DEV(CF); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); if (rc != 0) { /* No support for config file? Shouldn't happen.
*/ device_printf(sc->dev, "failed to query config file location: %d.\n", rc); goto done; } mtype = G_FW_PARAMS_PARAM_Y(val); moff = G_FW_PARAMS_PARAM_Z(val) << 16; caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff); if (rc != 0) { device_printf(sc->dev, "failed to upload config file to card: %d.\n", rc); goto done; } } rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to pre-process config file: %d " "(mtype %d, moff 0x%x).\n", rc, mtype, moff); goto done; } finicsum = be32toh(caps.finicsum); cfcsum = be32toh(caps.cfcsum); /* actual */ if (finicsum != cfcsum) { device_printf(sc->dev, "WARNING: config file checksum mismatch: %08x %08x\n", finicsum, cfcsum); } sc->cfcsum = cfcsum; snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); /* * Let the firmware know what features will (not) be used so it can tune * things accordingly. */ #define LIMIT_CAPS(x) do { \ caps.x##caps &= htobe16(caps_allowed->x##caps); \ } while (0) LIMIT_CAPS(nbm); LIMIT_CAPS(link); LIMIT_CAPS(switch); LIMIT_CAPS(nic); LIMIT_CAPS(toe); LIMIT_CAPS(rdma); LIMIT_CAPS(crypto); LIMIT_CAPS(iscsi); LIMIT_CAPS(fcoe); #undef LIMIT_CAPS if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) { /* * TOE and hashfilters are mutually exclusive. It is a config * file or firmware bug if both are reported as available. Try * to cope with the situation in non-debug builds by disabling * TOE. */ MPASS(caps.toecaps == 0); caps.toecaps = 0; caps.rdmacaps = 0; caps.iscsicaps = 0; } caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); if (rc != 0) { device_printf(sc->dev, "failed to process config file: %d.\n", rc); goto done; } t4_tweak_chip_settings(sc); set_params__pre_init(sc); /* get basic stuff going */ rc = -t4_fw_initialize(sc, sc->mbox); if (rc != 0) { device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); goto done; } done: return (rc); } /* * Partition chip resources for use between various PFs, VFs, etc. */ static int partition_resources(struct adapter *sc) { char cfg_file[sizeof(t4_cfg_file)]; struct caps_allowed caps_allowed; int rc; bool fallback; /* Only the master driver gets to configure the chip resources. */ MPASS(sc->flags & MASTER_PF); #define COPY_CAPS(x) do { \ caps_allowed.x##caps = t4_##x##caps_allowed; \ } while (0) bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(nbm); COPY_CAPS(link); COPY_CAPS(switch); COPY_CAPS(nic); COPY_CAPS(toe); COPY_CAPS(rdma); COPY_CAPS(crypto); COPY_CAPS(iscsi); COPY_CAPS(fcoe); fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file); retry: rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed); if (rc != 0 && fallback) { dump_devlog(sc); device_printf(sc->dev, "failed (%d) to configure card with \"%s\" profile, " "will fall back to a basic configuration and retry.\n", rc, cfg_file); snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF); bzero(&caps_allowed, sizeof(caps_allowed)); COPY_CAPS(switch); caps_allowed.niccaps = FW_CAPS_CONFIG_NIC; fallback = false; goto retry; } #undef COPY_CAPS return (rc); } /* * Retrieve parameters that are needed (or nice to have) very early. 
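 *
 * For illustration, the first query below uses FW_PARAM_DEV(PORTVEC),
 * which expands to
 *	V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC)
 * i.e. the mnemonic selects the device-wide parameter namespace and
 * PARAM_X selects the parameter within it.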
*/ static int get_params__pre_init(struct adapter *sc) { int rc; uint32_t param[2], val[2]; t4_get_version_info(sc); snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); param[0] = FW_PARAM_DEV(PORTVEC); param[1] = FW_PARAM_DEV(CCLK); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (pre_init): %d.\n", rc); return (rc); } sc->params.portvec = val[0]; sc->params.nports = bitcount32(val[0]); sc->params.vpd.cclk = val[1]; /* Read device log parameters. */ rc = -t4_init_devlog_params(sc, 1); if (rc == 0) fixup_devlog_params(sc); else { device_printf(sc->dev, "failed to get devlog parameters: %d.\n", rc); rc = 0; /* devlog isn't critical for device operation */ } return (rc); } /* * Any params that need to be set before FW_INITIALIZE. */ static int set_params__pre_init(struct adapter *sc) { int rc = 0; uint32_t param, val; if (chip_id(sc) >= CHELSIO_T6) { param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); /* firmwares < 1.20.1.0 do not have this param. */ if (rc == FW_EINVAL && sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) { rc = 0; } if (rc != 0) { device_printf(sc->dev, "failed to enable high priority filters: %d.\n", rc); } param = FW_PARAM_DEV(PPOD_EDRAM); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); if (rc == 0 && val == 1) { rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); if (rc != 0) { device_printf(sc->dev, "failed to set PPOD_EDRAM: %d.\n", rc); } } } /* Enable opaque VIIDs with firmwares that support it. */ param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); if (rc == 0 && val == 1) sc->params.viid_smt_extn_support = true; else sc->params.viid_smt_extn_support = false; return (rc); } /* * Retrieve various parameters that are of interest to the driver. The device * has been initialized by the firmware at this point.
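 *
 * Several of the queries below are issued one parameter at a time and fall
 * back to a default when the firmware doesn't know the parameter.  The
 * shared pattern, as a sketch (illustrative only, not part of the driver):
 */
#if 0
static uint32_t
query_param_or_default(struct adapter *sc, uint32_t p, uint32_t def)
{
	uint32_t v;

	/* t4_query_params() returns a negative firmware error on failure. */
	if (t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v) == 0)
		return (v);
	return (def);	/* unknown or unsupported parameter */
}
#endif
/*
 * The queries are kept individual because old firmwares fail an entire
 * compound query when any single parameter in it is unknown.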
*/ static int get_params__post_init(struct adapter *sc) { int rc; uint32_t param[7], val[7]; struct fw_caps_config_cmd caps; param[0] = FW_PARAM_PFVF(IQFLINT_START); param[1] = FW_PARAM_PFVF(EQ_START); param[2] = FW_PARAM_PFVF(FILTER_START); param[3] = FW_PARAM_PFVF(FILTER_END); param[4] = FW_PARAM_PFVF(L2T_START); param[5] = FW_PARAM_PFVF(L2T_END); param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (post_init): %d.\n", rc); return (rc); } sc->sge.iq_start = val[0]; sc->sge.eq_start = val[1]; if ((int)val[3] > (int)val[2]) { sc->tids.ftid_base = val[2]; sc->tids.ftid_end = val[3]; sc->tids.nftids = val[3] - val[2] + 1; } sc->vres.l2t.start = val[4]; sc->vres.l2t.size = val[5] - val[4] + 1; /* val[5] is the last hwidx and it must not collide with F_SYNC_WR */ if (sc->vres.l2t.size > 0) MPASS(fls(val[5]) <= S_SYNC_WR); sc->params.core_vdd = val[6]; param[0] = FW_PARAM_PFVF(IQFLINT_END); param[1] = FW_PARAM_PFVF(EQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query parameters (post_init2): %d.\n", rc); return (rc); } MPASS((int)val[0] >= sc->sge.iq_start); sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; MPASS((int)val[1] >= sc->sge.eq_start); sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; if (chip_id(sc) >= CHELSIO_T6) { sc->tids.tid_base = t4_read_reg(sc, A_LE_DB_ACTIVE_TABLE_START_INDEX); param[0] = FW_PARAM_PFVF(HPFILTER_START); param[1] = FW_PARAM_PFVF(HPFILTER_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query hpfilter parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.hpftid_base = val[0]; sc->tids.hpftid_end = val[1]; sc->tids.nhpftids = val[1] - val[0] + 1; /* * These should go off if the layout changes and the * driver needs to catch up. */ MPASS(sc->tids.hpftid_base == 0); MPASS(sc->tids.tid_base == sc->tids.nhpftids); } param[0] = FW_PARAM_PFVF(RAWF_START); param[1] = FW_PARAM_PFVF(RAWF_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query rawf parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->rawf_base = val[0]; sc->nrawf = val[1] - val[0] + 1; } } /* * The parameters that follow may not be available on all firmwares. We * query them individually rather than in a compound query because old * firmwares fail the entire query if an unknown parameter is queried. */ /* * MPS buffer group configuration. */ param[0] = FW_PARAM_DEV(MPSBGMAP); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.mps_bg_map = val[0]; else sc->params.mps_bg_map = UINT32_MAX; /* Not a legal value. */ param[0] = FW_PARAM_DEV(TPCHMAP); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.tp_ch_map = val[0]; else sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */ /* * Determine whether the firmware supports the filter2 work request. */ param[0] = FW_PARAM_DEV(FILTER2_WR); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.filter2_wr_support = val[0] != 0; else sc->params.filter2_wr_support = 0; /* * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL. 
*/ param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); val[0] = 0; rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.ulptx_memwrite_dsgl = val[0] != 0; else sc->params.ulptx_memwrite_dsgl = false; /* FW_RI_FR_NSMR_TPTE_WR support */ param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.fr_nsmr_tpte_wr_support = val[0] != 0; else sc->params.fr_nsmr_tpte_wr_support = false; /* Support for 512 SGL entries per FR MR. */ param[0] = FW_PARAM_DEV(DEV_512SGL_MR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.dev_512sgl_mr = val[0] != 0; else sc->params.dev_512sgl_mr = false; param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) sc->params.max_pkts_per_eth_tx_pkts_wr = val[0]; else sc->params.max_pkts_per_eth_tx_pkts_wr = 15; param[0] = FW_PARAM_DEV(NUM_TM_CLASS); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc == 0) { MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */ sc->params.nsched_cls = val[0]; } else sc->params.nsched_cls = sc->chip_params->nsched_cls; /* get capabilities */ bzero(&caps, sizeof(caps)); caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ); caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); if (rc != 0) { device_printf(sc->dev, "failed to get card capabilities: %d.\n", rc); return (rc); } #define READ_CAPS(x) do { \ sc->x = be16toh(caps.x); \ } while (0) READ_CAPS(nbmcaps); READ_CAPS(linkcaps); READ_CAPS(switchcaps); READ_CAPS(niccaps); READ_CAPS(toecaps); READ_CAPS(rdmacaps); READ_CAPS(cryptocaps); READ_CAPS(iscsicaps); READ_CAPS(fcoecaps); if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { MPASS(chip_id(sc) > CHELSIO_T4); MPASS(sc->toecaps == 0); sc->toecaps = 0; param[0] = FW_PARAM_DEV(NTID); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query HASHFILTER parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); sc->params.hash_filter = 1; } if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { param[0] = FW_PARAM_PFVF(ETHOFLD_START); param[1] = FW_PARAM_PFVF(ETHOFLD_END); param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query NIC parameters: %d.\n", rc); return (rc); } if ((int)val[1] > (int)val[0]) { sc->tids.etid_base = val[0]; sc->tids.etid_end = val[1]; sc->tids.netids = val[1] - val[0] + 1; sc->params.eo_wr_cred = val[2]; sc->params.ethoffload = 1; } } if (sc->toecaps) { /* query offload-related parameters */ param[0] = FW_PARAM_DEV(NTID); param[1] = FW_PARAM_PFVF(SERVER_START); param[2] = FW_PARAM_PFVF(SERVER_END); param[3] = FW_PARAM_PFVF(TDDP_START); param[4] = FW_PARAM_PFVF(TDDP_END); param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TOE parameters: %d.\n", rc); return (rc); } sc->tids.ntids = val[0]; if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids
= min(sc->tids.ntids / 2, MAX_ATIDS); if ((int)val[2] > (int)val[1]) { sc->tids.stid_base = val[1]; sc->tids.nstids = val[2] - val[1] + 1; } sc->vres.ddp.start = val[3]; sc->vres.ddp.size = val[4] - val[3] + 1; sc->params.ofldq_wr_cred = val[5]; sc->params.offload = 1; } else { /* * The firmware attempts memfree TOE configuration for -SO cards * and will report toecaps=0 if it runs out of resources (this * depends on the config file). It may not report 0 for other * capabilities dependent on the TOE in this case. Set them to * 0 here so that the driver doesn't bother tracking resources * that will never be used. */ sc->iscsicaps = 0; sc->rdmacaps = 0; } if (sc->rdmacaps) { param[0] = FW_PARAM_PFVF(STAG_START); param[1] = FW_PARAM_PFVF(STAG_END); param[2] = FW_PARAM_PFVF(RQ_START); param[3] = FW_PARAM_PFVF(RQ_END); param[4] = FW_PARAM_PFVF(PBL_START); param[5] = FW_PARAM_PFVF(PBL_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(1): %d.\n", rc); return (rc); } sc->vres.stag.start = val[0]; sc->vres.stag.size = val[1] - val[0] + 1; sc->vres.rq.start = val[2]; sc->vres.rq.size = val[3] - val[2] + 1; sc->vres.pbl.start = val[4]; sc->vres.pbl.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SQRQ_START); param[1] = FW_PARAM_PFVF(SQRQ_END); param[2] = FW_PARAM_PFVF(CQ_START); param[3] = FW_PARAM_PFVF(CQ_END); param[4] = FW_PARAM_PFVF(OCQ_START); param[5] = FW_PARAM_PFVF(OCQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(2): %d.\n", rc); return (rc); } sc->vres.qp.start = val[0]; sc->vres.qp.size = val[1] - val[0] + 1; sc->vres.cq.start = val[2]; sc->vres.cq.size = val[3] - val[2] + 1; sc->vres.ocq.start = val[4]; sc->vres.ocq.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SRQ_START); param[1] = FW_PARAM_PFVF(SRQ_END); param[2] = FW_PARAM_DEV(MAXORDIRD_QP); param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(3): %d.\n", rc); return (rc); } sc->vres.srq.start = val[0]; sc->vres.srq.size = val[1] - val[0] + 1; sc->params.max_ordird_qp = val[2]; sc->params.max_ird_adapter = val[3]; } if (sc->iscsicaps) { param[0] = FW_PARAM_PFVF(ISCSI_START); param[1] = FW_PARAM_PFVF(ISCSI_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query iSCSI parameters: %d.\n", rc); return (rc); } sc->vres.iscsi.start = val[0]; sc->vres.iscsi.size = val[1] - val[0] + 1; } if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { param[0] = FW_PARAM_PFVF(TLS_START); param[1] = FW_PARAM_PFVF(TLS_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TLS parameters: %d.\n", rc); return (rc); } sc->vres.key.start = val[0]; sc->vres.key.size = val[1] - val[0] + 1; } /* * We've got the params we wanted to query directly from the firmware. * Grab some others via other means. 
*/ t4_init_sge_params(sc); t4_init_tp_params(sc); t4_read_mtu_tbl(sc, sc->params.mtus, NULL); t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); rc = t4_verify_chip_settings(sc); if (rc != 0) return (rc); t4_init_rx_buf_info(sc); return (rc); } #ifdef KERN_TLS static void ktls_tick(void *arg) { struct adapter *sc; uint32_t tstamp; sc = arg; tstamp = tcp_ts_getticks(); t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1); t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31); callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK); } static int t6_config_kern_tls(struct adapter *sc, bool enable) { int rc; uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) | V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) | V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE); rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param); if (rc != 0) { CH_ERR(sc, "failed to %s NIC TLS: %d\n", enable ? "enable" : "disable", rc); return (rc); } if (enable) { sc->flags |= KERN_TLS_ON; callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc, C_HARDCLOCK); } else { sc->flags &= ~KERN_TLS_ON; callout_stop(&sc->ktls_tick); } return (rc); } #endif static int set_params__post_init(struct adapter *sc) { uint32_t mask, param, val; #ifdef TCP_OFFLOAD int i, v, shift; #endif /* ask for encapsulated CPLs */ param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); val = 1; (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); /* Enable 32b port caps if the firmware supports it. */ param = FW_PARAM_PFVF(PORT_CAPS32); val = 1; if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0) sc->params.port_caps32 = 1; /* Let filter + maskhash steer to a part of the VI's RSS region. */ val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1); t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER), V_MASKFILTER(val - 1)); mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER | F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN | F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM; val = 0; if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) { t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE, F_ATTACKFILTERENABLE); val |= F_DROPERRORATTACK; } if (t4_drop_ip_fragments != 0) { t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP, F_FRAGMENTDROP); val |= F_DROPERRORFRAG; } if (t4_drop_pkts_with_l2_errors != 0) val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN; if (t4_drop_pkts_with_l3_errors != 0) { val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN | F_DROPERRORCSUMIP; } if (t4_drop_pkts_with_l4_errors != 0) { val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | F_DROPERRORTCPOPT | F_DROPERRORCSUM; } t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val); #ifdef TCP_OFFLOAD /* * Override the TOE timers with user-provided tunables. This is not the * recommended way to change the timers (the firmware config file is) so * these tunables are not documented. * * All the timer tunables are in microseconds.
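 *
 * For example, a hypothetical /boot/loader.conf override (tunable names
 * assumed to mirror the t4_toe_* variables used below):
 *	hw.cxgbe.toe.keepalive_idle="600000000"		# 10 minutes, in us
 *	hw.cxgbe.toe.rexmt_min="1000000"		# 1 second, in us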
*/ if (t4_toe_keepalive_idle != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); v &= M_KEEPALIVEIDLE; t4_set_reg_field(sc, A_TP_KEEP_IDLE, V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); } if (t4_toe_keepalive_interval != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); v &= M_KEEPALIVEINTVL; t4_set_reg_field(sc, A_TP_KEEP_INTVL, V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); } if (t4_toe_keepalive_count != 0) { v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); } if (t4_toe_rexmt_min != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); v &= M_RXTMIN; t4_set_reg_field(sc, A_TP_RXT_MIN, V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); } if (t4_toe_rexmt_max != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); v &= M_RXTMAX; t4_set_reg_field(sc, A_TP_RXT_MAX, V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); } if (t4_toe_rexmt_count != 0) { v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); } for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { if (t4_toe_rexmt_backoff[i] != -1) { v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; shift = (i & 3) << 3; t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), M_TIMERBACKOFFINDEX0 << shift, v << shift); } } #endif /* * Limit TOE connections to 2 reassembly "islands". This is * required to permit migrating TOE connections to either * ULP_MODE_TCPDDP or ULP_MODE_TLS. */ t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG, V_PASSMODE(M_PASSMODE), V_PASSMODE(2)); #ifdef KERN_TLS if (is_ktls(sc)) { sc->tlst.inline_keys = t4_tls_inline_keys; sc->tlst.combo_wrs = t4_tls_combo_wrs; if (t4_kern_tls != 0 && is_t6(sc)) t6_config_kern_tls(sc, true); } #endif return (0); } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV static void t4_set_desc(struct adapter *sc) { struct adapter_params *p = &sc->params; device_set_descf(sc->dev, "Chelsio %s", p->vpd.id); } static inline void ifmedia_add4(struct ifmedia *ifm, int m) { ifmedia_add(ifm, m, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL); } /* * This is the selected media, which is not quite the same as the active media. * The media line in ifconfig is "media: Ethernet selected (active)" if selected * and active are not the same, and "media: Ethernet selected" otherwise. */ static void set_current_media(struct port_info *pi) { struct link_config *lc; struct ifmedia *ifm; int mword; u_int speed; PORT_LOCK_ASSERT_OWNED(pi); /* Leave current media alone if it's already set to IFM_NONE. */ ifm = &pi->media; if (ifm->ifm_cur != NULL && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) return; lc = &pi->link_cfg; if (lc->requested_aneg != AUTONEG_DISABLE && lc->pcaps & FW_PORT_CAP32_ANEG) { ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); return; } mword = IFM_ETHER | IFM_FDX; if (lc->requested_fc & PAUSE_TX) mword |= IFM_ETH_TXPAUSE; if (lc->requested_fc & PAUSE_RX) mword |= IFM_ETH_RXPAUSE; if (lc->requested_speed == 0) speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ else speed = lc->requested_speed; mword |= port_mword(pi, speed_to_fwcap(speed)); ifmedia_set(ifm, mword); } /* * Returns true if the ifmedia list for the port cannot change.
*/ static bool fixed_ifmedia(struct port_info *pi) { return (pi->port_type == FW_PORT_TYPE_BT_SGMII || pi->port_type == FW_PORT_TYPE_BT_XFI || pi->port_type == FW_PORT_TYPE_BT_XAUI || pi->port_type == FW_PORT_TYPE_KX4 || pi->port_type == FW_PORT_TYPE_KX || pi->port_type == FW_PORT_TYPE_KR || pi->port_type == FW_PORT_TYPE_BP_AP || pi->port_type == FW_PORT_TYPE_BP4_AP || pi->port_type == FW_PORT_TYPE_BP40_BA || pi->port_type == FW_PORT_TYPE_KR4_100G || pi->port_type == FW_PORT_TYPE_KR_SFP28 || pi->port_type == FW_PORT_TYPE_KR_XLAUI); } static void build_medialist(struct port_info *pi) { uint32_t ss, speed; int unknown, mword, bit; struct link_config *lc; struct ifmedia *ifm; PORT_LOCK_ASSERT_OWNED(pi); if (pi->flags & FIXED_IFMEDIA) return; /* * Rebuild the ifmedia list. */ ifm = &pi->media; ifmedia_removeall(ifm); lc = &pi->link_cfg; ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ if (__predict_false(ss == 0)) { /* not supposed to happen. */ MPASS(ss != 0); no_media: MPASS(LIST_EMPTY(&ifm->ifm_list)); ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); ifmedia_set(ifm, IFM_ETHER | IFM_NONE); return; } unknown = 0; for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) { speed = 1 << bit; MPASS(speed & M_FW_PORT_CAP32_SPEED); if (ss & speed) { mword = port_mword(pi, speed); if (mword == IFM_NONE) { goto no_media; } else if (mword == IFM_UNKNOWN) unknown++; else ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword); } } if (unknown > 0) /* Add one unknown for all unknown media types. */ ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN); if (lc->pcaps & FW_PORT_CAP32_ANEG) ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); set_current_media(pi); } /* * Initialize the requested fields in the link config based on driver tunables. */ static void init_link_config(struct port_info *pi) { struct link_config *lc = &pi->link_cfg; PORT_LOCK_ASSERT_OWNED(pi); lc->requested_caps = 0; lc->requested_speed = 0; if (t4_autoneg == 0) lc->requested_aneg = AUTONEG_DISABLE; else if (t4_autoneg == 1) lc->requested_aneg = AUTONEG_ENABLE; else lc->requested_aneg = AUTONEG_AUTO; lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG); if (t4_fec & FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (t4_fec == 0) lc->requested_fec = FEC_NONE; else { /* -1 is handled by the FEC_AUTO block above and not here. */ lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE); if (lc->requested_fec == 0) lc->requested_fec = FEC_AUTO; } if (t4_force_fec < 0) lc->force_fec = -1; else if (t4_force_fec > 0) lc->force_fec = 1; else lc->force_fec = 0; } /* * Makes sure that all requested settings comply with what's supported by the * port. Returns the number of settings that were invalid and had to be fixed. 
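 *
 * For example, a forced speed of 25Gbps on a port whose pcaps advertise
 * only 10Gbps is cleared back to 0 (unforced) and counted as one fixed-up
 * setting.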
*/ static int fixup_link_config(struct port_info *pi) { int n = 0; struct link_config *lc = &pi->link_cfg; uint32_t fwspeed; PORT_LOCK_ASSERT_OWNED(pi); /* Speed (when not autonegotiating) */ if (lc->requested_speed != 0) { fwspeed = speed_to_fwcap(lc->requested_speed); if ((fwspeed & lc->pcaps) == 0) { n++; lc->requested_speed = 0; } } /* Link autonegotiation */ MPASS(lc->requested_aneg == AUTONEG_ENABLE || lc->requested_aneg == AUTONEG_DISABLE || lc->requested_aneg == AUTONEG_AUTO); if (lc->requested_aneg == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { n++; lc->requested_aneg = AUTONEG_AUTO; } /* Flow control */ MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); if (lc->requested_fc & PAUSE_TX && !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { n++; lc->requested_fc &= ~PAUSE_TX; } if (lc->requested_fc & PAUSE_RX && !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { n++; lc->requested_fc &= ~PAUSE_RX; } if (!(lc->requested_fc & PAUSE_AUTONEG) && !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { n++; lc->requested_fc |= PAUSE_AUTONEG; } /* FEC */ if ((lc->requested_fec & FEC_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || (lc->requested_fec & FEC_BASER_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { n++; lc->requested_fec = FEC_AUTO; } return (n); } /* * Apply the requested L1 settings, which are expected to be valid, to the * hardware. */ static int apply_link_config(struct port_info *pi) { struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; #ifdef INVARIANTS ASSERT_SYNCHRONIZED_OP(sc); PORT_LOCK_ASSERT_OWNED(pi); if (lc->requested_aneg == AUTONEG_ENABLE) MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); if (!(lc->requested_fc & PAUSE_AUTONEG)) MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); if (lc->requested_fc & PAUSE_TX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); if (lc->requested_fc & PAUSE_RX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); if (lc->requested_fec & FEC_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); if (lc->requested_fec & FEC_BASER_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); #endif if (!(sc->flags & IS_VF)) { rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); if (rc != 0) { device_printf(pi->dev, "l1cfg failed: %d\n", rc); return (rc); } } /* * An L1_CFG will almost always result in a link-change event if the * link is up, and the driver will refresh the actual fec/fc/etc. when * the notification is processed. If the link is down then the actual * settings are meaningless. * * This takes care of the case where a change in the L1 settings may not * result in a notification. 
*/ if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); return (0); } #define FW_MAC_EXACT_CHUNK 7 struct mcaddr_ctx { if_t ifp; const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; uint64_t hash; int i; int del; int rc; }; static u_int add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct mcaddr_ctx *ctx = arg; struct vi_info *vi = if_getsoftc(ctx->ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if (ctx->rc < 0) return (0); ctx->mcaddr[ctx->i] = LLADDR(sdl); MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i])); ctx->i++; if (ctx->i == FW_MAC_EXACT_CHUNK) { ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del, ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0); if (ctx->rc < 0) { int j; for (j = 0; j < ctx->i; j++) { if_printf(ctx->ifp, "failed to add mc address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", ctx->mcaddr[j][0], ctx->mcaddr[j][1], ctx->mcaddr[j][2], ctx->mcaddr[j][3], ctx->mcaddr[j][4], ctx->mcaddr[j][5], -ctx->rc); } return (0); } ctx->del = 0; ctx->i = 0; } return (1); } /* * Program the port's XGMAC based on parameters in ifnet. The caller also * indicates which parameters should be programmed (the rest are left alone). */ int update_mac_settings(if_t ifp, int flags) { int rc = 0; struct vi_info *vi = if_getsoftc(ifp); struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); KASSERT(flags, ("%s: not told what to update.", __func__)); if (flags & XGMAC_MTU) mtu = if_getmtu(ifp); if (flags & XGMAC_PROMISC) promisc = if_getflags(ifp) & IFF_PROMISC ? 1 : 0; if (flags & XGMAC_ALLMULTI) allmulti = if_getflags(ifp) & IFF_ALLMULTI ? 1 : 0; if (flags & XGMAC_VLANEX) vlanex = if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING ? 1 : 0; if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, allmulti, 1, vlanex, false); if (rc) { if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); return (rc); } } if (flags & XGMAC_UCADDR) { uint8_t ucaddr[ETHER_ADDR_LEN]; bcopy(if_getlladdr(ifp), ucaddr, sizeof(ucaddr)); rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, ucaddr, true, &vi->smt_idx); if (rc < 0) { rc = -rc; if_printf(ifp, "change_mac failed: %d\n", rc); return (rc); } else { vi->xact_addr_filt = rc; rc = 0; } } if (flags & XGMAC_MCADDRS) { struct epoch_tracker et; struct mcaddr_ctx ctx; int j; ctx.ifp = ifp; ctx.hash = 0; ctx.i = 0; ctx.del = 1; ctx.rc = 0; /* * Unlike other drivers, we accumulate list of pointers into * interface address lists and we need to keep it safe even * after if_foreach_llmaddr() returns, thus we must enter the * network epoch. 
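 *
 * add_maddr() flushes addresses to the firmware in FW_MAC_EXACT_CHUNK-sized
 * batches as the list is walked; any remainder (ctx.i > 0) and the
 * accumulated hash are programmed right after the walk, below.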
*/ NET_EPOCH_ENTER(et); if_foreach_llmaddr(ifp, add_maddr, &ctx); if (ctx.rc < 0) { NET_EPOCH_EXIT(et); rc = -ctx.rc; return (rc); } if (ctx.i > 0) { rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0); NET_EPOCH_EXIT(et); if (rc < 0) { rc = -rc; for (j = 0; j < ctx.i; j++) { if_printf(ifp, "failed to add mcast address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", ctx.mcaddr[j][0], ctx.mcaddr[j][1], ctx.mcaddr[j][2], ctx.mcaddr[j][3], ctx.mcaddr[j][4], ctx.mcaddr[j][5], rc); } return (rc); } ctx.del = 0; } else NET_EPOCH_EXIT(et); rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0); if (rc != 0) if_printf(ifp, "failed to set mcast address hash: %d\n", rc); if (ctx.del == 0) { /* We clobbered the VXLAN entry if there was one. */ pi->vxlan_tcam_entry = false; } } if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 && pi->vxlan_tcam_entry == false) { rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac, match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, true); if (rc < 0) { rc = -rc; if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n", rc); } else { MPASS(rc == sc->rawf_base + pi->port_id); rc = 0; pi->vxlan_tcam_entry = true; } } return (rc); } /* * {begin|end}_synchronized_op must be called from the same thread. */ int begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, char *wmesg) { int rc, pri; #ifdef WITNESS /* the caller thinks it's ok to sleep, but is it really? */ if (flags & SLEEP_OK) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "begin_synchronized_op"); #endif if (flags & INTR_OK) pri = PCATCH; else pri = 0; ADAPTER_LOCK(sc); for (;;) { if (vi && IS_DETACHING(vi)) { rc = ENXIO; goto done; } if (!IS_BUSY(sc)) { rc = 0; break; } if (!(flags & SLEEP_OK)) { rc = EBUSY; goto done; } if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { rc = EINTR; goto done; } } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = wmesg; sc->last_op_thr = curthread; sc->last_op_flags = flags; #endif done: if (!(flags & HOLD_LOCK) || rc) ADAPTER_UNLOCK(sc); return (rc); } /* * Tell if_ioctl and if_init that the VI is going away. This is a * special variant of begin_synchronized_op and must be paired with a * call to end_vi_detach. */ void begin_vi_detach(struct adapter *sc, struct vi_info *vi) { ADAPTER_LOCK(sc); SET_DETACHING(vi); wakeup(&sc->flags); while (IS_BUSY(sc)) mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); SET_BUSY(sc); #ifdef INVARIANTS sc->last_op = "t4detach"; sc->last_op_thr = curthread; sc->last_op_flags = 0; #endif ADAPTER_UNLOCK(sc); } void end_vi_detach(struct adapter *sc, struct vi_info *vi) { ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); CLR_DETACHING(vi); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } /* * {begin|end}_synchronized_op must be called from the same thread.
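 *
 * Typical usage, as a sketch (wmesg is any short sleep message):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xxxx");
 *	if (rc != 0)
 *		return (rc);
 *	...operate on the adapter/VI...
 *	end_synchronized_op(sc, 0);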
*/ void end_synchronized_op(struct adapter *sc, int flags) { if (flags & LOCK_HELD) ADAPTER_LOCK_ASSERT_OWNED(sc); else ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } static int cxgbe_init_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if_t ifp = vi->ifp; int rc = 0, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return (0); /* already running */ if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0)) return (rc); /* error message displayed already */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); /* error message displayed already */ rc = update_mac_settings(ifp, XGMAC_ALL); if (rc) goto done; /* error message displayed already */ PORT_LOCK(pi); if (pi->up_vis == 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { if_printf(ifp, "enable_vi failed: %d\n", rc); PORT_UNLOCK(pi); goto done; } /* * Can't fail from this point onwards. Review cxgbe_uninit_synchronized * if this changes. */ for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } /* * The first iq of the first port to come up is used for tracing. */ if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } /* all ok */ pi->up_vis++; if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); PORT_UNLOCK(pi); mtx_lock(&vi->tick_mtx); if (vi->pi->nvi > 1 || sc->flags & IS_VF) callout_reset(&vi->tick, hz, vi_tick, vi); else callout_reset(&vi->tick, hz, cxgbe_tick, vi); mtx_unlock(&vi->tick_mtx); done: if (rc != 0) cxgbe_uninit_synchronized(vi); return (rc); } /* * Idempotent. */ static int cxgbe_uninit_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if_t ifp = vi->ifp; int rc, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (!(vi->flags & VI_INIT_DONE)) { if (__predict_false(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { KASSERT(0, ("uninited VI is running")); if_printf(ifp, "uninited VI with running ifnet. " "vi->flags 0x%016lx, if_flags 0x%08x, " "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp), if_getdrvflags(ifp)); } return (0); } /* * Disable the VI so that all its data in either direction is discarded * by the MPS. Leave everything else (the queues, interrupts, and 1Hz * tick) intact as the TP can deliver negative advice or data that it's * holding in its RAM (for an offloaded connection) even after the VI is * disabled. 
*/ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); if (rc) { if_printf(ifp, "disable_vi failed: %d\n", rc); return (rc); } for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); PORT_LOCK(pi); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { PORT_UNLOCK(pi); return (0); } if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); pi->up_vis--; if (pi->up_vis > 0) { PORT_UNLOCK(pi); return (0); } pi->link_cfg.link_ok = false; pi->link_cfg.speed = 0; pi->link_cfg.link_down_rc = 255; t4_os_link_changed(pi); PORT_UNLOCK(pi); return (0); } /* * It is ok for this function to fail midway and return right away. t4_detach * will walk the entire sc->irq list and clean up whatever is valid. */ int t4_setup_intr_handlers(struct adapter *sc) { int rc, rid, p, q, v; char s[8]; struct irq *irq; struct port_info *pi; struct vi_info *vi; struct sge *sge = &sc->sge; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; #endif #ifdef RSS int nbuckets = rss_getnumbuckets(); #endif /* * Setup interrupts. */ irq = &sc->irq[0]; rid = sc->intr_type == INTR_INTX ? 0 : 1; if (forwarding_intr_to_fwq(sc)) return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); /* Multiple interrupts. */ if (sc->flags & IS_VF) KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); else KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); /* The first one is always error intr on PFs */ if (!(sc->flags & IS_VF)) { rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); if (rc != 0) return (rc); irq++; rid++; } /* The second one is always the firmware event queue (first on VFs) */ rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); if (rc != 0) return (rc); irq++; rid++; for_each_port(sc, p) { pi = sc->port[p]; for_each_vi(pi, v, vi) { vi->first_intr = rid - 1; if (vi->nnmrxq > 0) { int n = max(vi->nrxq, vi->nnmrxq); rxq = &sge->rxq[vi->first_rxq]; #ifdef DEV_NETMAP nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; #endif for (q = 0; q < n; q++) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); if (q < vi->nrxq) irq->rxq = rxq++; #ifdef DEV_NETMAP if (q < vi->nnmrxq) irq->nm_rxq = nm_rxq++; if (irq->nm_rxq != NULL && irq->rxq == NULL) { /* Netmap rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, irq->nm_rxq, s); } if (irq->nm_rxq != NULL && irq->rxq != NULL) { /* NIC and Netmap rx */ rc = t4_alloc_irq(sc, irq, rid, t4_vi_intr, irq, s); } #endif if (irq->rxq != NULL && irq->nm_rxq == NULL) { /* NIC rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_intr, irq->rxq, s); } if (rc != 0) return (rc); #ifdef RSS if (q < vi->nrxq) { bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); } #endif irq++; rid++; vi->nintr++; } } else { for_each_rxq(vi, q, rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, s); if (rc != 0) return (rc); #ifdef RSS bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); #endif irq++; rid++; vi->nintr++; } } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, q, ofld_rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, ofld_rxq, s); if (rc != 0) return (rc); irq++; rid++; vi->nintr++; } #endif } } MPASS(irq == &sc->irq[sc->intr_count]); return (0); } static void write_global_rss_key(struct adapter *sc) { #ifdef RSS int i; uint32_t raw_rss_key[RSS_KEYSIZE / 
sizeof(uint32_t)]; uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; CTASSERT(RSS_KEYSIZE == 40); rss_getkey((void *)&raw_rss_key[0]); for (i = 0; i < nitems(rss_key); i++) { rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); } t4_write_rss_key(sc, &rss_key[0], -1, 1); #endif } /* * Idempotent. */ static int adapter_full_init(struct adapter *sc) { int rc, i; ASSERT_SYNCHRONIZED_OP(sc); /* * queues that belong to the adapter (not any particular port). */ rc = t4_setup_adapter_queues(sc); if (rc != 0) return (rc); MPASS(sc->params.nports <= nitems(sc->tq)); for (i = 0; i < sc->params.nports; i++) { if (sc->tq[i] != NULL) continue; sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq[i]); if (sc->tq[i] == NULL) { CH_ERR(sc, "failed to allocate task queue %d\n", i); return (ENOMEM); } taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", device_get_nameunit(sc->dev), i); } if (!(sc->flags & IS_VF)) { write_global_rss_key(sc); t4_intr_enable(sc); } return (0); } int adapter_init(struct adapter *sc) { int rc; ASSERT_SYNCHRONIZED_OP(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); KASSERT((sc->flags & FULL_INIT_DONE) == 0, ("%s: FULL_INIT_DONE already", __func__)); rc = adapter_full_init(sc); if (rc != 0) adapter_full_uninit(sc); else sc->flags |= FULL_INIT_DONE; return (rc); } /* * Idempotent. */ static void adapter_full_uninit(struct adapter *sc) { int i; t4_teardown_adapter_queues(sc); for (i = 0; i < nitems(sc->tq); i++) { if (sc->tq[i] == NULL) continue; taskqueue_free(sc->tq[i]); sc->tq[i] = NULL; } sc->flags &= ~FULL_INIT_DONE; } #ifdef RSS #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ RSS_HASHTYPE_RSS_UDP_IPV6) /* Translates kernel hash types to hardware. */ static int hashconfig_to_hashen(int hashconfig) { int hashen = 0; if (hashconfig & RSS_HASHTYPE_RSS_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; return (hashen); } /* Translates hardware hash types to kernel. */ static int hashen_to_hashconfig(int hashen) { int hashconfig = 0; if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { /* * If UDP hashing was enabled it must have been enabled for * either IPv4 or IPv6 (inclusive or). Enabling UDP without * enabling any 4-tuple hash is nonsense configuration. */ MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; } if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV4; if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) hashconfig |= RSS_HASHTYPE_RSS_IPV6; return (hashconfig); } #endif /* * Idempotent. 
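 *
 * On the RSS table built below: without kernel RSS, a VI with nrxq = 4 and
 * a 128-entry slice simply repeats the four queues' abs_ids round-robin
 * (q0,q1,q2,q3,q0,...), spreading flow hashes evenly across the rx queues.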
*/ static int vi_full_init(struct vi_info *vi) { struct adapter *sc = vi->adapter; struct sge_rxq *rxq; int rc, i, j; #ifdef RSS int nbuckets = rss_getnumbuckets(); int hashconfig = rss_gethashconfig(); int extra; #endif ASSERT_SYNCHRONIZED_OP(sc); /* * Allocate tx/rx/fl queues for this VI. */ rc = t4_setup_vi_queues(vi); if (rc != 0) return (rc); /* * Setup RSS for this VI. Save a copy of the RSS table for later use. */ if (vi->nrxq > vi->rss_size) { CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); " "some queues will never receive traffic.\n", vi->nrxq, vi->rss_size); } else if (vi->rss_size % vi->nrxq) { CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); " "expect uneven traffic distribution.\n", vi->nrxq, vi->rss_size); } #ifdef RSS if (vi->nrxq != nbuckets) { CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d);" "performance will be impacted.\n", vi->nrxq, nbuckets); } #endif if (vi->rss == NULL) vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE, M_ZERO | M_WAITOK); for (i = 0; i < vi->rss_size;) { #ifdef RSS j = rss_get_indirection_to_bucket(i); j %= vi->nrxq; rxq = &sc->sge.rxq[vi->first_rxq + j]; vi->rss[i++] = rxq->iq.abs_id; #else for_each_rxq(vi, j, rxq) { vi->rss[i++] = rxq->iq.abs_id; if (i == vi->rss_size) break; } #endif } rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->rss, vi->rss_size); if (rc != 0) { CH_ERR(vi, "rss_config failed: %d\n", rc); return (rc); } #ifdef RSS vi->hashen = hashconfig_to_hashen(hashconfig); /* * We may have had to enable some hashes even though the global config * wants them disabled. This is a potential problem that must be * reported to the user. */ extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig; /* * If we consider only the supported hash types, then the enabled hashes * are a superset of the requested hashes. In other words, there cannot * be any supported hash that was requested but not enabled, but there * can be hashes that were not requested but had to be enabled. */ extra &= SUPPORTED_RSS_HASHTYPES; MPASS((extra & hashconfig) == 0); if (extra) { CH_ALERT(vi, "global RSS config (0x%x) cannot be accommodated.\n", hashconfig); } if (extra & RSS_HASHTYPE_RSS_IPV4) CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_IPV6) CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n"); #else vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; #endif rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], 0, 0); if (rc != 0) { CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc); return (rc); } return (0); } int vi_init(struct vi_info *vi) { int rc; ASSERT_SYNCHRONIZED_OP(vi->adapter); KASSERT((vi->flags & VI_INIT_DONE) == 0, ("%s: VI_INIT_DONE already", __func__)); rc = vi_full_init(vi); if (rc != 0) vi_full_uninit(vi); else vi->flags |= VI_INIT_DONE; return (rc); } /* * Idempotent. 
*/ static void vi_full_uninit(struct vi_info *vi) { if (vi->flags & VI_INIT_DONE) { quiesce_vi(vi); free(vi->rss, M_CXGBE); free(vi->nm_rss, M_CXGBE); } t4_teardown_vi_queues(vi); vi->flags &= ~VI_INIT_DONE; } static void quiesce_txq(struct sge_txq *txq) { struct sge_eq *eq = &txq->eq; struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; MPASS(eq->flags & EQ_SW_ALLOCATED); MPASS(!(eq->flags & EQ_ENABLED)); /* Wait for the mp_ring to empty. */ while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("rquiesce", 1); } MPASS(txq->txp.npkt == 0); if (eq->flags & EQ_HW_ALLOCATED) { /* * Hardware is alive and working normally. Wait for it to * finish and then wait for the driver to catch up and reclaim * all descriptors. */ while (spg->cidx != htobe16(eq->pidx)) pause("equiesce", 1); while (eq->cidx != eq->pidx) pause("dquiesce", 1); } else { /* * Hardware is unavailable. Discard all pending tx and reclaim * descriptors directly. */ TXQ_LOCK(txq); while (eq->cidx != eq->pidx) { struct mbuf *m, *nextpkt; struct tx_sdesc *txsd; txsd = &txq->sdesc[eq->cidx]; for (m = txsd->m; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } IDXINCR(eq->cidx, txsd->desc_used, eq->sidx); } spg->pidx = spg->cidx = htobe16(eq->cidx); TXQ_UNLOCK(txq); } } static void quiesce_wrq(struct sge_wrq *wrq) { struct wrqe *wr; TXQ_LOCK(wrq); while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { STAILQ_REMOVE_HEAD(&wrq->wr_list, link); #ifdef INVARIANTS wrq->nwr_pending--; wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE); #endif free(wr, M_CXGBE); } MPASS(wrq->nwr_pending == 0); MPASS(wrq->ndesc_needed == 0); wrq->nwr_pending = 0; wrq->ndesc_needed = 0; TXQ_UNLOCK(wrq); } static void quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl) { /* Synchronize with the interrupt handler */ while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) pause("iqfree", 1); if (fl != NULL) { MPASS(iq->flags & IQ_HAS_FL); mtx_lock(&sc->sfl_lock); FL_LOCK(fl); fl->flags |= FL_DOOMED; FL_UNLOCK(fl); callout_stop(&sc->sfl_callout); mtx_unlock(&sc->sfl_lock); KASSERT((fl->flags & FL_STARVING) == 0, ("%s: still starving", __func__)); /* Release all buffers if hardware is no longer available. */ if (!(iq->flags & IQ_HW_ALLOCATED)) free_fl_buffers(sc, fl); } } /* * Wait for all activity on all the queues of the VI to complete. It is assumed * that no new work is being enqueued by the hardware or the driver. That part * should be arranged before calling this function. 
*/ static void quiesce_vi(struct vi_info *vi) { int i; struct adapter *sc = vi->adapter; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif if (!(vi->flags & VI_INIT_DONE)) return; for_each_txq(vi, i, txq) { quiesce_txq(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { quiesce_wrq(&ofld_txq->wrq); } #endif for_each_rxq(vi, i, rxq) { quiesce_iq_fl(sc, &rxq->iq, &rxq->fl); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl); } #endif } static int t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, driver_intr_t *handler, void *arg, char *name) { int rc; irq->rid = rid; irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, RF_SHAREABLE | RF_ACTIVE); if (irq->res == NULL) { device_printf(sc->dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, arg, &irq->tag); if (rc != 0) { device_printf(sc->dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name, rc); } else if (name) bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); return (rc); } static int t4_free_irq(struct adapter *sc, struct irq *irq) { if (irq->tag) bus_teardown_intr(sc->dev, irq->res, irq->tag); if (irq->res) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); bzero(irq, sizeof(*irq)); return (0); } static void get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) { regs->version = chip_id(sc) | chip_rev(sc) << 10; t4_get_regs(sc, buf, regs->len); } #define A_PL_INDIR_CMD 0x1f8 #define S_PL_AUTOINC 31 #define M_PL_AUTOINC 0x1U #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) #define S_PL_VFID 20 #define M_PL_VFID 0xffU #define V_PL_VFID(x) ((x) << S_PL_VFID) #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) #define S_PL_ADDR 0 #define M_PL_ADDR 0xfffffU #define V_PL_ADDR(x) ((x) << S_PL_ADDR) #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) #define A_PL_INDIR_DATA 0x1fc static uint64_t read_vf_stat(struct adapter *sc, u_int vin, int reg) { u32 stats[2]; if (sc->flags & IS_VF) { stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); } else { mtx_assert(&sc->reg_lock, MA_OWNED); t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg))); stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); } return (((uint64_t)stats[1]) << 32 | stats[0]); } static void t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats) { #define GET_STAT(name) \ read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L) if (!(sc->flags & IS_VF)) mtx_lock(&sc->reg_lock); stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 
stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); if (!(sc->flags & IS_VF)) mtx_unlock(&sc->reg_lock); #undef GET_STAT } static void t4_clr_vi_stats(struct adapter *sc, u_int vin) { int reg; t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) t4_write_reg(sc, A_PL_INDIR_DATA, 0); } static void vi_refresh_stats(struct vi_info *vi) { struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ mtx_assert(&vi->tick_mtx, MA_OWNED); if (vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats); getmicrotime(&vi->last_refreshed); } static void cxgbe_refresh_stats(struct vi_info *vi) { u_int i, v, tnl_cong_drops, chan_map; struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ struct port_info *pi; struct adapter *sc; mtx_assert(&vi->tick_mtx, MA_OWNED); if (vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; pi = vi->pi; sc = vi->adapter; tnl_cong_drops = 0; t4_get_port_stats(sc, pi->port_id, &pi->stats); chan_map = pi->rx_e_chan_map; while (chan_map) { i = ffs(chan_map) - 1; mtx_lock(&sc->reg_lock); t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); mtx_unlock(&sc->reg_lock); tnl_cong_drops += v; chan_map &= ~(1 << i); } pi->tnl_cong_drops = tnl_cong_drops; getmicrotime(&vi->last_refreshed); } static void cxgbe_tick(void *arg) { struct vi_info *vi = arg; MPASS(IS_MAIN_VI(vi)); mtx_assert(&vi->tick_mtx, MA_OWNED); cxgbe_refresh_stats(vi); callout_schedule(&vi->tick, hz); } static void vi_tick(void *arg) { struct vi_info *vi = arg; mtx_assert(&vi->tick_mtx, MA_OWNED); vi_refresh_stats(vi); callout_schedule(&vi->tick, hz); } /* * Should match fw_caps_config_ enums in t4fw_interface.h */ static char *caps_decoder[] = { "\20\001IPMI\002NCSI", /* 0: NBM */ "\20\001PPP\002QFC\003DCBX", /* 1: link */ "\20\001INGRESS\002EGRESS", /* 2: switch */ "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ "\006HASHFILTER\007ETHOFLD", "\20\001TOE", /* 4: TOE */ "\20\001RDDP\002RDMAC", /* 5: RDMA */ "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" "\007T10DIF" "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", "\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */ "\004TLS_HW", "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ "\004PO_INITIATOR\005PO_TARGET", }; void t4_sysctls(struct adapter *sc) { struct sysctl_ctx_list *ctx = &sc->ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *c0; static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; /* * dev.t4nex.X. 
*/ oid = device_get_sysctl_tree(sc->dev); c0 = children = SYSCTL_CHILDREN(oid); sc->sc_do_rxcopy = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, sc->params.nports, "# of ports"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells, (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", "available doorbells"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, sc->params.vpd.cclk, "core clock frequency (in KHz)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", "interrupt holdoff timer values (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", "interrupt holdoff packet counter values"); t4_sge_sysctls(sc, ctx, children); sc->lro_timeout = 100; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, &sc->debug_flags, 0, "flags to enable runtime debugging"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); if (sc->flags & IS_VF) return; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, NULL, chip_rev(sc), "chip hardware revision"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, sc->er_version, 0, "expansion ROM version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, sc->bs_version, 0, "bootstrap firmware version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, NULL, sc->params.scfg_vers, "serial config version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, NULL, sc->params.vpd_vers, "VPD version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, sc->cfcsum, "config file checksum"); #define SYSCTL_CAP(name, n, text) \ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \ (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \ "available " text " capabilities") SYSCTL_CAP(nbmcaps, 0, "NBM"); SYSCTL_CAP(linkcaps, 1, "link"); SYSCTL_CAP(switchcaps, 2, "switch"); SYSCTL_CAP(niccaps, 3, "NIC"); SYSCTL_CAP(toecaps, 4, "TCP offload"); SYSCTL_CAP(rdmacaps, 5, "RDMA"); SYSCTL_CAP(iscsicaps, 6, "iSCSI"); SYSCTL_CAP(cryptocaps, 7, "crypto"); SYSCTL_CAP(fcoecaps, 8, "FCoE"); #undef SYSCTL_CAP 
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, NULL, sc->tids.nftids, "number of filters"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_temperature, "I", "chip temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_reset_sensor, "I", "reset the chip's temperature sensor."); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_loadavg, "A", "microprocessor load averages (debug firmwares only)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd, "I", "core Vdd (in mV)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS, sysctl_cpus, "A", "local CPUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS, sysctl_cpus, "A", "preferred CPUs for interrupts"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW, &sc->swintr, 0, "software triggered interrupts"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I", "1 = reset adapter, 0 = zero reset counter"); /* * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "logs and miscellaneous information"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cctrl, "A", "congestion control"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3, sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4, sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5, sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_la, "A", "CIM logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); if (chip_id(sc) > CHELSIO_T4) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_qcfg, "A", "CIM queue configuration"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cpl_stats, "A", "CPL statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ddp_stats, "A", "non-TCP DDP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tid_stats, "A", "tid stats"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_devlog, "A", "firmware's device log"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_fcoe_stats, "A", "FCoE statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_hw_sched, "A", "hardware scheduler "); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_l2t, "A", "hardware L2 table"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_smt, "A", "hardware source MAC table"); #ifdef INET6 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_clip, "A", "active CLIP table entries"); #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_lb_stats, "A", "loopback statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_meminfo, "A", "memory regions"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, "A", "MPS TCAM entries"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_path_mtus, "A", "path MTUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_pm_stats, "A", "PM statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_rdma_stats, "A", "RDMA statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tcp_stats, "A", "TCP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tids, "A", "TID information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_err_stats, "A", "TP error statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tnl_stats, "A", "TP tunnel statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la, "A", "TP logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tx_rate, "A", "Tx rate"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ulprx_la, "A", "ULPRX logic analyzer"); if (chip_id(sc) >= CHELSIO_T5) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_wcwr_stats, "A", "write combined work requests"); } #ifdef KERN_TLS if (is_ktls(sc)) { /* * dev.t4nex.0.tls. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys", CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS " "keys in work requests (1) or attempt to store TLS keys " "in card memory."); if (is_t6(sc)) SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs", CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to " "combine TCB field updates with TLS record work " "requests."); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { int i; char s[4]; /* * dev.t4nex.X.toe. 
*/ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters"); children = SYSCTL_CHILDREN(oid); sc->tt.cong_algorithm = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " "3 = highspeed)"); sc->tt.sndbuf = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, &sc->tt.sndbuf, 0, "hardware send buffer"); sc->tt.ddp = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, ""); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW, &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)"); sc->tt.rx_coalesce = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); sc->tt.tls = 0; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I", "Inline TLS allowed"); sc->tt.tx_align = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); sc->tt.tx_zcopy = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", CTLFLAG_RW, &sc->tt.tx_zcopy, 0, "Enable zero-copy aio_write(2)"); sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cop_managed_offloading", CTLFLAG_RW, &sc->tt.cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); sc->tt.autorcvbuf_inc = 16 * 1024; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc", CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0, "autorcvbuf increment"); sc->tt.update_hc_on_pmtu_change = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "update_hc_on_pmtu_change", CTLFLAG_RW, &sc->tt.update_hc_on_pmtu_change, 0, "Update hostcache entry if the PMTU changes"); sc->tt.iso = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW, &sc->tt.iso, 0, "Enable iSCSI segmentation offload"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_tick, "A", "TP timer tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_tp_tick, "A", "TCP timestamp tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_tp_tick, "A", "DACK tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_dack_timer, "IU", "DACK timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MIN, sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MAX, sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MIN, sysctl_tp_timer, "LU", "Persist timer min (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MAX, sysctl_tp_timer, "LU", "Persist timer max (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_KEEP_IDLE, sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 
CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_KEEP_INTVL, sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU", "Number of SYN retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU", "Number of retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU", "Number of keepalive probes before abort"); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE retransmit backoffs"); children = SYSCTL_CHILDREN(oid); for (i = 0; i < 16; i++) { snprintf(s, sizeof(s), "%u", i); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i, sysctl_tp_backoff, "IU", "TOE retransmit backoff"); } } #endif } void vi_sysctls(struct vi_info *vi) { struct sysctl_ctx_list *ctx = &vi->ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; /* * dev.v?(cxgbe|cxl).X. */ oid = device_get_sysctl_tree(vi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, vi->viid, "VI identifer"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, &vi->nrxq, 0, "# of rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, &vi->ntxq, 0, "# of tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, &vi->first_rxq, 0, "index of first rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, &vi->first_txq, 0, "index of first tx queue"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL, vi->rss_base, "start of RSS indirection table"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, vi->rss_size, "size of RSS indirection table"); if (IS_MAIN_VI(vi)) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_noflowq, "IU", "Reserve queue 0 for non-flowid packets"); } if (vi->adapter->flags & IS_VF) { MPASS(vi->flags & TX_USES_VM_WR); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD, NULL, 1, "use VM work requests for transmit"); } else { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_tx_vm_wr, "I", "use VM work requestes for transmit"); } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, &vi->nofldrxq, 0, "# of rx queues for offloaded TCP connections"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", CTLFLAG_RD, &vi->first_ofld_rxq, 0, "index of first TOE rx queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_tmr_idx_ofld, "I", "holdoff timer index for TOE queues"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld", CTLTYPE_INT | CTLFLAG_RW | 
CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx_ofld, "I", "holdoff packet counter index for TOE queues"); } #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (vi->nofldtxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, &vi->nofldtxq, 0, "# of tx queues for TOE/ETHOFLD"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", CTLFLAG_RD, &vi->first_ofld_txq, 0, "index of first TOE/ETHOFLD tx queue"); } #endif #ifdef DEV_NETMAP if (vi->nnmrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, &vi->nnmrxq, 0, "# of netmap rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, &vi->nnmtxq, 0, "# of netmap tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", CTLFLAG_RD, &vi->first_nm_rxq, 0, "index of first netmap rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", CTLFLAG_RD, &vi->first_nm_txq, 0, "index of first netmap tx queue"); } #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_tmr_idx, "I", "holdoff timer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_rxq, "I", "rx queue size"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_txq, "I", "tx queue size"); } static void cxgbe_sysctls(struct port_info *pi) { struct sysctl_ctx_list *ctx = &pi->ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *children2; struct adapter *sc = pi->adapter; int i; char name[16]; static char *tc_flags = {"\20\1USER"}; /* * dev.cxgbe.X. 
*/ oid = device_get_sysctl_tree(pi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_btphy, "I", "PHY temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1, sysctl_btphy, "I", "PHY firmware version"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_pause_settings, "A", "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec", CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A", "FEC in use on the link"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_requested_fec, "A", "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec", CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A", "FEC recommended by the cable/transceiver"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_autoneg, "I", "autonegotiation (-1 = not supported)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD, &pi->link_cfg.requested_caps, 0, "L1 config requested by driver"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD, &pi->link_cfg.pcaps, 0, "port capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD, &pi->link_cfg.acaps, 0, "advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD, &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, port_top_speed(pi), "max speed (in Gbps)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, pi->mps_bg_map, "MPS buffer group map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, NULL, pi->rx_e_chan_map, "TP rx e-channel map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_chan", CTLFLAG_RD, NULL, pi->tx_chan, "TP tx c-channel"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_chan", CTLFLAG_RD, NULL, pi->rx_chan, "TP rx c-channel"); if (sc->flags & IS_VF) return; /* * dev.(cxgbe|cxl).X.tc. 
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx scheduler traffic classes (cl_rl)"); children2 = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize", CTLFLAG_RW, &pi->sched_params->pktsize, 0, "pktsize for per-flow cl-rl (0 means up to the driver )"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize", CTLFLAG_RW, &pi->sched_params->burstsize, 0, "burstsize for per-flow cl-rl (0 means up to the driver)"); for (i = 0; i < sc->params.nsched_cls; i++) { struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; snprintf(name, sizeof(name), "%d", i); children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class")); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state", CTLFLAG_RD, &tc->state, 0, "current state"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags, (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", CTLFLAG_RD, &tc->refcount, 0, "references to this class"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, (pi->port_id << 16) | i, sysctl_tc_params, "A", "traffic class parameters"); } /* * dev.cxgbe.X.stats. */ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, &pi->tx_parse_error, 0, "# of tx packets with invalid length or # of segments"); #define T4_REGSTAT(name, stat, desc) \ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \ sysctl_handle_t4_reg64, "QU", desc) /* We get these from port_stats and they may be stale by up to 1s */ #define T4_PORTSTAT(name, desc) \ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ &pi->stats.name, desc) T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames"); T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range"); T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames"); T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted"); T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted"); T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted"); T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted"); T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted"); T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted"); T4_REGSTAT(tx_ppp5, 
TX_PORT_PPP5, "# of PPP prio 5 frames transmitted"); T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted"); T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted"); T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU"); T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames"); if (is_t6(sc)) { T4_PORTSTAT(rx_fcs_err, "# of frames received with bad FCS since last link up"); } else { T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR, "# of frames received with bad FCS"); } T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error"); T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors"); T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received"); T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range"); T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received"); T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received"); T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received"); T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received"); T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received"); T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received"); T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received"); T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received"); T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received"); T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); #undef T4_REGSTAT #undef T4_PORTSTAT } static int sysctl_int_array(SYSCTL_HANDLER_ARGS) { int rc, *i, space = 0; struct sbuf sb; sbuf_new_for_sysctl(&sb, NULL, 64, req); for (i = arg1; arg2; arg2 -= sizeof(int), i++) { if (space) sbuf_printf(&sb, " "); sbuf_printf(&sb, "%d", *i); space = 1; } rc = sbuf_finish(&sb); sbuf_delete(&sb); return (rc); } static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, 
NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_btphy(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; int op = arg2; struct adapter *sc = pi->adapter; u_int v; int rc; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { /* XXX: magic numbers */ rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, &v); } end_synchronized_op(sc, 0); if (rc) return (rc); if (op == 0) v /= 256; rc = sysctl_handle_int(oidp, &v, 0, req); return (rc); } static int sysctl_noflowq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; int rc, val; val = vi->rsrv_noflowq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if ((val >= 1) && (vi->ntxq > 1)) vi->rsrv_noflowq = 1; else vi->rsrv_noflowq = 0; return (rc); } static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int rc, val, i; MPASS(!(sc->flags & IS_VF)); val = vi->flags & TX_USES_VM_WR ? 1 : 0; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val != 0 && val != 1) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txvm"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) { /* * We don't want parse_pkt to run with one setting (VF or PF) * and then eth_tx to see a different setting but still use * stale information calculated by parse_pkt. */ rc = EBUSY; } else { struct port_info *pi = vi->pi; struct sge_txq *txq; uint32_t ctrl0; uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; if (val) { vi->flags |= TX_USES_VM_WR; if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO); ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan)); if (!(sc->flags & IS_VF)) npkt--; } else { vi->flags &= ~TX_USES_VM_WR; if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO); ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); } for_each_txq(vi, i, txq) { txq->cpl_ctrl0 = ctrl0; txq->txp.max_npkt = npkt; } } end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_rxq *rxq; uint8_t v; idx = vi->tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4tmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); for_each_rxq(vi, i, rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&rxq->iq.intr_params, v); #else rxq->iq.intr_params = v; #endif } vi->tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4pktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed 
once the queues are created */ else vi->pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_rxq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || (qsize & 7)) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4rxqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_rxq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_txq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || qsize > 65536) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_txq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RX\2TX\3AUTO"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | (lc->requested_fc & PAUSE_AUTONEG), bits); } else { sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG), bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[2]; int n; s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)); s[1] = 0; rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); if (s[1] != 0) return (EINVAL); if (s[0] < '0' || s[0] > '9') return (EINVAL); /* not a number */ n = s[0] - '0'; if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) return (EINVAL); /* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4PAUSE"); if (rc) return (rc); if (!hw_off_limits(sc)) { PORT_LOCK(pi); lc->requested_fc = n; fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); PORT_UNLOCK(pi); } end_synchronized_op(sc, 0); } return (rc); } static int sysctl_link_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; int rc; struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) sbuf_printf(sb, "%b", lc->fec, bits); else sbuf_printf(sb, "no link"); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t old; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2" "\5RSVD3\6auto\7module"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", lc->requested_fec, bits); rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[8]; int n; snprintf(s, sizeof(s), "%d", lc->requested_fec == FEC_AUTO ? 
-1 : lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); n = strtol(&s[0], NULL, 0); if (n < 0 || n & FEC_AUTO) n = FEC_AUTO; else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE)) return (EINVAL);/* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4reqf"); if (rc) return (rc); PORT_LOCK(pi); old = lc->requested_fec; if (n == FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (n == 0 || n == FEC_NONE) lc->requested_fec = FEC_NONE; else { if ((lc->pcaps | V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) != lc->pcaps) { rc = ENOTSUP; goto done; } lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | FEC_MODULE); } if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) { rc = apply_link_config(pi); if (rc != 0) { lc->requested_fec = old; if (rc == FW_EPROTO) rc = ENOTSUP; } } } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } return (rc); } static int sysctl_module_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t fec; struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3"; sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) { rc = EBUSY; goto done; } if (hw_off_limits(sc)) { rc = ENXIO; goto done; } PORT_LOCK(pi); if (pi->up_vis == 0) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); } fec = lc->fec_hint; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || !fec_supported(lc->pcaps)) { PORT_UNLOCK(pi); sbuf_printf(sb, "n/a"); } else { if (fec == 0) fec = FEC_NONE; PORT_UNLOCK(pi); sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits); } rc = sbuf_finish(sb); done: sbuf_delete(sb); end_synchronized_op(sc, 0); return (rc); } static int sysctl_autoneg(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc, val; if (lc->pcaps & FW_PORT_CAP32_ANEG) val = lc->requested_aneg == AUTONEG_DISABLE ? 
0 : 1; else val = -1; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) val = AUTONEG_DISABLE; else if (val == 1) val = AUTONEG_ENABLE; else val = AUTONEG_AUTO; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4aneg"); if (rc) return (rc); PORT_LOCK(pi); if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; goto done; } lc->requested_aneg = val; if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } static int sysctl_force_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc, val; val = lc->force_fec; MPASS(val >= -1 && val <= 1); rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC)) return (ENOTSUP); if (val < -1 || val > 1) return (EINVAL); rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff"); if (rc) return (rc); PORT_LOCK(pi); lc->force_fec = val; if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); } PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, reg = arg2; uint64_t val; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; val = t4_read_reg64(sc, reg); } mtx_unlock(&sc->reg_lock); if (rc == 0) rc = sysctl_handle_64(oidp, &val, 0, req); return (rc); } static int sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, t; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); /* unknown is returned as 0 but we display -1 in that case */ t = val == 0 ? 
-1 : val; rc = sysctl_handle_int(oidp, &t, 0, req); return (rc); } static int sysctl_vdd(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; uint32_t param, val; if (sc->params.core_vdd == 0) { rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vdd"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); sc->params.core_vdd = val; } return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req)); } static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, v; uint32_t param, val; v = sc->sensor_resets; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL || v <= 0) return (rc); if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) || chip_id(sc) < CHELSIO_T5) return (ENOTSUP); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR)); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); } end_synchronized_op(sc, 0); if (rc == 0) sc->sensor_resets++; return (rc); } static int sysctl_loadavg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); if (val == 0xffffffff) { /* Only debug and custom firmwares report load averages. 
*/ sbuf_printf(sb, "not available"); } else { sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cctrl(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t incr[NMTUS][NCCTRL_WIN]; static const char *dec_fac[] = { "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", "0.9375" }; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_cong_tbl(sc, incr); mtx_unlock(&sc->reg_lock); if (rc) goto done; for (i = 0; i < NCCTRL_WIN; ++i) { sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], incr[5][i], incr[6][i], incr[7][i]); sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", incr[8][i], incr[9][i], incr[10][i], incr[11][i], incr[12][i], incr[13][i], incr[14][i], incr[15][i], sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ }; static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n, qid = arg2; uint32_t *buf, *p; char *qtype; u_int cim_num_obq = sc->chip_params->cim_num_obq; KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, ("%s: bad qid %d\n", __func__, qid)); if (qid < CIM_NUM_IBQ) { /* inbound queue */ qtype = "IBQ"; n = 4 * CIM_IBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_ibq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } else { /* outbound queue */ qtype = "OBQ"; qid -= CIM_NUM_IBQ; n = 4 * cim_num_obq * CIM_OBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_obq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } if (rc < 0) { rc = -rc; goto done; } n = rc * sizeof(uint32_t); /* rc has # of words actually read */ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) { rc = ENOMEM; goto done; } sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); for (i = 0, p = buf; i < n; i += 16, p += 4) sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], p[2], p[3]); rc = sbuf_finish(sb); sbuf_delete(sb); done: free(buf, M_CXGBE); return (rc); } static void sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? 
"" : " LS0Stat LS0Addr LS0Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, p[6], p[7]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, p[4] & 0xff, p[5] >> 8); sbuf_printf(sb, "\n %02x %x%07x %x%07x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); } else { sbuf_printf(sb, "\n %02x %x%07x %x%07x %08x %08x " "%08x%08x%08x%08x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], p[6], p[7]); } } } static void sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Inst Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? "" : " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x %08x", p[3] & 0xff, p[2], p[1], p[0]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16); } else { sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " "%08x %08x %08x %08x %08x %08x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16, p[2], p[1], p[0], p[5], p[4], p[3]); } } } static int sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags) { uint32_t cfg, *buf; int rc; MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (rc == 0) rc = -t4_cim_read_la(sc, buf, NULL); } mtx_unlock(&sc->reg_lock); if (rc == 0) { if (chip_id(sc) < CHELSIO_T6) sbuf_cim_la4(sc, sb, buf, cfg); else sbuf_cim_la6(sc, sb, buf, cfg); } free(buf, M_CXGBE); return (rc); } static int sysctl_cim_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_cim_la(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static void dump_cim_regs(struct adapter *sc) { log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n", device_get_nameunit(sc->dev), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2), t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN), t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA)); log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n", device_get_nameunit(sc->dev), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800), t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800), t4_read_reg(sc, A_EDC_H_BIST_CMD_LEN)); } static void dump_cimla(struct adapter *sc) { struct sbuf sb; int rc; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) { log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n", device_get_nameunit(sc->dev)); return; } rc = sbuf_cim_la(sc, &sb, M_WAITOK); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n", device_get_nameunit(sc->dev), sbuf_data(&sb)); } 
} sbuf_delete(&sb); } void t4_os_cim_err(struct adapter *sc) { atomic_set_int(&sc->error_flags, ADAP_CIM_ERR); } static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, (p[1] >> 2) | ((p[2] & 3) << 30), (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, p[0] & 1); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; sbuf_printf(sb, "Cntl ID DataBE Addr Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCntl ID Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t thres[CIM_NUM_IBQ]; uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; cim_num_obq = sc->chip_params->cim_num_obq; if (is_t4(sc)) { ibq_rdaddr = A_UP_IBQ_0_RDADDR; obq_rdaddr = A_UP_OBQ_0_REALADDR; } else { ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; } nq = CIM_NUM_IBQ + cim_num_obq; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); if (rc == 0) { rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); if (rc == 0) t4_read_cimq_cfg(sc, base, size, thres); } } mtx_unlock(&sc->reg_lock); if (rc) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); for ( ; i < nq; i++, p += 4, wr += 2) sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 
base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_cpl_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_cpl_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc) goto done; if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", stats.req[0], stats.req[1], stats.req[2], stats.req[3]); sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\nCPL requests: %10u %10u", stats.req[0], stats.req[1]); sbuf_printf(sb, "\nCPL responses: %10u %10u", stats.rsp[0], stats.rsp[1]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_usm_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_usm_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Frames: %u\n", stats.frames); sbuf_printf(sb, "Octets: %ju\n", stats.octets); sbuf_printf(sb, "Drops: %u", stats.drops); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tid_stats stats; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tid_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Delete: %u\n", stats.del); sbuf_printf(sb, "Invalidate: %u\n", stats.inv); sbuf_printf(sb, "Active: %u\n", stats.act); sbuf_printf(sb, "Passive: %u", stats.pas); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static const char * const devlog_level_strings[] = { [FW_DEVLOG_LEVEL_EMERG] = "EMERG", [FW_DEVLOG_LEVEL_CRIT] = "CRIT", [FW_DEVLOG_LEVEL_ERR] = "ERR", [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", [FW_DEVLOG_LEVEL_INFO] = "INFO", [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" }; static const char * const devlog_facility_strings[] = { [FW_DEVLOG_FACILITY_CORE] = "CORE", [FW_DEVLOG_FACILITY_CF] = "CF", [FW_DEVLOG_FACILITY_SCHED] = "SCHED", [FW_DEVLOG_FACILITY_TIMER] = "TIMER", [FW_DEVLOG_FACILITY_RES] = "RES", [FW_DEVLOG_FACILITY_HW] = "HW", [FW_DEVLOG_FACILITY_FLR] = "FLR", [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", [FW_DEVLOG_FACILITY_PHY] = "PHY", [FW_DEVLOG_FACILITY_MAC] = "MAC", [FW_DEVLOG_FACILITY_PORT] = "PORT", [FW_DEVLOG_FACILITY_VI] = "VI", [FW_DEVLOG_FACILITY_FILTER] = "FILTER", [FW_DEVLOG_FACILITY_ACL] = "ACL", [FW_DEVLOG_FACILITY_TM] = "TM", [FW_DEVLOG_FACILITY_QFC] = "QFC", [FW_DEVLOG_FACILITY_DCB] = "DCB", [FW_DEVLOG_FACILITY_ETH] = "ETH", [FW_DEVLOG_FACILITY_OFLD] = "OFLD", [FW_DEVLOG_FACILITY_RI] = "RI", [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", [FW_DEVLOG_FACILITY_FCOE] = "FCOE", [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", [FW_DEVLOG_FACILITY_CHNET] = "CHNET", }; static int sbuf_devlog(struct adapter *sc, struct sbuf *sb, int 
flags) { int i, j, rc, nentries, first = 0; struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e *buf, *e; uint64_t ftstamp = UINT64_MAX; if (dparams->addr == 0) return (ENXIO); MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; nentries = dparams->size / sizeof(struct fw_devlog_e); for (i = 0; i < nentries; i++) { e = &buf[i]; if (e->timestamp == 0) break; /* end */ e->timestamp = be64toh(e->timestamp); e->seqno = be32toh(e->seqno); for (j = 0; j < 8; j++) e->params[j] = be32toh(e->params[j]); if (e->timestamp < ftstamp) { ftstamp = e->timestamp; first = i; } } if (buf[first].timestamp == 0) goto done; /* nothing in the log */ sbuf_printf(sb, "%10s %15s %8s %8s %s\n", "Seq#", "Tstamp", "Level", "Facility", "Message"); i = first; do { e = &buf[i]; if (e->timestamp == 0) break; /* end */ sbuf_printf(sb, "%10d %15ju %8s %8s ", e->seqno, e->timestamp, (e->level < nitems(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < nitems(devlog_facility_strings) ? devlog_facility_strings[e->facility] : "UNKNOWN")); sbuf_printf(sb, e->fmt, e->params[0], e->params[1], e->params[2], e->params[3], e->params[4], e->params[5], e->params[6], e->params[7]); if (++i == nentries) i = 0; } while (i != first); done: free(buf, M_CXGBE); return (rc); } static int sysctl_devlog(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_devlog(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static void dump_devlog(struct adapter *sc) { int rc; struct sbuf sb; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) { log(LOG_DEBUG, "%s: failed to generate devlog dump.\n", device_get_nameunit(sc->dev)); return; } rc = sbuf_devlog(sc, &sb, M_WAITOK); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: device log follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); } static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_fcoe_stats stats[MAX_NCHAN]; int i, nchan = sc->chip_params->nchan; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { for (i = 0; i < nchan; i++) t4_get_fcoe_stats(sc, i, &stats[i], 1); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp, stats[2].octets_ddp, stats[3].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp, stats[2].frames_ddp, stats[3].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", stats[0].frames_drop, stats[1].frames_drop, stats[2].frames_drop, stats[3].frames_drop); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u", stats[0].frames_drop, 
stats[1].frames_drop); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; unsigned int map, kbps, ipg, mode; unsigned int pace_tab[NTX_SCHED]; sb = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { mtx_unlock(&sc->reg_lock); rc = ENXIO; goto done; } map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); t4_read_pace_tbl(sc, pace_tab); mtx_unlock(&sc->reg_lock); sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " "Class IPG (0.1 ns) Flow IPG (us)"); for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { t4_get_tx_sched(sc, i, &kbps, &ipg, 1); sbuf_printf(sb, "\n %u %-5s %u ", i, (mode & (1 << i)) ? "flow" : "class", map & 3); if (kbps) sbuf_printf(sb, "%9u ", kbps); else sbuf_printf(sb, " disabled "); if (ipg) sbuf_printf(sb, "%13u ", ipg); else sbuf_printf(sb, " disabled "); if (pace_tab[i]) sbuf_printf(sb, "%10u", pace_tab[i]); else sbuf_printf(sb, " disabled"); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, j; uint64_t *p0, *p1; struct lb_port_stats s[2]; static const char *stat_name[] = { "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", "Frames128To255:", "Frames256To511:", "Frames512To1023:", "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", "BG2FramesTrunc:", "BG3FramesTrunc:" }; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); memset(s, 0, sizeof(s)); rc = 0; for (i = 0; i < sc->chip_params->nchan; i += 2) { mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_get_lb_stats(sc, i, &s[0]); t4_get_lb_stats(sc, i + 1, &s[1]); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; p0 = &s[0].octets; p1 = &s[1].octets; sbuf_printf(sb, "%s Loopback %u" " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); for (j = 0; j < nitems(stat_name); j++) sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], *p0++, *p1++); } if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) { int rc = 0; struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; struct sbuf *sb; sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok || lc->link_down_rc == 255) sbuf_printf(sb, "n/a"); else sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } struct mem_desc { u_int base; u_int limit; u_int idx; }; static int mem_desc_cmp(const void *a, const void *b) { const u_int v1 = ((const struct mem_desc *)a)->base; const u_int v2 = ((const struct mem_desc *)b)->base; if (v1 < v2) return (-1); else if (v1 > v2) return (1); return (0); } static void mem_region_show(struct sbuf *sb, const char *name, unsigned int from, unsigned int to) { unsigned int size; if (from == to) return; size = to - from + 1; if (size == 0) return; /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); } static int sysctl_meminfo(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n; uint32_t lo, hi, used, free, alloc; static const char *memory[] = { "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:" }; static const char *region[] = { "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", "TDDP region:", "TPT region:", "STAG region:", "RQ region:", "RQUDP region:", "PBL region:", "TXPBL region:", "TLSKey region:", "DBVFIFO region:", "ULPRX state:", "ULPTX state:", "On-chip queues:", }; struct mem_desc avail[4]; struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ struct mem_desc *md = mem; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); for (i = 0; i < nitems(mem); i++) { mem[i].limit = 0; mem[i].idx = i; } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* Find and sort the populated memory ranges */ i = 0; lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); if (lo & F_EDRAM0_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); avail[i].base = G_EDRAM0_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); avail[i].idx = 0; i++; } if (lo & F_EDRAM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); avail[i].base = G_EDRAM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); avail[i].idx = 1; i++; } if (lo & F_EXT_MEM_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); avail[i].base = G_EXT_MEM_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20); avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ i++; } if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 4; i++; } if (is_t6(sc) && lo & F_HMA_MUX) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 5; i++; } MPASS(i <= nitems(avail)); if (!i) /* no memory available */ goto done; qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); /* the next few have explicit upper bounds */ md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); md++; md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); md++; if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { if (chip_id(sc) <= CHELSIO_T5) md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); else md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); md->limit = 0; } else { md->base = 0; md->idx = nitems(region); /* hide it */ } md++; #define ulp_region(reg) \ md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) ulp_region(RX_ISCSI); ulp_region(RX_TDDP); ulp_region(TX_TPT); ulp_region(RX_STAG); ulp_region(RX_RQ); ulp_region(RX_RQUDP); ulp_region(RX_PBL); ulp_region(TX_PBL); if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { ulp_region(RX_TLS_KEY); } #undef ulp_region md->base = 0; if (is_t4(sc)) md->idx = nitems(region); else { uint32_t size = 0; uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); if (is_t5(sc)) { if (sge_ctrl & F_VFIFO_ENABLE) size = fifo_size << 2; } else size = G_T6_DBVFIFO_SIZE(fifo_size) << 6; if (size) { md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR); md->limit = md->base + size - 1; } else md->idx = nitems(region); } md++; md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); md->limit = 0; md++; md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); md->limit = 0; md++; md->base = sc->vres.ocq.start; if (sc->vres.ocq.size) md->limit = md->base + sc->vres.ocq.size - 1; else md->idx = nitems(region); /* hide it */ md++; /* add any address-space holes, there can be up to 3 */ for (n = 0; n < i - 1; n++) if (avail[n].limit < avail[n + 1].base) (md++)->base = avail[n].limit; if (avail[n].limit) (md++)->base = avail[n].limit; n = md - mem; MPASS(n <= nitems(mem)); qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); for (lo = 0; lo < i; lo++) mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, avail[lo].limit - 1); sbuf_printf(sb, "\n"); for (i = 0; i < n; i++) { if (mem[i].idx >= nitems(region)) continue; /* skip holes */ if (!mem[i].limit) mem[i].limit = i < n - 1 ? 
mem[i + 1].base - 1 : ~0; mem_region_show(sb, region[mem[i].idx], mem[i].base, mem[i].limit); } sbuf_printf(sb, "\n"); lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP RAM:", lo, hi); lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP Extmem2:", lo, hi); lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); for (i = 0, free = 0; i < 2; i++) free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT)); sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", G_PMRXMAXPAGE(lo), free, t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, (lo & F_PMRXNUMCHN) ? 2 : 1); lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); for (i = 0, free = 0; i < 4; i++) free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT)); sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", G_PMTXMAXPAGE(lo), free, hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); sbuf_printf(sb, "%u p-structs (%u free)\n", t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT), G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT))); for (i = 0; i < 4; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", i, used, alloc); } for (i = 0; i < sc->chip_params->nchan; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nLoopback %d using %u pages out of %u allocated", i, used, alloc); } done: mtx_unlock(&sc->reg_lock); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static inline void tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) { *mask = x | y; y = htobe64(y); memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); } static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) <= CHELSIO_T5); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask Vld Ports PF" " VF Replication P0 P1 P2 P3 ML"); rc = 0; for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint64_t tcamx, tcamy, mask; uint32_t cls_lo, cls_hi; uint8_t addr[ETHER_ADDR_LEN]; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', G_PORTMAP(cls_hi), G_PF(cls_lo), (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); if (cls_lo & F_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%36s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) > CHELSIO_T5); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" " Replication" " P0 P1 P2 P3 ML\n"); rc = 0; for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint8_t dip_hit, vlan_vld, lookup_type, port_num; uint16_t ivlan; uint64_t tcamx, tcamy, val, mask; uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; uint8_t addr[ETHER_ADDR_LEN]; ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); if (i < 256) ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); else ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamy = G_DMACH(val) << 32; tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; lookup_type = G_DATALKPTYPE(data2); port_num = G_DATAPORTNUM(data2); if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI */ vniy = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); dip_hit = data2 & F_DATADIPHIT; vlan_vld = 0; } else { vniy = 0; dip_hit = 0; vlan_vld = data2 & F_DATAVIDH2; ivlan = G_VIDL(val); } ctl |= V_CTLXYBITSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamx = G_DMACH(val) << 32; tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI mask */ vnix = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); } else vnix = 0; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (lookup_type && lookup_type != M_DATALKPTYPE) { sbuf_printf(sb, 
"\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx %06x %06x - - %3c" " I %4x %3c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } else { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx - - ", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask); if (vlan_vld) sbuf_printf(sb, "%4u Y ", ivlan); else sbuf_printf(sb, " - N "); sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", lookup_type ? 'I' : 'O', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } if (cls_lo & F_T6_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t6mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x" " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc255_224), be32toh(ldst_cmd.u.mps.rplc.rplc223_192), be32toh(ldst_cmd.u.mps.rplc.rplc191_160), be32toh(ldst_cmd.u.mps.rplc.rplc159_128), be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%72s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#x", G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), (cls_lo >> S_T6_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint16_t mtus[NMTUS]; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_mtu_tbl(sc, mtus, NULL); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], mtus[14], mtus[15]); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; static const char *tx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", "Tx FIFO wait", NULL, "Tx latency" }; static const char *rx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Flush:", "Rx FIFO wait", NULL, "Rx latency" }; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Tx pcmds Tx 
bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); } sbuf_printf(sb, "\n Rx pcmds Rx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } if (chip_id(sc) > CHELSIO_T5) { sbuf_printf(sb, "\n Total wait Total occupancy"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); i += 2; MPASS(i < nitems(tx_stats)); sbuf_printf(sb, "\n Reads Total wait"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_rdma_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_rdma_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tcp_stats v4, v6; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tcp_stats(sc, &v4, &v6, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " IP IPv6\n"); sbuf_printf(sb, "OutRsts: %20u %20u\n", v4.tcp_out_rsts, v6.tcp_out_rsts); sbuf_printf(sb, "InSegs: %20ju %20ju\n", v4.tcp_in_segs, v6.tcp_in_segs); sbuf_printf(sb, "OutSegs: %20ju %20ju\n", v4.tcp_out_segs, v6.tcp_out_segs); sbuf_printf(sb, "RetransSegs: %20ju %20ju", v4.tcp_retrans_segs, v6.tcp_retrans_segs); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tids(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t x, y; struct tid_info *t = &sc->tids; rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (t->natids) { sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, t->atids_in_use); } if (t->nhpftids) { sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", t->hpftid_base, t->hpftid_end, t->hpftids_in_use); } if (t->ntids) { bool hashen = false; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { hashen = true; if (chip_id(sc) <= CHELSIO_T5) { x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; } else { x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "TID range: "); if (hashen) { if (x) sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1); sbuf_printf(sb, "%u-%u", y, t->ntids - 1); } else { sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + t->ntids - 1); } sbuf_printf(sb, ", in use: %u\n", atomic_load_acq_int(&t->tids_in_use)); } if (t->nstids) { sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, t->stid_base + t->nstids - 1, t->stids_in_use); } if (t->nftids) { sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, t->ftid_end, t->ftids_in_use); } if 
(t->netids) { sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, t->etid_base + t->netids - 1, t->etids_in_use); } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4); y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6); } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y); done: if (rc == 0) rc = sbuf_finish(sb); else (void)sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_err_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_err_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1], stats.mac_in_errs[2], stats.mac_in_errs[3]); sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1], stats.hdr_in_errs[2], stats.hdr_in_errs[3]); sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1], stats.tcp_in_errs[2], stats.tcp_in_errs[3]); sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "macInErrs: %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1]); sbuf_printf(sb, "hdrInErrs: %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1]); sbuf_printf(sb, "tcpInErrs: %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1]); sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); } sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", stats.ofld_no_neigh, stats.ofld_cong_defer); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tnl_stats stats; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tnl_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 
channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1], stats.out_pkt[2], stats.out_pkt[3]); sbuf_printf(sb, "InPkts: %10u %10u %10u %10u", stats.in_pkt[0], stats.in_pkt[1], stats.in_pkt[2], stats.in_pkt[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "OutPkts: %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1]); sbuf_printf(sb, "InPkts: %10u %10u", stats.in_pkt[0], stats.in_pkt[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct tp_params *tpp = &sc->params.tp; u_int mask; int rc; mask = tpp->la_mask >> 16; rc = sysctl_handle_int(oidp, &mask, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (mask > 0xffff) return (EINVAL); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tpp->la_mask = mask << 16; t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); } mtx_unlock(&sc->reg_lock); return (rc); } struct field_desc { const char *name; u_int start; u_int width; }; static void field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) { char buf[32]; int line_size = 0; while (f->name) { uint64_t mask = (1ULL << f->width) - 1; int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, ((uintmax_t)v >> f->start) & mask); if (line_size + len >= 79) { line_size = 8; sbuf_printf(sb, "\n "); } sbuf_printf(sb, "%s ", buf); line_size += len + 1; f++; } sbuf_printf(sb, "\n"); } static const struct field_desc tp_la0[] = { { "RcfOpCodeOut", 60, 4 }, { "State", 56, 4 }, { "WcfState", 52, 4 }, { "RcfOpcSrcOut", 50, 2 }, { "CRxError", 49, 1 }, { "ERxError", 48, 1 }, { "SanityFailed", 47, 1 }, { "SpuriousMsg", 46, 1 }, { "FlushInputMsg", 45, 1 }, { "FlushInputCpl", 44, 1 }, { "RssUpBit", 43, 1 }, { "RssFilterHit", 42, 1 }, { "Tid", 32, 10 }, { "InitTcb", 31, 1 }, { "LineNumber", 24, 7 }, { "Emsg", 23, 1 }, { "EdataOut", 22, 1 }, { "Cmsg", 21, 1 }, { "CdataOut", 20, 1 }, { "EreadPdu", 19, 1 }, { "CreadPdu", 18, 1 }, { "TunnelPkt", 17, 1 }, { "RcfPeerFin", 16, 1 }, { "RcfReasonOut", 12, 4 }, { "TxCchannel", 10, 2 }, { "RcfTxChannel", 8, 2 }, { "RxEchannel", 6, 2 }, { "RcfRxChannel", 5, 1 }, { "RcfDataOutSrdy", 4, 1 }, { "RxDvld", 3, 1 }, { "RxOoDvld", 2, 1 }, { "RxCongestion", 1, 1 }, { "TxCongestion", 0, 1 }, { NULL } }; static const struct field_desc tp_la1[] = { { "CplCmdIn", 56, 8 }, { "CplCmdOut", 48, 8 }, { "ESynOut", 47, 1 }, { "EAckOut", 46, 1 }, { "EFinOut", 45, 1 }, { "ERstOut", 44, 1 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static const struct field_desc tp_la2[] = { { 
"CplCmdIn", 56, 8 }, { "MpsVfVld", 55, 1 }, { "MpsPf", 52, 3 }, { "MpsVf", 44, 8 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static void tp_la_show(struct sbuf *sb, uint64_t *p, int idx) { field_desc_show(sb, *p, tp_la0); } static void tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], tp_la0); } static void tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1); } static int sysctl_tp_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint64_t *buf, *p; int rc; u_int i, inc; void (*show_func)(struct sbuf *, uint64_t *, int); rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_tp_read_la(sc, buf, NULL); switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { case 2: inc = 2; show_func = tp_la_show2; break; case 3: inc = 2; show_func = tp_la_show3; break; default: inc = 1; show_func = tp_la_show; } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) (*show_func)(sb, p, i); rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_chan_txrate(sc, nrate, orate); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", nrate[0], nrate[1], nrate[2], nrate[3]); sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", orate[0], orate[1], orate[2], orate[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", nrate[0], nrate[1]); sbuf_printf(sb, "Offload B/s: %10ju %10ju", orate[0], orate[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint32_t *buf, *p; int rc, i; rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == 
NULL) return (ENOMEM); buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_ulprx_read_la(sc, buf); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; sbuf_printf(sb, " Pcmd Type Message" " Data"); for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t cfg, s1, s2; MPASS(chip_id(sc) >= CHELSIO_T5); rc = 0; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cfg = t4_read_reg(sc, A_SGE_STAT_CFG); s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL); s2 = t4_read_reg(sc, A_SGE_STAT_MATCH); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); if (G_STATSOURCE_T5(cfg) == 7) { int mode; mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg); if (mode == 0) sbuf_printf(sb, "total %d, incomplete %d", s1, s2); else if (mode == 1) sbuf_printf(sb, "total %d, data overflow %d", s1, s2); else sbuf_printf(sb, "unknown mode %d", mode); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; enum cpu_sets op = arg2; cpuset_t cpuset; struct sbuf *sb; int i, rc; MPASS(op == LOCAL_CPUS || op == INTR_CPUS); CPU_ZERO(&cpuset); rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); CPU_FOREACH(i) sbuf_printf(sb, "%d ", i); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_reset(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int val; int rc; val = atomic_load_int(&sc->num_resets); rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) { /* Zero out the counter that tracks reset. 
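Writing 1 instead schedules an adapter reset via reset_tq below; any other value is rejected with EINVAL.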
*/ atomic_store_int(&sc->num_resets, 0); return (0); } if (val != 1) return (EINVAL); /* 0 or 1 are the only legal values */ if (hw_off_limits(sc)) /* harmless race */ return (EALREADY); taskqueue_enqueue(reset_tq, &sc->reset_task); return (0); } #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int i, j, v, rc; struct vi_info *vi; v = sc->tt.tls; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) return (ENOTSUP); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { sc->tt.tls = !!v; for_each_port(sc, i) { for_each_vi(sc->port[i], j, vi) { if (vi->flags & VI_INIT_DONE) t4_update_fl_bufsize(vi->ifp); } } } end_synchronized_op(sc, 0); return (rc); } static void unit_conv(char *buf, size_t len, u_int val, u_int factor) { u_int rem = val % factor; if (rem == 0) snprintf(buf, len, "%u", val / factor); else { while (rem % 10 == 0) rem /= 10; snprintf(buf, len, "%u.%u", val / factor, rem); } } static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; char buf[16]; u_int res, re; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) res = (u_int)-1; else res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); mtx_unlock(&sc->reg_lock); if (res == (u_int)-1) return (ENXIO); switch (arg2) { case 0: /* timer_tick */ re = G_TIMERRESOLUTION(res); break; case 1: /* TCP timestamp tick */ re = G_TIMESTAMPRESOLUTION(res); break; case 2: /* DACK tick */ re = G_DELAYEDACKRESOLUTION(res); break; default: return (EDOOFUS); } unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; u_int dack_tmr, dack_re, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); v = ((cclk_ps << dack_re) / 1000000) * dack_tmr; return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, reg = arg2; u_int tre; u_long tp_tick_us, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); tp_tick_us = (cclk_ps << tre) / 1000000; if (reg == A_TP_INIT_SRTT) v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); else v = tp_tick_us * t4_read_reg(sc, reg); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_long(oidp, &v, 0, req)); } /* * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is * passed to this function. 
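* E.g., idx = 12 selects bits 15:12: the handler below returns (TP_SHIFT_CNT >> 12) & 0xf.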
*/ static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int v; MPASS(idx >= 0 && idx <= 24); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int shift, v, r; MPASS(idx >= 0 && idx < 16); r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); shift = (idx & 3) << 3; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_ofld_rxq *ofld_rxq; uint8_t v; idx = vi->ofld_tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4otmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); for_each_ofld_rxq(vi, i, ofld_rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); #else ofld_rxq->iq.intr_params = v; #endif } vi->ofld_tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->ofld_pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4opktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->ofld_pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } #endif static int get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) { int rc; if (cntxt->cid > M_CTXTQID) return (EINVAL); if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) return (EINVAL); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (sc->flags & FW_OK) { rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); if (rc == 0) goto done; } /* * Read via firmware failed or wasn't even attempted. Read directly via * the backdoor. */ rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); done: end_synchronized_op(sc, 0); return (rc); } static int load_fw(struct adapter *sc, struct t4_data *fw) { int rc; uint8_t *fw_data; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* * The firmware, with the sole exception of the memory parity error * handler, runs from memory and not flash. It is almost always safe to * install a new firmware on a running system. Just set bit 1 in * hw.cxgbe.dflags or dev...dflags first. 
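* (The check below uses DF_LOAD_FW_ANYTIME for this; without it, a fully initialized adapter refuses the load with EBUSY.)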
*/ if (sc->flags & FULL_INIT_DONE && (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { rc = EBUSY; goto done; } fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); rc = copyin(fw->data, fw_data, fw->len); if (rc == 0) rc = -t4_load_fw(sc, fw_data, fw->len); free(fw_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_cfg(struct adapter *sc, struct t4_data *cfg) { int rc; uint8_t *cfg_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (cfg->len == 0) { /* clear */ rc = -t4_load_cfg(sc, NULL, 0); goto done; } cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); rc = copyin(cfg->data, cfg_data, cfg->len); if (rc == 0) rc = -t4_load_cfg(sc, cfg_data, cfg->len); free(cfg_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_boot(struct adapter *sc, struct t4_bootrom *br) { int rc; uint8_t *br_data = NULL; u_int offset; if (br->len > 1024 * 1024) return (EFBIG); if (br->pf_offset == 0) { /* pfidx */ if (br->pfidx_addr > 7) return (EINVAL); offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, A_PCIE_PF_EXPROM_OFST))); } else if (br->pf_offset == 1) { /* offset */ offset = G_OFFSET(br->pfidx_addr); } else { return (EINVAL); } rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (br->len == 0) { /* clear */ rc = -t4_load_boot(sc, NULL, offset, 0); goto done; } br_data = malloc(br->len, M_CXGBE, M_WAITOK); rc = copyin(br->data, br_data, br->len); if (rc == 0) rc = -t4_load_boot(sc, br_data, offset, br->len); free(br_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_bootcfg(struct adapter *sc, struct t4_data *bc) { int rc; uint8_t *bc_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (bc->len == 0) { /* clear */ rc = -t4_load_bootcfg(sc, NULL, 0); goto done; } bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); rc = copyin(bc->data, bc_data, bc->len); if (rc == 0) rc = -t4_load_bootcfg(sc, bc_data, bc->len); free(bc_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) { int rc; struct cudbg_init *cudbg; void *handle, *buf; /* buf is large, don't block if no memory is available */ buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); handle = cudbg_alloc_handle(); if (handle == NULL) { rc = ENOMEM; goto done; } cudbg = cudbg_get_init(handle); cudbg->adap = sc; cudbg->print = (cudbg_print_cb)printf; #ifndef notyet device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", __func__, dump->wr_flash, dump->len, dump->data); #endif if (dump->wr_flash) cudbg->use_flash = 1; MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); rc = cudbg_collect(handle, buf, &dump->len); if (rc != 0) goto done; rc = copyout(buf, dump->data, dump->len); done: cudbg_free_handle(handle); free(buf, M_CXGBE); return (rc); } static void free_offload_policy(struct t4_offload_policy *op) { struct offload_rule *r; int i; if (op == NULL) return; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { free(r->bpf_prog.bf_insns, M_CXGBE); } free(op->rule, M_CXGBE); free(op, M_CXGBE); } static int set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop) { int i, rc, len; 
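	/*
	 * Descriptive note on the flow below: the rule array is copied in
	 * from userspace, each rule is validated (open_type, the settings
	 * ranges, and any BPF program via copyin + bpf_validate), and then
	 * the new policy is swapped in under sc->policy_lock.
	 */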
struct t4_offload_policy *op, *old; struct bpf_program *bf; const struct offload_settings *s; struct offload_rule *r; void *u; if (!is_offload(sc)) return (ENODEV); if (uop->nrules == 0) { /* Delete installed policies. */ op = NULL; goto set_policy; } else if (uop->nrules > 256) { /* arbitrary */ return (E2BIG); } /* Copy userspace offload policy to kernel */ op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK); op->nrules = uop->nrules; len = op->nrules * sizeof(struct offload_rule); op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(uop->rule, op->rule, len); if (rc) { free(op->rule, M_CXGBE); free(op, M_CXGBE); return (rc); } r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { /* Validate open_type */ if (r->open_type != OPEN_TYPE_LISTEN && r->open_type != OPEN_TYPE_ACTIVE && r->open_type != OPEN_TYPE_PASSIVE && r->open_type != OPEN_TYPE_DONTCARE) { error: /* * Rules 0 to i have malloc'd filters that need to be * freed. Rules i+1 to nrules have userspace pointers * and should be left alone. */ op->nrules = i; free_offload_policy(op); return (rc); } /* Validate settings */ s = &r->settings; if ((s->offload != 0 && s->offload != 1) || s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || s->sched_class < -1 || s->sched_class >= sc->params.nsched_cls) { rc = EINVAL; goto error; } bf = &r->bpf_prog; u = bf->bf_insns; /* userspace ptr */ bf->bf_insns = NULL; if (bf->bf_len == 0) { /* legal, matches everything */ continue; } len = bf->bf_len * sizeof(*bf->bf_insns); bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(u, bf->bf_insns, len); if (rc != 0) goto error; if (!bpf_validate(bf->bf_insns, bf->bf_len)) { rc = EINVAL; goto error; } } set_policy: rw_wlock(&sc->policy_lock); old = sc->policy; sc->policy = op; rw_wunlock(&sc->policy_lock); free_offload_policy(old); return (0); } #define MAX_READ_BUF_SIZE (128 * 1024) static int read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) { uint32_t addr, remaining, n; uint32_t *buf; int rc; uint8_t *dst; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = validate_mem_range(sc, mr->addr, mr->len); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); addr = mr->addr; remaining = mr->len; dst = (void *)mr->data; while (remaining) { n = min(remaining, MAX_READ_BUF_SIZE); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else read_via_memwin(sc, 2, addr, buf, n); mtx_unlock(&sc->reg_lock); if (rc != 0) break; rc = copyout(buf, dst, n); if (rc != 0) break; dst += n; remaining -= n; addr += n; } free(buf, M_CXGBE); return (rc); } #undef MAX_READ_BUF_SIZE static int read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) { int rc; if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) return (EINVAL); if (i2cd->len > sizeof(i2cd->data)) return (EFBIG); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, i2cd->offset, i2cd->len, &i2cd->data[0]); end_synchronized_op(sc, 0); return (rc); } static int clear_stats(struct adapter *sc, u_int port_id) { int i, v, chan_map; struct port_info *pi; struct vi_info *vi; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif if (port_id >= sc->params.nports) return (EINVAL); pi = 
sc->port[port_id]; if (pi == NULL) return (EIO); mtx_lock(&sc->reg_lock); if (!hw_off_limits(sc)) { /* MAC stats */ t4_clr_port_stats(sc, pi->tx_chan); if (is_t6(sc)) { if (pi->fcs_reg != -1) pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); else pi->stats.rx_fcs_err = 0; } for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) t4_clr_vi_stats(sc, vi->vin); } chan_map = pi->rx_e_chan_map; v = 0; /* reuse */ while (chan_map) { i = ffs(chan_map) - 1; t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); chan_map &= ~(1 << i); } } mtx_unlock(&sc->reg_lock); pi->tx_parse_error = 0; pi->tnl_cong_drops = 0; /* * Since this command accepts a port, clear stats for * all VIs on this port. */ for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) { for_each_rxq(vi, i, rxq) { #if defined(INET) || defined(INET6) rxq->lro.lro_queued = 0; rxq->lro.lro_flushed = 0; #endif rxq->rxcsum = 0; rxq->vlan_extraction = 0; rxq->vxlan_rxcsum = 0; rxq->fl.cl_allocated = 0; rxq->fl.cl_recycled = 0; rxq->fl.cl_fast_recycled = 0; } for_each_txq(vi, i, txq) { txq->txcsum = 0; txq->tso_wrs = 0; txq->vlan_insertion = 0; txq->imm_wrs = 0; txq->sgl_wrs = 0; txq->txpkt_wrs = 0; txq->txpkts0_wrs = 0; txq->txpkts1_wrs = 0; txq->txpkts0_pkts = 0; txq->txpkts1_pkts = 0; txq->txpkts_flush = 0; txq->raw_wrs = 0; txq->vxlan_tso_wrs = 0; txq->vxlan_txcsum = 0; txq->kern_tls_records = 0; txq->kern_tls_short = 0; txq->kern_tls_partial = 0; txq->kern_tls_full = 0; txq->kern_tls_octets = 0; txq->kern_tls_waste = 0; txq->kern_tls_options = 0; txq->kern_tls_header = 0; txq->kern_tls_fin = 0; txq->kern_tls_fin_short = 0; txq->kern_tls_cbc = 0; txq->kern_tls_gcm = 0; mp_ring_reset_stats(txq->r); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { ofld_txq->wrq.tx_wrs_direct = 0; ofld_txq->wrq.tx_wrs_copied = 0; counter_u64_zero(ofld_txq->tx_iscsi_pdus); counter_u64_zero(ofld_txq->tx_iscsi_octets); counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs); counter_u64_zero(ofld_txq->tx_aio_jobs); counter_u64_zero(ofld_txq->tx_aio_octets); counter_u64_zero(ofld_txq->tx_toe_tls_records); counter_u64_zero(ofld_txq->tx_toe_tls_octets); } #endif #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { ofld_rxq->fl.cl_allocated = 0; ofld_rxq->fl.cl_recycled = 0; ofld_rxq->fl.cl_fast_recycled = 0; counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_ok); counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_error); ofld_rxq->rx_iscsi_ddp_pdus = 0; ofld_rxq->rx_iscsi_ddp_octets = 0; ofld_rxq->rx_iscsi_fl_pdus = 0; ofld_rxq->rx_iscsi_fl_octets = 0; ofld_rxq->rx_aio_ddp_jobs = 0; ofld_rxq->rx_aio_ddp_octets = 0; ofld_rxq->rx_toe_tls_records = 0; ofld_rxq->rx_toe_tls_octets = 0; ofld_rxq->rx_toe_ddp_octets = 0; counter_u64_zero(ofld_rxq->ddp_buffer_alloc); counter_u64_zero(ofld_rxq->ddp_buffer_reuse); counter_u64_zero(ofld_rxq->ddp_buffer_free); } #endif if (IS_MAIN_VI(vi)) { wrq = &sc->sge.ctrlq[pi->port_id]; wrq->tx_wrs_direct = 0; wrq->tx_wrs_copied = 0; } } } return (0); } static int hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); if (t4_get_clip_entry(sc, &in6, true) != NULL) return (0); else return (EIO); #else return (ENOTSUP); #endif } static int release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); return (t4_release_clip_addr(sc, &in6)); #else return (ENOTSUP); #endif } int 
t4_os_find_pci_capability(struct adapter *sc, int cap) { int i; return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); } int t4_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t4_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } void t4_os_portmod_changed(struct port_info *pi) { struct adapter *sc = pi->adapter; struct vi_info *vi; if_t ifp; static const char *mod_str[] = { NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" }; KASSERT((pi->flags & FIXED_IFMEDIA) == 0, ("%s: port_type %u", __func__, pi->port_type)); vi = &pi->vi[0]; if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { PORT_LOCK(pi); build_medialist(pi); if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { fixup_link_config(pi); apply_link_config(pi); } PORT_UNLOCK(pi); end_synchronized_op(sc, LOCK_HELD); } ifp = vi->ifp; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) if_printf(ifp, "transceiver unplugged.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) if_printf(ifp, "unknown transceiver inserted.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) if_printf(ifp, "unsupported transceiver inserted.\n"); else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { if_printf(ifp, "%dGbps %s transceiver inserted.\n", port_top_speed(pi), mod_str[pi->mod_type]); } else { if_printf(ifp, "transceiver (type %d) inserted.\n", pi->mod_type); } } void t4_os_link_changed(struct port_info *pi) { struct vi_info *vi; if_t ifp; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int v; PORT_LOCK_ASSERT_OWNED(pi); if (is_t6(sc)) { if (lc->link_ok) { if (lc->speed > 25000 || (lc->speed == 25000 && lc->fec == FEC_RS)) { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS); } else { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS); } pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); pi->stats.rx_fcs_err = 0; } else { pi->fcs_reg = -1; } } else { MPASS(pi->fcs_reg != -1); MPASS(pi->fcs_base == 0); } for_each_vi(pi, v, vi) { ifp = vi->ifp; if (ifp == NULL || IS_DETACHING(vi)) continue; if (lc->link_ok) { if_setbaudrate(ifp, IF_Mbps(lc->speed)); if_link_state_change(ifp, LINK_STATE_UP); } else { if_link_state_change(ifp, LINK_STATE_DOWN); } } } void t4_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; sx_slock(&t4_list_lock); SLIST_FOREACH(sc, &t4_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. 
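 * Usage sketch: the VXLAN event handlers later in this file call
 * t4_iterate(t4_vxlan_start, &v), which runs the callback once for every
 * adapter on t4_list.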
*/ func(sc, arg); } sx_sunlock(&t4_list_lock); } static int t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int rc; struct adapter *sc = dev->si_drv1; rc = priv_check(td, PRIV_DRIVER); if (rc != 0) return (rc); switch (cmd) { case CHELSIO_T4_GETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) edata->val = t4_read_reg(sc, edata->addr); else if (edata->size == 8) edata->val = t4_read_reg64(sc, edata->addr); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_SETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) { if (edata->val & 0xffffffff00000000) rc = EINVAL; t4_write_reg(sc, edata->addr, (uint32_t) edata->val); } else if (edata->size == 8) t4_write_reg64(sc, edata->addr, edata->val); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_REGDUMP: { struct t4_regdump *regs = (struct t4_regdump *)data; int reglen = t4_get_regs_len(sc); uint8_t *buf; if (regs->len < reglen) { regs->len = reglen; /* hint to the caller */ return (ENOBUFS); } regs->len = reglen; buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else get_regs(sc, regs, buf); mtx_unlock(&sc->reg_lock); if (rc == 0) rc = copyout(buf, regs->data, reglen); free(buf, M_CXGBE); break; } case CHELSIO_T4_GET_FILTER_MODE: rc = get_filter_mode(sc, (uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MODE: rc = set_filter_mode(sc, *(uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MASK: rc = set_filter_mask(sc, *(uint32_t *)data); break; case CHELSIO_T4_GET_FILTER: rc = get_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_SET_FILTER: rc = set_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_DEL_FILTER: rc = del_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_GET_SGE_CONTEXT: rc = get_sge_context(sc, (struct t4_sge_context *)data); break; case CHELSIO_T4_LOAD_FW: rc = load_fw(sc, (struct t4_data *)data); break; case CHELSIO_T4_GET_MEM: rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); break; case CHELSIO_T4_GET_I2C: rc = read_i2c(sc, (struct t4_i2c_data *)data); break; case CHELSIO_T4_CLEAR_STATS: rc = clear_stats(sc, *(uint32_t *)data); break; case CHELSIO_T4_SCHED_CLASS: rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); break; case CHELSIO_T4_SCHED_QUEUE: rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); break; case CHELSIO_T4_GET_TRACER: rc = t4_get_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_SET_TRACER: rc = t4_set_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_LOAD_CFG: rc = load_cfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_LOAD_BOOT: rc = load_boot(sc, (struct t4_bootrom *)data); break; case CHELSIO_T4_LOAD_BOOTCFG: rc = load_bootcfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_CUDBG_DUMP: rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); break; case CHELSIO_T4_SET_OFLD_POLICY: rc = set_offload_policy(sc, (struct t4_offload_policy *)data); break; case CHELSIO_T4_HOLD_CLIP_ADDR: rc = hold_clip_addr(sc, (struct t4_clip_addr *)data); break; case CHELSIO_T4_RELEASE_CLIP_ADDR: rc = release_clip_addr(sc, (struct t4_clip_addr *)data); break; default: rc = 
ENOTTY; } return (rc); } #ifdef TCP_OFFLOAD int toe_capability(struct vi_info *vi, bool enable) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; ASSERT_SYNCHRONIZED_OP(sc); if (!is_offload(sc)) return (ENODEV); if (hw_off_limits(sc)) return (ENXIO); if (enable) { #ifdef KERN_TLS if (sc->flags & KERN_TLS_ON && is_t6(sc)) { int i, j, n; struct port_info *p; struct vi_info *v; /* * Reconfigure hardware for TOE if TXTLS is not enabled * on any ifnet. */ n = 0; for_each_port(sc, i) { p = sc->port[i]; for_each_vi(p, j, v) { if (if_getcapenable(v->ifp) & IFCAP_TXTLS) { CH_WARN(sc, "%s has NIC TLS enabled.\n", device_get_nameunit(v->dev)); n++; } } } if (n > 0) { CH_WARN(sc, "Disable NIC TLS on all interfaces " "associated with this adapter before " "trying to enable TOE.\n"); return (EAGAIN); } rc = t6_config_kern_tls(sc, false); if (rc) return (rc); } #endif if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) { /* TOE is already enabled. */ return (0); } /* * We need the port's queues around so that we're able to send * and receive CPLs to/from the TOE even if the ifnet for this * port has never been UP'd administratively. */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); if (!(pi->vi[0].flags & VI_INIT_DONE) && ((rc = vi_init(&pi->vi[0])) != 0)) return (rc); if (isset(&sc->offload_map, pi->port_id)) { /* TOE is enabled on another VI of this port. */ MPASS(pi->uld_vis > 0); pi->uld_vis++; return (0); } if (!uld_active(sc, ULD_TOM)) { rc = t4_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t4_tom.ko before trying " "to enable TOE on a cxgbe interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM activated but flag not set", __func__)); } /* Activate iWARP and iSCSI too, if the modules are loaded. */ if (!uld_active(sc, ULD_IWARP)) (void) t4_activate_uld(sc, ULD_IWARP); if (!uld_active(sc, ULD_ISCSI)) (void) t4_activate_uld(sc, ULD_ISCSI); if (pi->uld_vis++ == 0) setbit(&sc->offload_map, pi->port_id); } else { if ((if_getcapenable(vi->ifp) & IFCAP_TOE) == 0) { /* TOE is already disabled. */ return (0); } MPASS(isset(&sc->offload_map, pi->port_id)); MPASS(pi->uld_vis > 0); if (--pi->uld_vis == 0) clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t4_register_uld(struct uld_info *ui, int id) { int rc; if (id < 0 || id > ULD_MAX) return (EINVAL); sx_xlock(&t4_uld_list_lock); if (t4_uld_list[id] != NULL) rc = EEXIST; else { t4_uld_list[id] = ui; rc = 0; } sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_unregister_uld(struct uld_info *ui, int id) { if (id < 0 || id > ULD_MAX) return (EINVAL); sx_xlock(&t4_uld_list_lock); MPASS(t4_uld_list[id] == ui); t4_uld_list[id] = NULL; sx_xunlock(&t4_uld_list_lock); return (0); } int t4_activate_uld(struct adapter *sc, int id) { int rc; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); /* Adapter needs to be initialized before any ULD can be activated. */ if (!(sc->flags & FULL_INIT_DONE)) { rc = adapter_init(sc); if (rc != 0) return (rc); } sx_slock(&t4_uld_list_lock); if (t4_uld_list[id] == NULL) rc = EAGAIN; /* load the KLD with this ULD and try again. 
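 * (For ULD_TOM, for example, that means kldload t4_tom; toe_capability()
 * above logs the same hint when it sees this EAGAIN.)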
*/ else { rc = t4_uld_list[id]->uld_activate(sc); if (rc == 0) setbit(&sc->active_ulds, id); } sx_sunlock(&t4_uld_list_lock); return (rc); } int t4_deactivate_uld(struct adapter *sc, int id) { int rc; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); sx_slock(&t4_uld_list_lock); if (t4_uld_list[id] == NULL) rc = ENXIO; else { rc = t4_uld_list[id]->uld_deactivate(sc); if (rc == 0) clrbit(&sc->active_ulds, id); } sx_sunlock(&t4_uld_list_lock); return (rc); } static int deactivate_all_uld(struct adapter *sc) { int i, rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld"); if (rc != 0) return (ENXIO); sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i)) continue; rc = t4_uld_list[i]->uld_deactivate(sc); if (rc != 0) break; clrbit(&sc->active_ulds, i); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); return (rc); } static void stop_all_uld(struct adapter *sc) { int i; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldst") != 0) return; sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i) || t4_uld_list[i]->uld_stop == NULL) continue; (void) t4_uld_list[i]->uld_stop(sc); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); } static void restart_all_uld(struct adapter *sc) { int i; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldre") != 0) return; sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] == NULL || !uld_active(sc, i) || t4_uld_list[i]->uld_restart == NULL) continue; (void) t4_uld_list[i]->uld_restart(sc); } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); } int uld_active(struct adapter *sc, int id) { MPASS(id >= 0 && id <= ULD_MAX); return (isset(&sc->active_ulds, id)); } #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *sc, bool enable) { ASSERT_SYNCHRONIZED_OP(sc); if (!is_ktls(sc)) return (ENODEV); if (!is_t6(sc)) return (0); if (hw_off_limits(sc)) return (ENXIO); if (enable) { if (sc->flags & KERN_TLS_ON) return (0); /* already on */ if (sc->offload_map != 0) { CH_WARN(sc, "Disable TOE on all interfaces associated with " "this adapter before trying to enable NIC TLS.\n"); return (EAGAIN); } return (t6_config_kern_tls(sc, true)); } else { /* * Nothing to do for disable. If TOE is enabled sometime later * then toe_capability will reconfigure the hardware. */ return (0); } } #endif /* * t = ptr to tunable. * nc = number of CPUs. * c = compiled in default for that tunable. */ static void calculate_nqueues(int *t, int nc, const int c) { int nq; if (*t > 0) return; nq = *t < 0 ? -*t : c; *t = min(nc, nq); } /* * Come up with reasonable defaults for some of the tunables, provided they're * not set by the user (in which case we'll use the values as is). 
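 * Worked example (CPU count assumed): with 8 CPUs and t4_ntxq left unset,
 * calculate_nqueues() above yields min(8, NTXQ) tx queues.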
*/ static void tweak_tunables(void) { int nc = mp_ncpus; /* our snapshot of the number of CPUs */ if (t4_ntxq < 1) { #ifdef RSS t4_ntxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_ntxq, nc, NTXQ); #endif } calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); if (t4_nrxq < 1) { #ifdef RSS t4_nrxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_nrxq, nc, NRXQ); #endif } calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ); calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); #endif #ifdef TCP_OFFLOAD calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ); calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); #endif #if defined(TCP_OFFLOAD) || defined(KERN_TLS) if (t4_toecaps_allowed == -1) t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; #else if (t4_toecaps_allowed == -1) t4_toecaps_allowed = 0; #endif #ifdef TCP_OFFLOAD if (t4_rdmacaps_allowed == -1) { t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | FW_CAPS_CONFIG_RDMA_RDMAC; } if (t4_iscsicaps_allowed == -1) { t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | FW_CAPS_CONFIG_ISCSI_TARGET_PDU | FW_CAPS_CONFIG_ISCSI_T10DIF; } if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS) t4_tmr_idx_ofld = TMR_IDX_OFLD; if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) t4_pktc_idx_ofld = PKTC_IDX_OFLD; #else if (t4_rdmacaps_allowed == -1) t4_rdmacaps_allowed = 0; if (t4_iscsicaps_allowed == -1) t4_iscsicaps_allowed = 0; #endif #ifdef DEV_NETMAP calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ); calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ); calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); #endif if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS) t4_tmr_idx = TMR_IDX; if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) t4_pktc_idx = PKTC_IDX; if (t4_qsize_txq < 128) t4_qsize_txq = 128; if (t4_qsize_rxq < 128) t4_qsize_rxq = 128; while (t4_qsize_rxq & 7) t4_qsize_rxq++; t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; /* * Number of VIs to create per-port. The first VI is the "main" regular * VI for the port. The rest are additional virtual interfaces on the * same physical port. Note that the main VI does not have native * netmap support but the extra VIs do. * * Limit the number of VIs per port to the number of available * MAC addresses per port. 
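 * (e.g. a t4_num_vis setting larger than nitems(vi_mac_funcs) is clamped
 * below, with a console message noting the limit.)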
*/ if (t4_num_vis < 1) t4_num_vis = 1; if (t4_num_vis > nitems(vi_mac_funcs)) { t4_num_vis = nitems(vi_mac_funcs); printf("cxgbe: number of VIs limited to %d\n", t4_num_vis); } if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) { pcie_relaxed_ordering = 1; #if defined(__i386__) || defined(__amd64__) if (cpu_vendor_id == CPU_VENDOR_INTEL) pcie_relaxed_ordering = 0; #endif } } #ifdef DDB static void t4_dump_mem(struct adapter *sc, u_int addr, u_int len) { uint32_t base, j, off, pf, reg, save, win_pos; reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); save = t4_read_reg(sc, reg); base = sc->memwin[2].mw_base; if (is_t4(sc)) { pf = 0; win_pos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); win_pos = addr & ~0x7f; /* start must be 128B aligned */ } off = addr - win_pos; t4_write_reg(sc, reg, win_pos | pf); t4_read_reg(sc, reg); while (len > 0 && !db_pager_quit) { uint32_t buf[8]; for (j = 0; j < 8; j++, off += 4) buf[j] = htonl(t4_read_reg(sc, base + off)); db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); if (len <= sizeof(buf)) len = 0; else len -= sizeof(buf); } t4_write_reg(sc, reg, save); t4_read_reg(sc, reg); } static void t4_dump_tcb(struct adapter *sc, int tid) { uint32_t tcb_addr; /* Dump TCB for the tid */ tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); tcb_addr += tid * TCB_SIZE; t4_dump_mem(sc, tcb_addr, TCB_SIZE); } static void t4_dump_devlog(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e e; int i, first, j, m, nentries, rc; uint64_t ftstamp = UINT64_MAX; if (dparams->start == 0) { db_printf("devlog params not valid\n"); return; } nentries = dparams->size / sizeof(struct fw_devlog_e); m = fwmtype_to_hwmtype(dparams->memtype); /* Find the first entry. */ first = -1; for (i = 0; i < nentries && !db_pager_quit; i++) { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) break; if (e.timestamp == 0) break; e.timestamp = be64toh(e.timestamp); if (e.timestamp < ftstamp) { ftstamp = e.timestamp; first = i; } } if (first == -1) return; i = first; do { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) return; if (e.timestamp == 0) return; e.timestamp = be64toh(e.timestamp); e.seqno = be32toh(e.seqno); for (j = 0; j < 8; j++) e.params[j] = be32toh(e.params[j]); db_printf("%10d %15ju %8s %8s ", e.seqno, e.timestamp, (e.level < nitems(devlog_level_strings) ? devlog_level_strings[e.level] : "UNKNOWN"), (e.facility < nitems(devlog_facility_strings) ? 
devlog_facility_strings[e.facility] : "UNKNOWN")); db_printf(e.fmt, e.params[0], e.params[1], e.params[2], e.params[3], e.params[4], e.params[5], e.params[6], e.params[7]); if (++i == nentries) i = 0; } while (i != first && !db_pager_quit); } static DB_DEFINE_TABLE(show, t4, show_t4); DB_TABLE_COMMAND_FLAGS(show_t4, devlog, db_show_devlog, CS_OWN) { device_t dev; int t; bool valid; valid = false; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); valid = true; } db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 devlog <nexus>\n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } t4_dump_devlog(device_get_softc(dev)); } DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN) { device_t dev; int radix, tid, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { tid = db_tok_number; valid = true; } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 tcb <nexus> <tid>\n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (tid < 0) { db_printf("invalid tid\n"); return; } t4_dump_tcb(device_get_softc(dev), tid); } DB_TABLE_COMMAND_FLAGS(show_t4, memdump, db_show_memdump, CS_OWN) { device_t dev; int radix, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { addr = db_tok_number; t = db_read_token(); if (t == tNUMBER) { count = db_tok_number; valid = true; } } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 memdump <nexus> <addr> <len>\n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (addr < 0) { db_printf("invalid address\n"); return; } if (count <= 0) { db_printf("invalid length\n"); return; } t4_dump_mem(device_get_softc(dev), addr, count); } #endif static eventhandler_tag vxlan_start_evtag; static eventhandler_tag vxlan_stop_evtag; struct vxlan_evargs { if_t ifp; uint16_t port; }; static void enable_vxlan_rx(struct adapter *sc) { int i, rc; struct port_info *pi; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) | F_VXLAN_EN); for_each_port(sc, i) { pi = sc->port[i]; if (pi->vxlan_tcam_entry == true) continue; rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac, match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, true); if (rc < 0) { rc = -rc; CH_ERR(&pi->vi[0], "failed to add VXLAN TCAM entry: %d.\n", rc); } else { MPASS(rc == sc->rawf_base + pi->port_id); pi->vxlan_tcam_entry = true; } } } static void t4_vxlan_start(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0) return; if (sc->vxlan_refcount == 0) { sc->vxlan_port = v->port; sc->vxlan_refcount = 1; if (!hw_off_limits(sc)) enable_vxlan_rx(sc); } else if (sc->vxlan_port == v->port) { sc->vxlan_refcount++; } else { CH_ERR(sc, "VXLAN already configured on port %d; " "ignoring attempt to configure it on port %d\n", sc->vxlan_port, v->port); } end_synchronized_op(sc, 0); } static void t4_vxlan_stop(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0) return; /* * 
VXLANs may have been configured before the driver was loaded so we * may see more stops than starts. This is not handled cleanly but at * least we keep the refcount sane. */ if (sc->vxlan_port != v->port) goto done; if (sc->vxlan_refcount == 0) { CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; " "ignoring attempt to stop it again.\n", sc->vxlan_port); } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc)) t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0); done: end_synchronized_op(sc, 0); } static void t4_vxlan_start_handler(void *arg __unused, if_t ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_start, &v); } static void t4_vxlan_stop_handler(void *arg __unused, if_t ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_stop, &v); } static struct sx mlu; /* mod load unload */ SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); static int mod_event(module_t mod, int cmd, void *arg) { int rc = 0; static int loaded = 0; switch (cmd) { case MOD_LOAD: sx_xlock(&mlu); if (loaded++ == 0) { t4_sge_modload(); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_filter_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER); t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); t4_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); sx_init(&t4_list_lock, "T4/T5 adapters"); SLIST_INIT(&t4_list); callout_init(&fatal_callout, 1); #ifdef TCP_OFFLOAD sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); #endif #ifdef INET6 t4_clip_modload(); #endif #ifdef KERN_TLS t6_ktls_modload(); #endif t4_tracer_modload(); tweak_tunables(); vxlan_start_evtag = EVENTHANDLER_REGISTER(vxlan_start, t4_vxlan_start_handler, NULL, EVENTHANDLER_PRI_ANY); vxlan_stop_evtag = EVENTHANDLER_REGISTER(vxlan_stop, t4_vxlan_stop_handler, NULL, EVENTHANDLER_PRI_ANY); reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK, taskqueue_thread_enqueue, &reset_tq); taskqueue_start_threads(&reset_tq, 1, PI_SOFT, "t4_rst_thr"); } sx_xunlock(&mlu); break; case MOD_UNLOAD: sx_xlock(&mlu); if (--loaded == 0) { #ifdef TCP_OFFLOAD int i; #endif int tries; taskqueue_free(reset_tq); tries = 0; while (tries++ < 5 && t4_sge_extfree_refs() != 0) { uprintf("%ju clusters with custom free routine " "still in use.\n", t4_sge_extfree_refs()); pause("t4unload", 2 * hz); } sx_slock(&t4_list_lock); if (!SLIST_EMPTY(&t4_list)) { rc = EBUSY; sx_sunlock(&t4_list_lock); goto done_unload; } #ifdef TCP_OFFLOAD sx_slock(&t4_uld_list_lock); for (i = 0; i <= ULD_MAX; i++) { if (t4_uld_list[i] != NULL) { rc = EBUSY; sx_sunlock(&t4_uld_list_lock); sx_sunlock(&t4_list_lock); goto done_unload; } } sx_sunlock(&t4_uld_list_lock); #endif sx_sunlock(&t4_list_lock); if (t4_sge_extfree_refs() == 0) { EVENTHANDLER_DEREGISTER(vxlan_start, vxlan_start_evtag); EVENTHANDLER_DEREGISTER(vxlan_stop, vxlan_stop_evtag); t4_tracer_modunload(); #ifdef KERN_TLS t6_ktls_modunload(); #endif #ifdef INET6 t4_clip_modunload(); #endif #ifdef TCP_OFFLOAD 
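			/* The ULD list lock exists only with TOE support compiled in. */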
sx_destroy(&t4_uld_list_lock); #endif sx_destroy(&t4_list_lock); t4_sge_modunload(); loaded = 0; } else { rc = EBUSY; loaded++; /* undo earlier decrement */ } } done_unload: sx_xunlock(&mlu); break; } return (rc); } DRIVER_MODULE(t4nex, pci, t4_driver, mod_event, 0); MODULE_VERSION(t4nex, 1); MODULE_DEPEND(t4nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t4nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t5nex, pci, t5_driver, mod_event, 0); MODULE_VERSION(t5nex, 1); MODULE_DEPEND(t5nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t5nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t6nex, pci, t6_driver, mod_event, 0); MODULE_VERSION(t6nex, 1); MODULE_DEPEND(t6nex, crypto, 1, 1, 1); MODULE_DEPEND(t6nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t6nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0); MODULE_VERSION(cxgbe, 1); DRIVER_MODULE(cxl, t5nex, cxl_driver, 0, 0); MODULE_VERSION(cxl, 1); DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0); MODULE_VERSION(cc, 1); DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0); MODULE_VERSION(vcxgbe, 1); DRIVER_MODULE(vcxl, cxl, vcxl_driver, 0, 0); MODULE_VERSION(vcxl, 1); DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0); MODULE_VERSION(vcc, 1); diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c index c7553e8a514a..738a1db8b7e1 100644 --- a/sys/dev/dpaa2/dpaa2_mc.c +++ b/sys/dev/dpaa2/dpaa2_mc.c @@ -1,882 +1,882 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. 
*/ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. */ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. 
*/ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. */ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. */ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; error = bus_generic_detach(dev); if (error != 0) return (error); sc = device_get_softc(dev); if (sc->rcdev) device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type, flags); if (rm == NULL) return (bus_generic_alloc_resource(mcdev, child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. 
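 * (dpaa2_mc_manage_dev() later in this file is the routine behind that
 * interface method.)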
*/ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = bus_generic_rman_alloc_resource(mcdev, child, type, rid, start, end, count, flags); if (res == NULL) goto fail; return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_adjust_resource(mcdev, child, r, start, end)); return (bus_generic_adjust_resource(mcdev, child, r, start, end)); } int dpaa2_mc_release_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_release_resource(mcdev, child, r)); return (bus_generic_release_resource(mcdev, child, r)); } int dpaa2_mc_activate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_activate_resource(mcdev, child, r)); return (bus_generic_activate_resource(mcdev, child, r)); } int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, rman_get_type(r), rman_get_flags(r)); if (rm) return (bus_generic_rman_deactivate_resource(mcdev, child, r)); return (bus_generic_deactivate_resource(mcdev, child, r)); } /* * For pseudo-pcib interface. */ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. 
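 * (dpaa2_mc_rman() maps DPAA2_DEV_IO/BP/CON/MCP to the dpio/dpbp/dpcon/
 * dpmcp resource managers initialized in dpaa2_mc_attach().)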
*/ rm = dpaa2_mc_rman(mcdev, dinfo->dtype, 0); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype, 0); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. */ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 
1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. */ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, &msi_parent, NULL); if (error) return (0); return ((u_int) msi_parent); } #endif } return (0); } /** * @internal */ static u_int dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) { struct dpaa2_devinfo *dinfo; #ifdef DEV_ACPI u_int xref, devid; int error; #endif dinfo = device_get_ivars(child); if (dinfo) { /* * The first named components from IORT table with the given * name (as a substring) will be used. */ #ifdef DEV_ACPI error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error == 0) *id = devid; else #endif *id = dinfo->icid; /* RID not in IORT, likely FW bug */ return (0); } return (ENXIO); } /** * @internal * @brief Obtain a resource manager based on the given type of the resource. */ struct rman * dpaa2_mc_rman(device_t mcdev, int type, u_int flags) { struct dpaa2_mc_softc *sc; sc = device_get_softc(mcdev); switch (type) { case DPAA2_DEV_IO: return (&sc->dpio_rman); case DPAA2_DEV_BP: return (&sc->dpbp_rman); case DPAA2_DEV_CON: return (&sc->dpcon_rman); case DPAA2_DEV_MCP: return (&sc->dpmcp_rman); default: break; } return (NULL); } #if defined(INTRNG) && !defined(IOMMU) /** * @internal * @brief Allocates requested number of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int msi_irqs[DPAA2_MC_MSI_COUNT]; int error; /* Pre-allocate a bunch of MSIs for MC to be used by its children. */ if (!sc->msi_allocated) { error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); if (error) { device_printf(mcdev, "failed to pre-allocate %d MSIs: " "error=%d\n", DPAA2_MC_MSI_COUNT, error); return (error); } mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { sc->msi[i].child = NULL; sc->msi[i].irq = msi_irqs[i]; } sc->msi_owner = child; sc->msi_allocated = true; mtx_unlock(&sc->msi_lock); } error = ENOENT; /* Find the first free MSIs from the pre-allocated pool. */ mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != NULL) continue; error = 0; for (int j = 0; j < count; j++) { if (i + j >= DPAA2_MC_MSI_COUNT) { device_printf(mcdev, "requested %d MSIs exceed " "limit of %d available\n", count, DPAA2_MC_MSI_COUNT); error = E2BIG; break; } sc->msi[i + j].child = child; irqs[j] = sc->msi[i + j].irq; } break; } mtx_unlock(&sc->msi_lock); return (error); } /** * @internal * @brief Marks IRQs as free in the pre-allocated pool of MSIs. 
* * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. * NOTE: MSIs are kept allocated in the kernel as a part of the pool. */ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/dpaa2/dpaa2_mc_fdt.c b/sys/dev/dpaa2/dpaa2_mc_fdt.c index a6babfc89ca9..a571f4cf7219 100644 --- a/sys/dev/dpaa2/dpaa2_mc_fdt.c +++ b/sys/dev/dpaa2/dpaa2_mc_fdt.c @@ -1,396 +1,397 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * Copyright © 2022 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) Bus Driver (FDT-based). * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "ofw_bus_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_mc_if.h" struct dpaa2_mac_fdt_softc { uint32_t reg; phandle_t sfp; phandle_t pcs_handle; phandle_t phy_handle; char managed[64]; char phy_conn_type[64]; }; #if 0 ethernet@1 { compatible = "fsl,qoriq-mc-dpmac"; reg = <0x1>; sfp = <0x14>; pcs-handle = <0x15>; phy-connection-type = "10gbase-r"; managed = "in-band-status"; }; ethernet@3 { compatible = "fsl,qoriq-mc-dpmac"; reg = <0x3>; phy-handle = <0x18>; phy-connection-type = "qsgmii"; managed = "in-band-status"; pcs-handle = <0x19>; }; #endif static int dpaa2_mac_dev_probe(device_t dev) { phandle_t node; uint64_t reg; ssize_t s; node = ofw_bus_get_node(dev); if (!ofw_bus_node_is_compatible(node, "fsl,qoriq-mc-dpmac")) { device_printf(dev, "'%s' not fsl,qoriq-mc-dpmac compatible\n", ofw_bus_get_name(dev)); return (ENXIO); } s = device_get_property(dev, "reg", ®, sizeof(reg), DEVICE_PROP_UINT32); if (s == -1) { device_printf(dev, "%s: '%s' has no 'reg' property, s %zd\n", __func__, ofw_bus_get_name(dev), s); return (ENXIO); } device_set_desc(dev, "DPAA2 MAC DEV"); return (BUS_PROBE_DEFAULT); } static int dpaa2_mac_fdt_attach(device_t dev) { struct dpaa2_mac_fdt_softc *sc; phandle_t node; ssize_t s; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); s = device_get_property(dev, "reg", &sc->reg, sizeof(sc->reg), DEVICE_PROP_UINT32); if (s == -1) { device_printf(dev, "Cannot find 'reg' property: %zd\n", s); return (ENXIO); } s = device_get_property(dev, "managed", sc->managed, sizeof(sc->managed), DEVICE_PROP_ANY); s = device_get_property(dev, "phy-connection-type", sc->phy_conn_type, sizeof(sc->phy_conn_type), DEVICE_PROP_ANY); s = device_get_property(dev, "pcs-handle", &sc->pcs_handle, sizeof(sc->pcs_handle), DEVICE_PROP_HANDLE); /* 'sfp' and 'phy-handle' are optional but we need one or the other. */ s = device_get_property(dev, "sfp", &sc->sfp, sizeof(sc->sfp), DEVICE_PROP_HANDLE); s = device_get_property(dev, "phy-handle", &sc->phy_handle, sizeof(sc->phy_handle), DEVICE_PROP_HANDLE); if (bootverbose) device_printf(dev, "node %#x '%s': reg %#x sfp %#x pcs-handle " "%#x phy-handle %#x managed '%s' phy-conn-type '%s'\n", node, ofw_bus_get_name(dev), sc->reg, sc->sfp, sc->pcs_handle, sc->phy_handle, sc->managed, sc->phy_conn_type); return (0); } static bool dpaa2_mac_fdt_match_id(device_t dev, uint32_t id) { struct dpaa2_mac_fdt_softc *sc; if (dev == NULL) return (false); sc = device_get_softc(dev); if (sc->reg == id) return (true); return (false); } static device_t dpaa2_mac_fdt_get_phy_dev(device_t dev) { struct dpaa2_mac_fdt_softc *sc; if (dev == NULL) return (NULL); sc = device_get_softc(dev); if (sc->phy_handle == 0 && sc->sfp == 0) return (NULL); #ifdef __not_yet__ /* No sff,sfp support yet. 
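 * Once such support lands, the "sfp" phandle would be resolved to the SFP
 * cage device the same way "phy-handle" is resolved at the end of this
 * function, i.e. via OF_device_from_xref(OF_xref_from_node(...)), as the
 * disabled block below already sketches.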
*/ if (sc->sfp != 0) { device_t xdev; xdev = OF_device_from_xref(OF_xref_from_node(sc->sfp)); if (xdev != NULL) return (xdev); } #endif return (OF_device_from_xref(OF_xref_from_node(sc->phy_handle))); } static device_method_t dpaa2_mac_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_mac_dev_probe), DEVMETHOD(device_attach, dpaa2_mac_fdt_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mac_fdt, dpaa2_mac_fdt_driver, dpaa2_mac_fdt_methods, sizeof(struct dpaa2_mac_fdt_softc)); DRIVER_MODULE(dpaa2_mac_fdt, dpaa2_mc, dpaa2_mac_fdt_driver, 0, 0); MODULE_DEPEND(dpaa2_mac_fdt, memac_mdio_fdt, 1, 1, 1); /* * Device interface. */ static int dpaa2_mc_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,qoriq-mc")) return (ENXIO); device_set_desc(dev, "DPAA2 Management Complex"); return (BUS_PROBE_DEFAULT); } static int dpaa2_mc_fdt_probe_child(device_t bus, phandle_t child) { device_t childdev; /* Make sure we do not already have a device. */ childdev = ofw_bus_find_child_device_by_phandle(bus, child); if (childdev != NULL) return (0); childdev = simplebus_add_device(bus, child, 0, "dpaa2_mac_fdt", -1, NULL); if (childdev == NULL) return (ENXIO); return (device_probe_and_attach(childdev)); } static int dpaa2_mc_fdt_attach(device_t dev) { struct dpaa2_mc_softc *sc; phandle_t node; phandle_t child; sc = device_get_softc(dev); sc->acpi_based = false; sc->ofw_node = ofw_bus_get_node(dev); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); + /* * Attach the children represented in the device tree. */ /* fsl-mc -> dpmacs */ node = OF_child(sc->ofw_node); simplebus_init(dev, node); /* Attach the dpmac children represented in the device tree. */ child = ofw_bus_find_compatible(node, "fsl,qoriq-mc-dpmac"); for (; child > 0; child = OF_peer(child)) { if (!ofw_bus_node_is_compatible(child, "fsl,qoriq-mc-dpmac")) continue; if (!OF_hasprop(child, "reg")) continue; if (dpaa2_mc_fdt_probe_child(dev, child) != 0) continue; } return (dpaa2_mc_attach(dev)); } /* * FDT compat layer. 
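 *
 * Consumers reach this layer through the DPAA2_MC_GET_PHY_DEV() bus method
 * (wired to dpaa2_mc_fdt_get_phy_dev() in the method table below), e.g. a
 * sketch, where "dpmac_id" is whatever id the network interface is wired to:
 *
 *	device_t phy_dev;
 *	if (DPAA2_MC_GET_PHY_DEV(mcdev, &phy_dev, dpmac_id) == 0)
 *		...query the PHY/MDIO device...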
*/ static device_t dpaa2_mc_fdt_find_dpaa2_mac_dev(device_t dev, uint32_t id) { int devcount, error, i, len; device_t *devlist, mdev; const char *mdevname; error = device_get_children(dev, &devlist, &devcount); if (error != 0) return (NULL); for (i = 0; i < devcount; i++) { mdev = devlist[i]; mdevname = device_get_name(mdev); if (mdevname == NULL) continue; len = strlen(mdevname); if (strncmp("dpaa2_mac_fdt", mdevname, len) != 0) continue; if (!device_is_attached(mdev)) continue; if (dpaa2_mac_fdt_match_id(mdev, id)) { /* Free the list allocated by device_get_children(). */ free(devlist, M_TEMP); return (mdev); } } free(devlist, M_TEMP); return (NULL); } static int dpaa2_mc_fdt_get_phy_dev(device_t dev, device_t *phy_dev, uint32_t id) { device_t mdev, pdev; mdev = dpaa2_mc_fdt_find_dpaa2_mac_dev(dev, id); if (mdev == NULL) { device_printf(dev, "%s: error finding dpmac device with id=%u\n", __func__, id); return (ENXIO); } pdev = dpaa2_mac_fdt_get_phy_dev(mdev); if (pdev == NULL) { device_printf(dev, "%s: error getting MDIO device for dpmac %s " "(id=%u)\n", __func__, device_get_nameunit(mdev), id); return (ENXIO); } if (phy_dev != NULL) *phy_dev = pdev; if (bootverbose) device_printf(dev, "dpmac_id %u mdev %p (%s) pdev %p (%s)\n", id, mdev, device_get_nameunit(mdev), pdev, device_get_nameunit(pdev)); return (0); } static const struct ofw_bus_devinfo * dpaa2_mc_simplebus_get_devinfo(device_t bus, device_t child) { return (OFW_BUS_GET_DEVINFO(device_get_parent(bus), child)); } static device_method_t dpaa2_mc_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_mc_fdt_probe), DEVMETHOD(device_attach, dpaa2_mc_fdt_attach), DEVMETHOD(device_detach, dpaa2_mc_detach), /* Bus interface */ DEVMETHOD(bus_get_rman, dpaa2_mc_rman), DEVMETHOD(bus_alloc_resource, dpaa2_mc_alloc_resource), DEVMETHOD(bus_adjust_resource, dpaa2_mc_adjust_resource), DEVMETHOD(bus_release_resource, dpaa2_mc_release_resource), DEVMETHOD(bus_activate_resource, dpaa2_mc_activate_resource), DEVMETHOD(bus_deactivate_resource, dpaa2_mc_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* Pseudo-PCIB interface */ DEVMETHOD(pcib_alloc_msi, dpaa2_mc_alloc_msi), DEVMETHOD(pcib_release_msi, dpaa2_mc_release_msi), DEVMETHOD(pcib_map_msi, dpaa2_mc_map_msi), DEVMETHOD(pcib_get_id, dpaa2_mc_get_id), /* DPAA2 MC bus interface */ DEVMETHOD(dpaa2_mc_manage_dev, dpaa2_mc_manage_dev), DEVMETHOD(dpaa2_mc_get_free_dev,dpaa2_mc_get_free_dev), DEVMETHOD(dpaa2_mc_get_dev, dpaa2_mc_get_dev), DEVMETHOD(dpaa2_mc_get_shared_dev, dpaa2_mc_get_shared_dev), DEVMETHOD(dpaa2_mc_reserve_dev, dpaa2_mc_reserve_dev), DEVMETHOD(dpaa2_mc_release_dev, dpaa2_mc_release_dev), DEVMETHOD(dpaa2_mc_get_phy_dev, dpaa2_mc_fdt_get_phy_dev), /* OFW/simplebus */ DEVMETHOD(ofw_bus_get_devinfo, dpaa2_mc_simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(dpaa2_mc, dpaa2_mc_fdt_driver, dpaa2_mc_fdt_methods, sizeof(struct dpaa2_mc_softc), dpaa2_mc_driver); DRIVER_MODULE(dpaa2_mc, simplebus, dpaa2_mc_fdt_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_rc.c b/sys/dev/dpaa2/dpaa2_rc.c index 49ed8944b64b..221af43f2bf7 100644 --- a/sys/dev/dpaa2/dpaa2_rc.c +++ b/sys/dev/dpaa2/dpaa2_rc.c @@ -1,3559 +1,3559 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and 
binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Resource Container (DPRC) bus driver. * * DPRC holds all the resources and object information that a software context * (kernel, virtual machine, etc.) can access or use. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_ni.h" #include "dpaa2_mc_if.h" #include "dpaa2_cmd_if.h" /* Timeouts to wait for a command response from MC. */ #define CMD_SPIN_TIMEOUT 100u /* us */ #define CMD_SPIN_ATTEMPTS 2000u /* max. 200 ms */ #define TYPE_LEN_MAX 16u #define LABEL_LEN_MAX 16u MALLOC_DEFINE(M_DPAA2_RC, "dpaa2_rc", "DPAA2 Resource Container"); /* Discover and add devices to the resource container. */ static int dpaa2_rc_discover(struct dpaa2_rc_softc *); static int dpaa2_rc_add_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *, struct dpaa2_cmd *, struct dpaa2_obj *); /* Helper routines. */ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *, struct dpaa2_cmd *, uint8_t, bool, uint16_t); static int dpaa2_rc_configure_irq(device_t, device_t, int, uint64_t, uint32_t); static int dpaa2_rc_add_res(device_t, device_t, enum dpaa2_dev_type, int *, int); static int dpaa2_rc_print_type(struct resource_list *, enum dpaa2_dev_type); static struct dpaa2_mcp *dpaa2_rc_select_portal(device_t, device_t); /* Routines to send commands to MC. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *, uint16_t); static int dpaa2_rc_send_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *, struct dpaa2_cmd *); static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *); static int dpaa2_rc_probe(device_t dev) { /* DPRC device will be added by the parent DPRC or MC bus itself. 
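 * In other words, no bus enumeration happens at probe time: the MC bus (or
 * a parent container) is expected to have created this device already,
 * with something like device_add_child(parent, "dpaa2_rc", -1).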
*/ device_set_desc(dev, "DPAA2 Resource Container"); return (BUS_PROBE_DEFAULT); } static int dpaa2_rc_detach(device_t dev) { struct dpaa2_devinfo *dinfo; int error; error = bus_generic_detach(dev); if (error) return (error); dinfo = device_get_ivars(dev); /* dinfo may be NULL if attach failed early: check before dereferencing. */ if (dinfo != NULL) { if (dinfo->portal != NULL) dpaa2_mcp_free_portal(dinfo->portal); free(dinfo, M_DPAA2_RC); } return (device_delete_children(dev)); } static int dpaa2_rc_attach(device_t dev) { device_t pdev; struct dpaa2_mc_softc *mcsc; struct dpaa2_rc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; sc = device_get_softc(dev); sc->dev = dev; sc->unit = device_get_unit(dev); if (sc->unit == 0) { /* Root DPRC should be attached directly to the MC bus. */ pdev = device_get_parent(dev); mcsc = device_get_softc(pdev); KASSERT(strcmp(device_get_name(pdev), "dpaa2_mc") == 0, ("root DPRC should be attached to the MC bus")); /* * Allocate devinfo to let the parent MC bus access ICID of the * DPRC object. */ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(dev, "%s: failed to allocate " "dpaa2_devinfo\n", __func__); dpaa2_rc_detach(dev); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = pdev; dinfo->dev = dev; dinfo->dtype = DPAA2_DEV_RC; dinfo->portal = NULL; /* Prepare helper portal object to send commands to MC. */ error = dpaa2_mcp_init_portal(&dinfo->portal, mcsc->res[0], &mcsc->map[0], DPAA2_PORTAL_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_mcp: " "error=%d\n", __func__, error); dpaa2_rc_detach(dev); return (ENXIO); } } else { /* TODO: Child DPRCs aren't supported yet. */ return (ENXIO); } /* Create DPAA2 devices for objects in this container. */ error = dpaa2_rc_discover(sc); if (error) { device_printf(dev, "%s: failed to discover objects in " "container: error=%d\n", __func__, error); dpaa2_rc_detach(dev); return (error); } return (0); } /* * Bus interface. */ static struct resource_list * dpaa2_rc_get_resource_list(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static void dpaa2_rc_delete_resource(device_t rcdev, device_t child, int type, int rid) { struct resource_list *rl; struct resource_list_entry *rle; struct dpaa2_devinfo *dinfo; if (device_get_parent(child) != rcdev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(rcdev, "%s: resource still owned by " "child: type=%d, rid=%d, start=%jx\n", __func__, type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, rcdev, child, type, rid); } resource_list_delete(rl, type, rid); } static struct resource * dpaa2_rc_alloc_multi_resource(device_t rcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list *rl; struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * By default, software portal interrupts are message-based, that is, * they are issued from QMan using a 4 byte write. * * TODO: However this default behavior can be changed by programming one * or more software portals to issue their interrupts via a * dedicated software portal interrupt wire. * See registers SWP_INTW0_CFG to SWP_INTW3_CFG for details. 
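 *
 * As a consequence, rid 0 (an INTx-style interrupt) is rejected below and
 * children are expected to use message-based interrupts only, e.g.:
 *
 *	int rid = 1;	/* first MSI, added by dpaa2_rc_alloc_msi() */
 *	struct resource *irq = bus_alloc_resource_any(child, SYS_RES_IRQ,
 *	    &rid, RF_ACTIVE);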
*/ if (type == SYS_RES_IRQ && *rid == 0) return (NULL); return (resource_list_alloc(rl, rcdev, child, type, rid, start, end, count, flags)); } static struct resource * dpaa2_rc_alloc_resource(device_t rcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { if (device_get_parent(child) != rcdev) return (BUS_ALLOC_RESOURCE(device_get_parent(rcdev), child, type, rid, start, end, count, flags)); return (dpaa2_rc_alloc_multi_resource(rcdev, child, type, rid, start, end, count, flags)); } static int dpaa2_rc_release_resource(device_t rcdev, device_t child, struct resource *r) { struct resource_list *rl; struct dpaa2_devinfo *dinfo; if (device_get_parent(child) != rcdev) return (BUS_RELEASE_RESOURCE(device_get_parent(rcdev), child, r)); dinfo = device_get_ivars(child); rl = &dinfo->resources; return (resource_list_release(rl, rcdev, child, r)); } static void dpaa2_rc_child_deleted(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { device_printf(child, "%s: resource still owned: " "type=%d, rid=%d, addr=%lx\n", __func__, rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, rcdev, child, rle->type, rle->rid); } } resource_list_free(rl); if (dinfo) free(dinfo, M_DPAA2_RC); } static void dpaa2_rc_child_detached(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; if (resource_list_release_active(rl, rcdev, child, SYS_RES_IRQ) != 0) device_printf(child, "%s: leaked IRQ resources!\n", __func__); if (dinfo->msi.msi_alloc != 0) { device_printf(child, "%s: leaked %d MSI vectors!\n", __func__, dinfo->msi.msi_alloc); PCI_RELEASE_MSI(rcdev, child); } if (resource_list_release_active(rl, rcdev, child, SYS_RES_MEMORY) != 0) device_printf(child, "%s: leaked memory resources!\n", __func__); } static int dpaa2_rc_setup_intr(device_t rcdev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct dpaa2_devinfo *dinfo; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(rcdev, child, irq, flags, filter, intr, arg, &cookie); if (error) { device_printf(rcdev, "%s: bus_generic_setup_intr() failed: " "error=%d\n", __func__, error); return (error); } /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != rcdev) { *cookiep = cookie; return (0); } rid = rman_get_rid(irq); if (rid == 0) { if (bootverbose) device_printf(rcdev, "%s: cannot setup interrupt with " "rid=0: INTx are not supported by DPAA2 objects " "yet\n", __func__); return (EINVAL); } else { dinfo = device_get_ivars(child); KASSERT(dinfo->msi.msi_alloc > 0, ("No MSI interrupts allocated")); /* * Ask our parent to map the MSI and give us the address and * data register values. If we fail for some reason, teardown * the interrupt handler. 
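 * The (addr, data) pair returned by the parent is the doorbell write that
 * raises this MSI (on GICv3 platforms typically the ITS translation
 * register and an event id); dpaa2_rc_configure_irq() below then programs
 * it into the DPAA2 object itself.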
*/ error = PCIB_MAP_MSI(device_get_parent(rcdev), child, rman_get_start(irq), &addr, &data); if (error) { device_printf(rcdev, "%s: PCIB_MAP_MSI failed: " "error=%d\n", __func__, error); (void)bus_generic_teardown_intr(rcdev, child, irq, cookie); return (error); } /* Configure MSI for this DPAA2 object. */ error = dpaa2_rc_configure_irq(rcdev, child, rid, addr, data); if (error) { device_printf(rcdev, "%s: failed to configure IRQ for " "DPAA2 object: rid=%d, type=%s, unit=%d\n", __func__, rid, dpaa2_ttos(dinfo->dtype), device_get_unit(child)); return (error); } dinfo->msi.msi_handlers++; } *cookiep = cookie; return (0); } static int dpaa2_rc_teardown_intr(device_t rcdev, device_t child, struct resource *irq, void *cookie) { struct resource_list_entry *rle; struct dpaa2_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != rcdev) return(bus_generic_teardown_intr(rcdev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { if (bootverbose) device_printf(rcdev, "%s: cannot teardown interrupt " "with rid=0: INTx are not supported by DPAA2 " "objects yet\n", __func__); return (EINVAL); } else { dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); dinfo->msi.msi_handlers--; } error = bus_generic_teardown_intr(rcdev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI", __func__)); return (error); } static int dpaa2_rc_print_child(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; int retval = 0; retval += bus_print_child_header(rcdev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); /* Print DPAA2-specific resources. */ retval += dpaa2_rc_print_type(rl, DPAA2_DEV_IO); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_BP); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_CON); retval += dpaa2_rc_print_type(rl, DPAA2_DEV_MCP); retval += printf(" at %s (id=%u)", dpaa2_ttos(dinfo->dtype), dinfo->id); retval += bus_print_child_domain(rcdev, child); retval += bus_print_child_footer(rcdev, child); return (retval); } /* * Pseudo-PCI interface. */ /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. * * NOTE: Implementation is similar to sys/dev/pci/pci.c. */ static int dpaa2_rc_alloc_msi(device_t rcdev, device_t child, int *count) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); int error, actual, i, run, irqs[32]; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* MSI should be allocated by the resource container. */ if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); /* Already have allocated messages? */ if (dinfo->msi.msi_alloc != 0) return (ENXIO); /* Don't ask for more than the device supports. */ actual = min(*count, dinfo->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. 
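 * falling back to the next smaller power of two on failure, e.g. a request
 * for 8 vectors is retried as 8 -> 4 -> 2 -> 1 until an attempt succeeds
 * or even a single message cannot be allocated.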
*/ error = PCIB_ALLOC_MSI(device_get_parent(rcdev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ resources in * the irqs[] array, so add new resources starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) { device_printf(child, "using IRQ %d for MSI\n", irqs[0]); } else { /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update counts of alloc'd messages. */ dinfo->msi.msi_alloc = actual; dinfo->msi.msi_handlers = 0; *count = actual; return (0); } /* * Release the MSI messages associated with this DPAA2 device. * * NOTE: Implementation is similar to sys/dev/pci/pci.c. */ static int dpaa2_rc_release_msi(device_t rcdev, device_t child) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource_list_entry *rle; int i, irqs[32]; /* MSI should be released by the resource container. */ if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); /* Do we have any messages to release? */ if (dinfo->msi.msi_alloc == 0) return (ENODEV); KASSERT(dinfo->msi.msi_alloc <= 32, ("more than 32 alloc'd MSI messages")); /* Make sure none of the resources are allocated. */ if (dinfo->msi.msi_handlers > 0) return (EBUSY); for (i = 0; i < dinfo->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(rcdev), child, dinfo->msi.msi_alloc, irqs); for (i = 0; i < dinfo->msi.msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ dinfo->msi.msi_alloc = 0; return (0); } /** * @brief Return the maximum number of the MSI supported by this DPAA2 device. */ static int dpaa2_rc_msi_count(device_t rcdev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(child); return (dinfo->msi.msi_msgnum); } static int dpaa2_rc_get_id(device_t rcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); if (rcinfo->dtype != DPAA2_DEV_RC) return (ENODEV); return (PCIB_GET_ID(device_get_parent(rcdev), child, type, id)); } /* * DPAA2 MC command interface. 
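 *
 * Every wrapper below follows the same pattern: marshal the arguments into
 * cmd->params[], execute the command on the selected portal and unmarshal
 * the response. A typical sequence from a child driver looks roughly like
 * this (a sketch; error handling omitted):
 *
 *	struct dpaa2_cmd cmd;
 *	uint16_t rc_token;
 *	uint32_t obj_count;
 *
 *	DPAA2_CMD_INIT(&cmd);
 *	DPAA2_CMD_RC_OPEN(dev, child, &cmd, cont_id, &rc_token);
 *	DPAA2_CMD_RC_GET_OBJ_COUNT(dev, child, &cmd, &obj_count);
 *	DPAA2_CMD_RC_CLOSE(dev, child, &cmd);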
*/ static int dpaa2_rc_mng_get_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *major, uint32_t *minor, uint32_t *rev) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL || rev == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_VER); if (!error) { *major = cmd->params[0] >> 32; *minor = cmd->params[1] & 0xFFFFFFFF; *rev = cmd->params[0] & 0xFFFFFFFF; } return (error); } static int dpaa2_rc_mng_get_soc_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *pvr, uint32_t *svr) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || pvr == NULL || svr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_SOC_VER); if (!error) { *pvr = cmd->params[0] >> 32; *svr = cmd->params[0] & 0xFFFFFFFF; } return (error); } static int dpaa2_rc_mng_get_container_id(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *cont_id) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cont_id == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MNG_GET_CONT_ID); if (!error) *cont_id = cmd->params[0] & 0xFFFFFFFF; return (error); } static int dpaa2_rc_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t cont_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = cont_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_CLOSE)); } static int dpaa2_rc_get_obj_count(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t *obj_count) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj_count == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_COUNT); if (!error) *obj_count = (uint32_t)(cmd->params[0] >> 32); return (error); } static int dpaa2_rc_get_obj(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_idx, struct dpaa2_obj *obj) { struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = obj_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ); if (!error) { pobj = (struct dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, 
sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. */ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_obj_descriptor(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, enum dpaa2_dev_type dtype, struct dpaa2_obj *obj) { struct __packed get_obj_desc_args { uint32_t obj_id; uint32_t _reserved1; uint8_t type[16]; } *args; struct __packed dpaa2_obj_resp { uint32_t _reserved1; uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint16_t _reserved2; uint8_t type[16]; uint8_t label[16]; } *pobj; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || obj == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct get_obj_desc_args *) &cmd->params[0]; args->obj_id = obj_id; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_OBJ_DESC); if (!error) { pobj = (struct dpaa2_obj_resp *) &cmd->params[0]; obj->id = pobj->id; obj->vendor = pobj->vendor; obj->irq_count = pobj->irq_count; obj->reg_count = pobj->reg_count; obj->state = pobj->state; obj->ver_major = pobj->ver_major; obj->ver_minor = pobj->ver_minor; obj->flags = pobj->flags; obj->type = dpaa2_stot((const char *) pobj->type); memcpy(obj->label, pobj->label, sizeof(pobj->label)); } /* Some DPAA2 objects might not be supported by the driver yet. */ if (obj->type == DPAA2_DEV_NOTYPE) error = DPAA2_CMD_STAT_UNKNOWN_OBJ; return (error); } static int dpaa2_rc_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_rc_attr *attr) { struct __packed dpaa2_rc_attr { uint32_t cont_id; uint32_t icid; uint32_t options; uint32_t portal_id; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_ATTR); if (!error) { pattr = (struct dpaa2_rc_attr *) &cmd->params[0]; attr->cont_id = pattr->cont_id; attr->portal_id = pattr->portal_id; attr->options = pattr->options; attr->icid = pattr->icid; } return (error); } static int dpaa2_rc_get_obj_region(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t obj_id, uint8_t reg_idx, enum dpaa2_dev_type dtype, struct dpaa2_rc_obj_region *reg) { struct __packed obj_region_args { uint32_t obj_id; uint16_t _reserved1; uint8_t reg_idx; uint8_t _reserved2; uint64_t _reserved3; uint64_t _reserved4; uint8_t type[16]; } *args; struct __packed obj_region { uint64_t _reserved1; uint64_t base_offset; uint32_t size; uint32_t type; uint32_t flags; uint32_t _reserved2; uint64_t base_paddr; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); uint16_t cmdid, api_major, api_minor; const char *type = dpaa2_ttos(dtype); int error; if (portal == NULL || cmd == NULL || reg == NULL) return (DPAA2_CMD_STAT_ERR); /* * If the DPRC object version was not yet cached, cache it now. * Otherwise use the already cached value. */ if (!portal->rc_api_major && !portal->rc_api_minor) { error = DPAA2_CMD_RC_GET_API_VERSION(dev, child, cmd, &api_major, &api_minor); if (error) return (error); portal->rc_api_major = api_major; portal->rc_api_minor = api_minor; } else { api_major = portal->rc_api_major; api_minor = portal->rc_api_minor; } /* TODO: Remove magic numbers. 
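 * A named helper would make the version checks below self-describing, e.g.
 * (a sketch only; DPRC_API_VER() is not an existing macro in this driver):
 *
 *	#define DPRC_API_VER(maj, min)	(((maj) << 16) | (min))
 *	if (DPRC_API_VER(api_major, api_minor) >= DPRC_API_VER(6, 6))
 *		cmdid = CMDID_RC_GET_OBJ_REG_V3;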
*/ if (api_major > 6u || (api_major == 6u && api_minor >= 6u)) /* * MC API version 6.6 changed the size of the MC portals and * software portals to 64K (as implemented by hardware). */ cmdid = CMDID_RC_GET_OBJ_REG_V3; else if (api_major == 6u && api_minor >= 3u) /* * MC API version 6.3 introduced a new field to the region * descriptor: base_address. */ cmdid = CMDID_RC_GET_OBJ_REG_V2; else cmdid = CMDID_RC_GET_OBJ_REG; args = (struct obj_region_args *) &cmd->params[0]; args->obj_id = obj_id; args->reg_idx = reg_idx; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); error = dpaa2_rc_exec_cmd(portal, cmd, cmdid); if (!error) { resp = (struct obj_region *) &cmd->params[0]; reg->base_paddr = resp->base_paddr; reg->base_offset = resp->base_offset; reg->size = resp->size; reg->flags = resp->flags; reg->type = resp->type & 0xFu; } return (error); } static int dpaa2_rc_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor) { struct __packed rc_api_version { uint16_t major; uint16_t minor; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_API_VERSION); if (!error) { resp = (struct rc_api_version *) &cmd->params[0]; *major = resp->major; *minor = resp->minor; } return (error); } static int dpaa2_rc_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint8_t enable) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_enable_irq(portal, cmd, irq_idx, enable, CMDID_RC_SET_IRQ_ENABLE)); } static int dpaa2_rc_set_obj_irq(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint64_t addr, uint32_t data, uint32_t irq_usr, uint32_t obj_id, enum dpaa2_dev_type dtype) { struct __packed set_obj_irq_args { uint32_t data; uint8_t irq_idx; uint8_t _reserved1[3]; uint64_t addr; uint32_t irq_usr; uint32_t obj_id; uint8_t type[16]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); const char *type = dpaa2_ttos(dtype); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_obj_irq_args *) &cmd->params[0]; args->irq_idx = irq_idx; args->addr = addr; args->data = data; args->irq_usr = irq_usr; args->obj_id = obj_id; memcpy(args->type, type, min(strlen(type) + 1, TYPE_LEN_MAX)); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_SET_OBJ_IRQ)); } static int dpaa2_rc_get_conn(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ep_desc *ep1_desc, struct dpaa2_ep_desc *ep2_desc, uint32_t *link_stat) { struct __packed get_conn_args { uint32_t ep1_id; uint32_t ep1_ifid; uint8_t ep1_type[16]; uint64_t _reserved[4]; } *args; struct __packed get_conn_resp { uint64_t _reserved1[3]; uint32_t ep2_id; uint32_t ep2_ifid; uint8_t ep2_type[16]; uint32_t link_stat; uint32_t _reserved2; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || ep1_desc == NULL || ep2_desc == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct get_conn_args *) &cmd->params[0]; args->ep1_id = ep1_desc->obj_id; args->ep1_ifid = ep1_desc->if_id; /* TODO: Remove magic number. 
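 * The magic "16" below is the size of the ep1_type field; TYPE_LEN_MAX,
 * already defined as 16u at the top of this file, could be used instead:
 *
 *	strncpy(args->ep1_type, dpaa2_ttos(ep1_desc->type), TYPE_LEN_MAX);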
*/ strncpy(args->ep1_type, dpaa2_ttos(ep1_desc->type), 16); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_RC_GET_CONN); if (!error) { resp = (struct get_conn_resp *) &cmd->params[0]; ep2_desc->obj_id = resp->ep2_id; ep2_desc->if_id = resp->ep2_ifid; ep2_desc->type = dpaa2_stot((const char *) resp->ep2_type); if (link_stat != NULL) *link_stat = resp->link_stat; } return (error); } static int dpaa2_rc_ni_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpni_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpni_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_ni_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLOSE)); } static int dpaa2_rc_ni_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ENABLE)); } static int dpaa2_rc_ni_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_DISABLE)); } static int dpaa2_rc_ni_get_api_version(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *major, uint16_t *minor) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || major == NULL || minor == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_API_VER); if (!error) { *major = cmd->params[0] & 0xFFFFU; *minor = (cmd->params[0] >> 16) & 0xFFFFU; } return (error); } static int dpaa2_rc_ni_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_RESET)); } static int dpaa2_rc_ni_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_attr *attr) { struct __packed ni_attr { uint32_t options; uint8_t num_queues; uint8_t num_rx_tcs; uint8_t mac_entries; uint8_t num_tx_tcs; uint8_t vlan_entries; uint8_t num_channels; uint8_t qos_entries; uint8_t _reserved1; uint16_t fs_entries; uint16_t _reserved2; uint8_t qos_key_size; uint8_t fs_key_size; uint16_t wriop_ver; uint8_t num_cgs; uint8_t _reserved3; uint16_t _reserved4; uint64_t _reserved5[4]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_ATTR); if (!error) { resp = (struct ni_attr *) &cmd->params[0]; attr->options = resp->options; attr->wriop_ver = resp->wriop_ver; attr->entries.fs = resp->fs_entries; attr->entries.mac = resp->mac_entries; attr->entries.vlan = resp->vlan_entries; attr->entries.qos = resp->qos_entries; attr->num.queues = resp->num_queues; attr->num.rx_tcs = resp->num_rx_tcs; attr->num.tx_tcs = 
resp->num_tx_tcs; attr->num.channels = resp->num_channels; attr->num.cgs = resp->num_cgs; attr->key_size.fs = resp->fs_key_size; attr->key_size.qos = resp->qos_key_size; } return (error); } static int dpaa2_rc_ni_set_buf_layout(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_buf_layout *bl) { struct __packed set_buf_layout_args { uint8_t queue_type; uint8_t _reserved1; uint16_t _reserved2; uint16_t options; uint8_t params; uint8_t _reserved3; uint16_t priv_data_size; uint16_t data_align; uint16_t head_room; uint16_t tail_room; uint64_t _reserved4[5]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || bl == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_buf_layout_args *) &cmd->params[0]; args->queue_type = (uint8_t) bl->queue_type; args->options = bl->options; args->params = 0; args->priv_data_size = bl->pd_size; args->data_align = bl->fd_align; args->head_room = bl->head_size; args->tail_room = bl->tail_size; args->params |= bl->pass_timestamp ? 1U : 0U; args->params |= bl->pass_parser_result ? 2U : 0U; args->params |= bl->pass_frame_status ? 4U : 0U; args->params |= bl->pass_sw_opaque ? 8U : 0U; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_BUF_LAYOUT)); } static int dpaa2_rc_ni_get_tx_data_offset(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t *offset) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || offset == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_TX_DATA_OFF); if (!error) *offset = cmd->params[0] & 0xFFFFU; return (error); } static int dpaa2_rc_ni_get_port_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PORT_MAC_ADDR); if (!error) { mac[0] = (cmd->params[0] >> 56) & 0xFFU; mac[1] = (cmd->params[0] >> 48) & 0xFFU; mac[2] = (cmd->params[0] >> 40) & 0xFFU; mac[3] = (cmd->params[0] >> 32) & 0xFFU; mac[4] = (cmd->params[0] >> 24) & 0xFFU; mac[5] = (cmd->params[0] >> 16) & 0xFFU; } return (error); } static int dpaa2_rc_ni_set_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed set_prim_mac_args { uint8_t _reserved[2]; uint8_t mac[ETHER_ADDR_LEN]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); args = (struct set_prim_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_PRIM_MAC_ADDR)); } static int dpaa2_rc_ni_get_prim_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed get_prim_mac_resp { uint8_t _reserved[2]; uint8_t mac[ETHER_ADDR_LEN]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_PRIM_MAC_ADDR); if (!error) { resp = (struct get_prim_mac_resp *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) mac[ETHER_ADDR_LEN - i] = resp->mac[i - 1]; } return (error); } static int dpaa2_rc_ni_set_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct 
dpaa2_ni_link_cfg *cfg) { struct __packed link_cfg_args { uint64_t _reserved1; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t adv_speeds; uint64_t _reserved3[3]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); args = (struct link_cfg_args *) &cmd->params[0]; args->rate = cfg->rate; args->options = cfg->options; args->adv_speeds = cfg->adv_speeds; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_LINK_CFG)); } static int dpaa2_rc_ni_get_link_cfg(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_link_cfg *cfg) { struct __packed link_cfg_resp { uint64_t _reserved1; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t adv_speeds; uint64_t _reserved3[3]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_CFG); if (!error) { resp = (struct link_cfg_resp *) &cmd->params[0]; cfg->rate = resp->rate; cfg->options = resp->options; cfg->adv_speeds = resp->adv_speeds; } return (error); } static int dpaa2_rc_ni_get_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_link_state *state) { struct __packed link_state_resp { uint32_t _reserved1; uint32_t flags; uint32_t rate; uint32_t _reserved2; uint64_t options; uint64_t supported; uint64_t advert; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || state == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_LINK_STATE); if (!error) { resp = (struct link_state_resp *) &cmd->params[0]; state->options = resp->options; state->adv_speeds = resp->advert; state->sup_speeds = resp->supported; state->rate = resp->rate; state->link_up = resp->flags & 0x1u ? true : false; state->state_valid = resp->flags & 0x2u ? true : false; } return (error); } static int dpaa2_rc_ni_set_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_qos_table *tbl) { struct __packed qos_table_args { uint32_t _reserved1; uint8_t default_tc; uint8_t options; uint16_t _reserved2; uint64_t _reserved[5]; uint64_t kcfg_busaddr; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || tbl == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct qos_table_args *) &cmd->params[0]; args->default_tc = tbl->default_tc; args->kcfg_busaddr = tbl->kcfg_busaddr; args->options |= tbl->discard_on_miss ? 1U : 0U; args->options |= tbl->keep_entries ? 
2U : 0U; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QOS_TABLE)); } static int dpaa2_rc_ni_clear_qos_table(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_QOS_TABLE)); } static int dpaa2_rc_ni_set_pools(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_pools_cfg *cfg) { struct __packed set_pools_args { uint8_t pools_num; uint8_t backup_pool_mask; uint8_t _reserved1; uint8_t pool_as; /* assigning: 0 - QPRI, 1 - QDBIN */ uint32_t bp_obj_id[DPAA2_NI_MAX_POOLS]; uint16_t buf_sz[DPAA2_NI_MAX_POOLS]; uint32_t _reserved2; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_pools_args *) &cmd->params[0]; args->pools_num = cfg->pools_num < DPAA2_NI_MAX_POOLS ? cfg->pools_num : DPAA2_NI_MAX_POOLS; for (uint32_t i = 0; i < args->pools_num; i++) { args->bp_obj_id[i] = cfg->pools[i].bp_obj_id; args->buf_sz[i] = cfg->pools[i].buf_sz; args->backup_pool_mask |= (cfg->pools[i].backup_flag & 1) << i; } return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_POOLS)); } static int dpaa2_rc_ni_set_err_behavior(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_err_cfg *cfg) { struct __packed err_behavior_args { uint32_t err_mask; uint8_t flags; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct err_behavior_args *) &cmd->params[0]; args->err_mask = cfg->err_mask; args->flags |= cfg->set_err_fas ? 0x10u : 0u; args->flags |= ((uint8_t) cfg->action) & 0x0Fu; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_ERR_BEHAVIOR)); } static int dpaa2_rc_ni_get_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_queue_cfg *cfg) { struct __packed get_queue_args { uint8_t queue_type; uint8_t tc; uint8_t idx; uint8_t chan_id; } *args; struct __packed get_queue_resp { uint64_t _reserved1; uint32_t dest_id; uint16_t _reserved2; uint8_t priority; uint8_t flags; uint64_t flc; uint64_t user_ctx; uint32_t fqid; uint16_t qdbin; uint16_t _reserved3; uint8_t cgid; uint8_t _reserved[15]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_queue_args *) &cmd->params[0]; args->queue_type = (uint8_t) cfg->type; args->tc = cfg->tc; args->idx = cfg->idx; args->chan_id = cfg->chan_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QUEUE); if (!error) { resp = (struct get_queue_resp *) &cmd->params[0]; cfg->dest_id = resp->dest_id; cfg->priority = resp->priority; cfg->flow_ctx = resp->flc; cfg->user_ctx = resp->user_ctx; cfg->fqid = resp->fqid; cfg->qdbin = resp->qdbin; cfg->cgid = resp->cgid; cfg->dest_type = (enum dpaa2_ni_dest_type) resp->flags & 0x0Fu; cfg->cgid_valid = (resp->flags & 0x20u) > 0u ? true : false; cfg->stash_control = (resp->flags & 0x40u) > 0u ? true : false; cfg->hold_active = (resp->flags & 0x80u) > 0u ? 
true : false; } return (error); } static int dpaa2_rc_ni_set_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_ni_queue_cfg *cfg) { struct __packed set_queue_args { uint8_t queue_type; uint8_t tc; uint8_t idx; uint8_t options; uint32_t _reserved1; uint32_t dest_id; uint16_t _reserved2; uint8_t priority; uint8_t flags; uint64_t flc; uint64_t user_ctx; uint8_t cgid; uint8_t chan_id; uint8_t _reserved[23]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_queue_args *) &cmd->params[0]; args->queue_type = (uint8_t) cfg->type; args->tc = cfg->tc; args->idx = cfg->idx; args->options = cfg->options; args->dest_id = cfg->dest_id; args->priority = cfg->priority; args->flc = cfg->flow_ctx; args->user_ctx = cfg->user_ctx; args->cgid = cfg->cgid; args->chan_id = cfg->chan_id; args->flags |= (uint8_t)(cfg->dest_type & 0x0Fu); args->flags |= cfg->stash_control ? 0x40u : 0u; args->flags |= cfg->hold_active ? 0x80u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_QUEUE)); } static int dpaa2_rc_ni_get_qdid(device_t dev, device_t child, struct dpaa2_cmd *cmd, enum dpaa2_ni_queue_type type, uint16_t *qdid) { struct __packed get_qdid_args { uint8_t queue_type; } *args; struct __packed get_qdid_resp { uint16_t qdid; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || qdid == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_qdid_args *) &cmd->params[0]; args->queue_type = (uint8_t) type; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_QDID); if (!error) { resp = (struct get_qdid_resp *) &cmd->params[0]; *qdid = resp->qdid; } return (error); } static int dpaa2_rc_ni_add_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed add_mac_args { uint8_t flags; uint8_t _reserved; uint8_t mac[ETHER_ADDR_LEN]; uint8_t tc_id; uint8_t fq_id; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct add_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_ADD_MAC_ADDR)); } static int dpaa2_rc_ni_remove_mac_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct __packed rem_mac_args { uint16_t _reserved; uint8_t mac[ETHER_ADDR_LEN]; uint64_t _reserved1[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct rem_mac_args *) &cmd->params[0]; for (int i = 1; i <= ETHER_ADDR_LEN; i++) args->mac[i - 1] = mac[ETHER_ADDR_LEN - i]; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_REMOVE_MAC_ADDR)); } static int dpaa2_rc_ni_clear_mac_filters(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool rm_uni, bool rm_multi) { struct __packed clear_mac_filters_args { uint8_t flags; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct clear_mac_filters_args *) &cmd->params[0]; args->flags |= rm_uni ? 0x1 : 0x0; args->flags |= rm_multi ? 
0x2 : 0x0; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_CLEAR_MAC_FILTERS)); } static int dpaa2_rc_ni_set_mfl(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t length) { struct __packed set_mfl_args { uint16_t length; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_mfl_args *) &cmd->params[0]; args->length = length; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MFL)); } static int dpaa2_rc_ni_set_offload(device_t dev, device_t child, struct dpaa2_cmd *cmd, enum dpaa2_ni_ofl_type ofl_type, bool en) { struct __packed set_ofl_args { uint8_t _reserved[3]; uint8_t ofl_type; uint32_t config; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_ofl_args *) &cmd->params[0]; args->ofl_type = (uint8_t) ofl_type; args->config = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_OFFLOAD)); } static int dpaa2_rc_ni_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_MASK)); } static int dpaa2_rc_ni_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_IRQ_ENABLE)); } static int dpaa2_rc_ni_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_ni_set_uni_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en) { struct __packed set_uni_promisc_args { uint8_t en; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_uni_promisc_args *) &cmd->params[0]; args->en = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_UNI_PROMISC)); } static int dpaa2_rc_ni_set_multi_promisc(device_t dev, device_t child, struct dpaa2_cmd *cmd, bool en) { /* TODO: Implementation is the same as for ni_set_uni_promisc(). 
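 * Both could be collapsed into a single helper parameterized by the
 * command id, e.g. (a sketch; dpaa2_rc_ni_set_promisc() does not exist):
 *
 *	static int
 *	dpaa2_rc_ni_set_promisc(device_t dev, device_t child,
 *	    struct dpaa2_cmd *cmd, uint16_t cmdid, bool en)
 *	{
 *		...same body as below, passing cmdid to dpaa2_rc_exec_cmd()...
 *	}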
*/ struct __packed set_multi_promisc_args { uint8_t en; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_multi_promisc_args *) &cmd->params[0]; args->en = en ? 1u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_MULTI_PROMISC)); } static int dpaa2_rc_ni_get_statistics(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t page, uint16_t param, uint64_t *cnt) { struct __packed get_statistics_args { uint8_t page; uint16_t param; } *args; struct __packed get_statistics_resp { uint64_t cnt[7]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || cnt == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_statistics_args *) &cmd->params[0]; args->page = page; args->param = param; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_GET_STATISTICS); if (!error) { resp = (struct get_statistics_resp *) &cmd->params[0]; for (int i = 0; i < DPAA2_NI_STAT_COUNTERS; i++) cnt[i] = resp->cnt[i]; } return (error); } static int dpaa2_rc_ni_set_rx_tc_dist(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint16_t dist_size, uint8_t tc, enum dpaa2_ni_dist_mode dist_mode, bus_addr_t key_cfg_buf) { struct __packed set_rx_tc_dist_args { uint16_t dist_size; uint8_t tc; uint8_t ma_dm; /* miss action + dist. mode */ uint32_t _reserved1; uint64_t _reserved2[5]; uint64_t key_cfg_iova; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_rx_tc_dist_args *) &cmd->params[0]; args->dist_size = dist_size; args->tc = tc; args->ma_dm = ((uint8_t) dist_mode) & 0x0Fu; args->key_cfg_iova = key_cfg_buf; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_NI_SET_RX_TC_DIST)); } static int dpaa2_rc_io_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpio_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpio_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_io_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_CLOSE)); } static int dpaa2_rc_io_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ENABLE)); } static int dpaa2_rc_io_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_DISABLE)); } static int dpaa2_rc_io_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_RESET)); } static int 
dpaa2_rc_io_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_io_attr *attr) { struct __packed dpaa2_io_attr { uint32_t id; uint16_t swp_id; uint8_t priors_num; uint8_t chan_mode; uint64_t swp_ce_paddr; uint64_t swp_ci_paddr; uint32_t swp_version; uint32_t _reserved1; uint32_t swp_clk; uint32_t _reserved2[5]; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_ATTR); if (!error) { pattr = (struct dpaa2_io_attr *) &cmd->params[0]; attr->swp_ce_paddr = pattr->swp_ce_paddr; attr->swp_ci_paddr = pattr->swp_ci_paddr; attr->swp_version = pattr->swp_version; attr->swp_clk = pattr->swp_clk; attr->id = pattr->id; attr->swp_id = pattr->swp_id; attr->priors_num = pattr->priors_num; attr->chan_mode = (enum dpaa2_io_chan_mode) pattr->chan_mode; } return (error); } static int dpaa2_rc_io_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { /* TODO: Extract similar *_set_irq_mask() into one function. */ struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_MASK)); } static int dpaa2_rc_io_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { /* TODO: Extract similar *_get_irq_status() into one function. */ struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_io_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { /* TODO: Extract similar *_set_irq_enable() into one function. */ struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 
1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_SET_IRQ_ENABLE)); } static int dpaa2_rc_io_add_static_dq_chan(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint8_t *chan_idx) { struct __packed add_static_dq_chan_args { uint32_t dpcon_id; } *args; struct __packed add_static_dq_chan_resp { uint8_t chan_idx; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || chan_idx == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct add_static_dq_chan_args *) &cmd->params[0]; args->dpcon_id = dpcon_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_IO_ADD_STATIC_DQ_CHAN); if (!error) { resp = (struct add_static_dq_chan_resp *) &cmd->params[0]; *chan_idx = resp->chan_idx; } return (error); } static int dpaa2_rc_bp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpbp_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpbp_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_bp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_CLOSE)); } static int dpaa2_rc_bp_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_ENABLE)); } static int dpaa2_rc_bp_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_DISABLE)); } static int dpaa2_rc_bp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_RESET)); } static int dpaa2_rc_bp_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_bp_attr *attr) { struct __packed dpaa2_bp_attr { uint16_t _reserved1; uint16_t bpid; uint32_t id; } *pattr; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_BP_GET_ATTR); if (!error) { pattr = (struct dpaa2_bp_attr *) &cmd->params[0]; attr->id = pattr->id; attr->bpid = pattr->bpid; } return (error); } static int dpaa2_rc_mac_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmac_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpmac_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_mac_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { 
struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_CLOSE)); } static int dpaa2_rc_mac_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_RESET)); } static int dpaa2_rc_mac_mdio_read(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy, uint16_t reg, uint16_t *val) { struct __packed mdio_read_args { uint8_t clause; /* set to 0 by default */ uint8_t phy; uint16_t reg; uint32_t _reserved1; uint64_t _reserved2[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || val == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mdio_read_args *) &cmd->params[0]; args->phy = phy; args->reg = reg; args->clause = 0; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_READ); if (!error) *val = cmd->params[0] & 0xFFFF; return (error); } static int dpaa2_rc_mac_mdio_write(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t phy, uint16_t reg, uint16_t val) { struct __packed mdio_write_args { uint8_t clause; /* set to 0 by default */ uint8_t phy; uint16_t reg; uint16_t val; uint16_t _reserved1; uint64_t _reserved2[6]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mdio_write_args *) &cmd->params[0]; args->phy = phy; args->reg = reg; args->val = val; args->clause = 0; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_MDIO_WRITE)); } static int dpaa2_rc_mac_get_addr(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t *mac) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || mac == NULL) return (DPAA2_CMD_STAT_ERR); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ADDR); if (!error) { mac[0] = (cmd->params[0] >> 56) & 0xFFU; mac[1] = (cmd->params[0] >> 48) & 0xFFU; mac[2] = (cmd->params[0] >> 40) & 0xFFU; mac[3] = (cmd->params[0] >> 32) & 0xFFU; mac[4] = (cmd->params[0] >> 24) & 0xFFU; mac[5] = (cmd->params[0] >> 16) & 0xFFU; } return (error); } static int dpaa2_rc_mac_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_mac_attr *attr) { struct __packed mac_attr_resp { uint8_t eth_if; uint8_t link_type; uint16_t id; uint32_t max_rate; uint8_t fec_mode; uint8_t ifg_mode; uint8_t ifg_len; uint8_t _reserved1; uint32_t _reserved2; uint8_t sgn_post_pre; uint8_t serdes_cfg_mode; uint8_t eq_amp_red; uint8_t eq_post1q; uint8_t eq_preq; uint8_t eq_type; uint16_t _reserved3; uint64_t _reserved[4]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_ATTR); if (!error) { resp = (struct mac_attr_resp *) &cmd->params[0]; attr->id = resp->id; attr->max_rate = resp->max_rate; attr->eth_if = resp->eth_if; attr->link_type = resp->link_type; } return (error); } static int dpaa2_rc_mac_set_link_state(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_mac_link_state *state) { struct __packed mac_set_link_args { uint64_t options; uint32_t rate; uint32_t _reserved1; uint32_t flags; uint32_t _reserved2; uint64_t supported; uint64_t advert; } *args; struct 
dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || state == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct mac_set_link_args *) &cmd->params[0]; args->options = state->options; args->rate = state->rate; args->supported = state->supported; args->advert = state->advert; args->flags |= state->up ? 0x1u : 0u; args->flags |= state->state_valid ? 0x2u : 0u; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_LINK_STATE)); } static int dpaa2_rc_mac_set_irq_mask(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t mask) { /* TODO: Implementation is the same as for ni_set_irq_mask(). */ struct __packed set_irq_mask_args { uint32_t mask; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_mask_args *) &cmd->params[0]; args->mask = mask; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_MASK)); } static int dpaa2_rc_mac_set_irq_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool en) { /* TODO: Implementation is the same as for ni_set_irq_enable(). */ struct __packed set_irq_enable_args { uint32_t en; uint8_t irq_idx; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct set_irq_enable_args *) &cmd->params[0]; args->en = en ? 1u : 0u; args->irq_idx = irq_idx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_SET_IRQ_ENABLE)); } static int dpaa2_rc_mac_get_irq_status(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint8_t irq_idx, uint32_t *status) { /* TODO: Implementation is the same as ni_get_irq_status(). 
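 *
 * Caller sketch (values hypothetical; macro name assumed per the usual
 * dpaa2_cmd interface naming convention): the status word is passed in
 * and read back, so pre-load it with the IRQ causes to query:
 *
 *	uint32_t status = 0xFFFFFFFFu;
 *	error = DPAA2_CMD_MAC_GET_IRQ_STATUS(dev, child, &cmd, 0, &status);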
*/ struct __packed get_irq_stat_args { uint32_t status; uint8_t irq_idx; } *args; struct __packed get_irq_stat_resp { uint32_t status; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || status == NULL) return (DPAA2_CMD_STAT_EINVAL); dpaa2_rc_reset_cmd_params(cmd); args = (struct get_irq_stat_args *) &cmd->params[0]; args->status = *status; args->irq_idx = irq_idx; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MAC_GET_IRQ_STATUS); if (!error) { resp = (struct get_irq_stat_resp *) &cmd->params[0]; *status = resp->status; } return (error); } static int dpaa2_rc_con_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpcon_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpcon_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_con_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_CLOSE)); } static int dpaa2_rc_con_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_RESET)); } static int dpaa2_rc_con_enable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_ENABLE)); } static int dpaa2_rc_con_disable(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_DISABLE)); } static int dpaa2_rc_con_get_attributes(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_con_attr *attr) { struct __packed con_attr_resp { uint32_t id; uint16_t chan_id; uint8_t prior_num; uint8_t _reserved1; uint64_t _reserved2[6]; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || attr == NULL) return (DPAA2_CMD_STAT_EINVAL); error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_GET_ATTR); if (!error) { resp = (struct con_attr_resp *) &cmd->params[0]; attr->id = resp->id; attr->chan_id = resp->chan_id; attr->prior_num = resp->prior_num; } return (error); } static int dpaa2_rc_con_set_notif(device_t dev, device_t child, struct dpaa2_cmd *cmd, struct dpaa2_con_notif_cfg *cfg) { struct __packed set_notif_args { uint32_t dpio_id; uint8_t prior; uint8_t _reserved1; uint16_t _reserved2; uint64_t ctx; uint64_t _reserved3[5]; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL || cfg == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct set_notif_args *) &cmd->params[0]; args->dpio_id = cfg->dpio_id; args->prior = cfg->prior; args->ctx = cfg->qman_ctx; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_CON_SET_NOTIF)); } static int dpaa2_rc_mcp_create(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t 
portal_id, uint32_t options, uint32_t *dpmcp_id) { struct __packed mcp_create_args { uint32_t portal_id; uint32_t options; uint64_t _reserved[6]; } *args; struct __packed mcp_create_resp { uint32_t dpmcp_id; } *resp; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); int error; if (portal == NULL || cmd == NULL || dpmcp_id == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mcp_create_args *) &cmd->params[0]; args->portal_id = portal_id; args->options = options; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CREATE); if (!error) { resp = (struct mcp_create_resp *) &cmd->params[0]; *dpmcp_id = resp->dpmcp_id; } return (error); } static int dpaa2_rc_mcp_destroy(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmcp_id) { struct __packed mcp_destroy_args { uint32_t dpmcp_id; } *args; struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); args = (struct mcp_destroy_args *) &cmd->params[0]; args->dpmcp_id = dpmcp_id; return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_DESTROY)); } static int dpaa2_rc_mcp_open(device_t dev, device_t child, struct dpaa2_cmd *cmd, uint32_t dpmcp_id, uint16_t *token) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); struct dpaa2_cmd_header *hdr; int error; if (portal == NULL || cmd == NULL || token == NULL) return (DPAA2_CMD_STAT_ERR); cmd->params[0] = dpmcp_id; error = dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_OPEN); if (!error) { hdr = (struct dpaa2_cmd_header *) &cmd->header; *token = hdr->token; } return (error); } static int dpaa2_rc_mcp_close(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_CLOSE)); } static int dpaa2_rc_mcp_reset(device_t dev, device_t child, struct dpaa2_cmd *cmd) { struct dpaa2_mcp *portal = dpaa2_rc_select_portal(dev, child); if (portal == NULL || cmd == NULL) return (DPAA2_CMD_STAT_ERR); return (dpaa2_rc_exec_cmd(portal, cmd, CMDID_MCP_RESET)); } /** * @brief Create and add devices for DPAA2 objects in this resource container. */ static int dpaa2_rc_discover(struct dpaa2_rc_softc *sc) { device_t rcdev = sc->dev; device_t child = sc->dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(rcdev); struct dpaa2_cmd cmd; struct dpaa2_rc_attr dprc_attr; struct dpaa2_obj obj; uint32_t major, minor, rev, obj_count; uint16_t rc_token; int rc; DPAA2_CMD_INIT(&cmd); /* Print MC firmware version. */ rc = DPAA2_CMD_MNG_GET_VERSION(rcdev, child, &cmd, &major, &minor, &rev); if (rc) { device_printf(rcdev, "%s: failed to get MC firmware version: " "error=%d\n", __func__, rc); return (ENXIO); } device_printf(rcdev, "MC firmware version: %u.%u.%u\n", major, minor, rev); /* Obtain the container ID associated with the given MC portal. */ rc = DPAA2_CMD_MNG_GET_CONTAINER_ID(rcdev, child, &cmd, &sc->cont_id); if (rc) { device_printf(rcdev, "%s: failed to get container id: " "error=%d\n", __func__, rc); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Resource container ID: %u\n", sc->cont_id); } /* Open the resource container. */ rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, sc->cont_id, &rc_token); if (rc) { device_printf(rcdev, "%s: failed to open container: cont_id=%u, " "error=%d\n", __func__, sc->cont_id, rc); return (ENXIO); } /* Obtain the number of objects in this container.
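 *
 * The container token returned by DPAA2_CMD_RC_OPEN() above lives in the
 * command header, so follow-up commands issued with the same "cmd"
 * operate on the opened DPRC, e.g. (sketch):
 *
 *	DPAA2_CMD_INIT(&cmd);
 *	rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, sc->cont_id, &rc_token);
 *	rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, &cmd, &obj_count);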
*/ rc = DPAA2_CMD_RC_GET_OBJ_COUNT(rcdev, child, &cmd, &obj_count); if (rc) { device_printf(rcdev, "%s: failed to count objects in container: " "cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); (void)DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Objects in container: %u\n", obj_count); } rc = DPAA2_CMD_RC_GET_ATTRIBUTES(rcdev, child, &cmd, &dprc_attr); if (rc) { device_printf(rcdev, "%s: failed to get attributes of the " "container: cont_id=%u, error=%d\n", __func__, sc->cont_id, rc); DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (ENXIO); } if (bootverbose) { device_printf(rcdev, "Isolation context ID: %u\n", dprc_attr.icid); } if (rcinfo) { rcinfo->id = dprc_attr.cont_id; rcinfo->portal_id = dprc_attr.portal_id; rcinfo->icid = dprc_attr.icid; } /* * Add MC portals before everything else. * TODO: Discover DPAA2 objects on-demand. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc) { continue; /* Skip silently for now. */ } if (obj.type != DPAA2_DEV_MCP) { continue; } dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach MC portals. */ - bus_generic_probe(rcdev); + bus_identify_children(rcdev); rc = bus_generic_attach(rcdev); if (rc) { DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add managed devices (except DPMCPs) to the resource container. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc && bootverbose) { if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ) { device_printf(rcdev, "%s: skip unsupported " "DPAA2 object: idx=%u\n", __func__, i); continue; } else { device_printf(rcdev, "%s: failed to get " "information about DPAA2 object: idx=%u, " "error=%d\n", __func__, i, rc); continue; } } if (obj.type == DPAA2_DEV_MCP) { continue; /* Already added. */ } dpaa2_rc_add_managed_child(sc, &cmd, &obj); } /* Probe and attach managed devices properly. */ - bus_generic_probe(rcdev); + bus_identify_children(rcdev); rc = bus_generic_attach(rcdev); if (rc) { DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); return (rc); } /* Add other devices to the resource container. */ for (uint32_t i = 0; i < obj_count; i++) { rc = DPAA2_CMD_RC_GET_OBJ(rcdev, child, &cmd, i, &obj); if (rc == DPAA2_CMD_STAT_UNKNOWN_OBJ && bootverbose) { device_printf(rcdev, "%s: skip unsupported DPAA2 " "object: idx=%u\n", __func__, i); continue; } else if (rc) { device_printf(rcdev, "%s: failed to get object: " "idx=%u, error=%d\n", __func__, i, rc); continue; } dpaa2_rc_add_child(sc, &cmd, &obj); } DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); /* Probe and attach the rest of devices. */ - bus_generic_probe(rcdev); + bus_identify_children(rcdev); return (bus_generic_attach(rcdev)); } /** * @brief Add a new DPAA2 device to the resource container bus. */ static int dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd, struct dpaa2_obj *obj) { device_t rcdev, dev; struct dpaa2_devinfo *rcinfo; struct dpaa2_devinfo *dinfo; struct resource_spec *res_spec; const char *devclass; int dpio_n = 0; /* to limit DPIOs by # of CPUs */ int dpcon_n = 0; /* to limit DPCONs by # of CPUs */ int rid, error; rcdev = sc->dev; rcinfo = device_get_ivars(rcdev); switch (obj->type) { case DPAA2_DEV_NI: devclass = "dpaa2_ni"; res_spec = dpaa2_ni_spec; break; default: return (ENXIO); } /* Add a device for the DPAA2 object. 
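 *
 * The discovery loops above rely on the usual newbus pairing, sketched
 * here: bus_identify_children() lets every driver on the bus add the
 * children it recognizes, and bus_generic_attach() then probes and
 * attaches them:
 *
 *	bus_identify_children(rcdev);
 *	rc = bus_generic_attach(rcdev);
 *
 * In device_add_child() below, unit number -1 asks newbus to pick the
 * next free unit.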
*/ dev = device_add_child(rcdev, devclass, -1); if (dev == NULL) { device_printf(rcdev, "%s: failed to add a device for DPAA2 " "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } /* Allocate devinfo for a child. */ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo " "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = rcdev; dinfo->dev = dev; dinfo->id = obj->id; dinfo->dtype = obj->type; dinfo->portal = NULL; /* Children share their parent container's ICID and portal ID. */ dinfo->icid = rcinfo->icid; dinfo->portal_id = rcinfo->portal_id; /* MSI configuration */ dinfo->msi.msi_msgnum = obj->irq_count; dinfo->msi.msi_alloc = 0; dinfo->msi.msi_handlers = 0; /* Initialize a resource list for the child. */ resource_list_init(&dinfo->resources); /* Add DPAA2-specific resources to the resource list. */ for (; res_spec && res_spec->type != -1; res_spec++) { if (res_spec->type < DPAA2_DEV_MC) continue; /* Skip non-DPAA2 resource. */ rid = res_spec->rid; /* Limit DPIOs and DPCONs by number of CPUs. */ if (res_spec->type == DPAA2_DEV_IO && dpio_n >= mp_ncpus) { dpio_n++; continue; } if (res_spec->type == DPAA2_DEV_CON && dpcon_n >= mp_ncpus) { dpcon_n++; continue; } error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid, res_spec->flags); if (error) device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: " "error=%d\n", __func__, error); if (res_spec->type == DPAA2_DEV_IO) dpio_n++; if (res_spec->type == DPAA2_DEV_CON) dpcon_n++; } return (0); } /** * @brief Add a new managed DPAA2 device to the resource container bus. * * There are DPAA2 objects (DPIO, DPBP) which have their own drivers and can be * allocated as resources or associated with the other DPAA2 objects. This * function is supposed to discover such managed objects in the resource * container and add them as children to perform a proper initialization. * - * NOTE: It must be called together with bus_generic_probe() and + * NOTE: It must be called together with bus_identify_children() and * bus_generic_attach() before dpaa2_rc_add_child(). */ static int dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd, struct dpaa2_obj *obj) { device_t rcdev, dev, child; struct dpaa2_devinfo *rcinfo, *dinfo; struct dpaa2_rc_obj_region reg; struct resource_spec *res_spec; const char *devclass; uint64_t start, end, count; uint32_t flags = 0; int rid, error; rcdev = sc->dev; child = sc->dev; rcinfo = device_get_ivars(rcdev); switch (obj->type) { case DPAA2_DEV_IO: devclass = "dpaa2_io"; res_spec = dpaa2_io_spec; flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE; break; case DPAA2_DEV_BP: devclass = "dpaa2_bp"; res_spec = dpaa2_bp_spec; flags = DPAA2_MC_DEV_ALLOCATABLE; break; case DPAA2_DEV_CON: devclass = "dpaa2_con"; res_spec = dpaa2_con_spec; flags = DPAA2_MC_DEV_ALLOCATABLE; break; case DPAA2_DEV_MAC: devclass = "dpaa2_mac"; res_spec = dpaa2_mac_spec; flags = DPAA2_MC_DEV_ASSOCIATED; break; case DPAA2_DEV_MCP: devclass = "dpaa2_mcp"; res_spec = NULL; flags = DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE; break; default: /* Only managed devices above are supported. */ return (EINVAL); } /* Add a device for the DPAA2 object. 
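 *
 * The res_spec tables selected above follow the standard resource_spec
 * layout, terminated by type -1, e.g. (contents hypothetical, for
 * illustration only):
 *
 *	struct resource_spec dpaa2_io_spec[] = {
 *		{ DPAA2_DEV_MCP, 0, RF_ACTIVE | RF_SHAREABLE },
 *		{ -1, 0, 0 }
 *	};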
*/ dev = device_add_child(rcdev, devclass, -1); if (dev == NULL) { device_printf(rcdev, "%s: failed to add a device for DPAA2 " "object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } /* Allocate devinfo for the child. */ dinfo = malloc(sizeof(struct dpaa2_devinfo), M_DPAA2_RC, M_WAITOK | M_ZERO); if (!dinfo) { device_printf(rcdev, "%s: failed to allocate dpaa2_devinfo " "for: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type), obj->id); return (ENXIO); } device_set_ivars(dev, dinfo); dinfo->pdev = rcdev; dinfo->dev = dev; dinfo->id = obj->id; dinfo->dtype = obj->type; dinfo->portal = NULL; /* Children share their parent container's ICID and portal ID. */ dinfo->icid = rcinfo->icid; dinfo->portal_id = rcinfo->portal_id; /* MSI configuration */ dinfo->msi.msi_msgnum = obj->irq_count; dinfo->msi.msi_alloc = 0; dinfo->msi.msi_handlers = 0; /* Initialize a resource list for the child. */ resource_list_init(&dinfo->resources); /* Add memory regions to the resource list. */ for (uint8_t i = 0; i < obj->reg_count; i++) { error = DPAA2_CMD_RC_GET_OBJ_REGION(rcdev, child, cmd, obj->id, i, obj->type, &reg); if (error) { device_printf(rcdev, "%s: failed to obtain memory " "region for type=%s, id=%u, reg_idx=%u: error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, i, error); continue; } count = reg.size; start = reg.base_paddr + reg.base_offset; end = reg.base_paddr + reg.base_offset + reg.size - 1; resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, start, end, count); } /* Add DPAA2-specific resources to the resource list. */ for (; res_spec && res_spec->type != -1; res_spec++) { if (res_spec->type < DPAA2_DEV_MC) continue; /* Skip non-DPAA2 resource. */ rid = res_spec->rid; error = dpaa2_rc_add_res(rcdev, dev, res_spec->type, &rid, res_spec->flags); if (error) device_printf(rcdev, "%s: dpaa2_rc_add_res() failed: " "error=%d\n", __func__, error); } /* Inform MC about a new managed device. */ error = DPAA2_MC_MANAGE_DEV(rcdev, dev, flags); if (error) { device_printf(rcdev, "%s: failed to add a managed DPAA2 device: " "type=%s, id=%u, error=%d\n", __func__, dpaa2_ttos(obj->type), obj->id, error); return (ENXIO); } return (0); } /** * @brief Configure the given IRQ using the MC command interface. */ static int dpaa2_rc_configure_irq(device_t rcdev, device_t child, int rid, uint64_t addr, uint32_t data) { struct dpaa2_devinfo *rcinfo; struct dpaa2_devinfo *dinfo; struct dpaa2_cmd cmd; uint16_t rc_token; int rc = EINVAL; DPAA2_CMD_INIT(&cmd); if (device_get_parent(child) == rcdev && rid >= 1) { rcinfo = device_get_ivars(rcdev); dinfo = device_get_ivars(child); rc = DPAA2_CMD_RC_OPEN(rcdev, child, &cmd, rcinfo->id, &rc_token); if (rc) { device_printf(rcdev, "%s: failed to open DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } /* Set MSI address and value. */ rc = DPAA2_CMD_RC_SET_OBJ_IRQ(rcdev, child, &cmd, rid - 1, addr, data, rid, dinfo->id, dinfo->dtype); if (rc) { device_printf(rcdev, "%s: failed to setup IRQ: " "rid=%d, addr=%jx, data=%x, error=%d\n", __func__, rid, addr, data, rc); return (ENODEV); } rc = DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd); if (rc) { device_printf(rcdev, "%s: failed to close DPRC: " "error=%d\n", __func__, rc); return (ENODEV); } rc = 0; } return (rc); } /** * @brief General implementation of the MC command to enable IRQ.
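 *
 * An object-specific wrapper would pass its own command ID, e.g.
 * (sketch, assuming an open object token in "cmd"):
 *
 *	error = dpaa2_rc_enable_irq(mcp, cmd, irq_idx, true,
 *	    CMDID_NI_SET_IRQ_ENABLE);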
*/ static int dpaa2_rc_enable_irq(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint8_t irq_idx, bool enable, uint16_t cmdid) { struct __packed enable_irq_args { uint8_t enable; uint8_t _reserved1; uint16_t _reserved2; uint8_t irq_idx; uint8_t _reserved3; uint16_t _reserved4; uint64_t _reserved5[6]; } *args; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); args = (struct enable_irq_args *) &cmd->params[0]; args->irq_idx = irq_idx; args->enable = enable == 0u ? 0u : 1u; return (dpaa2_rc_exec_cmd(mcp, cmd, cmdid)); } /** * @brief Sends a command to MC and waits for response. */ static int dpaa2_rc_exec_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd, uint16_t cmdid) { struct dpaa2_cmd_header *hdr; uint16_t flags; int error; if (!mcp || !cmd) return (DPAA2_CMD_STAT_ERR); /* Prepare a command for the MC hardware. */ hdr = (struct dpaa2_cmd_header *) &cmd->header; hdr->cmdid = cmdid; hdr->status = DPAA2_CMD_STAT_READY; DPAA2_MCP_LOCK(mcp, &flags); if (flags & DPAA2_PORTAL_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_INVALID_STATE); } /* Send a command to MC and wait for the result. */ dpaa2_rc_send_cmd(mcp, cmd); error = dpaa2_rc_wait_for_cmd(mcp, cmd); if (error) { DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_ERR); } if (hdr->status != DPAA2_CMD_STAT_OK) { DPAA2_MCP_UNLOCK(mcp); return (int)(hdr->status); } DPAA2_MCP_UNLOCK(mcp); return (DPAA2_CMD_STAT_OK); } /** * @brief Writes a command to the MC command portal. */ static int dpaa2_rc_send_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd) { /* Write command parameters. */ for (uint32_t i = 1; i <= DPAA2_CMD_PARAMS_N; i++) bus_write_8(mcp->map, sizeof(uint64_t) * i, cmd->params[i-1]); bus_barrier(mcp->map, 0, sizeof(struct dpaa2_cmd), BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Write command header to trigger execution. */ bus_write_8(mcp->map, 0, cmd->header); return (0); } /** * @brief Polls the MC command portal in order to receive a result of the * command execution. */ static int dpaa2_rc_wait_for_cmd(struct dpaa2_mcp *mcp, struct dpaa2_cmd *cmd) { struct dpaa2_cmd_header *hdr; uint64_t val; uint32_t i; /* Wait for a command execution result from the MC hardware. */ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) { val = bus_read_8(mcp->map, 0); hdr = (struct dpaa2_cmd_header *) &val; if (hdr->status != DPAA2_CMD_STAT_READY) { break; } DELAY(CMD_SPIN_TIMEOUT); } if (i > CMD_SPIN_ATTEMPTS) { /* Return an error on expired timeout. */ return (DPAA2_CMD_STAT_TIMEOUT); } else { /* Read command response. */ cmd->header = val; for (i = 1; i <= DPAA2_CMD_PARAMS_N; i++) { cmd->params[i-1] = bus_read_8(mcp->map, i * sizeof(uint64_t)); } } return (DPAA2_CMD_STAT_OK); } /** * @brief Reserve a DPAA2-specific device of the given devtype for the child. */ static int dpaa2_rc_add_res(device_t rcdev, device_t child, enum dpaa2_dev_type devtype, int *rid, int flags) { device_t dpaa2_dev; struct dpaa2_devinfo *dinfo = device_get_ivars(child); struct resource *res; bool shared = false; int error; /* Request a free DPAA2 device of the given type from MC. */ error = DPAA2_MC_GET_FREE_DEV(rcdev, &dpaa2_dev, devtype); if (error && !(flags & RF_SHAREABLE)) { device_printf(rcdev, "%s: failed to obtain a free %s (rid=%d) " "for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } /* Request a shared DPAA2 device of the given type from MC. 
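 *
 * Allocation strategy, summarized: an exclusive device is requested
 * first; if none is free and the caller passed RF_SHAREABLE, the code
 * falls back to a shared device, which must additionally be reserved
 * via DPAA2_MC_RESERVE_DEV() further below.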
*/ if (error) { error = DPAA2_MC_GET_SHARED_DEV(rcdev, &dpaa2_dev, devtype); if (error) { device_printf(rcdev, "%s: failed to obtain a shared " "%s (rid=%d) for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } shared = true; } /* Add DPAA2 device to the resource list of the child device. */ resource_list_add(&dinfo->resources, devtype, *rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1); /* Reserve a newly added DPAA2 resource. */ res = resource_list_reserve(&dinfo->resources, rcdev, child, devtype, rid, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev, 1, flags & ~RF_ACTIVE); if (!res) { device_printf(rcdev, "%s: failed to reserve %s (rid=%d) for: %s " "(id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (EBUSY); } /* Reserve a shared DPAA2 device of the given type. */ if (shared) { error = DPAA2_MC_RESERVE_DEV(rcdev, dpaa2_dev, devtype); if (error) { device_printf(rcdev, "%s: failed to reserve a shared " "%s (rid=%d) for: %s (id=%u)\n", __func__, dpaa2_ttos(devtype), *rid, dpaa2_ttos(dinfo->dtype), dinfo->id); return (error); } } return (0); } static int dpaa2_rc_print_type(struct resource_list *rl, enum dpaa2_dev_type type) { struct dpaa2_devinfo *dinfo; struct resource_list_entry *rle; uint32_t prev_id; int printed = 0, series = 0; int retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type) { dinfo = device_get_ivars((device_t) rle->start); if (printed == 0) { retval += printf(" %s (id=", dpaa2_ttos(dinfo->dtype)); } else { if (dinfo->id == prev_id + 1) { if (series == 0) { series = 1; retval += printf("-"); } } else { if (series == 1) { retval += printf("%u", prev_id); series = 0; } retval += printf(","); } } printed++; if (series == 0) retval += printf("%u", dinfo->id); prev_id = dinfo->id; } } if (printed) { if (series == 1) retval += printf("%u", prev_id); retval += printf(")"); } return (retval); } static int dpaa2_rc_reset_cmd_params(struct dpaa2_cmd *cmd) { if (cmd != NULL) { memset(cmd->params, 0, sizeof(cmd->params[0]) * DPAA2_CMD_PARAMS_N); } return (0); } static struct dpaa2_mcp * dpaa2_rc_select_portal(device_t dev, device_t child) { struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *cinfo = device_get_ivars(child); if (cinfo == NULL || dinfo == NULL || dinfo->dtype != DPAA2_DEV_RC) return (NULL); return (cinfo->portal != NULL ? 
cinfo->portal : dinfo->portal); } static device_method_t dpaa2_rc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_rc_probe), DEVMETHOD(device_attach, dpaa2_rc_attach), DEVMETHOD(device_detach, dpaa2_rc_detach), /* Bus interface */ DEVMETHOD(bus_get_resource_list, dpaa2_rc_get_resource_list), DEVMETHOD(bus_delete_resource, dpaa2_rc_delete_resource), DEVMETHOD(bus_alloc_resource, dpaa2_rc_alloc_resource), DEVMETHOD(bus_release_resource, dpaa2_rc_release_resource), DEVMETHOD(bus_child_deleted, dpaa2_rc_child_deleted), DEVMETHOD(bus_child_detached, dpaa2_rc_child_detached), DEVMETHOD(bus_setup_intr, dpaa2_rc_setup_intr), DEVMETHOD(bus_teardown_intr, dpaa2_rc_teardown_intr), DEVMETHOD(bus_print_child, dpaa2_rc_print_child), DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), /* Pseudo-PCI interface */ DEVMETHOD(pci_alloc_msi, dpaa2_rc_alloc_msi), DEVMETHOD(pci_release_msi, dpaa2_rc_release_msi), DEVMETHOD(pci_msi_count, dpaa2_rc_msi_count), DEVMETHOD(pci_get_id, dpaa2_rc_get_id), /* DPAA2 MC command interface */ DEVMETHOD(dpaa2_cmd_mng_get_version, dpaa2_rc_mng_get_version), DEVMETHOD(dpaa2_cmd_mng_get_soc_version, dpaa2_rc_mng_get_soc_version), DEVMETHOD(dpaa2_cmd_mng_get_container_id, dpaa2_rc_mng_get_container_id), /* DPRC commands */ DEVMETHOD(dpaa2_cmd_rc_open, dpaa2_rc_open), DEVMETHOD(dpaa2_cmd_rc_close, dpaa2_rc_close), DEVMETHOD(dpaa2_cmd_rc_get_obj_count, dpaa2_rc_get_obj_count), DEVMETHOD(dpaa2_cmd_rc_get_obj, dpaa2_rc_get_obj), DEVMETHOD(dpaa2_cmd_rc_get_obj_descriptor, dpaa2_rc_get_obj_descriptor), DEVMETHOD(dpaa2_cmd_rc_get_attributes, dpaa2_rc_get_attributes), DEVMETHOD(dpaa2_cmd_rc_get_obj_region, dpaa2_rc_get_obj_region), DEVMETHOD(dpaa2_cmd_rc_get_api_version, dpaa2_rc_get_api_version), DEVMETHOD(dpaa2_cmd_rc_set_irq_enable, dpaa2_rc_set_irq_enable), DEVMETHOD(dpaa2_cmd_rc_set_obj_irq, dpaa2_rc_set_obj_irq), DEVMETHOD(dpaa2_cmd_rc_get_conn, dpaa2_rc_get_conn), /* DPNI commands */ DEVMETHOD(dpaa2_cmd_ni_open, dpaa2_rc_ni_open), DEVMETHOD(dpaa2_cmd_ni_close, dpaa2_rc_ni_close), DEVMETHOD(dpaa2_cmd_ni_enable, dpaa2_rc_ni_enable), DEVMETHOD(dpaa2_cmd_ni_disable, dpaa2_rc_ni_disable), DEVMETHOD(dpaa2_cmd_ni_get_api_version, dpaa2_rc_ni_get_api_version), DEVMETHOD(dpaa2_cmd_ni_reset, dpaa2_rc_ni_reset), DEVMETHOD(dpaa2_cmd_ni_get_attributes, dpaa2_rc_ni_get_attributes), DEVMETHOD(dpaa2_cmd_ni_set_buf_layout, dpaa2_rc_ni_set_buf_layout), DEVMETHOD(dpaa2_cmd_ni_get_tx_data_off, dpaa2_rc_ni_get_tx_data_offset), DEVMETHOD(dpaa2_cmd_ni_get_port_mac_addr, dpaa2_rc_ni_get_port_mac_addr), DEVMETHOD(dpaa2_cmd_ni_set_prim_mac_addr, dpaa2_rc_ni_set_prim_mac_addr), DEVMETHOD(dpaa2_cmd_ni_get_prim_mac_addr, dpaa2_rc_ni_get_prim_mac_addr), DEVMETHOD(dpaa2_cmd_ni_set_link_cfg, dpaa2_rc_ni_set_link_cfg), DEVMETHOD(dpaa2_cmd_ni_get_link_cfg, dpaa2_rc_ni_get_link_cfg), DEVMETHOD(dpaa2_cmd_ni_get_link_state, dpaa2_rc_ni_get_link_state), DEVMETHOD(dpaa2_cmd_ni_set_qos_table, dpaa2_rc_ni_set_qos_table), DEVMETHOD(dpaa2_cmd_ni_clear_qos_table, dpaa2_rc_ni_clear_qos_table), DEVMETHOD(dpaa2_cmd_ni_set_pools, dpaa2_rc_ni_set_pools), DEVMETHOD(dpaa2_cmd_ni_set_err_behavior,dpaa2_rc_ni_set_err_behavior), DEVMETHOD(dpaa2_cmd_ni_get_queue, dpaa2_rc_ni_get_queue), 
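	/*
	 * Consumers reach the implementations above through the kobj glue
	 * generated from the dpaa2_cmd interface; by the usual .m naming
	 * convention (assumed here) a call looks like, e.g.:
	 *
	 *	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	 *
	 * where queue_cfg is a hypothetical queue configuration argument.
	 */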
DEVMETHOD(dpaa2_cmd_ni_set_queue, dpaa2_rc_ni_set_queue), DEVMETHOD(dpaa2_cmd_ni_get_qdid, dpaa2_rc_ni_get_qdid), DEVMETHOD(dpaa2_cmd_ni_add_mac_addr, dpaa2_rc_ni_add_mac_addr), DEVMETHOD(dpaa2_cmd_ni_remove_mac_addr, dpaa2_rc_ni_remove_mac_addr), DEVMETHOD(dpaa2_cmd_ni_clear_mac_filters, dpaa2_rc_ni_clear_mac_filters), DEVMETHOD(dpaa2_cmd_ni_set_mfl, dpaa2_rc_ni_set_mfl), DEVMETHOD(dpaa2_cmd_ni_set_offload, dpaa2_rc_ni_set_offload), DEVMETHOD(dpaa2_cmd_ni_set_irq_mask, dpaa2_rc_ni_set_irq_mask), DEVMETHOD(dpaa2_cmd_ni_set_irq_enable, dpaa2_rc_ni_set_irq_enable), DEVMETHOD(dpaa2_cmd_ni_get_irq_status, dpaa2_rc_ni_get_irq_status), DEVMETHOD(dpaa2_cmd_ni_set_uni_promisc, dpaa2_rc_ni_set_uni_promisc), DEVMETHOD(dpaa2_cmd_ni_set_multi_promisc, dpaa2_rc_ni_set_multi_promisc), DEVMETHOD(dpaa2_cmd_ni_get_statistics, dpaa2_rc_ni_get_statistics), DEVMETHOD(dpaa2_cmd_ni_set_rx_tc_dist, dpaa2_rc_ni_set_rx_tc_dist), /* DPIO commands */ DEVMETHOD(dpaa2_cmd_io_open, dpaa2_rc_io_open), DEVMETHOD(dpaa2_cmd_io_close, dpaa2_rc_io_close), DEVMETHOD(dpaa2_cmd_io_enable, dpaa2_rc_io_enable), DEVMETHOD(dpaa2_cmd_io_disable, dpaa2_rc_io_disable), DEVMETHOD(dpaa2_cmd_io_reset, dpaa2_rc_io_reset), DEVMETHOD(dpaa2_cmd_io_get_attributes, dpaa2_rc_io_get_attributes), DEVMETHOD(dpaa2_cmd_io_set_irq_mask, dpaa2_rc_io_set_irq_mask), DEVMETHOD(dpaa2_cmd_io_get_irq_status, dpaa2_rc_io_get_irq_status), DEVMETHOD(dpaa2_cmd_io_set_irq_enable, dpaa2_rc_io_set_irq_enable), DEVMETHOD(dpaa2_cmd_io_add_static_dq_chan, dpaa2_rc_io_add_static_dq_chan), /* DPBP commands */ DEVMETHOD(dpaa2_cmd_bp_open, dpaa2_rc_bp_open), DEVMETHOD(dpaa2_cmd_bp_close, dpaa2_rc_bp_close), DEVMETHOD(dpaa2_cmd_bp_enable, dpaa2_rc_bp_enable), DEVMETHOD(dpaa2_cmd_bp_disable, dpaa2_rc_bp_disable), DEVMETHOD(dpaa2_cmd_bp_reset, dpaa2_rc_bp_reset), DEVMETHOD(dpaa2_cmd_bp_get_attributes, dpaa2_rc_bp_get_attributes), /* DPMAC commands */ DEVMETHOD(dpaa2_cmd_mac_open, dpaa2_rc_mac_open), DEVMETHOD(dpaa2_cmd_mac_close, dpaa2_rc_mac_close), DEVMETHOD(dpaa2_cmd_mac_reset, dpaa2_rc_mac_reset), DEVMETHOD(dpaa2_cmd_mac_mdio_read, dpaa2_rc_mac_mdio_read), DEVMETHOD(dpaa2_cmd_mac_mdio_write, dpaa2_rc_mac_mdio_write), DEVMETHOD(dpaa2_cmd_mac_get_addr, dpaa2_rc_mac_get_addr), DEVMETHOD(dpaa2_cmd_mac_get_attributes, dpaa2_rc_mac_get_attributes), DEVMETHOD(dpaa2_cmd_mac_set_link_state, dpaa2_rc_mac_set_link_state), DEVMETHOD(dpaa2_cmd_mac_set_irq_mask, dpaa2_rc_mac_set_irq_mask), DEVMETHOD(dpaa2_cmd_mac_set_irq_enable, dpaa2_rc_mac_set_irq_enable), DEVMETHOD(dpaa2_cmd_mac_get_irq_status, dpaa2_rc_mac_get_irq_status), /* DPCON commands */ DEVMETHOD(dpaa2_cmd_con_open, dpaa2_rc_con_open), DEVMETHOD(dpaa2_cmd_con_close, dpaa2_rc_con_close), DEVMETHOD(dpaa2_cmd_con_reset, dpaa2_rc_con_reset), DEVMETHOD(dpaa2_cmd_con_enable, dpaa2_rc_con_enable), DEVMETHOD(dpaa2_cmd_con_disable, dpaa2_rc_con_disable), DEVMETHOD(dpaa2_cmd_con_get_attributes, dpaa2_rc_con_get_attributes), DEVMETHOD(dpaa2_cmd_con_set_notif, dpaa2_rc_con_set_notif), /* DPMCP commands */ DEVMETHOD(dpaa2_cmd_mcp_create, dpaa2_rc_mcp_create), DEVMETHOD(dpaa2_cmd_mcp_destroy, dpaa2_rc_mcp_destroy), DEVMETHOD(dpaa2_cmd_mcp_open, dpaa2_rc_mcp_open), DEVMETHOD(dpaa2_cmd_mcp_close, dpaa2_rc_mcp_close), DEVMETHOD(dpaa2_cmd_mcp_reset, dpaa2_rc_mcp_reset), DEVMETHOD_END }; static driver_t dpaa2_rc_driver = { "dpaa2_rc", dpaa2_rc_methods, sizeof(struct dpaa2_rc_softc), }; /* For root container */ DRIVER_MODULE(dpaa2_rc, dpaa2_mc, dpaa2_rc_driver, 0, 0); /* For child containers */ DRIVER_MODULE(dpaa2_rc, dpaa2_rc, 
dpaa2_rc_driver, 0, 0); diff --git a/sys/dev/dpaa2/memac_mdio_acpi.c b/sys/dev/dpaa2/memac_mdio_acpi.c index 8040b636d06b..a3e88303b1d9 100644 --- a/sys/dev/dpaa2/memac_mdio_acpi.c +++ b/sys/dev/dpaa2/memac_mdio_acpi.c @@ -1,307 +1,307 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "memac_mdio.h" #include "memac_mdio_if.h" #include "acpi_bus_if.h" #include "miibus_if.h" /* -------------------------------------------------------------------------- */ struct memacphy_softc_acpi { struct memacphy_softc_common scc; int uid; uint64_t phy_channel; char compatible[64]; }; static void memacphy_acpi_miibus_statchg(device_t dev) { struct memacphy_softc_acpi *sc; sc = device_get_softc(dev); memacphy_miibus_statchg(&sc->scc); } static int memacphy_acpi_set_ni_dev(device_t dev, device_t nidev) { struct memacphy_softc_acpi *sc; sc = device_get_softc(dev); return (memacphy_set_ni_dev(&sc->scc, nidev)); } static int memacphy_acpi_get_phy_loc(device_t dev, int *phy_loc) { struct memacphy_softc_acpi *sc; sc = device_get_softc(dev); return (memacphy_get_phy_loc(&sc->scc, phy_loc)); } static int memacphy_acpi_probe(device_t dev) { device_set_desc(dev, "MEMAC PHY (acpi)"); return (BUS_PROBE_DEFAULT); } static int memacphy_acpi_attach(device_t dev) { struct memacphy_softc_acpi *sc; ACPI_HANDLE h; ssize_t s; sc = device_get_softc(dev); sc->scc.dev = dev; h = acpi_get_handle(dev); s = acpi_GetInteger(h, "_UID", &sc->uid); if (ACPI_FAILURE(s)) { device_printf(dev, "Cannot get '_UID' property: %zd\n", s); return (ENXIO); } s = device_get_property(dev, "phy-channel", &sc->phy_channel, sizeof(sc->phy_channel), DEVICE_PROP_UINT64); if (s != -1) sc->scc.phy = sc->phy_channel; else sc->scc.phy = -1; s = device_get_property(dev, "compatible", sc->compatible, sizeof(sc->compatible), DEVICE_PROP_ANY); if (bootverbose) device_printf(dev, "UID %#04x phy-channel %ju compatible '%s' phy %u\n", sc->uid, sc->phy_channel, sc->compatible[0] != '\0' ? 
sc->compatible : "", sc->scc.phy); if (sc->scc.phy == -1) return (ENXIO); return (0); } static device_method_t memacphy_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, memacphy_acpi_probe), DEVMETHOD(device_attach, memacphy_acpi_attach), DEVMETHOD(device_detach, bus_generic_detach), /* MII interface */ DEVMETHOD(miibus_readreg, memacphy_miibus_readreg), DEVMETHOD(miibus_writereg, memacphy_miibus_writereg), DEVMETHOD(miibus_statchg, memacphy_acpi_miibus_statchg), /* memac */ DEVMETHOD(memac_mdio_set_ni_dev, memacphy_acpi_set_ni_dev), DEVMETHOD(memac_mdio_get_phy_loc, memacphy_acpi_get_phy_loc), DEVMETHOD_END }; DEFINE_CLASS_0(memacphy_acpi, memacphy_acpi_driver, memacphy_acpi_methods, sizeof(struct memacphy_softc_acpi)); EARLY_DRIVER_MODULE(memacphy_acpi, memac_mdio_acpi, memacphy_acpi_driver, 0, 0, BUS_PASS_SUPPORTDEV); DRIVER_MODULE(miibus, memacphy_acpi, miibus_driver, 0, 0); MODULE_DEPEND(memacphy_acpi, miibus, 1, 1, 1); /* -------------------------------------------------------------------------- */ struct memac_mdio_softc_acpi { struct memac_mdio_softc_common scc; }; static int memac_acpi_miibus_readreg(device_t dev, int phy, int reg) { struct memac_mdio_softc_acpi *sc; sc = device_get_softc(dev); return (memac_miibus_readreg(&sc->scc, phy, reg)); } static int memac_acpi_miibus_writereg(device_t dev, int phy, int reg, int data) { struct memac_mdio_softc_acpi *sc; sc = device_get_softc(dev); return (memac_miibus_writereg(&sc->scc, phy, reg, data)); } /* Context for walking PHY child devices. */ struct memac_mdio_walk_ctx { device_t dev; int count; int countok; }; static char *memac_mdio_ids[] = { "NXP0006", NULL }; static int memac_mdio_acpi_probe(device_t dev) { int rc; if (acpi_disabled("fsl_memac_mdio")) return (ENXIO); rc = ACPI_ID_PROBE(device_get_parent(dev), dev, memac_mdio_ids, NULL); if (rc <= 0) device_set_desc(dev, "Freescale XGMAC MDIO Bus"); return (rc); } static ACPI_STATUS memac_mdio_acpi_probe_child(ACPI_HANDLE h, device_t *dev, int level, void *arg) { struct memac_mdio_walk_ctx *ctx; struct acpi_device *ad; device_t child; uint32_t adr; ctx = (struct memac_mdio_walk_ctx *)arg; ctx->count++; if (ACPI_FAILURE(acpi_GetInteger(h, "_ADR", &adr))) return (AE_OK); /* Technically M_ACPIDEV */ if ((ad = malloc(sizeof(*ad), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) return (AE_OK); child = device_add_child(ctx->dev, "memacphy_acpi", -1); if (child == NULL) { free(ad, M_DEVBUF); return (AE_OK); } ad->ad_handle = h; ad->ad_cls_class = 0xffffff; resource_list_init(&ad->ad_rl); device_set_ivars(child, ad); *dev = child; ctx->countok++; return (AE_OK); } static int memac_mdio_acpi_attach(device_t dev) { struct memac_mdio_softc_acpi *sc; struct memac_mdio_walk_ctx ctx; int error; sc = device_get_softc(dev); sc->scc.dev = dev; error = memac_mdio_generic_attach(&sc->scc); if (error != 0) return (error); ctx.dev = dev; ctx.count = 0; ctx.countok = 0; ACPI_SCAN_CHILDREN(device_get_parent(dev), dev, 1, memac_mdio_acpi_probe_child, &ctx); if (ctx.countok > 0) { - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); } return (0); } static int memac_mdio_acpi_detach(device_t dev) { struct memac_mdio_softc_acpi *sc; sc = device_get_softc(dev); return (memac_mdio_generic_detach(&sc->scc)); } static device_method_t memac_mdio_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, memac_mdio_acpi_probe), DEVMETHOD(device_attach, memac_mdio_acpi_attach), DEVMETHOD(device_detach, memac_mdio_acpi_detach), /* MII interface */ 
DEVMETHOD(miibus_readreg, memac_acpi_miibus_readreg), DEVMETHOD(miibus_writereg, memac_acpi_miibus_writereg), /* .. */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, memac_mdio_read_ivar), DEVMETHOD(bus_get_property, memac_mdio_get_property), DEVMETHOD_END }; DEFINE_CLASS_0(memac_mdio_acpi, memac_mdio_acpi_driver, memac_mdio_acpi_methods, sizeof(struct memac_mdio_softc_acpi)); EARLY_DRIVER_MODULE(memac_mdio_acpi, acpi, memac_mdio_acpi_driver, 0, 0, BUS_PASS_SUPPORTDEV); DRIVER_MODULE(miibus, memac_mdio_acpi, miibus_driver, 0, 0); MODULE_DEPEND(memac_mdio_acpi, miibus, 1, 1, 1); MODULE_VERSION(memac_mdio_acpi, 1); diff --git a/sys/dev/dpaa2/memac_mdio_fdt.c b/sys/dev/dpaa2/memac_mdio_fdt.c index 18643522ed8a..247a70c6545c 100644 --- a/sys/dev/dpaa2/memac_mdio_fdt.c +++ b/sys/dev/dpaa2/memac_mdio_fdt.c @@ -1,305 +1,305 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Bjoern A. Zeeb * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "memac_mdio.h" #include "memac_mdio_if.h" #include "ofw_bus_if.h" #include "miibus_if.h" /* -------------------------------------------------------------------------- */ struct memacphy_softc_fdt { struct memacphy_softc_common scc; uint32_t reg; phandle_t xref; }; static void memacphy_fdt_miibus_statchg(device_t dev) { struct memacphy_softc_fdt *sc; sc = device_get_softc(dev); memacphy_miibus_statchg(&sc->scc); } static int memacphy_fdt_set_ni_dev(device_t dev, device_t nidev) { struct memacphy_softc_fdt *sc; sc = device_get_softc(dev); return (memacphy_set_ni_dev(&sc->scc, nidev)); } static int memacphy_fdt_get_phy_loc(device_t dev, int *phy_loc) { struct memacphy_softc_fdt *sc; sc = device_get_softc(dev); return (memacphy_get_phy_loc(&sc->scc, phy_loc)); } static int memacphy_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); device_set_desc(dev, "MEMAC PHY (fdt)"); return (BUS_PROBE_DEFAULT); } static int memacphy_fdt_attach(device_t dev) { struct memacphy_softc_fdt *sc; phandle_t node; ssize_t s; int error; sc = device_get_softc(dev); sc->scc.dev = dev; node = ofw_bus_get_node(dev); s = device_get_property(dev, "reg", &sc->reg, sizeof(sc->reg), DEVICE_PROP_UINT32); if (s != -1) sc->scc.phy = sc->reg; else sc->scc.phy = -1; sc->xref = OF_xref_from_node(node); error = OF_device_register_xref(sc->xref, dev); if (error != 0) device_printf(dev, "Failed to register xref %#x\n", sc->xref); if (bootverbose) device_printf(dev, "node %#x '%s': reg %#x xref %#x phy %u\n", node, ofw_bus_get_name(dev), sc->reg, sc->xref, sc->scc.phy); if (sc->scc.phy == -1) error = ENXIO; return (error); } static device_method_t memacphy_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, memacphy_fdt_probe), DEVMETHOD(device_attach, memacphy_fdt_attach), DEVMETHOD(device_detach, bus_generic_detach), /* MII interface */ DEVMETHOD(miibus_readreg, memacphy_miibus_readreg), DEVMETHOD(miibus_writereg, memacphy_miibus_writereg), DEVMETHOD(miibus_statchg, memacphy_fdt_miibus_statchg), /* memac */ DEVMETHOD(memac_mdio_set_ni_dev, memacphy_fdt_set_ni_dev), DEVMETHOD(memac_mdio_get_phy_loc, memacphy_fdt_get_phy_loc), DEVMETHOD_END }; DEFINE_CLASS_0(memacphy_fdt, memacphy_fdt_driver, memacphy_fdt_methods, sizeof(struct memacphy_softc_fdt)); EARLY_DRIVER_MODULE(memacphy_fdt, memac_mdio_fdt, memacphy_fdt_driver, 0, 0, BUS_PASS_SUPPORTDEV); DRIVER_MODULE(miibus, memacphy_fdt, miibus_driver, 0, 0); MODULE_DEPEND(memacphy_fdt, miibus, 1, 1, 1); /* -------------------------------------------------------------------------- */ /* * Order in this softc is important; memac_mdio_fdt_attach() calls * simplebus_init() which expects sb_sc at the beginning. */ struct memac_mdio_softc_fdt { struct simplebus_softc sb_sc; /* Must stay first. 
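 *
 * simplebus_init() treats the softc as a struct simplebus_softc, so the
 * pointer returned by device_get_softc() must be valid under both views
 * (sketch):
 *
 *	struct simplebus_softc *sb = device_get_softc(dev);
 *	struct memac_mdio_softc_fdt *sc = device_get_softc(dev);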
*/ struct memac_mdio_softc_common scc; }; static int memac_fdt_miibus_readreg(device_t dev, int phy, int reg) { struct memac_mdio_softc_fdt *sc; sc = device_get_softc(dev); return (memac_miibus_readreg(&sc->scc, phy, reg)); } static int memac_fdt_miibus_writereg(device_t dev, int phy, int reg, int data) { struct memac_mdio_softc_fdt *sc; sc = device_get_softc(dev); return (memac_miibus_writereg(&sc->scc, phy, reg, data)); } static struct ofw_compat_data compat_data[] = { { "fsl,fman-memac-mdio", 1 }, { NULL, 0 } }; static int memac_mdio_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "Freescale XGMAC MDIO Bus (FDT)"); return (BUS_PROBE_DEFAULT); } static int memac_mdio_fdt_probe_child(device_t bus, phandle_t child) { device_t childdev; /* Make sure we do not already have a device. */ childdev = ofw_bus_find_child_device_by_phandle(bus, child); if (childdev != NULL) return (0); childdev = simplebus_add_device(bus, child, 0, NULL, -1, NULL); if (childdev == NULL) return (ENXIO); return (device_probe_and_attach(childdev)); } static int memac_mdio_fdt_attach(device_t dev) { struct memac_mdio_softc_fdt *sc; phandle_t node, child; int error; sc = device_get_softc(dev); sc->scc.dev = dev; error = memac_mdio_generic_attach(&sc->scc); if (error != 0) return (error); /* Attach the *phy* children represented in the device tree. */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); node = ofw_bus_get_node(dev); simplebus_init(dev, node); for (child = OF_child(node); child > 0; child = OF_peer(child)) { if (!OF_hasprop(child, "reg")) continue; if (memac_mdio_fdt_probe_child(dev, child) != 0) continue; } return (0); } static int memac_mdio_fdt_detach(device_t dev) { struct memac_mdio_softc_fdt *sc; sc = device_get_softc(dev); return (memac_mdio_generic_detach(&sc->scc)); } static const struct ofw_bus_devinfo * memac_simplebus_get_devinfo(device_t bus, device_t child) { return (OFW_BUS_GET_DEVINFO(device_get_parent(bus), child)); } static device_method_t memac_mdio_fdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, memac_mdio_fdt_probe), DEVMETHOD(device_attach, memac_mdio_fdt_attach), DEVMETHOD(device_detach, memac_mdio_fdt_detach), /* MII interface */ DEVMETHOD(miibus_readreg, memac_fdt_miibus_readreg), DEVMETHOD(miibus_writereg, memac_fdt_miibus_writereg), /* OFW/simplebus */ DEVMETHOD(ofw_bus_get_devinfo, memac_simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, memac_mdio_read_ivar), DEVMETHOD(bus_get_property, memac_mdio_get_property), DEVMETHOD_END }; DEFINE_CLASS_0(memac_mdio_fdt, memac_mdio_fdt_driver, memac_mdio_fdt_methods, sizeof(struct memac_mdio_softc_fdt)); EARLY_DRIVER_MODULE(memac_mdio_fdt, simplebus, memac_mdio_fdt_driver, 0, 0, BUS_PASS_SUPPORTDEV); DRIVER_MODULE(miibus, memac_mdio_fdt, miibus_driver, 0, 0); MODULE_DEPEND(memac_mdio_fdt, miibus, 1, 1, 1); MODULE_VERSION(memac_mdio_fdt, 1); diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_main.c b/sys/dev/etherswitch/ar40xx/ar40xx_main.c index 757dbe23071e..979d5bb39444 100644 --- a/sys/dev/etherswitch/ar40xx/ar40xx_main.c +++
b/sys/dev/etherswitch/ar40xx/ar40xx_main.c @@ -1,967 +1,967 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Adrian Chadd . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" static struct ofw_compat_data compat_data[] = { { "qcom,ess-switch", 1 }, { NULL, 0 }, }; static int ar40xx_probe(device_t dev) { if (! ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "IPQ4018 ESS Switch fabric / PSGMII PHY"); return (BUS_PROBE_DEFAULT); } static void ar40xx_tick(void *arg) { struct ar40xx_softc *sc = arg; (void) ar40xx_phy_tick(sc); callout_reset(&sc->sc_phy_callout, hz, ar40xx_tick, sc); } static void ar40xx_statchg(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_DPRINTF(sc, AR40XX_DBG_PORT_STATUS, "%s\n", __func__); } static int ar40xx_readphy(device_t dev, int phy, int reg) { struct ar40xx_softc *sc = device_get_softc(dev); return MDIO_READREG(sc->sc_mdio_dev, phy, reg); } static int ar40xx_writephy(device_t dev, int phy, int reg, int val) { struct ar40xx_softc *sc = device_get_softc(dev); return MDIO_WRITEREG(sc->sc_mdio_dev, phy, reg, val); } /* * Do the initial switch configuration. 
*/ static int ar40xx_reset_switch(struct ar40xx_softc *sc) { int ret, i; AR40XX_DPRINTF(sc, AR40XX_DBG_HW_INIT, "%s: called\n", __func__); /* blank the VLAN config */ memset(&sc->sc_vlan, 0, sizeof(sc->sc_vlan)); /* initial vlan port mapping */ for (i = 0; i < AR40XX_NUM_VTU_ENTRIES; i++) sc->sc_vlan.vlan_id[i] = 0; /* init vlan config */ ret = ar40xx_hw_vlan_init(sc); /* init monitor config */ sc->sc_monitor.mirror_tx = false; sc->sc_monitor.mirror_rx = false; sc->sc_monitor.source_port = 0; sc->sc_monitor.monitor_port = 0; /* apply switch config */ ret = ar40xx_hw_sw_hw_apply(sc); return (ret); } static int ar40xx_sysctl_dump_port_state(SYSCTL_HANDLER_ARGS) { struct ar40xx_softc *sc = arg1; int val = 0; int error; int i; (void) i; (void) sc; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); if (val < 0 || val > 5) { return (EINVAL); } AR40XX_LOCK(sc); device_printf(sc->sc_dev, "port %d: PORT_STATUS=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_STATUS(val))); device_printf(sc->sc_dev, "port %d: PORT_HEADER=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_HEADER(val))); device_printf(sc->sc_dev, "port %d: PORT_VLAN0=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_VLAN0(val))); device_printf(sc->sc_dev, "port %d: PORT_VLAN1=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_VLAN1(val))); device_printf(sc->sc_dev, "port %d: PORT_LOOKUP=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_LOOKUP(val))); device_printf(sc->sc_dev, "port %d: PORT_HOL_CTRL1=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_HOL_CTRL1(val))); device_printf(sc->sc_dev, "port %d: PORT_FLOWCTRL_THRESH=0x%08x\n", val, AR40XX_REG_READ(sc, AR40XX_REG_PORT_FLOWCTRL_THRESH(val))); AR40XX_UNLOCK(sc); return (0); } static int ar40xx_sysctl_dump_port_mibstats(SYSCTL_HANDLER_ARGS) { struct ar40xx_softc *sc = arg1; int val = 0; int error; int i; (void) i; (void) sc; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return (error); if (val < 0 || val > 5) { return (EINVAL); } AR40XX_LOCK(sc); /* Yes, this snapshots all ports */ (void) ar40xx_hw_mib_capture(sc); (void) ar40xx_hw_mib_fetch(sc, val); AR40XX_UNLOCK(sc); return (0); } static int ar40xx_sysctl_attach(struct ar40xx_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debugging flags"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_state", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ar40xx_sysctl_dump_port_state, "I", ""); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "port_mibstats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ar40xx_sysctl_dump_port_mibstats, "I", ""); return (0); } static int ar40xx_detach(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); int i; device_printf(sc->sc_dev, "%s: called\n", __func__); callout_drain(&sc->sc_phy_callout); /* Free PHYs */ for (i = 0; i < AR40XX_NUM_PHYS; i++) { if (sc->sc_phys.miibus[i] != NULL) device_delete_child(dev, sc->sc_phys.miibus[i]); if (sc->sc_phys.ifp[i] != NULL) if_free(sc->sc_phys.ifp[i]); free(sc->sc_phys.ifname[i], M_DEVBUF); } bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } static int ar40xx_attach(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); phandle_t psgmii_p, root_p, mdio_p; int ret, i; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ar40xx_switch", NULL, MTX_DEF); psgmii_p = 
OF_finddevice("/soc/ess-psgmii"); if (psgmii_p == -1) { device_printf(dev, "%s: couldn't find /soc/ess-psgmii DT node\n", __func__); goto error; } /* * Get the ipq4019-mdio node here, to talk to our local PHYs * if needed */ root_p = OF_finddevice("/soc"); mdio_p = ofw_bus_find_compatible(root_p, "qcom,ipq4019-mdio"); if (mdio_p == -1) { device_printf(dev, "%s: couldn't find ipq4019-mdio DT node\n", __func__); goto error; } sc->sc_mdio_phandle = mdio_p; sc->sc_mdio_dev = OF_device_from_xref(OF_xref_from_node(mdio_p)); if (sc->sc_mdio_dev == NULL) { device_printf(dev, "%s: couldn't get mdio device (mdio_p=%u)\n", __func__, mdio_p); goto error; } /* get psgmii base address from psgmii node */ ret = OF_decode_addr(psgmii_p, 0, &sc->sc_psgmii_mem_tag, &sc->sc_psgmii_mem_handle, &sc->sc_psgmii_mem_size); if (ret != 0) { device_printf(dev, "%s: couldn't map psgmii mem (%d)\n", __func__, ret); goto error; } /* get switch base address */ sc->sc_ess_mem_rid = 0; sc->sc_ess_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_ess_mem_rid, RF_ACTIVE); if (sc->sc_ess_mem_res == NULL) { device_printf(dev, "%s: failed to find memory resource\n", __func__); goto error; } sc->sc_ess_mem_size = (size_t) bus_get_resource_count(dev, SYS_RES_MEMORY, sc->sc_ess_mem_rid); if (sc->sc_ess_mem_size == 0) { device_printf(dev, "%s: failed to get device memory size\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_mac_mode", &sc->sc_config.switch_mac_mode, sizeof(sc->sc_config.switch_mac_mode)); if (ret < 0) { device_printf(dev, "%s: missing switch_mac_mode property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_cpu_bmp", &sc->sc_config.switch_cpu_bmp, sizeof(sc->sc_config.switch_cpu_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_cpu_bmp property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_lan_bmp", &sc->sc_config.switch_lan_bmp, sizeof(sc->sc_config.switch_lan_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_lan_bmp property\n", __func__); goto error; } ret = OF_getencprop(ofw_bus_get_node(dev), "switch_wan_bmp", &sc->sc_config.switch_wan_bmp, sizeof(sc->sc_config.switch_wan_bmp)); if (ret < 0) { device_printf(dev, "%s: missing switch_wan_bmp property\n", __func__); goto error; } ret = clk_get_by_ofw_name(dev, 0, "ess_clk", &sc->sc_ess_clk); if (ret != 0) { device_printf(dev, "%s: failed to find ess_clk (%d)\n", __func__, ret); goto error; } ret = clk_enable(sc->sc_ess_clk); if (ret != 0) { device_printf(dev, "%s: failed to enable clock (%d)\n", __func__, ret); goto error; } ret = hwreset_get_by_ofw_name(dev, 0, "ess_rst", &sc->sc_ess_rst); if (ret != 0) { device_printf(dev, "%s: failed to find ess_rst (%d)\n", __func__, ret); goto error; } /* * Ok, at this point we have enough resources to do an initial * reset and configuration. */ AR40XX_LOCK(sc); /* Initial PSGMII/RGMII port configuration */ ret = ar40xx_hw_psgmii_init_config(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to init PSGMII (%d)\n", ret); goto error_locked; } /* * ESS reset - this resets both the ethernet switch * AND the ethernet block. */ ret = ar40xx_hw_ess_reset(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to reset ESS block (%d)\n", ret); goto error_locked; } /* * Check the PHY IDs for each of the PHYs from 0..4; * this is useful to make sure that we can SEE the external * PHY(s). 
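 *
 * A minimal sketch of such a check (illustrative only, not the
 * driver's actual code; ar40xx_hw_phy_get_ids() below does the real
 * work): read MII_PHYIDR1/MII_PHYIDR2 through the MDIO parent and
 * combine them:
 *
 *	uint32_t id;
 *	id = (uint32_t)MDIO_READREG(sc->sc_mdio_dev, phy, MII_PHYIDR1) << 16;
 *	id |= MDIO_READREG(sc->sc_mdio_dev, phy, MII_PHYIDR2) & 0xffff;
 *	// All-ones or all-zeroes here usually means the PHY is absent
 *	// or the MDIO path is broken.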
*/ if (bootverbose) { ret = ar40xx_hw_phy_get_ids(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to check PHY IDs (%d)\n", ret); goto error_locked; } } /* * Do PSGMII PHY self-test; work around issues. */ ret = ar40xx_hw_psgmii_self_test(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to do PSGMII self-test (%d)\n", ret); goto error_locked; } /* Return port config to runtime state */ ret = ar40xx_hw_psgmii_self_test_clean(sc); if (ret != 0) { device_printf(sc->sc_dev, "ERROR: failed to do PSGMII runtime config (%d)\n", ret); goto error_locked; } /* mac_mode_init */ ret = ar40xx_hw_psgmii_set_mac_mode(sc, sc->sc_config.switch_mac_mode); /* Initialise each hardware port */ for (i = 0; i < AR40XX_NUM_PORTS; i++) { ret = ar40xx_hw_port_init(sc, i); } /* initialise the global switch configuration */ ret = ar40xx_hw_init_globals(sc); /* reset the switch vlan/port learning config */ ret = ar40xx_reset_switch(sc); /* cpuport setup */ ret = ar40xx_hw_port_cpuport_setup(sc); AR40XX_UNLOCK(sc); #if 0 /* We may end up needing the QM workaround code here.. */ device_printf(dev, "%s: TODO: QM error check\n", __func__); #endif /* Attach PHYs */ ret = ar40xx_attach_phys(sc); - ret = bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); ret = bus_generic_attach(dev); /* Start timer */ callout_init_mtx(&sc->sc_phy_callout, &sc->sc_mtx, 0); /* * Setup the etherswitch info block. */ strlcpy(sc->sc_info.es_name, device_get_desc(dev), sizeof(sc->sc_info.es_name)); sc->sc_info.es_nports = AR40XX_NUM_PORTS; sc->sc_info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; /* XXX TODO: double-tag / 802.1ad */ sc->sc_info.es_nvlangroups = AR40XX_NUM_VTU_ENTRIES; /* * Fetch the initial port configuration. */ AR40XX_LOCK(sc); ar40xx_tick(sc); AR40XX_UNLOCK(sc); ar40xx_sysctl_attach(sc); return (0); error_locked: AR40XX_UNLOCK(sc); error: ar40xx_detach(dev); return (ENXIO); } static void ar40xx_lock(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_LOCK(sc); } static void ar40xx_unlock(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); AR40XX_LOCK_ASSERT(sc); AR40XX_UNLOCK(sc); } static etherswitch_info_t * ar40xx_getinfo(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); return (&sc->sc_info); } static int ar40xx_readreg(device_t dev, int addr) { struct ar40xx_softc *sc = device_get_softc(dev); if (addr >= sc->sc_ess_mem_size - 1) return (-1); AR40XX_REG_BARRIER_READ(sc); return AR40XX_REG_READ(sc, addr); } static int ar40xx_writereg(device_t dev, int addr, int value) { struct ar40xx_softc *sc = device_get_softc(dev); if (addr >= sc->sc_ess_mem_size - 1) return (-1); AR40XX_REG_WRITE(sc, addr, value); AR40XX_REG_BARRIER_WRITE(sc); return (0); } /* * Get the port configuration and status. */ static int ar40xx_getport(device_t dev, etherswitch_port_t *p) { struct ar40xx_softc *sc = device_get_softc(dev); struct mii_data *mii = NULL; struct ifmediareq *ifmr; int err; if (p->es_port < 0 || p->es_port > sc->sc_info.es_nports) return (ENXIO); AR40XX_LOCK(sc); /* Fetch the current VLAN configuration for this port */ /* PVID */ ar40xx_hw_get_port_pvid(sc, p->es_port, &p->es_pvid); /* * The VLAN egress settings aren't per-port; * instead they're part of the VLAN group config.
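 * (Per-group member and untagged-port masks are exposed through
 * ar40xx_getvgroup()/ar40xx_setvgroup() below rather than through a
 * per-port register here.)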
*/ /* Get MII config */ mii = ar40xx_phy_miiforport(sc, p->es_port); AR40XX_UNLOCK(sc); if (p->es_port == 0) { /* CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { /* non-CPU port */ err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } /* * Set the port configuration and status. */ static int ar40xx_setport(device_t dev, etherswitch_port_t *p) { struct ar40xx_softc *sc = device_get_softc(dev); struct ifmedia *ifm; struct mii_data *mii; if_t ifp; int ret; if (p->es_port < 0 || p->es_port > sc->sc_info.es_nports) return (EINVAL); /* Port flags */ AR40XX_LOCK(sc); ret = ar40xx_hw_set_port_pvid(sc, p->es_port, p->es_pvid); if (ret != 0) { AR40XX_UNLOCK(sc); return (ret); } /* XXX TODO: tag strip/unstrip, double-tag, etc */ AR40XX_UNLOCK(sc); /* Don't change media config on CPU port */ if (p->es_port == 0) return (0); mii = ar40xx_phy_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = ar40xx_phy_ifpforport(sc, p->es_port); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } /* * Get the current VLAN group (per-port, ISL, dot1q) configuration. * * For now the only supported operating mode is dot1q. */ static int ar40xx_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct ar40xx_softc *sc = device_get_softc(dev); int vid, ret; if (vg->es_vlangroup > sc->sc_info.es_nvlangroups) return (EINVAL); vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_fid = 0; AR40XX_LOCK(sc); /* Note: only supporting 802.1q VLAN config for now */ if (sc->sc_vlan.vlan != 1) { vg->es_member_ports = 0; vg->es_untagged_ports = 0; AR40XX_UNLOCK(sc); return (-1); } /* Get vlangroup mapping to VLAN id */ vid = sc->sc_vlan.vlan_id[vg->es_vlangroup]; if ((vid & ETHERSWITCH_VID_VALID) == 0) { /* Not an active vgroup; bail */ AR40XX_UNLOCK(sc); return (0); } vg->es_vid = vid; ret = ar40xx_hw_vtu_get_vlan(sc, vid, &vg->es_member_ports, &vg->es_untagged_ports); AR40XX_UNLOCK(sc); if (ret == 0) { vg->es_vid |= ETHERSWITCH_VID_VALID; } return (ret); } /* * Set the current VLAN group (per-port, ISL, dot1q) configuration. * * For now the only supported operating mode is dot1q. */ static int ar40xx_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct ar40xx_softc *sc = device_get_softc(dev); int err, vid; /* For now we only support 802.1q mode */ if (sc->sc_vlan.vlan == 0) return (EINVAL); AR40XX_LOCK(sc); vid = sc->sc_vlan.vlan_id[vg->es_vlangroup]; /* * If we have an 802.1q VID and it's different from the current one, * purge the current VTU entry.
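 * The overall update below is therefore (sketch; 'group', 'members'
 * and 'untagged' stand in for the vlangroup fields used in the code):
 *
 *	ar40xx_hw_vtu_flush(sc);		// drop stale VTU state
 *	sc->sc_vlan.vlan_id[group] = vid | ETHERSWITCH_VID_VALID;
 *	ar40xx_hw_vtu_load_vlan(sc, vid, members, untagged);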
*/ if ((vid != 0) && ((vid & ETHERSWITCH_VID_VALID) != 0) && ((vid & ETHERSWITCH_VID_MASK) != (vg->es_vid & ETHERSWITCH_VID_MASK))) { AR40XX_DPRINTF(sc, AR40XX_DBG_VTU_OP, "%s: purging VID %d first\n", __func__, vid); err = ar40xx_hw_vtu_flush(sc); if (err != 0) { AR40XX_UNLOCK(sc); return (err); } } /* Update VLAN ID */ vid = vg->es_vid & ETHERSWITCH_VID_MASK; sc->sc_vlan.vlan_id[vg->es_vlangroup] = vid; if (vid == 0) { /* Setting it to 0 disables the group */ AR40XX_UNLOCK(sc); return (0); } /* Add valid bit for this entry */ sc->sc_vlan.vlan_id[vg->es_vlangroup] = vid | ETHERSWITCH_VID_VALID; /* Update hardware */ err = ar40xx_hw_vtu_load_vlan(sc, vid, vg->es_member_ports, vg->es_untagged_ports); if (err != 0) { AR40XX_UNLOCK(sc); return (err); } /* Update the config for the given entry */ sc->sc_vlan.vlan_ports[vg->es_vlangroup] = vg->es_member_ports; sc->sc_vlan.vlan_untagged[vg->es_vlangroup] = vg->es_untagged_ports; AR40XX_UNLOCK(sc); return (0); } /* * Get the current configuration mode. */ static int ar40xx_getconf(device_t dev, etherswitch_conf_t *conf) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); /* Only support dot1q VLAN for now */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; /* Switch MAC address */ ret = ar40xx_hw_read_switch_mac_address(sc, &conf->switch_macaddr); if (ret == 0) conf->cmd |= ETHERSWITCH_CONF_SWITCH_MACADDR; AR40XX_UNLOCK(sc); return (0); } /* * Set the current configuration and do a switch reset. * * For now the only supported operating mode is dot1q, don't * allow it to be set to non-dot1q. */ static int ar40xx_setconf(device_t dev, etherswitch_conf_t *conf) { struct ar40xx_softc *sc = device_get_softc(dev); int ret = 0; if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { /* Only support dot1q VLAN for now */ if (conf->vlan_mode != ETHERSWITCH_VLAN_DOT1Q) return (EINVAL); } if (conf->cmd & ETHERSWITCH_CONF_SWITCH_MACADDR) { AR40XX_LOCK(sc); ret = ar40xx_hw_read_switch_mac_address(sc, &conf->switch_macaddr); AR40XX_UNLOCK(sc); } return (ret); } /* * Flush all ATU entries. */ static int ar40xx_atu_flush_all(device_t dev) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); ret = ar40xx_hw_atu_flush_all(sc); AR40XX_UNLOCK(sc); return (ret); } /* * Flush all ATU entries for the given port. */ static int ar40xx_atu_flush_port(device_t dev, int port) { struct ar40xx_softc *sc = device_get_softc(dev); int ret; AR40XX_LOCK(sc); ret = ar40xx_hw_atu_flush_port(sc, port); AR40XX_UNLOCK(sc); return (ret); } /* * Load the ATU table into local storage so it can be iterated * over. */ static int ar40xx_atu_fetch_table(device_t dev, etherswitch_atu_table_t *table) { struct ar40xx_softc *sc = device_get_softc(dev); int err, nitems; memset(&sc->atu.entries, 0, sizeof(sc->atu.entries)); table->es_nitems = 0; nitems = 0; AR40XX_LOCK(sc); sc->atu.count = 0; err = ar40xx_hw_atu_fetch_entry(sc, NULL, 0); if (err != 0) goto done; while (nitems < AR40XX_NUM_ATU_ENTRIES) { err = ar40xx_hw_atu_fetch_entry(sc, &sc->atu.entries[nitems], 1); if (err != 0) goto done; sc->atu.entries[nitems].id = nitems; nitems++; } done: sc->atu.count = nitems; table->es_nitems = nitems; AR40XX_UNLOCK(sc); return (0); } /* * Iterate over the ATU table entries that have been previously * fetched. 
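 * A consumer of the etherswitch(4) interface would snapshot and then
 * walk the table roughly like this (illustrative only):
 *
 *	etherswitch_atu_table_t table;
 *	etherswitch_atu_entry_t e;
 *	int i;
 *
 *	ar40xx_atu_fetch_table(dev, &table);	// snapshot into softc
 *	for (i = 0; i < table.es_nitems; i++) {
 *		e.id = i;
 *		ar40xx_atu_fetch_table_entry(dev, &e);
 *		// e now holds the MAC address and port mask
 *	}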
*/ static int ar40xx_atu_fetch_table_entry(device_t dev, etherswitch_atu_entry_t *e) { struct ar40xx_softc *sc = device_get_softc(dev); int id, err = 0; id = e->id; AR40XX_LOCK(sc); if (id > sc->atu.count) { err = ENOENT; goto done; } memcpy(e, &sc->atu.entries[id], sizeof(*e)); done: AR40XX_UNLOCK(sc); return (err); } static device_method_t ar40xx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar40xx_probe), DEVMETHOD(device_attach, ar40xx_attach), DEVMETHOD(device_detach, ar40xx_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ar40xx_readphy), DEVMETHOD(miibus_writereg, ar40xx_writephy), DEVMETHOD(miibus_statchg, ar40xx_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, ar40xx_readphy), DEVMETHOD(mdio_writereg, ar40xx_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ar40xx_lock), DEVMETHOD(etherswitch_unlock, ar40xx_unlock), DEVMETHOD(etherswitch_getinfo, ar40xx_getinfo), DEVMETHOD(etherswitch_readreg, ar40xx_readreg), DEVMETHOD(etherswitch_writereg, ar40xx_writereg), DEVMETHOD(etherswitch_readphyreg, ar40xx_readphy), DEVMETHOD(etherswitch_writephyreg, ar40xx_writephy), DEVMETHOD(etherswitch_getport, ar40xx_getport), DEVMETHOD(etherswitch_setport, ar40xx_setport), DEVMETHOD(etherswitch_getvgroup, ar40xx_getvgroup), DEVMETHOD(etherswitch_setvgroup, ar40xx_setvgroup), DEVMETHOD(etherswitch_getconf, ar40xx_getconf), DEVMETHOD(etherswitch_setconf, ar40xx_setconf), DEVMETHOD(etherswitch_flush_all, ar40xx_atu_flush_all), DEVMETHOD(etherswitch_flush_port, ar40xx_atu_flush_port), DEVMETHOD(etherswitch_fetch_table, ar40xx_atu_fetch_table), DEVMETHOD(etherswitch_fetch_table_entry, ar40xx_atu_fetch_table_entry), DEVMETHOD_END }; DEFINE_CLASS_0(ar40xx, ar40xx_driver, ar40xx_methods, sizeof(struct ar40xx_softc)); DRIVER_MODULE(ar40xx, simplebus, ar40xx_driver, 0, 0); DRIVER_MODULE(ar40xx, ofwbus, ar40xx_driver, 0, 0); DRIVER_MODULE(miibus, ar40xx, miibus_driver, 0, 0); DRIVER_MODULE(mdio, ar40xx, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, ar40xx, etherswitch_driver, 0, 0); MODULE_DEPEND(ar40xx, mdio, 1, 1, 1); MODULE_DEPEND(ar40xx, miibus, 1, 1, 1); MODULE_DEPEND(ar40xx, etherswitch, 1, 1, 1); MODULE_VERSION(ar40xx, 1); diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c index f1e6d1944038..395edf3322ae 100644 --- a/sys/dev/etherswitch/arswitch/arswitch.c +++ b/sys/dev/etherswitch/arswitch/arswitch.c @@ -1,1317 +1,1317 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" /* Map ETHERSWITCH_PORT_LED_* to Atheros pattern codes */ static int led_pattern_table[] = { [ETHERSWITCH_PORT_LED_DEFAULT] = 0x3, [ETHERSWITCH_PORT_LED_ON] = 0x2, [ETHERSWITCH_PORT_LED_OFF] = 0x0, [ETHERSWITCH_PORT_LED_BLINK] = 0x1 }; static inline int arswitch_portforphy(int phy); static void arswitch_tick(void *arg); static int arswitch_ifmedia_upd(if_t); static void arswitch_ifmedia_sts(if_t, struct ifmediareq *); static int ar8xxx_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p); static int ar8xxx_port_vlan_get(struct arswitch_softc *sc, etherswitch_port_t *p); static int arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style); static int arswitch_probe(device_t dev) { struct arswitch_softc *sc; uint32_t id; char *chipname; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->page = -1; /* AR8xxx probe */ id = arswitch_readreg(dev, AR8X16_REG_MASK_CTRL); sc->chip_rev = (id & AR8X16_MASK_CTRL_REV_MASK); sc->chip_ver = (id & AR8X16_MASK_CTRL_VER_MASK) >> AR8X16_MASK_CTRL_VER_SHIFT; switch (id & (AR8X16_MASK_CTRL_VER_MASK | AR8X16_MASK_CTRL_REV_MASK)) { case 0x0101: chipname = "AR8216"; sc->sc_switchtype = AR8X16_SWITCH_AR8216; break; case 0x0201: chipname = "AR8226"; sc->sc_switchtype = AR8X16_SWITCH_AR8226; break; /* 0x0301 - AR8236 */ case 0x1000: case 0x1001: chipname = "AR8316"; sc->sc_switchtype = AR8X16_SWITCH_AR8316; break; case 0x1202: case 0x1204: chipname = "AR8327"; sc->sc_switchtype = AR8X16_SWITCH_AR8327; sc->mii_lo_first = 1; break; default: chipname = NULL; } DPRINTF(sc, ARSWITCH_DBG_ANY, "chipname=%s, id=%08x\n", chipname, id); if (chipname != NULL) { device_set_descf(dev, "Atheros %s Ethernet Switch (ver %d rev %d)", chipname, sc->chip_ver, sc->chip_rev); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int arswitch_attach_phys(struct arswitch_softc *sc) { int phy, err = 0; char name[IFNAMSIZ]; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < sc->numphys; phy++) { sc->ifp[phy] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[phy], sc); if_setflagbits(sc->ifp[phy], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX, 0); sc->ifname[phy] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK); bcopy(name, sc->ifname[phy], strlen(name)+1); if_initname(sc->ifp[phy], sc->ifname[phy], arswitch_portforphy(phy)); err = mii_attach(sc->sc_dev, &sc->miibus[phy], sc->ifp[phy], arswitch_ifmedia_upd, arswitch_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); #if 0 DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(sc->miibus[phy]), 
sc->ifp[phy]->if_xname); #endif if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); return (err); } if (AR8X16_IS_SWITCH(sc, AR8327)) { int led; char ledname[IFNAMSIZ+4]; for (led = 0; led < 3; led++) { sprintf(ledname, "%s%dled%d", name, arswitch_portforphy(phy), led+1); sc->dev_led[phy][led].sc = sc; sc->dev_led[phy][led].phy = phy; sc->dev_led[phy][led].lednum = led; } } } return (0); } static int arswitch_reset(device_t dev) { arswitch_writereg(dev, AR8X16_REG_MASK_CTRL, AR8X16_MASK_CTRL_SOFT_RESET); DELAY(1000); if (arswitch_readreg(dev, AR8X16_REG_MASK_CTRL) & AR8X16_MASK_CTRL_SOFT_RESET) { device_printf(dev, "unable to reset switch\n"); return (-1); } return (0); } static int arswitch_set_vlan_mode(struct arswitch_softc *sc, uint32_t mode) { /* Check for invalid modes. */ if ((mode & sc->info.es_vlan_caps) != mode) return (EINVAL); switch (mode) { case ETHERSWITCH_VLAN_DOT1Q: sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; break; case ETHERSWITCH_VLAN_PORT: sc->vlan_mode = ETHERSWITCH_VLAN_PORT; break; default: sc->vlan_mode = 0; } /* Reset VLANs. */ sc->hal.arswitch_vlan_init_hw(sc); return (0); } static void ar8xxx_port_init(struct arswitch_softc *sc, int port) { /* Port0 - CPU */ if (port == AR8X16_PORT_CPU) { arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_STS(0), (AR8X16_IS_SWITCH(sc, AR8216) ? AR8X16_PORT_STS_SPEED_100 : AR8X16_PORT_STS_SPEED_1000) | (AR8X16_IS_SWITCH(sc, AR8216) ? 0 : AR8X16_PORT_STS_RXFLOW) | (AR8X16_IS_SWITCH(sc, AR8216) ? 0 : AR8X16_PORT_STS_TXFLOW) | AR8X16_PORT_STS_RXMAC | AR8X16_PORT_STS_TXMAC | AR8X16_PORT_STS_DUPLEX); arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_CTRL(0), arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(0)) & ~AR8X16_PORT_CTRL_HEADER); } else { /* Set ports to auto negotiation. */ arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_STS(port), AR8X16_PORT_STS_LINK_AUTO); arswitch_writereg(sc->sc_dev, AR8X16_REG_PORT_CTRL(port), arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(port)) & ~AR8X16_PORT_CTRL_HEADER); } } static int ar8xxx_atu_wait_ready(struct arswitch_softc *sc) { int ret; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); ret = arswitch_waitreg(sc->sc_dev, AR8216_REG_ATU, AR8216_ATU_ACTIVE, 0, 1000); return (ret); } /* * Flush all ATU entries. */ static int ar8xxx_atu_flush(struct arswitch_softc *sc) { int ret; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: flushing all ports\n", __func__); ret = ar8xxx_atu_wait_ready(sc); if (ret) device_printf(sc->sc_dev, "%s: waitreg failed\n", __func__); if (!ret) arswitch_writereg(sc->sc_dev, AR8216_REG_ATU, AR8216_ATU_OP_FLUSH | AR8216_ATU_ACTIVE); return (ret); } /* * Flush ATU entries for a single port. */ static int ar8xxx_atu_flush_port(struct arswitch_softc *sc, int port) { int ret, val; DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: flushing port %d\n", __func__, port); ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); /* Flush unicast entries on port */ val = AR8216_ATU_OP_FLUSH_UNICAST; /* TODO: bit 4 indicates whether to flush dynamic (0) or static (1) */ /* Which port */ val |= SM(port, AR8216_ATU_PORT_NUM); ret = ar8xxx_atu_wait_ready(sc); if (ret) device_printf(sc->sc_dev, "%s: waitreg failed\n", __func__); if (!ret) arswitch_writereg(sc->sc_dev, AR8216_REG_ATU, val | AR8216_ATU_ACTIVE); return (ret); } /* * XXX TODO: flush a single MAC address. */ /* * Fetch a single entry from the ATU. 
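 * This is a two-step protocol: one call with atu_fetch_op == 0 arms
 * AR8216_ATU_OP_GET_NEXT, then repeated calls with atu_fetch_op == 1
 * return successive entries until -1 signals the end, e.g. (sketch;
 * lock held throughout):
 *
 *	(void) ar8xxx_atu_fetch_table(sc, NULL, 0);	// initialise
 *	while (ar8xxx_atu_fetch_table(sc, &e, 1) == 0)
 *		;					// consume 'e'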
*/ static int ar8xxx_atu_fetch_table(struct arswitch_softc *sc, etherswitch_atu_entry_t *e, int atu_fetch_op) { uint32_t ret0, ret1, ret2, val; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); switch (atu_fetch_op) { case 0: /* Initialise things for the first fetch */ DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: initializing\n", __func__); (void) ar8xxx_atu_wait_ready(sc); arswitch_writereg(sc->sc_dev, AR8216_REG_ATU, AR8216_ATU_OP_GET_NEXT); arswitch_writereg(sc->sc_dev, AR8216_REG_ATU_DATA, 0); arswitch_writereg(sc->sc_dev, AR8216_REG_ATU_CTRL2, 0); return (0); case 1: DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: reading next\n", __func__); /* * Attempt to read the next address entry; don't modify what * is there in AT_ADDR{4,5} as it's used for the next fetch */ (void) ar8xxx_atu_wait_ready(sc); /* Begin the next read event; not modifying anything */ val = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU); val |= AR8216_ATU_ACTIVE; arswitch_writereg(sc->sc_dev, AR8216_REG_ATU, val); /* Wait for it to complete */ (void) ar8xxx_atu_wait_ready(sc); /* Fetch the ethernet address and ATU status */ ret0 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU); ret1 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU_DATA); ret2 = arswitch_readreg(sc->sc_dev, AR8216_REG_ATU_CTRL2); /* If the status is zero, then we're done */ if (MS(ret2, AR8216_ATU_CTRL2_AT_STATUS) == 0) return (-1); /* MAC address */ e->es_macaddr[5] = MS(ret0, AR8216_ATU_ADDR5); e->es_macaddr[4] = MS(ret0, AR8216_ATU_ADDR4); e->es_macaddr[3] = MS(ret1, AR8216_ATU_ADDR3); e->es_macaddr[2] = MS(ret1, AR8216_ATU_ADDR2); e->es_macaddr[1] = MS(ret1, AR8216_ATU_ADDR1); e->es_macaddr[0] = MS(ret1, AR8216_ATU_ADDR0); /* Bitmask of ports this entry is for */ e->es_portmask = MS(ret2, AR8216_ATU_CTRL2_DESPORT); /* TODO: other flags that are interesting */ DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: MAC %6D portmask 0x%08x\n", __func__, e->es_macaddr, ":", e->es_portmask); return (0); default: return (-1); } return (-1); } /* * Configure aging register defaults. */ static int ar8xxx_atu_learn_default(struct arswitch_softc *sc) { int ret; uint32_t val; DPRINTF(sc, ARSWITCH_DBG_ATU, "%s: resetting learning\n", __func__); /* * For now, configure the aging defaults: * * + ARP_EN - enable "acknowledgement" of ARP frames - they are * forwarded to the CPU port * + LEARN_CHANGE_EN - hash table violations when learning MAC addresses * will force an entry to be expired/updated and a new one to be * programmed in. * + AGE_EN - enable address table aging * + AGE_TIME - set to 5 minutes */ val = 0; val |= AR8216_ATU_CTRL_ARP_EN; val |= AR8216_ATU_CTRL_LEARN_CHANGE; val |= AR8216_ATU_CTRL_AGE_EN; val |= 0x2b; /* 5 minutes; bits 15:0 */ ret = arswitch_writereg(sc->sc_dev, AR8216_REG_ATU_CTRL, val); if (ret) device_printf(sc->sc_dev, "%s: writereg failed\n", __func__); return (ret); } /* * XXX TODO: add another routine to configure the leaky behaviour * when unknown frames are received. These must be consistent * between ethernet switches. */ /* * Fetch the configured switch MAC address.
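 * The address is split across two registers: SW_MAC_ADDR0 supplies
 * octets 4 and 5, SW_MAC_ADDR1 octets 0 through 3; the function below
 * simply reassembles them into the ether_addr octet array.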
*/ static int ar8xxx_hw_get_switch_macaddr(struct arswitch_softc *sc, struct ether_addr *ea) { uint32_t ret0, ret1; char *s; s = (void *) ea; ret0 = arswitch_readreg(sc->sc_dev, AR8X16_REG_SW_MAC_ADDR0); ret1 = arswitch_readreg(sc->sc_dev, AR8X16_REG_SW_MAC_ADDR1); s[5] = MS(ret0, AR8X16_REG_SW_MAC_ADDR0_BYTE5); s[4] = MS(ret0, AR8X16_REG_SW_MAC_ADDR0_BYTE4); s[3] = MS(ret1, AR8X16_REG_SW_MAC_ADDR1_BYTE3); s[2] = MS(ret1, AR8X16_REG_SW_MAC_ADDR1_BYTE2); s[1] = MS(ret1, AR8X16_REG_SW_MAC_ADDR1_BYTE1); s[0] = MS(ret1, AR8X16_REG_SW_MAC_ADDR1_BYTE0); return (0); } /* * Set the switch mac address. */ static int ar8xxx_hw_set_switch_macaddr(struct arswitch_softc *sc, const struct ether_addr *ea) { return (ENXIO); } /* * XXX TODO: this attach routine does NOT free all memory and resources * upon failure! */ static int arswitch_attach(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; int err = 0; int port; /* sc->sc_switchtype is already decided in arswitch_probe() */ sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "arswitch", NULL, MTX_DEF); sc->page = -1; strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* Debugging */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); /* Allocate a 128 entry ATU table; hopefully it's big enough! */ /* XXX TODO: make this per chip */ sc->atu.entries = malloc(sizeof(etherswitch_atu_entry_t) * 128, M_DEVBUF, M_NOWAIT); if (sc->atu.entries == NULL) { device_printf(sc->sc_dev, "%s: failed to allocate ATU table\n", __func__); return (ENXIO); } sc->atu.count = 0; sc->atu.size = 128; /* Default HAL methods */ sc->hal.arswitch_port_init = ar8xxx_port_init; sc->hal.arswitch_port_vlan_setup = ar8xxx_port_vlan_setup; sc->hal.arswitch_port_vlan_get = ar8xxx_port_vlan_get; sc->hal.arswitch_vlan_init_hw = ar8xxx_reset_vlans; sc->hal.arswitch_hw_get_switch_macaddr = ar8xxx_hw_get_switch_macaddr; sc->hal.arswitch_hw_set_switch_macaddr = ar8xxx_hw_set_switch_macaddr; sc->hal.arswitch_vlan_getvgroup = ar8xxx_getvgroup; sc->hal.arswitch_vlan_setvgroup = ar8xxx_setvgroup; sc->hal.arswitch_vlan_get_pvid = ar8xxx_get_pvid; sc->hal.arswitch_vlan_set_pvid = ar8xxx_set_pvid; sc->hal.arswitch_get_dot1q_vlan = ar8xxx_get_dot1q_vlan; sc->hal.arswitch_set_dot1q_vlan = ar8xxx_set_dot1q_vlan; sc->hal.arswitch_flush_dot1q_vlan = ar8xxx_flush_dot1q_vlan; sc->hal.arswitch_purge_dot1q_vlan = ar8xxx_purge_dot1q_vlan; sc->hal.arswitch_get_port_vlan = ar8xxx_get_port_vlan; sc->hal.arswitch_set_port_vlan = ar8xxx_set_port_vlan; sc->hal.arswitch_atu_flush = ar8xxx_atu_flush; sc->hal.arswitch_atu_flush_port = ar8xxx_atu_flush_port; sc->hal.arswitch_atu_learn_default = ar8xxx_atu_learn_default; sc->hal.arswitch_atu_fetch_table = ar8xxx_atu_fetch_table; sc->hal.arswitch_phy_read = arswitch_readphy_internal; sc->hal.arswitch_phy_write = arswitch_writephy_internal; /* * Attach switch-related functions */ if (AR8X16_IS_SWITCH(sc, AR8216)) ar8216_attach(sc); else if (AR8X16_IS_SWITCH(sc, AR8226)) ar8226_attach(sc); else if (AR8X16_IS_SWITCH(sc, AR8316)) ar8316_attach(sc); else if (AR8X16_IS_SWITCH(sc, AR8327)) ar8327_attach(sc); else { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: unknown switch (%d)?\n", __func__, sc->sc_switchtype); return (ENXIO); } /* Common defaults.
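 * These defaults can be overridden from device.hints(5), e.g.
 * (illustrative values):
 *
 *	hint.arswitch.0.numphys=4
 *	hint.arswitch.0.phy4cpu=1
 *	hint.arswitch.0.is_rgmii=1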
*/ sc->info.es_nports = 5; /* XXX technically 6, but 6th not used */ /* XXX Defaults for externally connected AR8316 */ sc->numphys = 4; sc->phy4cpu = 1; sc->is_rgmii = 1; sc->is_gmii = 0; sc->is_mii = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "numphys", &sc->numphys); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phy4cpu", &sc->phy4cpu); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "is_rgmii", &sc->is_rgmii); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "is_gmii", &sc->is_gmii); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "is_mii", &sc->is_mii); if (sc->numphys > AR8X16_NUM_PHYS) sc->numphys = AR8X16_NUM_PHYS; /* Reset the switch. */ if (arswitch_reset(dev)) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: arswitch_reset: failed\n", __func__); return (ENXIO); } err = sc->hal.arswitch_hw_setup(sc); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: hw_setup: err=%d\n", __func__, err); return (err); } err = sc->hal.arswitch_hw_global_setup(sc); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: hw_global_setup: err=%d\n", __func__, err); return (err); } /* * Configure the default address table learning parameters for this * switch. */ err = sc->hal.arswitch_atu_learn_default(sc); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: atu_learn_default: err=%d\n", __func__, err); return (err); } /* Initialize the switch ports. */ for (port = 0; port <= sc->numphys; port++) { sc->hal.arswitch_port_init(sc, port); } /* * Attach the PHYs and complete the bus enumeration. */ err = arswitch_attach_phys(sc); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: attach_phys: err=%d\n", __func__, err); return (err); } /* Default to ingress filters off. */ err = arswitch_set_vlan_mode(sc, 0); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: set_vlan_mode: err=%d\n", __func__, err); return (err); } - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) { DPRINTF(sc, ARSWITCH_DBG_ANY, "%s: bus_generic_attach: err=%d\n", __func__, err); return (err); } callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0); ARSWITCH_LOCK(sc); arswitch_tick(sc); ARSWITCH_UNLOCK(sc); return (err); } static int arswitch_detach(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); int i; callout_drain(&sc->callout_tick); for (i=0; i < sc->numphys; i++) { if (sc->miibus[i] != NULL) device_delete_child(dev, sc->miibus[i]); if (sc->ifp[i] != NULL) if_free(sc->ifp[i]); free(sc->ifname[i], M_DEVBUF); } free(sc->atu.entries, M_DEVBUF); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* * Convert PHY number to port number. PHY0 is connected to port 1, PHY1 to * port 2, etc. */ static inline int arswitch_portforphy(int phy) { return (phy+1); } static inline struct mii_data * arswitch_miiforport(struct arswitch_softc *sc, int port) { int phy = port-1; if (phy < 0 || phy >= sc->numphys) return (NULL); return (device_get_softc(sc->miibus[phy])); } static inline if_t arswitch_ifpforport(struct arswitch_softc *sc, int port) { int phy = port-1; if (phy < 0 || phy >= sc->numphys) return (NULL); return (sc->ifp[phy]); } /* * Convert port status to ifmedia. 
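 * For example, a status word with LINK_UP set, the speed field equal
 * to SPEED_1000, the DUPLEX bit clear and the flow-control bits clear
 * maps to media_status = (IFM_AVALID | IFM_ACTIVE) and
 * media_active = (IFM_ETHER | IFM_1000_T | IFM_FDX) in the logic
 * below.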
*/ static void arswitch_update_ifmedia(int portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & AR8X16_PORT_STS_LINK_UP) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & AR8X16_PORT_STS_SPEED_MASK) { case AR8X16_PORT_STS_SPEED_10: *media_active |= IFM_10_T; break; case AR8X16_PORT_STS_SPEED_100: *media_active |= IFM_100_TX; break; case AR8X16_PORT_STS_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & AR8X16_PORT_STS_DUPLEX) == 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; if ((portstatus & AR8X16_PORT_STS_TXFLOW) != 0) *media_active |= IFM_ETH_TXPAUSE; if ((portstatus & AR8X16_PORT_STS_RXFLOW) != 0) *media_active |= IFM_ETH_RXPAUSE; } /* * Poll the status for all PHYs. We're using the switch port status because * that's a lot quicker to read than talking to all the PHYs. Care must be * taken that the resulting ifmedia_active is identical to what the PHY will * compute, or gratuitous link status changes will occur whenever the PHY's * update function is called. */ static void arswitch_miipollstat(struct arswitch_softc *sc) { int i; struct mii_data *mii; struct mii_softc *miisc; int portstatus; int port_flap = 0; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); for (i = 0; i < sc->numphys; i++) { if (sc->miibus[i] == NULL) continue; mii = device_get_softc(sc->miibus[i]); /* XXX This would be nice to have abstracted out to be per-chip */ /* AR8327/AR8337 has a different register base */ if (AR8X16_IS_SWITCH(sc, AR8327)) portstatus = arswitch_readreg(sc->sc_dev, AR8327_REG_PORT_STATUS(arswitch_portforphy(i))); else portstatus = arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_STS(arswitch_portforphy(i))); #if 1 DPRINTF(sc, ARSWITCH_DBG_POLL, "p[%d]=0x%08x (%b)\n", i, portstatus, portstatus, "\20\3TXMAC\4RXMAC\5TXFLOW\6RXFLOW\7" "DUPLEX\11LINK_UP\12LINK_AUTO\13LINK_PAUSE"); #endif /* * If the current status is down, but we have a link * status showing up, we need to do an ATU flush. */ if ((mii->mii_media_status & IFM_ACTIVE) == 0 && (portstatus & AR8X16_PORT_STS_LINK_UP) != 0) { device_printf(sc->sc_dev, "%s: port %d: port -> UP\n", __func__, i); port_flap = 1; } /* * Likewise if a port goes up -> down. */ if ((mii->mii_media_status & IFM_ACTIVE) != 0 && (portstatus & AR8X16_PORT_STS_LINK_UP) == 0) { device_printf(sc->sc_dev, "%s: port %d: port -> DOWN\n", __func__, i); port_flap = 1; } arswitch_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } /* If a port went from down->up, flush the ATU */ if (port_flap) sc->hal.arswitch_atu_flush(sc); } static void arswitch_tick(void *arg) { struct arswitch_softc *sc = arg; arswitch_miipollstat(sc); callout_reset(&sc->callout_tick, hz, arswitch_tick, sc); } static void arswitch_lock(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); ARSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); ARSWITCH_LOCK(sc); } static void arswitch_unlock(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); ARSWITCH_UNLOCK(sc); } static etherswitch_info_t * arswitch_getinfo(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); return (&sc->info); } static int ar8xxx_port_vlan_get(struct arswitch_softc *sc, etherswitch_port_t *p) { uint32_t reg; ARSWITCH_LOCK(sc); /* Retrieve the PVID.
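 * (The PVID is the 802.1q VLAN ID assigned to untagged frames
 * arriving on the port.)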
*/ sc->hal.arswitch_vlan_get_pvid(sc, p->es_port, &p->es_pvid); /* Port flags. */ reg = arswitch_readreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(p->es_port)); if (reg & AR8X16_PORT_CTRL_DOUBLE_TAG) p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG; reg >>= AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT; if ((reg & 0x3) == AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_ADD) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; if ((reg & 0x3) == AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_STRIP) p->es_flags |= ETHERSWITCH_PORT_STRIPTAG; ARSWITCH_UNLOCK(sc); return (0); } static int arswitch_is_cpuport(struct arswitch_softc *sc, int port) { return ((port == AR8X16_PORT_CPU) || ((AR8X16_IS_SWITCH(sc, AR8327) && port == AR8327_PORT_GMAC6))); } static int arswitch_getport(device_t dev, etherswitch_port_t *p) { struct arswitch_softc *sc; struct mii_data *mii; struct ifmediareq *ifmr; int err; sc = device_get_softc(dev); /* XXX +1 is for AR8327; should make this configurable! */ if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); err = sc->hal.arswitch_port_vlan_get(sc, p); if (err != 0) return (err); mii = arswitch_miiforport(sc, p->es_port); if (arswitch_is_cpuport(sc, p->es_port)) { /* fill in fixed values for CPU port */ /* XXX is this valid in all cases? */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } if (!arswitch_is_cpuport(sc, p->es_port) && AR8X16_IS_SWITCH(sc, AR8327)) { int led; p->es_nleds = 3; for (led = 0; led < p->es_nleds; led++) { int style; uint32_t val; /* Find the right style enum for our pattern */ val = arswitch_readreg(dev, ar8327_led_mapping[p->es_port-1][led].reg); val = (val>>ar8327_led_mapping[p->es_port-1][led].shift)&0x03; for (style = 0; style < ETHERSWITCH_PORT_LED_MAX; style++) { if (led_pattern_table[style] == val) break; } /* can't happen */ if (style == ETHERSWITCH_PORT_LED_MAX) style = ETHERSWITCH_PORT_LED_DEFAULT; p->es_led[led] = style; } } else { p->es_nleds = 0; } return (0); } static int ar8xxx_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p) { uint32_t reg; int err; ARSWITCH_LOCK(sc); /* Set the PVID. */ if (p->es_pvid != 0) sc->hal.arswitch_vlan_set_pvid(sc, p->es_port, p->es_pvid); /* Mutually exclusive. */ if (p->es_flags & ETHERSWITCH_PORT_ADDTAG && p->es_flags & ETHERSWITCH_PORT_STRIPTAG) { ARSWITCH_UNLOCK(sc); return (EINVAL); } reg = 0; if (p->es_flags & ETHERSWITCH_PORT_DOUBLE_TAG) reg |= AR8X16_PORT_CTRL_DOUBLE_TAG; if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) reg |= AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_ADD << AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT; if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG) reg |= AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_STRIP << AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT; err = arswitch_modifyreg(sc->sc_dev, AR8X16_REG_PORT_CTRL(p->es_port), 0x3 << AR8X16_PORT_CTRL_EGRESS_VLAN_MODE_SHIFT | AR8X16_PORT_CTRL_DOUBLE_TAG, reg); ARSWITCH_UNLOCK(sc); return (err); } static int arswitch_setport(device_t dev, etherswitch_port_t *p) { int err, i; struct arswitch_softc *sc; struct ifmedia *ifm; struct mii_data *mii; if_t ifp; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); /* Port flags. 
*/ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { err = sc->hal.arswitch_port_vlan_setup(sc, p); if (err) return (err); } /* Do not allow media or led changes on CPU port. */ if (arswitch_is_cpuport(sc, p->es_port)) return (0); if (AR8X16_IS_SWITCH(sc, AR8327)) { for (i = 0; i < 3; i++) { int err; err = arswitch_setled(sc, p->es_port-1, i, p->es_led[i]); if (err) return (err); } } mii = arswitch_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = arswitch_ifpforport(sc, p->es_port); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static int arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style) { int shift; int err; if (phy < 0 || phy > sc->numphys) return EINVAL; if (style < 0 || style > ETHERSWITCH_PORT_LED_MAX) return (EINVAL); ARSWITCH_LOCK(sc); shift = ar8327_led_mapping[phy][led].shift; err = (arswitch_modifyreg(sc->sc_dev, ar8327_led_mapping[phy][led].reg, 0x03 << shift, led_pattern_table[style] << shift)); ARSWITCH_UNLOCK(sc); return (err); } static void arswitch_statchg(device_t dev) { struct arswitch_softc *sc = device_get_softc(dev); DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__); } static int arswitch_ifmedia_upd(if_t ifp) { struct arswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = arswitch_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void arswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct arswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = arswitch_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int arswitch_getconf(device_t dev, etherswitch_conf_t *conf) { struct arswitch_softc *sc; int ret; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; /* Return the switch ethernet address. */ ret = sc->hal.arswitch_hw_get_switch_macaddr(sc, &conf->switch_macaddr); if (ret == 0) { conf->cmd |= ETHERSWITCH_CONF_SWITCH_MACADDR; } return (0); } static int arswitch_setconf(device_t dev, etherswitch_conf_t *conf) { struct arswitch_softc *sc; int err; sc = device_get_softc(dev); /* Set the VLAN mode. */ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { err = arswitch_set_vlan_mode(sc, conf->vlan_mode); if (err != 0) return (err); } /* TODO: Set the switch ethernet address. 
*/ return (0); } static int arswitch_atu_flush_all(device_t dev) { struct arswitch_softc *sc; int err; sc = device_get_softc(dev); ARSWITCH_LOCK(sc); err = sc->hal.arswitch_atu_flush(sc); /* Invalidate cached ATU */ sc->atu.count = 0; ARSWITCH_UNLOCK(sc); return (err); } static int arswitch_atu_flush_port(device_t dev, int port) { struct arswitch_softc *sc; int err; sc = device_get_softc(dev); ARSWITCH_LOCK(sc); err = sc->hal.arswitch_atu_flush_port(sc, port); /* Invalidate cached ATU */ sc->atu.count = 0; ARSWITCH_UNLOCK(sc); return (err); } static int arswitch_atu_fetch_table(device_t dev, etherswitch_atu_table_t *table) { struct arswitch_softc *sc; int err, nitems; sc = device_get_softc(dev); ARSWITCH_LOCK(sc); /* Initial setup */ nitems = 0; err = sc->hal.arswitch_atu_fetch_table(sc, NULL, 0); /* fetch - ideally yes we'd fetch into a separate table then switch */ while (err == 0 && nitems < sc->atu.size) { err = sc->hal.arswitch_atu_fetch_table(sc, &sc->atu.entries[nitems], 1); if (err == 0) { sc->atu.entries[nitems].id = nitems; nitems++; } } sc->atu.count = nitems; ARSWITCH_UNLOCK(sc); table->es_nitems = nitems; return (0); } static int arswitch_atu_fetch_table_entry(device_t dev, etherswitch_atu_entry_t *e) { struct arswitch_softc *sc; int id; sc = device_get_softc(dev); id = e->id; ARSWITCH_LOCK(sc); if (id > sc->atu.count) { ARSWITCH_UNLOCK(sc); return (ENOENT); } memcpy(e, &sc->atu.entries[id], sizeof(*e)); ARSWITCH_UNLOCK(sc); return (0); } static int arswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *e) { struct arswitch_softc *sc = device_get_softc(dev); return (sc->hal.arswitch_vlan_getvgroup(sc, e)); } static int arswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *e) { struct arswitch_softc *sc = device_get_softc(dev); return (sc->hal.arswitch_vlan_setvgroup(sc, e)); } static int arswitch_readphy(device_t dev, int phy, int reg) { struct arswitch_softc *sc = device_get_softc(dev); return (sc->hal.arswitch_phy_read(dev, phy, reg)); } static int arswitch_writephy(device_t dev, int phy, int reg, int val) { struct arswitch_softc *sc = device_get_softc(dev); return (sc->hal.arswitch_phy_write(dev, phy, reg, val)); } static device_method_t arswitch_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arswitch_probe), DEVMETHOD(device_attach, arswitch_attach), DEVMETHOD(device_detach, arswitch_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, arswitch_readphy), DEVMETHOD(miibus_writereg, arswitch_writephy), DEVMETHOD(miibus_statchg, arswitch_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, arswitch_readphy), DEVMETHOD(mdio_writereg, arswitch_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, arswitch_lock), DEVMETHOD(etherswitch_unlock, arswitch_unlock), DEVMETHOD(etherswitch_getinfo, arswitch_getinfo), DEVMETHOD(etherswitch_readreg, arswitch_readreg), DEVMETHOD(etherswitch_writereg, arswitch_writereg), DEVMETHOD(etherswitch_readphyreg, arswitch_readphy), DEVMETHOD(etherswitch_writephyreg, arswitch_writephy), DEVMETHOD(etherswitch_getport, arswitch_getport), DEVMETHOD(etherswitch_setport, arswitch_setport), DEVMETHOD(etherswitch_getvgroup, arswitch_getvgroup), DEVMETHOD(etherswitch_setvgroup, arswitch_setvgroup), DEVMETHOD(etherswitch_getconf, arswitch_getconf), DEVMETHOD(etherswitch_setconf, arswitch_setconf), DEVMETHOD(etherswitch_flush_all, arswitch_atu_flush_all), DEVMETHOD(etherswitch_flush_port, arswitch_atu_flush_port), DEVMETHOD(etherswitch_fetch_table, 
arswitch_atu_fetch_table), DEVMETHOD(etherswitch_fetch_table_entry, arswitch_atu_fetch_table_entry), DEVMETHOD_END }; DEFINE_CLASS_0(arswitch, arswitch_driver, arswitch_methods, sizeof(struct arswitch_softc)); DRIVER_MODULE(arswitch, mdio, arswitch_driver, 0, 0); DRIVER_MODULE(miibus, arswitch, miibus_driver, 0, 0); DRIVER_MODULE(mdio, arswitch, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, arswitch, etherswitch_driver, 0, 0); MODULE_VERSION(arswitch, 1); MODULE_DEPEND(arswitch, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(arswitch, etherswitch, 1, 1, 1); /* XXX which versions? */ diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c index 4e95287399e3..26152d1ebf26 100644 --- a/sys/dev/etherswitch/e6000sw/e6000sw.c +++ b/sys/dev/etherswitch/e6000sw/e6000sw.c @@ -1,1782 +1,1782 @@ /*- * Copyright (c) 2015 Semihalf * Copyright (c) 2015 Stormshield * Copyright (c) 2018-2019, Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #else #include #endif #include "e6000swreg.h" #include "etherswitch_if.h" #include "miibus_if.h" #include "mdio_if.h" MALLOC_DECLARE(M_E6000SW); MALLOC_DEFINE(M_E6000SW, "e6000sw", "e6000sw switch"); #define E6000SW_LOCK(_sc) sx_xlock(&(_sc)->sx) #define E6000SW_UNLOCK(_sc) sx_unlock(&(_sc)->sx) #define E6000SW_LOCK_ASSERT(_sc, _what) sx_assert(&(_sc)->sx, (_what)) #define E6000SW_TRYLOCK(_sc) sx_tryxlock(&(_sc)->sx) #define E6000SW_LOCKED(_sc) sx_xlocked(&(_sc)->sx) #define E6000SW_WAITREADY(_sc, _reg, _bit) \ e6000sw_waitready((_sc), REG_GLOBAL, (_reg), (_bit)) #define E6000SW_WAITREADY2(_sc, _reg, _bit) \ e6000sw_waitready((_sc), REG_GLOBAL2, (_reg), (_bit)) #define MDIO_READ(dev, addr, reg) \ MDIO_READREG(device_get_parent(dev), (addr), (reg)) #define MDIO_WRITE(dev, addr, reg, val) \ MDIO_WRITEREG(device_get_parent(dev), (addr), (reg), (val)) typedef struct e6000sw_softc { device_t dev; #ifdef FDT phandle_t node; #endif struct sx sx; if_t ifp[E6000SW_MAX_PORTS]; char *ifname[E6000SW_MAX_PORTS]; device_t miibus[E6000SW_MAX_PORTS]; struct taskqueue *sc_tq; struct timeout_task sc_tt; int vlans[E6000SW_NUM_VLANS]; uint32_t swid; uint32_t vlan_mode; uint32_t cpuports_mask; uint32_t fixed_mask; uint32_t fixed25_mask; uint32_t ports_mask; int phy_base; int sw_addr; int num_ports; } e6000sw_softc_t; static etherswitch_info_t etherswitch_info = { .es_nports = 0, .es_nvlangroups = 0, .es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q, .es_name = "Marvell 6000 series switch" }; static void e6000sw_identify(driver_t *, device_t); static int e6000sw_probe(device_t); #ifdef FDT static int e6000sw_parse_fixed_link(e6000sw_softc_t *, phandle_t, uint32_t); static int e6000sw_parse_ethernet(e6000sw_softc_t *, phandle_t, uint32_t); #endif static int e6000sw_attach(device_t); static int e6000sw_detach(device_t); static int e6000sw_read_xmdio(device_t, int, int, int); static int e6000sw_write_xmdio(device_t, int, int, int, int); static int e6000sw_readphy(device_t, int, int); static int e6000sw_writephy(device_t, int, int, int); static int e6000sw_readphy_locked(device_t, int, int); static int e6000sw_writephy_locked(device_t, int, int, int); static etherswitch_info_t* e6000sw_getinfo(device_t); static int e6000sw_getconf(device_t, etherswitch_conf_t *); static int e6000sw_setconf(device_t, etherswitch_conf_t *); static void e6000sw_lock(device_t); static void e6000sw_unlock(device_t); static int e6000sw_getport(device_t, etherswitch_port_t *); static int e6000sw_setport(device_t, etherswitch_port_t *); static int e6000sw_set_vlan_mode(e6000sw_softc_t *, uint32_t); static int e6000sw_readreg_wrapper(device_t, int); static int e6000sw_writereg_wrapper(device_t, int, int); static int e6000sw_getvgroup_wrapper(device_t, etherswitch_vlangroup_t *); static int e6000sw_setvgroup_wrapper(device_t, etherswitch_vlangroup_t *); static int e6000sw_setvgroup(device_t, etherswitch_vlangroup_t *); static int e6000sw_getvgroup(device_t, etherswitch_vlangroup_t *); static void e6000sw_setup(device_t, e6000sw_softc_t *); static void e6000sw_tick(void *, int); static void e6000sw_set_atustat(device_t, e6000sw_softc_t *, int, int); static int e6000sw_atu_flush(device_t, e6000sw_softc_t *, int); static int e6000sw_vtu_flush(e6000sw_softc_t *); static int e6000sw_vtu_update(e6000sw_softc_t *, int, int, int, int, 
int); static __inline void e6000sw_writereg(e6000sw_softc_t *, int, int, int); static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *, int, int); static int e6000sw_ifmedia_upd(if_t); static void e6000sw_ifmedia_sts(if_t, struct ifmediareq *); static int e6000sw_atu_mac_table(device_t, e6000sw_softc_t *, struct atu_opt *, int); static int e6000sw_get_pvid(e6000sw_softc_t *, int, int *); static void e6000sw_set_pvid(e6000sw_softc_t *, int, int); static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *, int); static __inline bool e6000sw_is_phyport(e6000sw_softc_t *, int); static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *, int); static __inline struct mii_data *e6000sw_miiforphy(e6000sw_softc_t *, unsigned int); static device_method_t e6000sw_methods[] = { /* device interface */ DEVMETHOD(device_identify, e6000sw_identify), DEVMETHOD(device_probe, e6000sw_probe), DEVMETHOD(device_attach, e6000sw_attach), DEVMETHOD(device_detach, e6000sw_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* mii interface */ DEVMETHOD(miibus_readreg, e6000sw_readphy), DEVMETHOD(miibus_writereg, e6000sw_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, e6000sw_getinfo), DEVMETHOD(etherswitch_getconf, e6000sw_getconf), DEVMETHOD(etherswitch_setconf, e6000sw_setconf), DEVMETHOD(etherswitch_lock, e6000sw_lock), DEVMETHOD(etherswitch_unlock, e6000sw_unlock), DEVMETHOD(etherswitch_getport, e6000sw_getport), DEVMETHOD(etherswitch_setport, e6000sw_setport), DEVMETHOD(etherswitch_readreg, e6000sw_readreg_wrapper), DEVMETHOD(etherswitch_writereg, e6000sw_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, e6000sw_readphy), DEVMETHOD(etherswitch_writephyreg, e6000sw_writephy), DEVMETHOD(etherswitch_setvgroup, e6000sw_setvgroup_wrapper), DEVMETHOD(etherswitch_getvgroup, e6000sw_getvgroup_wrapper), DEVMETHOD_END }; DEFINE_CLASS_0(e6000sw, e6000sw_driver, e6000sw_methods, sizeof(e6000sw_softc_t)); DRIVER_MODULE(e6000sw, mdio, e6000sw_driver, 0, 0); DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, 0, 0); DRIVER_MODULE(miibus, e6000sw, miibus_driver, 0, 0); MODULE_DEPEND(e6000sw, mdio, 1, 1, 1); static void e6000sw_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "e6000sw", -1) == NULL) BUS_ADD_CHILD(parent, 0, "e6000sw", DEVICE_UNIT_ANY); } static int e6000sw_probe(device_t dev) { e6000sw_softc_t *sc; const char *description; #ifdef FDT phandle_t switch_node; #else int is_6190; #endif sc = device_get_softc(dev); sc->dev = dev; #ifdef FDT switch_node = ofw_bus_find_compatible(OF_finddevice("/"), "marvell,mv88e6085"); if (switch_node == 0) { switch_node = ofw_bus_find_compatible(OF_finddevice("/"), "marvell,mv88e6190"); if (switch_node == 0) return (ENXIO); /* * Trust DTS and fix the port register offset for the MV88E6190 * detection below. */ sc->swid = MV88E6190; } if (bootverbose) device_printf(dev, "Found switch_node: 0x%x\n", switch_node); sc->node = switch_node; if (OF_getencprop(sc->node, "reg", &sc->sw_addr, sizeof(sc->sw_addr)) < 0) return (ENXIO); #else if (resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "addr", &sc->sw_addr) != 0) return (ENXIO); if (resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "is6190", &is_6190) != 0) /* * Check "is8190" to keep backward compatibility with * older setups.
*/ resource_int_value(device_get_name(sc->dev), device_get_unit(sc->dev), "is8190", &is_6190); if (is_6190 != 0) sc->swid = MV88E6190; #endif if (sc->sw_addr < 0 || sc->sw_addr > 32) return (ENXIO); /* * Create temporary lock, just to satisfy assertions, * when obtaining the switch ID. Destroy immediately afterwards. */ sx_init(&sc->sx, "e6000sw_tmp"); E6000SW_LOCK(sc); sc->swid = e6000sw_readreg(sc, REG_PORT(sc, 0), SWITCH_ID) & 0xfff0; E6000SW_UNLOCK(sc); sx_destroy(&sc->sx); switch (sc->swid) { case MV88E6141: description = "Marvell 88E6141"; sc->phy_base = 0x10; sc->num_ports = 6; break; case MV88E6341: description = "Marvell 88E6341"; sc->phy_base = 0x10; sc->num_ports = 6; break; case MV88E6352: description = "Marvell 88E6352"; sc->num_ports = 7; break; case MV88E6172: description = "Marvell 88E6172"; sc->num_ports = 7; break; case MV88E6176: description = "Marvell 88E6176"; sc->num_ports = 7; break; case MV88E6190: description = "Marvell 88E6190"; sc->num_ports = 11; break; default: device_printf(dev, "Unrecognized device, id 0x%x.\n", sc->swid); return (ENXIO); } device_set_desc(dev, description); return (BUS_PROBE_DEFAULT); } #ifdef FDT static int e6000sw_parse_fixed_link(e6000sw_softc_t *sc, phandle_t node, uint32_t port) { int speed; phandle_t fixed_link; fixed_link = ofw_bus_find_child(node, "fixed-link"); if (fixed_link != 0) { sc->fixed_mask |= (1 << port); if (OF_getencprop(fixed_link, "speed", &speed, sizeof(speed)) < 0) { device_printf(sc->dev, "Port %d has a fixed-link node without a speed " "property\n", port); return (ENXIO); } if (speed == 2500 && (MVSWITCH(sc, MV88E6141) || MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190))) sc->fixed25_mask |= (1 << port); } return (0); } static int e6000sw_parse_ethernet(e6000sw_softc_t *sc, phandle_t port_handle, uint32_t port) { phandle_t switch_eth, switch_eth_handle; if (OF_getencprop(port_handle, "ethernet", (void*)&switch_eth_handle, sizeof(switch_eth_handle)) > 0) { if (switch_eth_handle > 0) { switch_eth = OF_node_from_xref(switch_eth_handle); device_printf(sc->dev, "CPU port at %d\n", port); sc->cpuports_mask |= (1 << port); return (e6000sw_parse_fixed_link(sc, switch_eth, port)); } else device_printf(sc->dev, "Port %d has ethernet property but it points " "to an invalid location\n", port); } return (0); } static int e6000sw_parse_child_fdt(e6000sw_softc_t *sc, phandle_t child, int *pport) { uint32_t port; if (pport == NULL) return (ENXIO); if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0) return (ENXIO); if (port >= sc->num_ports) return (ENXIO); *pport = port; if (e6000sw_parse_fixed_link(sc, child, port) != 0) return (ENXIO); if (e6000sw_parse_ethernet(sc, child, port) != 0) return (ENXIO); if ((sc->fixed_mask & (1 << port)) != 0) device_printf(sc->dev, "fixed port at %d\n", port); else device_printf(sc->dev, "PHY at port %d\n", port); return (0); } #else static int e6000sw_check_hint_val(device_t dev, int *val, char *fmt, ...) 
{ char *resname; int err, len; va_list ap; len = min(strlen(fmt) * 2, 128); if (len == 0) return (-1); resname = malloc(len, M_E6000SW, M_WAITOK); memset(resname, 0, len); va_start(ap, fmt); vsnprintf(resname, len - 1, fmt, ap); va_end(ap); err = resource_int_value(device_get_name(dev), device_get_unit(dev), resname, val); free(resname, M_E6000SW); return (err); } static int e6000sw_parse_hinted_port(e6000sw_softc_t *sc, int port) { int err, val; err = e6000sw_check_hint_val(sc->dev, &val, "port%ddisabled", port); if (err == 0 && val != 0) return (1); err = e6000sw_check_hint_val(sc->dev, &val, "port%dcpu", port); if (err == 0 && val != 0) { sc->cpuports_mask |= (1 << port); sc->fixed_mask |= (1 << port); if (bootverbose) device_printf(sc->dev, "CPU port at %d\n", port); } err = e6000sw_check_hint_val(sc->dev, &val, "port%dspeed", port); if (err == 0 && val != 0) { sc->fixed_mask |= (1 << port); if (val == 2500) sc->fixed25_mask |= (1 << port); } if (bootverbose) { if ((sc->fixed_mask & (1 << port)) != 0) device_printf(sc->dev, "fixed port at %d\n", port); else device_printf(sc->dev, "PHY at port %d\n", port); } return (0); } #endif static int e6000sw_init_interface(e6000sw_softc_t *sc, int port) { char name[IFNAMSIZ]; snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev)); sc->ifp[port] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[port], sc); if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX, 0); sc->ifname[port] = malloc(strlen(name) + 1, M_E6000SW, M_NOWAIT); if (sc->ifname[port] == NULL) { if_free(sc->ifp[port]); return (ENOMEM); } memcpy(sc->ifname[port], name, strlen(name) + 1); if_initname(sc->ifp[port], sc->ifname[port], port); return (0); } static int e6000sw_attach_miibus(e6000sw_softc_t *sc, int port) { int err; err = mii_attach(sc->dev, &sc->miibus[port], sc->ifp[port], e6000sw_ifmedia_upd, e6000sw_ifmedia_sts, BMSR_DEFCAPMASK, port + sc->phy_base, MII_OFFSET_ANY, 0); if (err != 0) return (err); return (0); } static void e6000sw_serdes_power(device_t dev, int port, bool sgmii) { uint32_t reg; /* SGMII */ reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV, E6000SW_SERDES_SGMII_CTL); if (sgmii) reg &= ~E6000SW_SERDES_PDOWN; else reg |= E6000SW_SERDES_PDOWN; e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV, E6000SW_SERDES_SGMII_CTL, reg); /* 10GBASE-R/10GBASE-X4/X2 */ reg = e6000sw_read_xmdio(dev, port, E6000SW_SERDES_DEV, E6000SW_SERDES_PCS_CTL1); if (sgmii) reg |= E6000SW_SERDES_PDOWN; else reg &= ~E6000SW_SERDES_PDOWN; e6000sw_write_xmdio(dev, port, E6000SW_SERDES_DEV, E6000SW_SERDES_PCS_CTL1, reg); } static int e6000sw_attach(device_t dev) { bool sgmii; e6000sw_softc_t *sc; #ifdef FDT phandle_t child, ports; #endif int err, port; uint32_t reg; err = 0; sc = device_get_softc(dev); /* * According to the Linux source code, all of the Switch IDs we support * are multi_chip capable, and should go into multi-chip mode if the * sw_addr != 0. 
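* In multi-chip mode every register access goes through the indirect SMI command/data window instead of a plain MDIO cycle; see e6000sw_readreg() and e6000sw_writereg() below.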
*/ if (MVSWITCH_MULTICHIP(sc)) device_printf(dev, "multi-chip addressing mode (%#x)\n", sc->sw_addr); else device_printf(dev, "single-chip addressing mode\n"); sx_init(&sc->sx, "e6000sw"); E6000SW_LOCK(sc); e6000sw_setup(dev, sc); sc->sc_tq = taskqueue_create("e6000sw_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_tt, 0, e6000sw_tick, sc); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); #ifdef FDT ports = ofw_bus_find_child(sc->node, "ports"); if (ports == 0) { device_printf(dev, "failed to parse DTS: no ports found for " "switch\n"); E6000SW_UNLOCK(sc); return (ENXIO); } for (child = OF_child(ports); child != 0; child = OF_peer(child)) { err = e6000sw_parse_child_fdt(sc, child, &port); if (err != 0) { device_printf(sc->dev, "failed to parse DTS\n"); goto out_fail; } #else for (port = 0; port < sc->num_ports; port++) { err = e6000sw_parse_hinted_port(sc, port); if (err != 0) continue; #endif /* Port is in use. */ sc->ports_mask |= (1 << port); err = e6000sw_init_interface(sc, port); if (err != 0) { device_printf(sc->dev, "failed to init interface\n"); goto out_fail; } if (e6000sw_is_fixedport(sc, port)) { /* Link must be down to change speed force value. */ reg = e6000sw_readreg(sc, REG_PORT(sc, port), PSC_CONTROL); reg &= ~PSC_CONTROL_LINK_UP; reg |= PSC_CONTROL_FORCED_LINK; e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL, reg); /* * Force speed, full-duplex, EEE off and flow-control * on. */ reg &= ~(PSC_CONTROL_SPD2500 | PSC_CONTROL_ALT_SPD | PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON | PSC_CONTROL_FORCED_EEE); if (e6000sw_is_fixed25port(sc, port)) reg |= PSC_CONTROL_SPD2500; else reg |= PSC_CONTROL_SPD1000; if (MVSWITCH(sc, MV88E6190) && e6000sw_is_fixed25port(sc, port)) reg |= PSC_CONTROL_ALT_SPD; reg |= PSC_CONTROL_FORCED_DPX | PSC_CONTROL_FULLDPX | PSC_CONTROL_FORCED_LINK | PSC_CONTROL_LINK_UP | PSC_CONTROL_FORCED_SPD; if (!MVSWITCH(sc, MV88E6190)) reg |= PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON; if (MVSWITCH(sc, MV88E6141) || MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190)) reg |= PSC_CONTROL_FORCED_EEE; e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL, reg); /* Power on the SERDES interfaces. */ if (MVSWITCH(sc, MV88E6190) && (port == 9 || port == 10)) { if (e6000sw_is_fixed25port(sc, port)) sgmii = false; else sgmii = true; e6000sw_serdes_power(sc->dev, port, sgmii); } } /* Don't attach miibus at CPU/fixed ports */ if (!e6000sw_is_phyport(sc, port)) continue; err = e6000sw_attach_miibus(sc, port); if (err != 0) { device_printf(sc->dev, "failed to attach miibus\n"); goto out_fail; } } etherswitch_info.es_nports = sc->num_ports; /* Default to port vlan. */ e6000sw_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT); reg = e6000sw_readreg(sc, REG_GLOBAL, SWITCH_GLOBAL_STATUS); if (reg & SWITCH_GLOBAL_STATUS_IR) device_printf(dev, "switch is ready.\n"); E6000SW_UNLOCK(sc); - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz); return (0); out_fail: e6000sw_detach(dev); return (err); } static int e6000sw_waitready(e6000sw_softc_t *sc, uint32_t phy, uint32_t reg, uint32_t busybit) { int i; for (i = 0; i < E6000SW_RETRIES; i++) { if ((e6000sw_readreg(sc, phy, reg) & busybit) == 0) return (0); DELAY(1); } return (1); } /* XMDIO/Clause 45 access. 
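* The Clause 45 register is reached indirectly through the Global 2 SMI PHY command/data registers: an SMI_CMD_OP_C45_ADDR cycle first latches the 16-bit register address, then the read or write cycle is issued and the data moves through SMI_PHY_DATA_REG.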
*/ static int e6000sw_read_xmdio(device_t dev, int phy, int devaddr, int devreg) { e6000sw_softc_t *sc; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } reg = devaddr & SMI_CMD_REG_ADDR_MASK; reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK; /* Load C45 register address. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_ADDR); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Start C45 read operation. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_READ); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Read C45 data. */ reg = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG); return (reg & PHY_DATA_MASK); } static int e6000sw_write_xmdio(device_t dev, int phy, int devaddr, int devreg, int val) { e6000sw_softc_t *sc; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } reg = devaddr & SMI_CMD_REG_ADDR_MASK; reg |= (phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK; /* Load C45 register address. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, devreg); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_ADDR); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } /* Load data and start the C45 write operation. */ e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, val); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, reg | SMI_CMD_OP_C45_WRITE); return (0); } static int e6000sw_readphy(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; int locked, ret; sc = device_get_softc(dev); locked = E6000SW_LOCKED(sc); if (!locked) E6000SW_LOCK(sc); ret = e6000sw_readphy_locked(dev, phy, reg); if (!locked) E6000SW_UNLOCK(sc); return (ret); } /* * PHY registers are paged. Put page index in reg 22 (accessible from every * page), then access specific register.
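* Neither helper below changes the page itself; each one only issues a Clause 22 cycle against whatever page is currently selected.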
*/ static int e6000sw_readphy_locked(device_t dev, int phy, int reg) { e6000sw_softc_t *sc; uint32_t val; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) | ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } val = e6000sw_readreg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG); return (val & PHY_DATA_MASK); } static int e6000sw_writephy(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; int locked, ret; sc = device_get_softc(dev); locked = E6000SW_LOCKED(sc); if (!locked) E6000SW_LOCK(sc); ret = e6000sw_writephy_locked(dev, phy, reg, data); if (!locked) E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_writephy_locked(device_t dev, int phy, int reg, int data) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!e6000sw_is_phyport(sc, phy) || reg >= E6000SW_NUM_PHY_REGS) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } if (E6000SW_WAITREADY2(sc, SMI_PHY_CMD_REG, SMI_CMD_BUSY)) { device_printf(dev, "Timeout while waiting for switch\n"); return (ETIMEDOUT); } e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_DATA_REG, data & PHY_DATA_MASK); e6000sw_writereg(sc, REG_GLOBAL2, SMI_PHY_CMD_REG, SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) | ((phy << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); return (0); } static int e6000sw_detach(device_t dev) { int phy; e6000sw_softc_t *sc; sc = device_get_softc(dev); if (device_is_attached(dev)) taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt); if (sc->sc_tq != NULL) taskqueue_free(sc->sc_tq); device_delete_children(dev); sx_destroy(&sc->sx); for (phy = 0; phy < sc->num_ports; phy++) { if (sc->ifp[phy] != NULL) if_free(sc->ifp[phy]); if (sc->ifname[phy] != NULL) free(sc->ifname[phy], M_E6000SW); } return (0); } static etherswitch_info_t* e6000sw_getinfo(device_t dev) { return (&etherswitch_info); } static int e6000sw_getconf(device_t dev, etherswitch_conf_t *conf) { struct e6000sw_softc *sc; /* Return the VLAN mode. */ sc = device_get_softc(dev); conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int e6000sw_setconf(device_t dev, etherswitch_conf_t *conf) { struct e6000sw_softc *sc; /* Set the VLAN mode.
*/ sc = device_get_softc(dev); if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { E6000SW_LOCK(sc); e6000sw_set_vlan_mode(sc, conf->vlan_mode); E6000SW_UNLOCK(sc); } return (0); } static void e6000sw_lock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); } static void e6000sw_unlock(device_t dev) { struct e6000sw_softc *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); E6000SW_UNLOCK(sc); } static int e6000sw_getport(device_t dev, etherswitch_port_t *p) { struct mii_data *mii; int err; struct ifmediareq *ifmr; uint32_t reg; e6000sw_softc_t *sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); if (p->es_port >= sc->num_ports || p->es_port < 0) return (EINVAL); if (!e6000sw_is_portenabled(sc, p->es_port)) return (0); E6000SW_LOCK(sc); e6000sw_get_pvid(sc, p->es_port, &p->es_pvid); /* Port flags. */ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2); if (reg & PORT_CONTROL2_DISC_TAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED; if (reg & PORT_CONTROL2_DISC_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; err = 0; if (e6000sw_is_fixedport(sc, p->es_port)) { if (e6000sw_is_cpuport(sc, p->es_port)) p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; if (e6000sw_is_fixed25port(sc, p->es_port)) ifmr->ifm_active = IFM_2500_T; else ifmr->ifm_active = IFM_1000_T; ifmr->ifm_active |= IFM_ETHER | IFM_FDX; ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; } else { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); } E6000SW_UNLOCK(sc); return (err); } static int e6000sw_setport(device_t dev, etherswitch_port_t *p) { e6000sw_softc_t *sc; int err; struct mii_data *mii; uint32_t reg; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); if (p->es_port >= sc->num_ports || p->es_port < 0) return (EINVAL); if (!e6000sw_is_portenabled(sc, p->es_port)) return (0); E6000SW_LOCK(sc); /* Port flags. 
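* ETHERSWITCH_PORT_DROPTAGGED and ETHERSWITCH_PORT_DROPUNTAGGED map one-to-one onto the PORT_CONTROL2 discard-tagged/discard-untagged bits programmed below.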
*/ reg = e6000sw_readreg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2); if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED) reg |= PORT_CONTROL2_DISC_TAGGED; else reg &= ~PORT_CONTROL2_DISC_TAGGED; if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= PORT_CONTROL2_DISC_UNTAGGED; else reg &= ~PORT_CONTROL2_DISC_UNTAGGED; e6000sw_writereg(sc, REG_PORT(sc, p->es_port), PORT_CONTROL2, reg); err = 0; if (p->es_pvid != 0) e6000sw_set_pvid(sc, p->es_port, p->es_pvid); if (e6000sw_is_phyport(sc, p->es_port)) { mii = e6000sw_miiforphy(sc, p->es_port); err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCSIFMEDIA); } E6000SW_UNLOCK(sc); return (err); } static __inline void e6000sw_port_vlan_assign(e6000sw_softc_t *sc, int port, uint32_t fid, uint32_t members) { uint32_t reg; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP); reg &= ~(PORT_MASK(sc) | PORT_VLAN_MAP_FID_MASK); reg |= members & PORT_MASK(sc) & ~(1 << port); reg |= (fid << PORT_VLAN_MAP_FID) & PORT_VLAN_MAP_FID_MASK; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VLAN_MAP, reg); reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1); reg &= ~PORT_CONTROL1_FID_MASK; reg |= (fid >> 4) & PORT_CONTROL1_FID_MASK; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL1, reg); } static int e6000sw_init_vlan(struct e6000sw_softc *sc) { int i, port, ret; uint32_t members; /* Disable all ports */ for (port = 0; port < sc->num_ports; port++) { ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, (ret & ~PORT_CONTROL_ENABLE)); } /* Flush VTU. */ e6000sw_vtu_flush(sc); for (port = 0; port < sc->num_ports; port++) { /* Reset the egress and frame mode. */ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); ret &= ~(PORT_CONTROL_EGRESS | PORT_CONTROL_FRAME); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, ret); /* Set the 802.1q mode. */ ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL2); ret &= ~PORT_CONTROL2_DOT1Q; if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ret |= PORT_CONTROL2_DOT1Q; e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL2, ret); } for (port = 0; port < sc->num_ports; port++) { if (!e6000sw_is_portenabled(sc, port)) continue; ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID); /* Set port priority */ ret &= ~PORT_VID_PRIORITY_MASK; /* Set VID map */ ret &= ~PORT_VID_DEF_VID_MASK; if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) ret |= 1; else ret |= (port + 1); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, ret); } /* Assign the member ports to each origin port. */ for (port = 0; port < sc->num_ports; port++) { members = 0; if (e6000sw_is_portenabled(sc, port)) { for (i = 0; i < sc->num_ports; i++) { if (i == port || !e6000sw_is_portenabled(sc, i)) continue; members |= (1 << i); } } /* Default to FID 0. */ e6000sw_port_vlan_assign(sc, port, 0, members); } /* Reset internal VLAN table. */ for (i = 0; i < nitems(sc->vlans); i++) sc->vlans[i] = 0; /* Create default VLAN (1). 
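* All enabled ports are loaded into the VTU as untagged members of VID 1, under FID 1.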
*/ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { sc->vlans[0] = 1; e6000sw_vtu_update(sc, 0, sc->vlans[0], 1, 0, sc->ports_mask); } /* Enable all ports */ for (port = 0; port < sc->num_ports; port++) { if (!e6000sw_is_portenabled(sc, port)) continue; ret = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_CONTROL, (ret | PORT_CONTROL_ENABLE)); } return (0); } static int e6000sw_set_vlan_mode(struct e6000sw_softc *sc, uint32_t mode) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); switch (mode) { case ETHERSWITCH_VLAN_PORT: sc->vlan_mode = ETHERSWITCH_VLAN_PORT; etherswitch_info.es_nvlangroups = sc->num_ports; return (e6000sw_init_vlan(sc)); break; case ETHERSWITCH_VLAN_DOT1Q: sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; etherswitch_info.es_nvlangroups = E6000SW_NUM_VLANS; return (e6000sw_init_vlan(sc)); break; default: return (EINVAL); } } /* * Registers in this switch are divided into sections, specified in * the documentation. To access any of them, both a section index and a * register index are necessary. etherswitchcfg uses only one variable, so * the indexes were compressed into addr_reg: 32 * section_index + reg_index. */ static int e6000sw_readreg_wrapper(device_t dev, int addr_reg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(sc, 0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } return (e6000sw_readreg(device_get_softc(dev), addr_reg / 32, addr_reg % 32)); } static int e6000sw_writereg_wrapper(device_t dev, int addr_reg, int val) { e6000sw_softc_t *sc; sc = device_get_softc(dev); if ((addr_reg > (REG_GLOBAL2 * 32 + REG_NUM_MAX)) || (addr_reg < (REG_PORT(sc, 0) * 32))) { device_printf(dev, "Wrong register address.\n"); return (EINVAL); } e6000sw_writereg(device_get_softc(dev), addr_reg / 32, addr_reg % 32, val); return (0); } /* * setvgroup/getvgroup called from etherswitchcfg need to be locked, * while internal calls do not. */ static int e6000sw_setvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_setvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_getvgroup_wrapper(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; int ret; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); ret = e6000sw_getvgroup(dev, vg); E6000SW_UNLOCK(sc); return (ret); } static int e6000sw_set_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { uint32_t port; port = vg->es_vlangroup; if (port > sc->num_ports) return (EINVAL); if (vg->es_member_ports != vg->es_untagged_ports) { device_printf(sc->dev, "Tagged ports not supported.\n"); return (EINVAL); } e6000sw_port_vlan_assign(sc, port, 0, vg->es_untagged_ports); vg->es_vid = port | ETHERSWITCH_VID_VALID; return (0); } static int e6000sw_set_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { int i, vlan; vlan = vg->es_vid & ETHERSWITCH_VID_MASK; /* Setting the VLAN to '0' removes it from the table. */ if (vlan == 0) { e6000sw_vtu_update(sc, VTU_PURGE, sc->vlans[vg->es_vlangroup], 0, 0, 0); sc->vlans[vg->es_vlangroup] = 0; return (0); } /* Is this VLAN already in the table?
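* Every vlangroup must map to a distinct VID, so the cached sc->vlans[] array is scanned for a duplicate before the VTU is touched.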
*/ for (i = 0; i < etherswitch_info.es_nvlangroups; i++) if (i != vg->es_vlangroup && vlan == sc->vlans[i]) return (EINVAL); sc->vlans[vg->es_vlangroup] = vlan; e6000sw_vtu_update(sc, 0, vlan, vg->es_vlangroup + 1, vg->es_member_ports & sc->ports_mask, vg->es_untagged_ports & sc->ports_mask); return (0); } static int e6000sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) return (e6000sw_set_port_vlan(sc, vg)); else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) return (e6000sw_set_dot1q_vlan(sc, vg)); return (EINVAL); } static int e6000sw_get_port_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { uint32_t port, reg; port = vg->es_vlangroup; if (port > sc->num_ports) return (EINVAL); if (!e6000sw_is_portenabled(sc, port)) { vg->es_vid = port; return (0); } reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VLAN_MAP); vg->es_untagged_ports = vg->es_member_ports = reg & PORT_MASK(sc); vg->es_vid = port | ETHERSWITCH_VID_VALID; vg->es_fid = (reg & PORT_VLAN_MAP_FID_MASK) >> PORT_VLAN_MAP_FID; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_CONTROL1); vg->es_fid |= (reg & PORT_CONTROL1_FID_MASK) << 4; return (0); } static int e6000sw_get_dot1q_vlan(e6000sw_softc_t *sc, etherswitch_vlangroup_t *vg) { int i, port; uint32_t reg; vg->es_fid = 0; vg->es_vid = sc->vlans[vg->es_vlangroup]; vg->es_untagged_ports = vg->es_member_ports = 0; if (vg->es_vid == 0) return (0); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, vg->es_vid - 1); reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_OPERATION); reg &= ~VTU_OP_MASK; reg |= VTU_GET_NEXT | VTU_BUSY; e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, reg); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while reading\n"); return (EBUSY); } reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_VID); if (reg == VTU_VID_MASK || (reg & VTU_VID_VALID) == 0) return (EINVAL); if ((reg & VTU_VID_MASK) != vg->es_vid) return (EINVAL); vg->es_vid |= ETHERSWITCH_VID_VALID; reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA); for (i = 0; i < sc->num_ports; i++) { if (i == VTU_PPREG(sc)) reg = e6000sw_readreg(sc, REG_GLOBAL, VTU_DATA2); port = (reg >> VTU_PORT(sc, i)) & VTU_PORT_MASK; if (port == VTU_PORT_UNTAGGED) { vg->es_untagged_ports |= (1 << i); vg->es_member_ports |= (1 << i); } else if (port == VTU_PORT_TAGGED) vg->es_member_ports |= (1 << i); } return (0); } static int e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { e6000sw_softc_t *sc; sc = device_get_softc(dev); E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) return (e6000sw_get_port_vlan(sc, vg)); else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) return (e6000sw_get_dot1q_vlan(sc, vg)); return (EINVAL); } static __inline struct mii_data* e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy) { if (!e6000sw_is_phyport(sc, phy)) return (NULL); return (device_get_softc(sc->miibus[phy])); } static int e6000sw_ifmedia_upd(if_t ifp) { e6000sw_softc_t *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6000sw_miiforphy(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void e6000sw_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { e6000sw_softc_t *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6000sw_miiforphy(sc, 
if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int e6000sw_smi_waitready(e6000sw_softc_t *sc, int phy) { int i; for (i = 0; i < E6000SW_SMI_TIMEOUT; i++) { if ((MDIO_READ(sc->dev, phy, SMI_CMD) & SMI_CMD_BUSY) == 0) return (0); DELAY(1); } return (1); } static __inline uint32_t e6000sw_readreg(e6000sw_softc_t *sc, int addr, int reg) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!MVSWITCH_MULTICHIP(sc)) return (MDIO_READ(sc->dev, addr, reg) & 0xffff); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_C22_READ | (reg & SMI_CMD_REG_ADDR_MASK) | ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: readreg timeout\n"); return (0xffff); } return (MDIO_READ(sc->dev, sc->sw_addr, SMI_DATA) & 0xffff); } static __inline void e6000sw_writereg(e6000sw_softc_t *sc, int addr, int reg, int val) { E6000SW_LOCK_ASSERT(sc, SA_XLOCKED); if (!MVSWITCH_MULTICHIP(sc)) { MDIO_WRITE(sc->dev, addr, reg, val); return; } if (e6000sw_smi_waitready(sc, sc->sw_addr)) { printf("e6000sw: writereg timeout\n"); return; } MDIO_WRITE(sc->dev, sc->sw_addr, SMI_DATA, val); MDIO_WRITE(sc->dev, sc->sw_addr, SMI_CMD, SMI_CMD_OP_C22_WRITE | (reg & SMI_CMD_REG_ADDR_MASK) | ((addr << SMI_CMD_DEV_ADDR) & SMI_CMD_DEV_ADDR_MASK)); } static __inline bool e6000sw_is_cpuport(e6000sw_softc_t *sc, int port) { return ((sc->cpuports_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_fixedport(e6000sw_softc_t *sc, int port) { return ((sc->fixed_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_fixed25port(e6000sw_softc_t *sc, int port) { return ((sc->fixed25_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_phyport(e6000sw_softc_t *sc, int port) { uint32_t phy_mask; phy_mask = ~(sc->fixed_mask | sc->cpuports_mask); return ((phy_mask & (1 << port)) ? true : false); } static __inline bool e6000sw_is_portenabled(e6000sw_softc_t *sc, int port) { return ((sc->ports_mask & (1 << port)) ? true : false); } static __inline void e6000sw_set_pvid(e6000sw_softc_t *sc, int port, int pvid) { uint32_t reg; reg = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID); reg &= ~PORT_VID_DEF_VID_MASK; reg |= (pvid & PORT_VID_DEF_VID_MASK); e6000sw_writereg(sc, REG_PORT(sc, port), PORT_VID, reg); } static __inline int e6000sw_get_pvid(e6000sw_softc_t *sc, int port, int *pvid) { if (pvid == NULL) return (ENXIO); *pvid = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_VID) & PORT_VID_DEF_VID_MASK; return (0); } /* * Convert port status to ifmedia.
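* The PORT_STATUS link, speed and duplex bits are translated into the equivalent IFM_* words so that fixed switch ports present an ordinary ifmedia view.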
*/ static void e6000sw_update_ifmedia(uint16_t portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & PORT_STATUS_LINK_MASK) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & PORT_STATUS_SPEED_MASK) { case PORT_STATUS_SPEED_10: *media_active |= IFM_10_T; break; case PORT_STATUS_SPEED_100: *media_active |= IFM_100_TX; break; case PORT_STATUS_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & PORT_STATUS_DUPLEX_MASK) == 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; } static void e6000sw_tick(void *arg, int p __unused) { e6000sw_softc_t *sc; struct mii_data *mii; struct mii_softc *miisc; uint16_t portstatus; int port; sc = arg; E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED); E6000SW_LOCK(sc); for (port = 0; port < sc->num_ports; port++) { /* Tick only on PHY ports */ if (!e6000sw_is_portenabled(sc, port) || !e6000sw_is_phyport(sc, port)) continue; mii = e6000sw_miiforphy(sc, port); if (mii == NULL) continue; portstatus = e6000sw_readreg(sc, REG_PORT(sc, port), PORT_STATUS); e6000sw_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } E6000SW_UNLOCK(sc); } static void e6000sw_setup(device_t dev, e6000sw_softc_t *sc) { uint32_t atu_ctrl; /* Set aging time. */ atu_ctrl = e6000sw_readreg(sc, REG_GLOBAL, ATU_CONTROL); atu_ctrl &= ~ATU_CONTROL_AGETIME_MASK; atu_ctrl |= E6000SW_DEFAULT_AGETIME << ATU_CONTROL_AGETIME; e6000sw_writereg(sc, REG_GLOBAL, ATU_CONTROL, atu_ctrl); /* Send all with specific mac address to cpu port */ e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_2x, MGMT_EN_ALL); e6000sw_writereg(sc, REG_GLOBAL2, MGMT_EN_0x, MGMT_EN_ALL); /* Disable Remote Management */ e6000sw_writereg(sc, REG_GLOBAL, SWITCH_GLOBAL_CONTROL2, 0); /* Disable loopback filter and flow control messages */ e6000sw_writereg(sc, REG_GLOBAL2, SWITCH_MGMT, SWITCH_MGMT_PRI_MASK | (1 << SWITCH_MGMT_RSVD2CPU) | SWITCH_MGMT_FC_PRI_MASK | (1 << SWITCH_MGMT_FORCEFLOW)); e6000sw_atu_flush(dev, sc, NO_OPERATION); e6000sw_atu_mac_table(dev, sc, NULL, NO_OPERATION); e6000sw_set_atustat(dev, sc, 0, COUNT_ALL); } static void e6000sw_set_atustat(device_t dev, e6000sw_softc_t *sc, int bin, int flag) { e6000sw_readreg(sc, REG_GLOBAL2, ATU_STATS); e6000sw_writereg(sc, REG_GLOBAL2, ATU_STATS, (bin << ATU_STATS_BIN ) | (flag << ATU_STATS_FLAG)); } static int e6000sw_atu_mac_table(device_t dev, e6000sw_softc_t *sc, struct atu_opt *atu, int flag) { uint16_t ret_opt; uint16_t ret_data; if (flag == NO_OPERATION) return (0); else if ((flag & (LOAD_FROM_FIB | PURGE_FROM_FIB | GET_NEXT_IN_FIB | GET_VIOLATION_DATA | CLEAR_VIOLATION_DATA)) == 0) { device_printf(dev, "Wrong Opcode for ATU operation\n"); return (EINVAL); } if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) { device_printf(dev, "ATU unit is busy, cannot access\n"); return (EBUSY); } ret_opt = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); if (flag & LOAD_FROM_FIB) { ret_data = e6000sw_readreg(sc, REG_GLOBAL, ATU_DATA); e6000sw_writereg(sc, REG_GLOBAL2, ATU_DATA, (ret_data & ~ENTRY_STATE)); } e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR01, atu->mac_01); e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR23, atu->mac_23); e6000sw_writereg(sc, REG_GLOBAL, ATU_MAC_ADDR45, atu->mac_45); e6000sw_writereg(sc, REG_GLOBAL, ATU_FID, atu->fid); 
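/* The MAC address and FID registers are now loaded; writing ATU_UNIT_BUSY together with the opcode below kicks off the operation, and the following WAITREADY polls that busy bit until the hardware clears it. */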
e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (ret_opt | ATU_UNIT_BUSY | flag)); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) device_printf(dev, "Timeout while waiting ATU\n"); else if (flag & GET_NEXT_IN_FIB) { atu->mac_01 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR01); atu->mac_23 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR23); atu->mac_45 = e6000sw_readreg(sc, REG_GLOBAL, ATU_MAC_ADDR45); } return (0); } static int e6000sw_atu_flush(device_t dev, e6000sw_softc_t *sc, int flag) { uint32_t reg; if (flag == NO_OPERATION) return (0); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) { device_printf(dev, "ATU unit is busy, cannot access\n"); return (EBUSY); } reg = e6000sw_readreg(sc, REG_GLOBAL, ATU_OPERATION); e6000sw_writereg(sc, REG_GLOBAL, ATU_OPERATION, (reg | ATU_UNIT_BUSY | flag)); if (E6000SW_WAITREADY(sc, ATU_OPERATION, ATU_UNIT_BUSY)) device_printf(dev, "Timeout while flushing ATU\n"); return (0); } static int e6000sw_vtu_flush(e6000sw_softc_t *sc) { if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, VTU_FLUSH | VTU_BUSY); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while flushing VTU\n"); return (ETIMEDOUT); } return (0); } static int e6000sw_vtu_update(e6000sw_softc_t *sc, int purge, int vid, int fid, int members, int untagged) { int i, op; uint32_t data[2]; if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "VTU unit is busy, cannot access\n"); return (EBUSY); } *data = (vid & VTU_VID_MASK); if (purge == 0) *data |= VTU_VID_VALID; e6000sw_writereg(sc, REG_GLOBAL, VTU_VID, *data); if (purge == 0) { data[0] = 0; data[1] = 0; for (i = 0; i < sc->num_ports; i++) { if ((untagged & (1 << i)) != 0) data[i / VTU_PPREG(sc)] |= VTU_PORT_UNTAGGED << VTU_PORT(sc, i); else if ((members & (1 << i)) != 0) data[i / VTU_PPREG(sc)] |= VTU_PORT_TAGGED << VTU_PORT(sc, i); else data[i / VTU_PPREG(sc)] |= VTU_PORT_DISCARD << VTU_PORT(sc, i); } e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA, data[0]); e6000sw_writereg(sc, REG_GLOBAL, VTU_DATA2, data[1]); e6000sw_writereg(sc, REG_GLOBAL, VTU_FID, fid & VTU_FID_MASK(sc)); op = VTU_LOAD; } else op = VTU_PURGE; e6000sw_writereg(sc, REG_GLOBAL, VTU_OPERATION, op | VTU_BUSY); if (E6000SW_WAITREADY(sc, VTU_OPERATION, VTU_BUSY)) { device_printf(sc->dev, "Timeout while flushing VTU\n"); return (ETIMEDOUT); } return (0); } diff --git a/sys/dev/etherswitch/e6000sw/e6060sw.c b/sys/dev/etherswitch/e6000sw/e6060sw.c index 3ff38abb69a0..5f22b4396a9e 100644 --- a/sys/dev/etherswitch/e6000sw/e6060sw.c +++ b/sys/dev/etherswitch/e6000sw/e6060sw.c @@ -1,1022 +1,1022 @@ /*- * Copyright (c) 2016-2017 Hiroki Mori * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is support code for the Marvell 88E6060 ethernet switch on the * etherswitch framework. * The 88E6060 supports port-based VLANs only; ingress/egress trailers are * not supported. * The 88E6065 supports both port-based and dot1q VLANs, as well as * group-based tagging. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" #define CORE_REGISTER 0x8 #define SWITCH_ID 3 #define PORT_CONTROL 4 #define ENGRESSFSHIFT 2 #define ENGRESSFMASK 3 #define ENGRESSTAGSHIFT 12 #define ENGRESSTAGMASK 3 #define PORT_VLAN_MAP 6 #define FORCEMAPSHIFT 8 #define FORCEMAPMASK 1 #define PORT_DEFVLAN 7 #define DEFVIDMASK 0xfff #define DEFPRIMASK 7 #define PORT_CONTROL2 8 #define DOT1QMODESHIFT 10 #define DOT1QMODEMASK 3 #define DOT1QNONE 0 #define DOT1QFALLBACK 1 #define DOT1QCHECK 2 #define DOT1QSECURE 3 #define GLOBAL_REGISTER 0xf #define VTU_OPERATION 5 #define VTU_VID_REG 6 #define VTU_DATA1_REG 7 #define VTU_DATA2_REG 8 #define VTU_DATA3_REG 9 #define VTU_BUSY 0x8000 #define VTU_FLASH 1 #define VTU_LOAD_PURGE 3 #define VTU_GET_NEXT 4 #define VTU_VIOLATION 7 MALLOC_DECLARE(M_E6060SW); MALLOC_DEFINE(M_E6060SW, "e6060sw", "e6060sw data structures"); struct e6060sw_softc { struct mtx sc_mtx; /* serialize access to softc */ device_t sc_dev; int vlan_mode; int media; /* cpu port media */ int cpuport; /* which PHY is connected to the CPU */ int phymask; /* PHYs we manage */ int numports; /* number of ports */ int ifpport[MII_NPHY]; int *portphy; char **ifname; device_t **miibus; if_t *ifp; struct callout callout_tick; etherswitch_info_t info; int smi_offset; int sw_model; }; /* Switch Identifier DeviceID */ #define E6060 0x60 #define E6063 0x63 #define E6065 0x65 #define E6060SW_LOCK(_sc) \ mtx_lock(&(_sc)->sc_mtx) #define E6060SW_UNLOCK(_sc) \ mtx_unlock(&(_sc)->sc_mtx) #define E6060SW_LOCK_ASSERT(_sc, _what) \ mtx_assert(&(_sc)->sc_mtx, (_what)) #define E6060SW_TRYLOCK(_sc) \ mtx_trylock(&(_sc)->sc_mtx) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #else #define DPRINTF(dev, args...)
#endif static inline int e6060sw_portforphy(struct e6060sw_softc *, int); static void e6060sw_tick(void *); static int e6060sw_ifmedia_upd(if_t); static void e6060sw_ifmedia_sts(if_t, struct ifmediareq *); static void e6060sw_setup(device_t dev); static int e6060sw_read_vtu(device_t dev, int num, int *data1, int *data2); static void e6060sw_set_vtu(device_t dev, int num, int data1, int data2); static int e6060sw_probe(device_t dev) { int data; struct e6060sw_softc *sc; int devid, i; char *devname; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); devid = 0; for (i = 0; i < 2; ++i) { data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + i * 0x10, SWITCH_ID); if (bootverbose) device_printf(dev,"Switch Identifier Register %x\n", data); devid = data >> 4; if (devid == E6060 || devid == E6063 || devid == E6065) { sc->sw_model = devid; sc->smi_offset = i * 0x10; break; } } if (devid == E6060) devname = "88E6060"; else if (devid == E6063) devname = "88E6063"; else if (devid == E6065) devname = "88E6065"; else return (ENXIO); device_set_descf(dev, "Marvell %s MDIO switch driver at 0x%02x", devname, sc->smi_offset); return (BUS_PROBE_DEFAULT); } static int e6060sw_attach_phys(struct e6060sw_softc *sc) { int phy, port, err; char name[IFNAMSIZ]; port = 0; err = 0; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < sc->numports; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; sc->ifpport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); sc->ifp[port]->if_softc = sc; sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; if_initname(sc->ifp[port], name, port); sc->miibus[port] = malloc(sizeof(device_t), M_E6060SW, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], e6060sw_ifmedia_upd, e6060sw_ifmedia_sts, \ BMSR_DEFCAPMASK, phy + sc->smi_offset, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), sc->ifp[port]->if_xname); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); break; } ++port; } sc->info.es_nports = port; if (sc->cpuport != -1) { /* assume cpuport is last one */ sc->ifpport[sc->cpuport] = port; sc->portphy[port] = sc->cpuport; ++sc->info.es_nports; } return (err); } static int e6060sw_attach(device_t dev) { struct e6060sw_softc *sc; int err; sc = device_get_softc(dev); err = 0; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "e6060sw", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* XXX Defaults */ if (sc->sw_model == E6063) { sc->numports = 3; sc->phymask = 0x07; sc->cpuport = 2; } else { sc->numports = 6; sc->phymask = 0x1f; sc->cpuport = 5; } sc->media = 100; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "numports", &sc->numports); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phymask", &sc->phymask); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "cpuport", &sc->cpuport); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "media", &sc->media); if (sc->sw_model == E6060) { sc->info.es_nvlangroups = sc->numports; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT; } else { sc->info.es_nvlangroups = 64; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q; } e6060sw_setup(dev); sc->ifp = malloc(sizeof(if_t) * sc->numports, M_E6060SW, M_WAITOK | M_ZERO); sc->ifname = 
malloc(sizeof(char *) * sc->numports, M_E6060SW, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_E6060SW, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_E6060SW, M_WAITOK | M_ZERO); /* * Attach the PHYs and complete the bus enumeration. */ err = e6060sw_attach_phys(sc); if (err != 0) return (err); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); callout_init(&sc->callout_tick, 0); e6060sw_tick(sc); return (err); } static int e6060sw_detach(device_t dev) { struct e6060sw_softc *sc; int i, port; sc = device_get_softc(dev); callout_drain(&sc->callout_tick); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = e6060sw_portforphy(sc, i); if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->ifname[port], M_E6060SW); free(sc->miibus[port], M_E6060SW); } free(sc->portphy, M_E6060SW); free(sc->miibus, M_E6060SW); free(sc->ifname, M_E6060SW); free(sc->ifp, M_E6060SW); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* * Convert PHY number to port number. */ static inline int e6060sw_portforphy(struct e6060sw_softc *sc, int phy) { return (sc->ifpport[phy]); } static inline struct mii_data * e6060sw_miiforport(struct e6060sw_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); if (port == sc->cpuport) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t e6060sw_ifpforport(struct e6060sw_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. */ static void e6060sw_miipollstat(struct e6060sw_softc *sc) { int i, port; struct mii_data *mii; struct mii_softc *miisc; E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = e6060sw_portforphy(sc, i); if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void e6060sw_tick(void *arg) { struct e6060sw_softc *sc; sc = arg; e6060sw_miipollstat(sc); callout_reset(&sc->callout_tick, hz, e6060sw_tick, sc); } static void e6060sw_lock(device_t dev) { struct e6060sw_softc *sc; sc = device_get_softc(dev); E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED); E6060SW_LOCK(sc); } static void e6060sw_unlock(device_t dev) { struct e6060sw_softc *sc; sc = device_get_softc(dev); E6060SW_LOCK_ASSERT(sc, MA_OWNED); E6060SW_UNLOCK(sc); } static etherswitch_info_t * e6060sw_getinfo(device_t dev) { struct e6060sw_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int e6060sw_getport(device_t dev, etherswitch_port_t *p) { struct e6060sw_softc *sc; struct mii_data *mii; struct ifmediareq *ifmr; int err, phy; sc = device_get_softc(dev); ifmr = &p->es_ifmr; if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); p->es_pvid = 0; if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { p->es_pvid = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + p->es_port, PORT_DEFVLAN) & 0xfff; } phy = sc->portphy[p->es_port]; mii = e6060sw_miiforport(sc, p->es_port); if (sc->cpuport != -1 && phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= 
ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } static int e6060sw_setport(device_t dev, etherswitch_port_t *p) { struct e6060sw_softc *sc; struct ifmedia *ifm; struct mii_data *mii; if_t ifp; int err; int data; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + p->es_port, PORT_DEFVLAN); data &= ~0xfff; data |= p->es_pvid; data |= 1 << 12; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + p->es_port, PORT_DEFVLAN, data); } if (sc->portphy[p->es_port] == sc->cpuport) return(0); mii = e6060sw_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = e6060sw_ifpforport(sc, p->es_port); ifm = &mii->mii_media; err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA); return (err); } static int e6060sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct e6060sw_softc *sc; int data1, data2; int vid; int i, tag; sc = device_get_softc(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= vg->es_vlangroup; data1 = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + vg->es_vlangroup, PORT_VLAN_MAP); vg->es_member_ports = data1 & 0x3f; vg->es_untagged_ports = vg->es_member_ports; vg->es_fid = 0; } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { if (vg->es_vlangroup == 0) return (0); vid = e6060sw_read_vtu(dev, vg->es_vlangroup, &data1, &data2); if (vid > 0) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= vid; vg->es_member_ports = 0; vg->es_untagged_ports = 0; for (i = 0; i < 4; ++i) { tag = data1 >> (i * 4) & 3; if (tag == 0 || tag == 1) { vg->es_member_ports |= 1 << i; vg->es_untagged_ports |= 1 << i; } else if (tag == 2) { vg->es_member_ports |= 1 << i; } } for (i = 0; i < 2; ++i) { tag = data2 >> (i * 4) & 3; if (tag == 0 || tag == 1) { vg->es_member_ports |= 1 << (i + 4); vg->es_untagged_ports |= 1 << (i + 4); } else if (tag == 2) { vg->es_member_ports |= 1 << (i + 4); } } } } else { vg->es_vid = 0; } return (0); } static int e6060sw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct e6060sw_softc *sc; int data1, data2; int i; sc = device_get_softc(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { data1 = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + vg->es_vlangroup, PORT_VLAN_MAP); data1 &= ~0x3f; data1 |= vg->es_member_ports; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + vg->es_vlangroup, PORT_VLAN_MAP, data1); } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { if (vg->es_vlangroup == 0) return (0); data1 = 0; data2 = 0; for (i = 0; i < 6; ++i) { if (vg->es_member_ports & vg->es_untagged_ports & (1 << i)) { if (i < 4) { data1 |= (0xd << i * 4); } else { data2 |= (0xd << (i - 4) * 4); } } else if (vg->es_member_ports & (1 << i)) { if (i < 4) { data1 |= (0xe << i * 4); } else { data2 |= (0xe << (i - 4) * 4); } } else { if (i < 4) { data1 |= (0x3 << i * 4); } else { data2 |= (0x3 << (i - 4) * 4); } } } e6060sw_set_vtu(dev, vg->es_vlangroup, data1, data2); } return 
(0); } static void e6060sw_reset_vlans(device_t dev) { struct e6060sw_softc *sc; uint32_t ports; int i; int data; sc = device_get_softc(dev); for (i = 0; i <= sc->numports; i++) { ports = (1 << (sc->numports + 1)) - 1; ports &= ~(1 << i); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { data = i << 12; } else if (sc->vlan_mode == 0) { data = 1 << 8; } else { data = 0; } data |= ports; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_VLAN_MAP, data); } } static void e6060sw_setup(device_t dev) { struct e6060sw_softc *sc; int i; int data; sc = device_get_softc(dev); for (i = 0; i <= sc->numports; i++) { if (sc->sw_model == E6063 || sc->sw_model == E6065) { data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_VLAN_MAP); data &= ~(FORCEMAPMASK << FORCEMAPSHIFT); MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_VLAN_MAP, data); data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL); data |= 3 << ENGRESSFSHIFT; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL, data); } } } static void e6060sw_dot1q_mode(device_t dev, int mode) { struct e6060sw_softc *sc; int i; int data; sc = device_get_softc(dev); for (i = 0; i <= sc->numports; i++) { data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL2); data &= ~(DOT1QMODEMASK << DOT1QMODESHIFT); data |= mode << DOT1QMODESHIFT; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_CONTROL2, data); data = MDIO_READREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_DEFVLAN); data &= ~0xfff; data |= 1; MDIO_WRITEREG(device_get_parent(dev), CORE_REGISTER + sc->smi_offset + i, PORT_DEFVLAN, data); } } static int e6060sw_getconf(device_t dev, etherswitch_conf_t *conf) { struct e6060sw_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. 
*/ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static void e6060sw_init_vtu(device_t dev) { struct e6060sw_softc *sc; int busy; sc = device_get_softc(dev); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION, VTU_BUSY | (VTU_FLASH << 12)); while (1) { busy = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION); if ((busy & VTU_BUSY) == 0) break; } /* initial member set at vlan 1*/ MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA1_REG, 0xcccc); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA2_REG, 0x00cc); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_VID_REG, 0x1000 | 1); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION, VTU_BUSY | (VTU_LOAD_PURGE << 12) | 1); while (1) { busy = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION); if ((busy & VTU_BUSY) == 0) break; } } static void e6060sw_set_vtu(device_t dev, int num, int data1, int data2) { struct e6060sw_softc *sc; int busy; sc = device_get_softc(dev); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA1_REG, data1); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA2_REG, data2); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_VID_REG, 0x1000 | num); MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION, VTU_BUSY | (VTU_LOAD_PURGE << 12) | num); while (1) { busy = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION); if ((busy & VTU_BUSY) == 0) break; } } static int e6060sw_read_vtu(device_t dev, int num, int *data1, int *data2) { struct e6060sw_softc *sc; int busy; sc = device_get_softc(dev); num = num - 1; MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_VID_REG, num & 0xfff); /* Get Next */ MDIO_WRITEREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION, VTU_BUSY | (VTU_GET_NEXT << 12)); while (1) { busy = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_OPERATION); if ((busy & VTU_BUSY) == 0) break; } int vid = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_VID_REG); if (vid & 0x1000) { *data1 = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA1_REG); *data2 = MDIO_READREG(device_get_parent(dev), GLOBAL_REGISTER + sc->smi_offset, VTU_DATA2_REG); return (vid & 0xfff); } return (-1); } static int e6060sw_setconf(device_t dev, etherswitch_conf_t *conf) { struct e6060sw_softc *sc; sc = device_get_softc(dev); /* Set the VLAN mode. */ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { if (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) { sc->vlan_mode = ETHERSWITCH_VLAN_PORT; e6060sw_dot1q_mode(dev, DOT1QNONE); e6060sw_reset_vlans(dev); } else if ((sc->sw_model == E6063 || sc->sw_model == E6065) && conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; e6060sw_dot1q_mode(dev, DOT1QSECURE); e6060sw_init_vtu(dev); } else { sc->vlan_mode = 0; /* Reset VLANs. 
*/ e6060sw_dot1q_mode(dev, DOT1QNONE); e6060sw_reset_vlans(dev); } } return (0); } static void e6060sw_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int e6060sw_ifmedia_upd(if_t ifp) { struct e6060sw_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6060sw_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void e6060sw_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct e6060sw_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = e6060sw_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int e6060sw_readphy(device_t dev, int phy, int reg) { struct e6060sw_softc *sc; int data; sc = device_get_softc(dev); E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); E6060SW_LOCK(sc); data = MDIO_READREG(device_get_parent(dev), phy, reg); E6060SW_UNLOCK(sc); return (data); } static int e6060sw_writephy(device_t dev, int phy, int reg, int data) { struct e6060sw_softc *sc; int err; sc = device_get_softc(dev); E6060SW_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); E6060SW_LOCK(sc); err = MDIO_WRITEREG(device_get_parent(dev), phy, reg, data); E6060SW_UNLOCK(sc); return (err); } /* In addr, bits 5-8 carry the SMI device address and bits 0-4 the SMI register address. */ static int e6060sw_readreg(device_t dev, int addr) { int devaddr, regaddr; devaddr = (addr >> 5) & 0x1f; regaddr = addr & 0x1f; return (MDIO_READREG(device_get_parent(dev), devaddr, regaddr)); } /* In addr, bits 5-8 carry the SMI device address and bits 0-4 the SMI register address. */ static int e6060sw_writereg(device_t dev, int addr, int value) { int devaddr, regaddr; devaddr = (addr >> 5) & 0x1f; regaddr = addr & 0x1f; return (MDIO_WRITEREG(device_get_parent(dev), devaddr, regaddr, value)); } static device_method_t e6060sw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, e6060sw_probe), DEVMETHOD(device_attach, e6060sw_attach), DEVMETHOD(device_detach, e6060sw_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, e6060sw_readphy), DEVMETHOD(miibus_writereg, e6060sw_writephy), DEVMETHOD(miibus_statchg, e6060sw_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, e6060sw_readphy), DEVMETHOD(mdio_writereg, e6060sw_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, e6060sw_lock), DEVMETHOD(etherswitch_unlock, e6060sw_unlock), DEVMETHOD(etherswitch_getinfo, e6060sw_getinfo), DEVMETHOD(etherswitch_readreg, e6060sw_readreg), DEVMETHOD(etherswitch_writereg, e6060sw_writereg), DEVMETHOD(etherswitch_readphyreg, e6060sw_readphy), DEVMETHOD(etherswitch_writephyreg, e6060sw_writephy), DEVMETHOD(etherswitch_getport, e6060sw_getport), DEVMETHOD(etherswitch_setport, e6060sw_setport), DEVMETHOD(etherswitch_getvgroup, e6060sw_getvgroup), DEVMETHOD(etherswitch_setvgroup, e6060sw_setvgroup), DEVMETHOD(etherswitch_setconf, e6060sw_setconf), DEVMETHOD(etherswitch_getconf, e6060sw_getconf), DEVMETHOD_END }; DEFINE_CLASS_0(e6060sw, e6060sw_driver, e6060sw_methods, sizeof(struct e6060sw_softc)); DRIVER_MODULE(e6060sw, mdio, e6060sw_driver, 0, 0); DRIVER_MODULE(miibus, e6060sw, miibus_driver, 0, 0); DRIVER_MODULE(mdio, e6060sw, mdio_driver, 0, 0); 
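/*
 * A worked example of the address encoding consumed by e6060sw_readreg()
 * and e6060sw_writereg() above (the value 0x6f is made up for the
 * illustration): bits 5-8 carry the SMI device address and bits 0-4 the
 * SMI register address, so addr = 0x6f selects device 0x03, register 0x0f:
 *
 *	devaddr = (0x6f >> 5) & 0x1f;	// == 0x03
 *	regaddr = 0x6f & 0x1f;		// == 0x0f
 */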
DRIVER_MODULE(etherswitch, e6060sw, etherswitch_driver, 0, 0); MODULE_VERSION(e6060sw, 1); MODULE_DEPEND(e6060sw, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(e6060sw, etherswitch, 1, 1, 1); /* XXX which versions? */ diff --git a/sys/dev/etherswitch/felix/felix.c b/sys/dev/etherswitch/felix/felix.c index 6e9bc29e5757..d5b40b3902f3 100644 --- a/sys/dev/etherswitch/felix/felix.c +++ b/sys/dev/etherswitch/felix/felix.c @@ -1,1006 +1,1006 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "etherswitch_if.h" #include "miibus_if.h" MALLOC_DECLARE(M_FELIX); MALLOC_DEFINE(M_FELIX, "felix", "felix switch"); static device_probe_t felix_probe; static device_attach_t felix_attach; static device_detach_t felix_detach; static etherswitch_info_t* felix_getinfo(device_t); static int felix_getconf(device_t, etherswitch_conf_t *); static int felix_setconf(device_t, etherswitch_conf_t *); static void felix_lock(device_t); static void felix_unlock(device_t); static int felix_getport(device_t, etherswitch_port_t *); static int felix_setport(device_t, etherswitch_port_t *); static int felix_readreg_wrapper(device_t, int); static int felix_writereg_wrapper(device_t, int, int); static int felix_readphy(device_t, int, int); static int felix_writephy(device_t, int, int, int); static int felix_setvgroup(device_t, etherswitch_vlangroup_t *); static int felix_getvgroup(device_t, etherswitch_vlangroup_t *); static int felix_parse_port_fdt(felix_softc_t, phandle_t, int *); static int felix_setup(felix_softc_t); static void felix_setup_port(felix_softc_t, int); static void felix_tick(void *); static int felix_ifmedia_upd(if_t); static void felix_ifmedia_sts(if_t, struct ifmediareq *); static void felix_get_port_cfg(felix_softc_t, etherswitch_port_t *); static void felix_set_port_cfg(felix_softc_t, etherswitch_port_t *); static bool felix_is_phyport(felix_softc_t, int); static struct mii_data *felix_miiforport(felix_softc_t, unsigned int); static struct felix_pci_id felix_pci_ids[] = { {PCI_VENDOR_FREESCALE, FELIX_DEV_ID, FELIX_DEV_NAME}, {0, 0, NULL} }; static device_method_t felix_methods[] = { /* device interface */ DEVMETHOD(device_probe, felix_probe), DEVMETHOD(device_attach, felix_attach), DEVMETHOD(device_detach, felix_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, felix_getinfo), DEVMETHOD(etherswitch_getconf, felix_getconf), DEVMETHOD(etherswitch_setconf, felix_setconf), DEVMETHOD(etherswitch_lock, felix_lock), DEVMETHOD(etherswitch_unlock, felix_unlock), DEVMETHOD(etherswitch_getport, felix_getport), DEVMETHOD(etherswitch_setport, felix_setport), DEVMETHOD(etherswitch_readreg, felix_readreg_wrapper), DEVMETHOD(etherswitch_writereg, felix_writereg_wrapper), DEVMETHOD(etherswitch_readphyreg, felix_readphy), DEVMETHOD(etherswitch_writephyreg, felix_writephy), DEVMETHOD(etherswitch_setvgroup, felix_setvgroup), DEVMETHOD(etherswitch_getvgroup, felix_getvgroup), /* miibus interface */ DEVMETHOD(miibus_readreg, felix_readphy), DEVMETHOD(miibus_writereg, felix_writephy), DEVMETHOD_END }; DEFINE_CLASS_0(felix, felix_driver, felix_methods, sizeof(struct felix_softc)); DRIVER_MODULE_ORDERED(felix, pci, felix_driver, NULL, NULL, SI_ORDER_ANY); DRIVER_MODULE(miibus, felix, miibus_fdt_driver, NULL, NULL); DRIVER_MODULE(etherswitch, felix, etherswitch_driver, NULL, NULL); 
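/*
 * The register polling in felix_setup() and felix_init_vlan() below always
 * follows the same shape: kick off an operation, then busy-wait in 1 ms
 * DELAY() steps until a status bit clears or a retry budget runs out.  A
 * minimal sketch of that pattern as a shared helper; felix_poll_clear() is
 * hypothetical and not part of the driver:
 */
static inline int
felix_poll_clear(felix_softc_t sc, uint32_t reg, uint32_t mask, int tries)
{

	while (tries-- > 0) {
		DELAY(1000);
		if ((FELIX_RD4(sc, reg) & mask) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}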
MODULE_VERSION(felix, 1); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, felix, felix_pci_ids, nitems(felix_pci_ids) - 1); static int felix_probe(device_t dev) { struct felix_pci_id *id; felix_softc_t sc; sc = device_get_softc(dev); sc->dev = dev; for (id = felix_pci_ids; id->vendor != 0; ++id) { if (pci_get_device(dev) != id->device || pci_get_vendor(dev) != id->vendor) continue; device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int felix_parse_port_fdt(felix_softc_t sc, phandle_t child, int *pport) { uint32_t port, status; phandle_t node; if (OF_getencprop(child, "reg", (void *)&port, sizeof(port)) < 0) { device_printf(sc->dev, "Port node doesn't have reg property\n"); return (ENXIO); } *pport = port; node = OF_getproplen(child, "ethernet"); if (node <= 0) sc->ports[port].cpu_port = false; else sc->ports[port].cpu_port = true; node = ofw_bus_find_child(child, "fixed-link"); if (node <= 0) { sc->ports[port].fixed_port = false; return (0); } sc->ports[port].fixed_port = true; if (OF_getencprop(node, "speed", &status, sizeof(status)) <= 0) { device_printf(sc->dev, "Port has fixed-link node without link speed specified\n"); return (ENXIO); } switch (status) { case 2500: status = IFM_2500_T; break; case 1000: status = IFM_1000_T; break; case 100: status = IFM_100_T; break; case 10: status = IFM_10_T; break; default: device_printf(sc->dev, "Unsupported link speed value of %d\n", status); return (ENXIO); } if (OF_hasprop(node, "full-duplex")) status |= IFM_FDX; else status |= IFM_HDX; status |= IFM_ETHER; sc->ports[port].fixed_link_status = status; return (0); } static int felix_init_interface(felix_softc_t sc, int port) { char name[IFNAMSIZ]; snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev)); sc->ports[port].ifp = if_alloc(IFT_ETHER); if_setsoftc(sc->ports[port].ifp, sc); if_setflags(sc->ports[port].ifp, IFF_UP | IFF_BROADCAST | IFF_MULTICAST | IFF_DRV_RUNNING | IFF_SIMPLEX); sc->ports[port].ifname = malloc(strlen(name) + 1, M_FELIX, M_NOWAIT); if (sc->ports[port].ifname == NULL) { if_free(sc->ports[port].ifp); return (ENOMEM); } memcpy(sc->ports[port].ifname, name, strlen(name) + 1); if_initname(sc->ports[port].ifp, sc->ports[port].ifname, port); return (0); } static void felix_setup_port(felix_softc_t sc, int port) { /* Link speed has to be always set to 1000 in the clock register. */ FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_CLK_CFG, FELIX_DEVGMII_CLK_CFG_SPEED_1000); FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_MAC_CFG, FELIX_DEVGMII_MAC_CFG_TX_ENA | FELIX_DEVGMII_MAC_CFG_RX_ENA); FELIX_WR4(sc, FELIX_QSYS_PORT_MODE(port), FELIX_QSYS_PORT_MODE_PORT_ENA); /* * Enable "VLANMTU". Each port has a configurable MTU. * Accept frames that are 8 and 4 bytes longer than it * for double and single tagged frames respectively. * Since etherswitch API doesn't provide an option to change * MTU don't touch it for now. */ FELIX_DEVGMII_PORT_WR4(sc, port, FELIX_DEVGMII_VLAN_CFG, FELIX_DEVGMII_VLAN_CFG_ENA | FELIX_DEVGMII_VLAN_CFG_LEN_ENA | FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA); } static int felix_setup(felix_softc_t sc) { int timeout, i; uint32_t reg; /* Trigger soft reset, bit is self-clearing, with 5s timeout. 
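 * With DELAY(1000) per pass, a 5 s budget implies FELIX_INIT_TIMEOUT is on
 * the order of 5000 iterations (an inference; the constant is defined in a
 * header not shown here).  Note the post-decrement in the loop conditions
 * below: when the budget is exhausted, timeout is left at -1 rather than 0,
 * so the "timeout == 0" checks appear never to fire on true exhaustion and
 * can fire spuriously when the bit clears on the final permitted pass; a
 * helper in the style of the felix_poll_clear() sketch above sidesteps
 * this by returning an explicit status.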
*/ FELIX_WR4(sc, FELIX_DEVCPU_GCB_RST, FELIX_DEVCPU_GCB_RST_EN); timeout = FELIX_INIT_TIMEOUT; do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_DEVCPU_GCB_RST); if ((reg & FELIX_DEVCPU_GCB_RST_EN) == 0) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout while waiting for switch to reset\n"); return (ETIMEDOUT); } FELIX_WR4(sc, FELIX_SYS_RAM_CTRL, FELIX_SYS_RAM_CTRL_INIT); timeout = FELIX_INIT_TIMEOUT; do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_SYS_RAM_CTRL); if ((reg & FELIX_SYS_RAM_CTRL_INIT) == 0) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout while waiting for switch RAM init.\n"); return (ETIMEDOUT); } FELIX_WR4(sc, FELIX_SYS_CFG, FELIX_SYS_CFG_CORE_EN); for (i = 0; i < sc->info.es_nports; i++) felix_setup_port(sc, i); return (0); } static int felix_timer_rate(SYSCTL_HANDLER_ARGS) { felix_softc_t sc; int error, value, old; sc = arg1; old = value = sc->timer_ticks; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (value < 0) return (EINVAL); if (value == old) return (0); FELIX_LOCK(sc); sc->timer_ticks = value; callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc); FELIX_UNLOCK(sc); return (0); } static int felix_attach(device_t dev) { phandle_t child, ports, node; int error, port, rid; felix_softc_t sc; uint32_t phy_addr; ssize_t size; sc = device_get_softc(dev); sc->info.es_nports = 0; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; strlcpy(sc->info.es_name, "Felix TSN Switch", sizeof(sc->info.es_name)); rid = PCIR_BAR(FELIX_BAR_MDIO); sc->mdio = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mdio == NULL) { device_printf(dev, "Failed to allocate MDIO registers.\n"); return (ENXIO); } rid = PCIR_BAR(FELIX_BAR_REGS); sc->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->regs == NULL) { device_printf(dev, "Failed to allocate registers BAR.\n"); error = ENXIO; goto out_fail; } mtx_init(&sc->mtx, "felix lock", NULL, MTX_DEF); callout_init_mtx(&sc->tick_callout, &sc->mtx, 0); node = ofw_bus_get_node(dev); if (node <= 0) { error = ENXIO; goto out_fail; } ports = ofw_bus_find_child(node, "ports"); if (ports == 0) { device_printf(dev, "Failed to find \"ports\" property in DTS.\n"); error = ENXIO; goto out_fail; } for (child = OF_child(ports); child != 0; child = OF_peer(child)) { /* Do not parse disabled ports. */ if (ofw_bus_node_status_okay(child) == 0) continue; error = felix_parse_port_fdt(sc, child, &port); if (error != 0) goto out_fail; error = felix_init_interface(sc, port); if (error != 0) { device_printf(sc->dev, "Failed to initialize interface.\n"); goto out_fail; } if (sc->ports[port].fixed_port) { sc->info.es_nports++; continue; } size = OF_getencprop(child, "phy-handle", &node, sizeof(node)); if (size <= 0) { device_printf(sc->dev, "Failed to acquire PHY handle from FDT.\n"); error = ENXIO; goto out_fail; } node = OF_node_from_xref(node); size = OF_getencprop(node, "reg", &phy_addr, sizeof(phy_addr)); if (size <= 0) { device_printf(sc->dev, "Failed to obtain PHY address.\n"); error = ENXIO; goto out_fail; } sc->ports[port].phyaddr = phy_addr; sc->ports[port].miibus = NULL; error = mii_attach(dev, &sc->ports[port].miibus, sc->ports[port].ifp, felix_ifmedia_upd, felix_ifmedia_sts, BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, 0); if (error != 0) goto out_fail; sc->info.es_nports++; } error = felix_setup(sc); if (error != 0) goto out_fail; sc->timer_ticks = hz; /* Default to 1s. 
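 * timer_ticks is expressed in scheduler ticks, so a value of hz is one
 * second whatever the kernel's HZ is.  Since the sysctl registered just
 * below hangs off the device's sysctl tree, the knob should surface as
 * dev.felix.<unit>.timer_ticks (inferred from device_get_sysctl_tree();
 * the code does not spell the full name out).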
*/ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "timer_ticks", CTLTYPE_INT | CTLFLAG_RW, sc, 0, felix_timer_rate, "I", "Number of ticks between timer invocations"); /* The tick routine has to be called with the lock held. */ FELIX_LOCK(sc); felix_tick(sc); FELIX_UNLOCK(sc); /* Allow etherswitch to attach as our child. */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); out_fail: felix_detach(dev); return (error); } static int felix_detach(device_t dev) { felix_softc_t sc; int error; int i; error = 0; sc = device_get_softc(dev); bus_generic_detach(dev); mtx_lock(&sc->mtx); callout_stop(&sc->tick_callout); mtx_unlock(&sc->mtx); mtx_destroy(&sc->mtx); /* * If we have been fully attached, do a soft reset. * That way the switch is left in unmanaged mode after the driver is * unloaded. */ if (device_is_attached(dev)) felix_setup(sc); for (i = 0; i < sc->info.es_nports; i++) { if (sc->ports[i].miibus != NULL) device_delete_child(dev, sc->ports[i].miibus); if (sc->ports[i].ifp != NULL) if_free(sc->ports[i].ifp); if (sc->ports[i].ifname != NULL) free(sc->ports[i].ifname, M_FELIX); } if (sc->regs != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->regs), sc->regs); if (sc->mdio != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->mdio), sc->mdio); return (error); } static etherswitch_info_t* felix_getinfo(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); return (&sc->info); } static int felix_getconf(device_t dev, etherswitch_conf_t *conf) { felix_softc_t sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int felix_init_vlan(felix_softc_t sc) { int timeout = FELIX_INIT_TIMEOUT; uint32_t reg; int i; /* Flush VLAN table in hardware. */ FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_RESET); do { DELAY(1000); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) == FELIX_ANA_VT_IDLE) break; } while (timeout-- > 0); if (timeout == 0) { device_printf(sc->dev, "Timeout during VLAN table reset.\n"); return (ETIMEDOUT); } /* Flush VLAN table in sc. */ for (i = 0; i < sc->info.es_nvlangroups; i++) sc->vlans[i] = 0; /* * Make all ports VLAN aware. * Read VID from incoming frames and use it for port grouping * purposes. * Don't set this if pvid is set. */ for (i = 0; i < sc->info.es_nports; i++) { reg = FELIX_ANA_PORT_RD4(sc, i, FELIX_ANA_PORT_VLAN_CFG); if ((reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK) != 0) continue; reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; FELIX_ANA_PORT_WR4(sc, i, FELIX_ANA_PORT_VLAN_CFG, reg); } return (0); } static int felix_setconf(device_t dev, etherswitch_conf_t *conf) { felix_softc_t sc; int error; error = 0; /* Set the VLAN mode. 
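 * Only ETHERSWITCH_VLAN_DOT1Q is accepted here, matching the es_vlan_caps
 * advertised in felix_attach(); anything else fails with EINVAL.  From
 * userland this is normally driven by etherswitchcfg(8), along the lines
 * of the following (check the manual page for the exact syntax):
 *
 *	# etherswitchcfg config vlan_mode dot1q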
*/ sc = device_get_softc(dev); FELIX_LOCK(sc); if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { switch (conf->vlan_mode) { case ETHERSWITCH_VLAN_DOT1Q: sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; sc->info.es_nvlangroups = FELIX_NUM_VLANS; error = felix_init_vlan(sc); break; default: error = EINVAL; } } FELIX_UNLOCK(sc); return (error); } static void felix_lock(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); FELIX_LOCK(sc); } static void felix_unlock(device_t dev) { felix_softc_t sc; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_OWNED); FELIX_UNLOCK(sc); } static void felix_get_port_cfg(felix_softc_t sc, etherswitch_port_t *p) { uint32_t reg; p->es_flags = 0; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG); if (reg & FELIX_ANA_PORT_DROP_CFG_TAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPTAGGED; if (reg & FELIX_ANA_PORT_DROP_CFG_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; reg = FELIX_DEVGMII_PORT_RD4(sc, p->es_port, FELIX_DEVGMII_VLAN_CFG); if (reg & FELIX_DEVGMII_VLAN_CFG_DOUBLE_ENA) p->es_flags |= ETHERSWITCH_PORT_DOUBLE_TAG; reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG); if (reg & FELIX_REW_PORT_TAG_CFG_ALL) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG); if (reg & FELIX_ANA_PORT_VLAN_CFG_POP) p->es_flags |= ETHERSWITCH_PORT_STRIPTAGINGRESS; p->es_pvid = reg & FELIX_ANA_PORT_VLAN_CFG_VID_MASK; } static int felix_getport(device_t dev, etherswitch_port_t *p) { struct ifmediareq *ifmr; struct mii_data *mii; felix_softc_t sc; int error; error = 0; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); if (p->es_port >= sc->info.es_nports || p->es_port < 0) return (EINVAL); FELIX_LOCK(sc); felix_get_port_cfg(sc, p); if (sc->ports[p->es_port].fixed_port) { ifmr = &p->es_ifmr; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; ifmr->ifm_active = sc->ports[p->es_port].fixed_link_status; ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; } else { mii = felix_miiforport(sc, p->es_port); error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); } FELIX_UNLOCK(sc); return (error); } static void felix_set_port_cfg(felix_softc_t sc, etherswitch_port_t *p) { uint32_t reg; reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG); if (p->es_flags & ETHERSWITCH_PORT_DROPTAGGED) reg |= FELIX_ANA_PORT_DROP_CFG_TAGGED; else reg &= ~FELIX_ANA_PORT_DROP_CFG_TAGGED; if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= FELIX_ANA_PORT_DROP_CFG_UNTAGGED; else reg &= ~FELIX_ANA_PORT_DROP_CFG_UNTAGGED; FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_DROP_CFG, reg); reg = FELIX_REW_PORT_RD4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG); if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) reg |= FELIX_REW_PORT_TAG_CFG_ALL; else reg &= ~FELIX_REW_PORT_TAG_CFG_ALL; FELIX_REW_PORT_WR4(sc, p->es_port, FELIX_REW_PORT_TAG_CFG, reg); reg = FELIX_ANA_PORT_RD4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG); if (p->es_flags & ETHERSWITCH_PORT_STRIPTAGINGRESS) reg |= FELIX_ANA_PORT_VLAN_CFG_POP; else reg &= ~FELIX_ANA_PORT_VLAN_CFG_POP; reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_MASK; reg |= p->es_pvid & FELIX_ANA_PORT_VLAN_CFG_VID_MASK; /* * If port VID is set use it for VLAN classification, * instead of frame VID. * By default the frame tag takes precedence. * Force the switch to ignore it. 
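 * Summarized, the classification rule programmed below is:
 *
 *	es_pvid != 0: VID_AWARE cleared, classify by the port VID;
 *	es_pvid == 0: VID_AWARE set, classify by the frame tag.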
*/ if (p->es_pvid != 0) reg &= ~FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; else reg |= FELIX_ANA_PORT_VLAN_CFG_VID_AWARE; FELIX_ANA_PORT_WR4(sc, p->es_port, FELIX_ANA_PORT_VLAN_CFG, reg); } static int felix_setport(device_t dev, etherswitch_port_t *p) { felix_softc_t sc; struct mii_data *mii; int error; error = 0; sc = device_get_softc(dev); FELIX_LOCK_ASSERT(sc, MA_NOTOWNED); if (p->es_port >= sc->info.es_nports || p->es_port < 0) return (EINVAL); FELIX_LOCK(sc); felix_set_port_cfg(sc, p); if (felix_is_phyport(sc, p->es_port)) { mii = felix_miiforport(sc, p->es_port); error = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCSIFMEDIA); } FELIX_UNLOCK(sc); return (error); } static int felix_readreg_wrapper(device_t dev, int addr_reg) { felix_softc_t sc; sc = device_get_softc(dev); if (addr_reg > rman_get_size(sc->regs)) return (UINT32_MAX); /* Can't return errors here. */ return (FELIX_RD4(sc, addr_reg)); } static int felix_writereg_wrapper(device_t dev, int addr_reg, int val) { felix_softc_t sc; sc = device_get_softc(dev); if (addr_reg > rman_get_size(sc->regs)) return (EINVAL); FELIX_WR4(sc, addr_reg, val); return (0); } static int felix_readphy(device_t dev, int phy, int reg) { felix_softc_t sc; sc = device_get_softc(dev); return (enetc_mdio_read(sc->mdio, FELIX_MDIO_BASE, phy, reg)); } static int felix_writephy(device_t dev, int phy, int reg, int data) { felix_softc_t sc; sc = device_get_softc(dev); return (enetc_mdio_write(sc->mdio, FELIX_MDIO_BASE, phy, reg, data)); } static int felix_set_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg) { uint32_t reg; int i, vid; vid = vg->es_vid & ETHERSWITCH_VID_MASK; /* Tagged mode is not supported. */ if (vg->es_member_ports != vg->es_untagged_ports) return (EINVAL); /* * The hardware supports 4096 VLANs, but the etherswitch group index * cannot be assumed to equal the VID, so sc->vlans[] tracks the VID * assigned to each group. In hardware the group id is the VID itself. */ if (vid == 0) { /* Clear VLAN table entry using old VID. */ FELIX_WR4(sc, FELIX_ANA_VTIDX, sc->vlans[vg->es_vlangroup]); FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_WRITE); sc->vlans[vg->es_vlangroup] = 0; return (0); } /* The VID is already used in a different group. */ for (i = 0; i < sc->info.es_nvlangroups; i++) if (i != vg->es_vlangroup && vid == sc->vlans[i]) return (EINVAL); /* This group already uses a different VID. */ if (sc->vlans[vg->es_vlangroup] != 0 && sc->vlans[vg->es_vlangroup] != vid) return (EINVAL); sc->vlans[vg->es_vlangroup] = vid; /* Assign members to the given group. */ reg = vg->es_member_ports & FELIX_ANA_VT_PORTMASK_MASK; reg <<= FELIX_ANA_VT_PORTMASK_SHIFT; reg |= FELIX_ANA_VT_WRITE; FELIX_WR4(sc, FELIX_ANA_VTIDX, vid); FELIX_WR4(sc, FELIX_ANA_VT, reg); /* * According to the documentation, read and write commands complete * instantly. * Add a small delay just to be safe. */ mb(); DELAY(100); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE) return (ENXIO); return (0); } static int felix_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { felix_softc_t sc; int error; sc = device_get_softc(dev); FELIX_LOCK(sc); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) error = felix_set_dot1q_vlan(sc, vg); else error = EINVAL; FELIX_UNLOCK(sc); return (error); } static int felix_get_dot1q_vlan(felix_softc_t sc, etherswitch_vlangroup_t *vg) { uint32_t reg; int vid; vid = sc->vlans[vg->es_vlangroup]; if (vid == 0) return (0); FELIX_WR4(sc, FELIX_ANA_VTIDX, vid); FELIX_WR4(sc, FELIX_ANA_VT, FELIX_ANA_VT_READ); /* * According to the documentation, read and write commands complete * instantly. * Add a small delay just to be safe. 
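 * The fixed mb() + DELAY(100) mirrors felix_set_dot1q_vlan() above; if the
 * "instant completion" assumption ever broke down, a bounded poll on
 * FELIX_ANA_VT_STS (in the spirit of the felix_poll_clear() sketch near
 * the module declarations) would degrade more gracefully than one fixed
 * delay.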
*/ mb(); DELAY(100); reg = FELIX_RD4(sc, FELIX_ANA_VT); if ((reg & FELIX_ANA_VT_STS) != FELIX_ANA_VT_IDLE) return (ENXIO); reg >>= FELIX_ANA_VT_PORTMASK_SHIFT; reg &= FELIX_ANA_VT_PORTMASK_MASK; vg->es_untagged_ports = vg->es_member_ports = reg; vg->es_fid = 0; vg->es_vid = vid | ETHERSWITCH_VID_VALID; return (0); } static int felix_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { felix_softc_t sc; int error; sc = device_get_softc(dev); FELIX_LOCK(sc); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) error = felix_get_dot1q_vlan(sc, vg); else error = EINVAL; FELIX_UNLOCK(sc); return (error); } static void felix_tick(void *arg) { struct mii_data *mii; felix_softc_t sc; int port; sc = arg; FELIX_LOCK_ASSERT(sc, MA_OWNED); for (port = 0; port < sc->info.es_nports; port++) { if (!felix_is_phyport(sc, port)) continue; mii = felix_miiforport(sc, port); MPASS(mii != NULL); mii_tick(mii); } if (sc->timer_ticks != 0) callout_reset(&sc->tick_callout, sc->timer_ticks, felix_tick, sc); } static int felix_ifmedia_upd(if_t ifp) { struct mii_data *mii; felix_softc_t sc; sc = if_getsoftc(ifp); mii = felix_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void felix_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { felix_softc_t sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = felix_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static bool felix_is_phyport(felix_softc_t sc, int port) { return (!sc->ports[port].fixed_port); } static struct mii_data* felix_miiforport(felix_softc_t sc, unsigned int port) { if (!felix_is_phyport(sc, port)) return (NULL); return (device_get_softc(sc->ports[port].miibus)); } diff --git a/sys/dev/etherswitch/infineon/adm6996fc.c b/sys/dev/etherswitch/infineon/adm6996fc.c index 64f61df93db1..f33219244ca1 100644 --- a/sys/dev/etherswitch/infineon/adm6996fc.c +++ b/sys/dev/etherswitch/infineon/adm6996fc.c @@ -1,840 +1,840 @@ /*- * Copyright (c) 2016 Hiroki Mori * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * This is a driver for the Infineon ADM6996FC/M/MX switch, built on the * etherswitch framework. It supports both PORT and DOT1Q VLAN modes. * The code assumes that the switch's SDC/SDIO pins are connected to the * SoC network interface's MDC/MDIO pins. It was developed on a Netgear * WGR614Cv7. The etherswitchcfg command's port option supports addtag. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" #define ADM6996FC_PRODUCT_CODE 0x7102 #define ADM6996FC_SC3 0x11 #define ADM6996FC_VF0L 0x40 #define ADM6996FC_VF0H 0x41 #define ADM6996FC_CI0 0xa0 #define ADM6996FC_CI1 0xa1 #define ADM6996FC_PHY_C0 0x200 #define ADM6996FC_PC_SHIFT 4 #define ADM6996FC_TBV_SHIFT 5 #define ADM6996FC_PVID_SHIFT 10 #define ADM6996FC_OPTE_SHIFT 4 #define ADM6996FC_VV_SHIFT 15 #define ADM6996FC_PHY_SIZE 0x20 MALLOC_DECLARE(M_ADM6996FC); MALLOC_DEFINE(M_ADM6996FC, "adm6996fc", "adm6996fc data structures"); struct adm6996fc_softc { struct mtx sc_mtx; /* serialize access to softc */ device_t sc_dev; int vlan_mode; int media; /* cpu port media */ int cpuport; /* which PHY is connected to the CPU */ int phymask; /* PHYs we manage */ int numports; /* number of ports */ int ifpport[MII_NPHY]; int *portphy; char **ifname; device_t **miibus; if_t *ifp; struct callout callout_tick; etherswitch_info_t info; }; #define ADM6996FC_LOCK(_sc) \ mtx_lock(&(_sc)->sc_mtx) #define ADM6996FC_UNLOCK(_sc) \ mtx_unlock(&(_sc)->sc_mtx) #define ADM6996FC_LOCK_ASSERT(_sc, _what) \ mtx_assert(&(_sc)->sc_mtx, (_what)) #define ADM6996FC_TRYLOCK(_sc) \ mtx_trylock(&(_sc)->sc_mtx) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #else #define DPRINTF(dev, args...) 
#endif static inline int adm6996fc_portforphy(struct adm6996fc_softc *, int); static void adm6996fc_tick(void *); static int adm6996fc_ifmedia_upd(if_t); static void adm6996fc_ifmedia_sts(if_t, struct ifmediareq *); #define ADM6996FC_READREG(dev, x) \ MDIO_READREG(dev, ((x) >> 5), ((x) & 0x1f)); #define ADM6996FC_WRITEREG(dev, x, v) \ MDIO_WRITEREG(dev, ((x) >> 5), ((x) & 0x1f), v); #define ADM6996FC_PVIDBYDATA(data1, data2) \ ((((data1) >> ADM6996FC_PVID_SHIFT) & 0x0f) | ((data2) << 4)) static int adm6996fc_probe(device_t dev) { int data1, data2; int pc; struct adm6996fc_softc *sc; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); data1 = ADM6996FC_READREG(device_get_parent(dev), ADM6996FC_CI0); data2 = ADM6996FC_READREG(device_get_parent(dev), ADM6996FC_CI1); pc = ((data2 << 16) | data1) >> ADM6996FC_PC_SHIFT; if (bootverbose) device_printf(dev,"Chip Identifier Register %x %x\n", data1, data2); /* check Product Code */ if (pc != ADM6996FC_PRODUCT_CODE) { return (ENXIO); } device_set_desc(dev, "Infineon ADM6996FC/M/MX MDIO switch driver"); return (BUS_PROBE_DEFAULT); } static int adm6996fc_attach_phys(struct adm6996fc_softc *sc) { int phy, port, err; char name[IFNAMSIZ]; port = 0; err = 0; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < sc->numports; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; sc->ifpport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); sc->ifp[port]->if_softc = sc; sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; if_initname(sc->ifp[port], name, port); sc->miibus[port] = malloc(sizeof(device_t), M_ADM6996FC, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], adm6996fc_ifmedia_upd, adm6996fc_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), sc->ifp[port]->if_xname); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); goto failed; } ++port; } sc->info.es_nports = port; if (sc->cpuport != -1) { /* assume cpuport is last one */ sc->ifpport[sc->cpuport] = port; sc->portphy[port] = sc->cpuport; ++sc->info.es_nports; } return (0); failed: for (phy = 0; phy < sc->numports; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; port = adm6996fc_portforphy(sc, phy); if (sc->miibus[port] != NULL) device_delete_child(sc->sc_dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); if (sc->ifname[port] != NULL) free(sc->ifname[port], M_ADM6996FC); if (sc->miibus[port] != NULL) free(sc->miibus[port], M_ADM6996FC); } return (err); } static int adm6996fc_attach(device_t dev) { struct adm6996fc_softc *sc; int err; err = 0; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "adm6996fc", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* ADM6996FC Defaults */ sc->numports = 6; sc->phymask = 0x1f; sc->cpuport = 5; sc->media = 100; sc->info.es_nvlangroups = 16; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q; sc->ifp = malloc(sizeof(if_t) * sc->numports, M_ADM6996FC, M_WAITOK | M_ZERO); sc->ifname = malloc(sizeof(char *) * sc->numports, M_ADM6996FC, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_ADM6996FC, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_ADM6996FC, M_WAITOK | M_ZERO); /* * Attach the PHYs and 
complete the bus enumeration. */ err = adm6996fc_attach_phys(sc); if (err != 0) goto failed; - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) goto failed; callout_init(&sc->callout_tick, 0); adm6996fc_tick(sc); return (0); failed: free(sc->portphy, M_ADM6996FC); free(sc->miibus, M_ADM6996FC); free(sc->ifname, M_ADM6996FC); free(sc->ifp, M_ADM6996FC); return (err); } static int adm6996fc_detach(device_t dev) { struct adm6996fc_softc *sc; int i, port; sc = device_get_softc(dev); callout_drain(&sc->callout_tick); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = adm6996fc_portforphy(sc, i); if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->ifname[port], M_ADM6996FC); free(sc->miibus[port], M_ADM6996FC); } free(sc->portphy, M_ADM6996FC); free(sc->miibus, M_ADM6996FC); free(sc->ifname, M_ADM6996FC); free(sc->ifp, M_ADM6996FC); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* * Convert PHY number to port number. */ static inline int adm6996fc_portforphy(struct adm6996fc_softc *sc, int phy) { return (sc->ifpport[phy]); } static inline struct mii_data * adm6996fc_miiforport(struct adm6996fc_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); if (port == sc->cpuport) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t adm6996fc_ifpforport(struct adm6996fc_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. */ static void adm6996fc_miipollstat(struct adm6996fc_softc *sc) { int i, port; struct mii_data *mii; struct mii_softc *miisc; ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = adm6996fc_portforphy(sc, i); if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void adm6996fc_tick(void *arg) { struct adm6996fc_softc *sc; sc = arg; adm6996fc_miipollstat(sc); callout_reset(&sc->callout_tick, hz, adm6996fc_tick, sc); } static void adm6996fc_lock(device_t dev) { struct adm6996fc_softc *sc; sc = device_get_softc(dev); ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED); ADM6996FC_LOCK(sc); } static void adm6996fc_unlock(device_t dev) { struct adm6996fc_softc *sc; sc = device_get_softc(dev); ADM6996FC_LOCK_ASSERT(sc, MA_OWNED); ADM6996FC_UNLOCK(sc); } static etherswitch_info_t * adm6996fc_getinfo(device_t dev) { struct adm6996fc_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int adm6996fc_getport(device_t dev, etherswitch_port_t *p) { struct adm6996fc_softc *sc; struct mii_data *mii; struct ifmediareq *ifmr; device_t parent; int err, phy; int data1, data2; int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09}; int vidaddr[6] = {0x28, 0x29, 0x2a, 0x2b, 0x2b, 0x2c}; sc = device_get_softc(dev); ifmr = &p->es_ifmr; if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); parent = device_get_parent(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { data1 = ADM6996FC_READREG(parent, bcaddr[p->es_port]); data2 = ADM6996FC_READREG(parent, vidaddr[p->es_port]); /* only port 4 is hi bit */ if (p->es_port == 4) data2 = (data2 >> 8) & 0xff; 
else data2 = data2 & 0xff; p->es_pvid = ADM6996FC_PVIDBYDATA(data1, data2); if (((data1 >> ADM6996FC_OPTE_SHIFT) & 0x01) == 1) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; } else { p->es_pvid = 0; } phy = sc->portphy[p->es_port]; mii = adm6996fc_miiforport(sc, p->es_port); if (sc->cpuport != -1 && phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } static int adm6996fc_setport(device_t dev, etherswitch_port_t *p) { struct adm6996fc_softc *sc; struct ifmedia *ifm; struct mii_data *mii; if_t ifp; device_t parent; int err; int data; int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09}; int vidaddr[6] = {0x28, 0x29, 0x2a, 0x2b, 0x2b, 0x2c}; sc = device_get_softc(dev); parent = device_get_parent(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { data = ADM6996FC_READREG(parent, bcaddr[p->es_port]); data &= ~(0xf << 10); data |= (p->es_pvid & 0xf) << ADM6996FC_PVID_SHIFT; if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) data |= 1 << ADM6996FC_OPTE_SHIFT; else data &= ~(1 << ADM6996FC_OPTE_SHIFT); ADM6996FC_WRITEREG(parent, bcaddr[p->es_port], data); data = ADM6996FC_READREG(parent, vidaddr[p->es_port]); /* only port 4 is hi bit */ if (p->es_port == 4) { data &= ~(0xff << 8); data = data | (((p->es_pvid >> 4) & 0xff) << 8); } else { data &= ~0xff; data = data | ((p->es_pvid >> 4) & 0xff); } ADM6996FC_WRITEREG(parent, vidaddr[p->es_port], data); err = 0; } else { if (sc->portphy[p->es_port] == sc->cpuport) return (ENXIO); } if (sc->portphy[p->es_port] != sc->cpuport) { mii = adm6996fc_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = adm6996fc_ifpforport(sc, p->es_port); ifm = &mii->mii_media; err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA); } return (err); } static int adm6996fc_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct adm6996fc_softc *sc; device_t parent; int datahi, datalo; sc = device_get_softc(dev); parent = device_get_parent(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { if (vg->es_vlangroup <= 5) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= vg->es_vlangroup; datalo = ADM6996FC_READREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup); datahi = ADM6996FC_READREG(parent, ADM6996FC_VF0H + 2 * vg->es_vlangroup); vg->es_member_ports = datalo & 0x3f; vg->es_untagged_ports = vg->es_member_ports; vg->es_fid = 0; } else { vg->es_vid = 0; } } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { datalo = ADM6996FC_READREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup); datahi = ADM6996FC_READREG(parent, ADM6996FC_VF0H + 2 * vg->es_vlangroup); if (datahi & (1 << ADM6996FC_VV_SHIFT)) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= datahi & 0xfff; vg->es_member_ports = datalo & 0x3f; vg->es_untagged_ports = (~datalo >> 6) & 0x3f; vg->es_fid = 0; } else { vg->es_fid = 0; } } else { vg->es_fid = 0; } return (0); } static int adm6996fc_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct adm6996fc_softc *sc; device_t parent; sc = device_get_softc(dev); parent = device_get_parent(dev); if (sc->vlan_mode == 
ETHERSWITCH_VLAN_PORT) { ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup, vg->es_member_ports); } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * vg->es_vlangroup, vg->es_member_ports | ((~vg->es_untagged_ports & 0x3f)<< 6)); ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * vg->es_vlangroup, (1 << ADM6996FC_VV_SHIFT) | vg->es_vid); } return (0); } static int adm6996fc_getconf(device_t dev, etherswitch_conf_t *conf) { struct adm6996fc_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int adm6996fc_setconf(device_t dev, etherswitch_conf_t *conf) { struct adm6996fc_softc *sc; device_t parent; int i; int data; int bcaddr[6] = {0x01, 0x03, 0x05, 0x07, 0x08, 0x09}; sc = device_get_softc(dev); parent = device_get_parent(dev); if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) == 0) return (0); if (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) { sc->vlan_mode = ETHERSWITCH_VLAN_PORT; data = ADM6996FC_READREG(parent, ADM6996FC_SC3); data &= ~(1 << ADM6996FC_TBV_SHIFT); ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data); for (i = 0;i <= 5; ++i) { data = ADM6996FC_READREG(parent, bcaddr[i]); data &= ~(0xf << 10); data |= (i << 10); ADM6996FC_WRITEREG(parent, bcaddr[i], data); ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2 * i, 0x003f); ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * i, (1 << ADM6996FC_VV_SHIFT) | 1); } } else if (conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; data = ADM6996FC_READREG(parent, ADM6996FC_SC3); data |= (1 << ADM6996FC_TBV_SHIFT); ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data); for (i = 0;i <= 5; ++i) { data = ADM6996FC_READREG(parent, bcaddr[i]); /* Private VID set 1 */ data &= ~(0xf << 10); data |= (1 << 10); ADM6996FC_WRITEREG(parent, bcaddr[i], data); } for (i = 2;i <= 15; ++i) { ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2 * i, 0x0000); } } else { /* ADM6996FC have no VLAN off. Then set Port base and add all port to member. Use VLAN Filter 1 is reset default. 
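 * Restated: the ADM6996FC cannot switch VLAN processing off outright, so
 * "VLAN off" is emulated by falling back to port-based mode with every
 * port a member of the group, relying on VLAN filter 1 being the reset
 * default.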
*/ sc->vlan_mode = 0; data = ADM6996FC_READREG(parent, ADM6996FC_SC3); data &= ~(1 << ADM6996FC_TBV_SHIFT); ADM6996FC_WRITEREG(parent, ADM6996FC_SC3, data); for (i = 0;i <= 5; ++i) { data = ADM6996FC_READREG(parent, bcaddr[i]); data &= ~(0xf << 10); data |= (1 << 10); if (i == 5) data &= ~(1 << 4); ADM6996FC_WRITEREG(parent, bcaddr[i], data); } /* default setting */ ADM6996FC_WRITEREG(parent, ADM6996FC_VF0L + 2, 0x003f); ADM6996FC_WRITEREG(parent, ADM6996FC_VF0H + 2, (1 << ADM6996FC_VV_SHIFT) | 1); } return (0); } static void adm6996fc_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int adm6996fc_ifmedia_upd(if_t ifp) { struct adm6996fc_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = adm6996fc_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void adm6996fc_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct adm6996fc_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = adm6996fc_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int adm6996fc_readphy(device_t dev, int phy, int reg) { struct adm6996fc_softc *sc; int data; sc = device_get_softc(dev); ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); ADM6996FC_LOCK(sc); data = ADM6996FC_READREG(device_get_parent(dev), (ADM6996FC_PHY_C0 + ADM6996FC_PHY_SIZE * phy) + reg); ADM6996FC_UNLOCK(sc); return (data); } static int adm6996fc_writephy(device_t dev, int phy, int reg, int data) { struct adm6996fc_softc *sc; int err; sc = device_get_softc(dev); ADM6996FC_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); ADM6996FC_LOCK(sc); err = ADM6996FC_WRITEREG(device_get_parent(dev), (ADM6996FC_PHY_C0 + ADM6996FC_PHY_SIZE * phy) + reg, data); ADM6996FC_UNLOCK(sc); return (err); } static int adm6996fc_readreg(device_t dev, int addr) { return ADM6996FC_READREG(device_get_parent(dev), addr); } static int adm6996fc_writereg(device_t dev, int addr, int value) { int err; err = ADM6996FC_WRITEREG(device_get_parent(dev), addr, value); return (err); } static device_method_t adm6996fc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, adm6996fc_probe), DEVMETHOD(device_attach, adm6996fc_attach), DEVMETHOD(device_detach, adm6996fc_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, adm6996fc_readphy), DEVMETHOD(miibus_writereg, adm6996fc_writephy), DEVMETHOD(miibus_statchg, adm6996fc_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, adm6996fc_readphy), DEVMETHOD(mdio_writereg, adm6996fc_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, adm6996fc_lock), DEVMETHOD(etherswitch_unlock, adm6996fc_unlock), DEVMETHOD(etherswitch_getinfo, adm6996fc_getinfo), DEVMETHOD(etherswitch_readreg, adm6996fc_readreg), DEVMETHOD(etherswitch_writereg, adm6996fc_writereg), DEVMETHOD(etherswitch_readphyreg, adm6996fc_readphy), DEVMETHOD(etherswitch_writephyreg, adm6996fc_writephy), DEVMETHOD(etherswitch_getport, adm6996fc_getport), DEVMETHOD(etherswitch_setport, adm6996fc_setport), DEVMETHOD(etherswitch_getvgroup, adm6996fc_getvgroup), DEVMETHOD(etherswitch_setvgroup, adm6996fc_setvgroup), DEVMETHOD(etherswitch_setconf, adm6996fc_setconf), 
DEVMETHOD(etherswitch_getconf, adm6996fc_getconf), DEVMETHOD_END }; DEFINE_CLASS_0(adm6996fc, adm6996fc_driver, adm6996fc_methods, sizeof(struct adm6996fc_softc)); DRIVER_MODULE(adm6996fc, mdio, adm6996fc_driver, 0, 0); DRIVER_MODULE(miibus, adm6996fc, miibus_driver, 0, 0); DRIVER_MODULE(mdio, adm6996fc, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, adm6996fc, etherswitch_driver, 0, 0); MODULE_VERSION(adm6996fc, 1); MODULE_DEPEND(adm6996fc, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(adm6996fc, etherswitch, 1, 1, 1); /* XXX which versions? */ diff --git a/sys/dev/etherswitch/ip17x/ip17x.c b/sys/dev/etherswitch/ip17x/ip17x.c index 349dc59e09ae..c79ae069d2f0 100644 --- a/sys/dev/etherswitch/ip17x/ip17x.c +++ b/sys/dev/etherswitch/ip17x/ip17x.c @@ -1,653 +1,653 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" MALLOC_DECLARE(M_IP17X); MALLOC_DEFINE(M_IP17X, "ip17x", "ip17x data structures"); static void ip17x_tick(void *); static int ip17x_ifmedia_upd(if_t); static void ip17x_ifmedia_sts(if_t, struct ifmediareq *); static void ip17x_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "ip17x", -1) == NULL) BUS_ADD_CHILD(parent, 0, "ip17x", DEVICE_UNIT_ANY); } static int ip17x_probe(device_t dev) { struct ip17x_softc *sc; uint32_t oui, model, phy_id1, phy_id2; #ifdef FDT phandle_t ip17x_node; pcell_t cell; ip17x_node = fdt_find_compatible(OF_finddevice("/"), "icplus,ip17x", 0); if (ip17x_node == 0) return (ENXIO); #endif sc = device_get_softc(dev); /* Read ID from PHY 0. 
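 * In the standard MII ID layout, PHYIDR1 plus the top bits of PHYIDR2
 * hold the vendor OUI, while PHYIDR2 bits 9:4 carry the model number and
 * bits 3:0 the revision.  MII_OUI() and MII_MODEL() below do the
 * unpacking:
 *
 *	oui   = MII_OUI(phy_id1, phy_id2);	// 22-bit vendor OUI
 *	model = MII_MODEL(phy_id2);		// 6-bit model number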
*/ phy_id1 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR1); phy_id2 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR2); oui = MII_OUI(phy_id1, phy_id2); model = MII_MODEL(phy_id2); /* We only care about IC+ devices. */ if (oui != IP17X_OUI) { device_printf(dev, "Unsupported IC+ switch. Unknown OUI: %#x\n", oui); return (ENXIO); } switch (model) { case IP17X_IP175A: sc->sc_switchtype = IP17X_SWITCH_IP175A; break; case IP17X_IP175C: sc->sc_switchtype = IP17X_SWITCH_IP175C; break; default: device_printf(dev, "Unsupported IC+ switch model: %#x\n", model); return (ENXIO); } /* IP175D has a specific ID register. */ model = MDIO_READREG(device_get_parent(dev), IP175D_ID_PHY, IP175D_ID_REG); if (model == 0x175d) sc->sc_switchtype = IP17X_SWITCH_IP175D; else { /* IP178 has more PHYs. Try it. */ model = MDIO_READREG(device_get_parent(dev), 5, MII_PHYIDR1); if (phy_id1 == model) sc->sc_switchtype = IP17X_SWITCH_IP178C; } sc->miipoll = 1; #ifdef FDT if ((OF_getencprop(ip17x_node, "mii-poll", &cell, sizeof(cell))) > 0) sc->miipoll = cell ? 1 : 0; #else (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "mii-poll", &sc->miipoll); #endif device_set_desc(dev, "IC+ IP17x switch driver"); return (BUS_PROBE_DEFAULT); } static int ip17x_attach_phys(struct ip17x_softc *sc) { int err, phy, port; char name[IFNAMSIZ]; port = err = 0; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < MII_NPHY; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; sc->phyport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[port], sc); if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX); if_initname(sc->ifp[port], name, port); sc->miibus[port] = malloc(sizeof(device_t), M_IP17X, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], ip17x_ifmedia_upd, ip17x_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), if_name(sc->ifp[port])); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); break; } sc->info.es_nports = port + 1; if (++port >= sc->numports) break; } return (err); } static int ip17x_attach(device_t dev) { struct ip17x_softc *sc; int err; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ip17x", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* XXX Defaults */ sc->phymask = 0x0f; sc->media = 100; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phymask", &sc->phymask); /* Number of vlans supported by the switch. */ sc->info.es_nvlangroups = IP17X_MAX_VLANS; /* Attach the switch related functions. */ if (IP17X_IS_SWITCH(sc, IP175C)) ip175c_attach(sc); else if (IP17X_IS_SWITCH(sc, IP175D)) ip175d_attach(sc); else /* We don't have support to all the models yet :-/ */ return (ENXIO); /* Always attach the cpu port. */ sc->phymask |= (1 << sc->cpuport); sc->ifp = malloc(sizeof(if_t) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->pvid = malloc(sizeof(uint32_t) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_IP17X, M_WAITOK | M_ZERO); /* Initialize the switch. */ sc->hal.ip17x_reset(sc); /* * Attach the PHYs and complete the bus enumeration. 
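 * This is the usual newbus sequence: mii_attach() (inside
 * ip17x_attach_phys()) adds the miibus children, bus_identify_children()
 * gives every driver's DEVICE_IDENTIFY routine a chance to add children it
 * knows about, bus_enumerate_hinted_children() adds hint-configured ones,
 * and bus_generic_attach() then probes and attaches them all.  The switch
 * from bus_generic_probe() to bus_identify_children() throughout this diff
 * is a rename: the old name suggested probing, but the routine only ever
 * invoked identify methods.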
*/ err = ip17x_attach_phys(sc); if (err != 0) return (err); /* * Set the switch to port based vlans or disabled (if not supported * on this model). */ sc->hal.ip17x_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); if (sc->miipoll) { callout_init(&sc->callout_tick, 0); ip17x_tick(sc); } return (0); } static int ip17x_detach(device_t dev) { struct ip17x_softc *sc; int i, port; sc = device_get_softc(dev); if (sc->miipoll) callout_drain(&sc->callout_tick); for (i=0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = sc->phyport[i]; if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->miibus[port], M_IP17X); } free(sc->portphy, M_IP17X); free(sc->miibus, M_IP17X); free(sc->pvid, M_IP17X); free(sc->ifp, M_IP17X); /* Reset the switch. */ sc->hal.ip17x_reset(sc); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } static inline struct mii_data * ip17x_miiforport(struct ip17x_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t ip17x_ifpforport(struct ip17x_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. */ static void ip17x_miipollstat(struct ip17x_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int i, port; IP17X_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = sc->phyport[i]; if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void ip17x_tick(void *arg) { struct ip17x_softc *sc; sc = arg; ip17x_miipollstat(sc); callout_reset(&sc->callout_tick, hz, ip17x_tick, sc); } static void ip17x_lock(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_NOTOWNED); IP17X_LOCK(sc); } static void ip17x_unlock(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); IP17X_UNLOCK(sc); } static etherswitch_info_t * ip17x_getinfo(device_t dev) { struct ip17x_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int ip17x_getport(device_t dev, etherswitch_port_t *p) { struct ip17x_softc *sc; struct ifmediareq *ifmr; struct mii_data *mii; int err, phy; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); phy = sc->portphy[p->es_port]; /* Retrieve the PVID. */ p->es_pvid = sc->pvid[phy]; /* Port flags. */ if (sc->addtag & (1 << phy)) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; if (sc->striptag & (1 << phy)) p->es_flags |= ETHERSWITCH_PORT_STRIPTAG; ifmr = &p->es_ifmr; /* No media settings ? 
*/ if (p->es_ifmr.ifm_count == 0) return (0); mii = ip17x_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); if (phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } return (0); } static int ip17x_setport(device_t dev, etherswitch_port_t *p) { struct ip17x_softc *sc; struct ifmedia *ifm; if_t ifp; struct mii_data *mii; int phy; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); phy = sc->portphy[p->es_port]; ifp = ip17x_ifpforport(sc, p->es_port); mii = ip17x_miiforport(sc, p->es_port); if (ifp == NULL || mii == NULL) return (ENXIO); /* Port flags. */ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { /* Set the PVID. */ if (p->es_pvid != 0) { if (IP17X_IS_SWITCH(sc, IP175C) && p->es_pvid > IP175C_LAST_VLAN) return (ENXIO); sc->pvid[phy] = p->es_pvid; } /* Mutually exclusive. */ if (p->es_flags & ETHERSWITCH_PORT_ADDTAG && p->es_flags & ETHERSWITCH_PORT_STRIPTAG) return (EINVAL); /* Reset the settings for this port. */ sc->addtag &= ~(1 << phy); sc->striptag &= ~(1 << phy); /* And then set it to the new value. */ if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) sc->addtag |= (1 << phy); if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG) sc->striptag |= (1 << phy); } /* Update the switch configuration. */ if (sc->hal.ip17x_hw_setup(sc)) return (ENXIO); /* Do not allow media changes on CPU port. */ if (phy == sc->cpuport) return (0); /* No media settings ? */ if (p->es_ifmr.ifm_count == 0) return (0); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static void ip17x_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int ip17x_ifmedia_upd(if_t ifp) { struct ip17x_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); DPRINTF(sc->sc_dev, "%s\n", __func__); mii = ip17x_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void ip17x_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct ip17x_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); DPRINTF(sc->sc_dev, "%s\n", __func__); mii = ip17x_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int ip17x_readreg(device_t dev, int addr) { struct ip17x_softc *sc __diagused; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static int ip17x_writereg(device_t dev, int addr, int value) { struct ip17x_softc *sc __diagused; sc = device_get_softc(dev); IP17X_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static int ip17x_getconf(device_t dev, etherswitch_conf_t *conf) { struct ip17x_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->hal.ip17x_get_vlan_mode(sc); return (0); } static int ip17x_setconf(device_t dev, etherswitch_conf_t *conf) { struct ip17x_softc *sc; sc = device_get_softc(dev); /* Set the VLAN mode. 
*/ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) sc->hal.ip17x_set_vlan_mode(sc, conf->vlan_mode); return (0); } static device_method_t ip17x_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ip17x_identify), DEVMETHOD(device_probe, ip17x_probe), DEVMETHOD(device_attach, ip17x_attach), DEVMETHOD(device_detach, ip17x_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ip17x_readphy), DEVMETHOD(miibus_writereg, ip17x_writephy), DEVMETHOD(miibus_statchg, ip17x_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, ip17x_readphy), DEVMETHOD(mdio_writereg, ip17x_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ip17x_lock), DEVMETHOD(etherswitch_unlock, ip17x_unlock), DEVMETHOD(etherswitch_getinfo, ip17x_getinfo), DEVMETHOD(etherswitch_readreg, ip17x_readreg), DEVMETHOD(etherswitch_writereg, ip17x_writereg), DEVMETHOD(etherswitch_readphyreg, ip17x_readphy), DEVMETHOD(etherswitch_writephyreg, ip17x_writephy), DEVMETHOD(etherswitch_getport, ip17x_getport), DEVMETHOD(etherswitch_setport, ip17x_setport), DEVMETHOD(etherswitch_getvgroup, ip17x_getvgroup), DEVMETHOD(etherswitch_setvgroup, ip17x_setvgroup), DEVMETHOD(etherswitch_getconf, ip17x_getconf), DEVMETHOD(etherswitch_setconf, ip17x_setconf), DEVMETHOD_END }; DEFINE_CLASS_0(ip17x, ip17x_driver, ip17x_methods, sizeof(struct ip17x_softc)); DRIVER_MODULE(ip17x, mdio, ip17x_driver, 0, 0); DRIVER_MODULE(miibus, ip17x, miibus_driver, 0, 0); DRIVER_MODULE(etherswitch, ip17x, etherswitch_driver, 0, 0); MODULE_VERSION(ip17x, 1); #ifdef FDT MODULE_DEPEND(ip17x, mdio, 1, 1, 1); /* XXX which versions? */ #else DRIVER_MODULE(mdio, ip17x, mdio_driver, 0, 0); MODULE_DEPEND(ip17x, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(ip17x, etherswitch, 1, 1, 1); /* XXX which versions? */ #endif diff --git a/sys/dev/etherswitch/micrel/ksz8995ma.c b/sys/dev/etherswitch/micrel/ksz8995ma.c index ccd7dbffa9e9..bb93a881c6ab 100644 --- a/sys/dev/etherswitch/micrel/ksz8995ma.c +++ b/sys/dev/etherswitch/micrel/ksz8995ma.c @@ -1,942 +1,942 @@ /*- * Copyright (c) 2016 Hiroki Mori * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * This is Micrel KSZ8995MA driver code. KSZ8995MA use SPI bus on control. * This code development on @SRCHACK's ksz8995ma board and FON2100 with * gpiospi. * etherswitchcfg command port option support addtag, ingress, striptag, * dropuntagged. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" #include "miibus_if.h" #include "etherswitch_if.h" #define KSZ8995MA_SPI_READ 0x03 #define KSZ8995MA_SPI_WRITE 0x02 #define KSZ8995MA_CID0 0x00 #define KSZ8995MA_CID1 0x01 #define KSZ8995MA_GC0 0x02 #define KSZ8995MA_GC1 0x03 #define KSZ8995MA_GC2 0x04 #define KSZ8995MA_GC3 0x05 #define KSZ8995MA_PORT_SIZE 0x10 #define KSZ8995MA_PC0_BASE 0x10 #define KSZ8995MA_PC1_BASE 0x11 #define KSZ8995MA_PC2_BASE 0x12 #define KSZ8995MA_PC3_BASE 0x13 #define KSZ8995MA_PC4_BASE 0x14 #define KSZ8995MA_PC5_BASE 0x15 #define KSZ8995MA_PC6_BASE 0x16 #define KSZ8995MA_PC7_BASE 0x17 #define KSZ8995MA_PC8_BASE 0x18 #define KSZ8995MA_PC9_BASE 0x19 #define KSZ8995MA_PC10_BASE 0x1a #define KSZ8995MA_PC11_BASE 0x1b #define KSZ8995MA_PC12_BASE 0x1c #define KSZ8995MA_PC13_BASE 0x1d #define KSZ8995MA_PS0_BASE 0x1e #define KSZ8995MA_PC14_BASE 0x1f #define KSZ8995MA_IAC0 0x6e #define KSZ8995MA_IAC1 0x6f #define KSZ8995MA_IDR8 0x70 #define KSZ8995MA_IDR7 0x71 #define KSZ8995MA_IDR6 0x72 #define KSZ8995MA_IDR5 0x73 #define KSZ8995MA_IDR4 0x74 #define KSZ8995MA_IDR3 0x75 #define KSZ8995MA_IDR2 0x76 #define KSZ8995MA_IDR1 0x77 #define KSZ8995MA_IDR0 0x78 #define KSZ8995MA_FAMILI_ID 0x95 #define KSZ8995MA_CHIP_ID 0x00 #define KSZ8995MA_CHIP_ID_MASK 0xf0 #define KSZ8995MA_START 0x01 #define KSZ8995MA_VLAN_ENABLE 0x80 #define KSZ8995MA_TAG_INS 0x04 #define KSZ8995MA_TAG_RM 0x02 #define KSZ8995MA_INGR_FILT 0x40 #define KSZ8995MA_DROP_NONPVID 0x20 #define KSZ8995MA_PDOWN 0x08 #define KSZ8995MA_STARTNEG 0x20 #define KSZ8995MA_MII_STAT 0x7808 #define KSZ8995MA_MII_PHYID_H 0x0022 #define KSZ8995MA_MII_PHYID_L 0x1450 #define KSZ8995MA_MII_AA 0x0401 #define KSZ8995MA_VLAN_TABLE_VALID 0x20 #define KSZ8995MA_VLAN_TABLE_READ 0x14 #define KSZ8995MA_VLAN_TABLE_WRITE 0x04 #define KSZ8995MA_MAX_PORT 5 MALLOC_DECLARE(M_KSZ8995MA); MALLOC_DEFINE(M_KSZ8995MA, "ksz8995ma", "ksz8995ma data structures"); struct ksz8995ma_softc { struct mtx sc_mtx; /* serialize access to softc */ device_t sc_dev; int vlan_mode; int media; /* cpu port media */ int cpuport; /* which PHY is connected to the CPU */ int phymask; /* PHYs we manage */ int numports; /* number of ports */ int ifpport[KSZ8995MA_MAX_PORT]; int *portphy; char **ifname; device_t **miibus; if_t *ifp; struct callout callout_tick; etherswitch_info_t info; }; #define KSZ8995MA_LOCK(_sc) \ mtx_lock(&(_sc)->sc_mtx) #define KSZ8995MA_UNLOCK(_sc) \ mtx_unlock(&(_sc)->sc_mtx) #define KSZ8995MA_LOCK_ASSERT(_sc, _what) \ mtx_assert(&(_sc)->sc_mtx, (_what)) #define KSZ8995MA_TRYLOCK(_sc) \ mtx_trylock(&(_sc)->sc_mtx) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #else #define DPRINTF(dev, args...) 
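/*
 * (Editor's note: with DEBUG undefined the DPRINTF() above expands to
 * nothing, so none of the debug format arguments are evaluated in
 * production builds; the same conditional-macro pattern is used by the
 * other switch drivers touched in this change.)
 */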
#endif static inline int ksz8995ma_portforphy(struct ksz8995ma_softc *, int); static void ksz8995ma_tick(void *); static int ksz8995ma_ifmedia_upd(if_t); static void ksz8995ma_ifmedia_sts(if_t, struct ifmediareq *); static int ksz8995ma_readreg(device_t dev, int addr); static int ksz8995ma_writereg(device_t dev, int addr, int value); static void ksz8995ma_portvlanreset(device_t dev); static int ksz8995ma_probe(device_t dev) { int id0, id1; struct ksz8995ma_softc *sc; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); id0 = ksz8995ma_readreg(dev, KSZ8995MA_CID0); id1 = ksz8995ma_readreg(dev, KSZ8995MA_CID1); if (bootverbose) device_printf(dev,"Chip Identifier Register %x %x\n", id0, id1); /* check Product Code */ if (id0 != KSZ8995MA_FAMILI_ID || (id1 & KSZ8995MA_CHIP_ID_MASK) != KSZ8995MA_CHIP_ID) { return (ENXIO); } device_set_desc(dev, "Micrel KSZ8995MA SPI switch driver"); return (BUS_PROBE_DEFAULT); } static int ksz8995ma_attach_phys(struct ksz8995ma_softc *sc) { int phy, port, err; char name[IFNAMSIZ]; port = 0; err = 0; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < sc->numports; phy++) { if (phy == sc->cpuport) continue; if (((1 << phy) & sc->phymask) == 0) continue; sc->ifpport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); sc->ifp[port]->if_softc = sc; sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; if_initname(sc->ifp[port], name, port); sc->miibus[port] = malloc(sizeof(device_t), M_KSZ8995MA, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], ksz8995ma_ifmedia_upd, ksz8995ma_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), sc->ifp[port]->if_xname); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); goto failed; } ++port; } sc->info.es_nports = port; if (sc->cpuport != -1) { /* cpu port is MAC5 on ksz8995ma */ sc->ifpport[sc->cpuport] = port; sc->portphy[port] = sc->cpuport; ++sc->info.es_nports; } return (0); failed: for (phy = 0; phy < sc->numports; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; port = ksz8995ma_portforphy(sc, phy); if (sc->miibus[port] != NULL) device_delete_child(sc->sc_dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); if (sc->ifname[port] != NULL) free(sc->ifname[port], M_KSZ8995MA); if (sc->miibus[port] != NULL) free(sc->miibus[port], M_KSZ8995MA); } return (err); } static int ksz8995ma_attach(device_t dev) { struct ksz8995ma_softc *sc; int err, reg; err = 0; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ksz8995ma", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), sizeof(sc->info.es_name)); /* KSZ8995MA Defaults */ sc->numports = KSZ8995MA_MAX_PORT; sc->phymask = (1 << (KSZ8995MA_MAX_PORT + 1)) - 1; sc->cpuport = -1; sc->media = 100; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "cpuport", &sc->cpuport); sc->info.es_nvlangroups = 16; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOT1Q; sc->ifp = malloc(sizeof(if_t) * sc->numports, M_KSZ8995MA, M_WAITOK | M_ZERO); sc->ifname = malloc(sizeof(char *) * sc->numports, M_KSZ8995MA, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_KSZ8995MA, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_KSZ8995MA, M_WAITOK | M_ZERO); /* * 
Attach the PHYs and complete the bus enumeration. */ err = ksz8995ma_attach_phys(sc); if (err != 0) goto failed; - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) goto failed; callout_init(&sc->callout_tick, 0); ksz8995ma_tick(sc); /* start switch */ sc->vlan_mode = 0; reg = ksz8995ma_readreg(dev, KSZ8995MA_GC3); ksz8995ma_writereg(dev, KSZ8995MA_GC3, reg & ~KSZ8995MA_VLAN_ENABLE); ksz8995ma_portvlanreset(dev); ksz8995ma_writereg(dev, KSZ8995MA_CID1, KSZ8995MA_START); return (0); failed: free(sc->portphy, M_KSZ8995MA); free(sc->miibus, M_KSZ8995MA); free(sc->ifname, M_KSZ8995MA); free(sc->ifp, M_KSZ8995MA); return (err); } static int ksz8995ma_detach(device_t dev) { struct ksz8995ma_softc *sc; int i, port; sc = device_get_softc(dev); callout_drain(&sc->callout_tick); for (i = 0; i < KSZ8995MA_MAX_PORT; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = ksz8995ma_portforphy(sc, i); if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->ifname[port], M_KSZ8995MA); free(sc->miibus[port], M_KSZ8995MA); } free(sc->portphy, M_KSZ8995MA); free(sc->miibus, M_KSZ8995MA); free(sc->ifname, M_KSZ8995MA); free(sc->ifp, M_KSZ8995MA); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* * Convert PHY number to port number. */ static inline int ksz8995ma_portforphy(struct ksz8995ma_softc *sc, int phy) { return (sc->ifpport[phy]); } static inline struct mii_data * ksz8995ma_miiforport(struct ksz8995ma_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); if (port == sc->cpuport) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t ksz8995ma_ifpforport(struct ksz8995ma_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. 
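 * (Editor's note: link state on this device is learned purely by polling.
 * The callout below fires every second (hz ticks), walks the PHYs present
 * in sc->phymask, fetches their status through the simulated MII registers
 * and publishes it via ukphy_status()/mii_phy_update().)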
*/ static void ksz8995ma_miipollstat(struct ksz8995ma_softc *sc) { int i, port; struct mii_data *mii; struct mii_softc *miisc; KSZ8995MA_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < KSZ8995MA_MAX_PORT; i++) { if (i == sc->cpuport) continue; if (((1 << i) & sc->phymask) == 0) continue; port = ksz8995ma_portforphy(sc, i); if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void ksz8995ma_tick(void *arg) { struct ksz8995ma_softc *sc; sc = arg; ksz8995ma_miipollstat(sc); callout_reset(&sc->callout_tick, hz, ksz8995ma_tick, sc); } static void ksz8995ma_lock(device_t dev) { struct ksz8995ma_softc *sc; sc = device_get_softc(dev); KSZ8995MA_LOCK_ASSERT(sc, MA_NOTOWNED); KSZ8995MA_LOCK(sc); } static void ksz8995ma_unlock(device_t dev) { struct ksz8995ma_softc *sc; sc = device_get_softc(dev); KSZ8995MA_LOCK_ASSERT(sc, MA_OWNED); KSZ8995MA_UNLOCK(sc); } static etherswitch_info_t * ksz8995ma_getinfo(device_t dev) { struct ksz8995ma_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int ksz8995ma_getport(device_t dev, etherswitch_port_t *p) { struct ksz8995ma_softc *sc; struct mii_data *mii; struct ifmediareq *ifmr; int phy, err; int tag1, tag2, portreg; sc = device_get_softc(dev); ifmr = &p->es_ifmr; if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { tag1 = ksz8995ma_readreg(dev, KSZ8995MA_PC3_BASE + KSZ8995MA_PORT_SIZE * p->es_port); tag2 = ksz8995ma_readreg(dev, KSZ8995MA_PC4_BASE + KSZ8995MA_PORT_SIZE * p->es_port); p->es_pvid = (tag1 & 0x0f) << 8 | tag2; portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC0_BASE + KSZ8995MA_PORT_SIZE * p->es_port); if (portreg & KSZ8995MA_TAG_INS) p->es_flags |= ETHERSWITCH_PORT_ADDTAG; if (portreg & KSZ8995MA_TAG_RM) p->es_flags |= ETHERSWITCH_PORT_STRIPTAG; portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC2_BASE + KSZ8995MA_PORT_SIZE * p->es_port); if (portreg & KSZ8995MA_DROP_NONPVID) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; if (portreg & KSZ8995MA_INGR_FILT) p->es_flags |= ETHERSWITCH_PORT_INGRESS; } phy = sc->portphy[p->es_port]; mii = ksz8995ma_miiforport(sc, p->es_port); if (sc->cpuport != -1 && phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } static int ksz8995ma_setport(device_t dev, etherswitch_port_t *p) { struct ksz8995ma_softc *sc; struct mii_data *mii; struct ifmedia *ifm; if_t ifp; int phy, err; int portreg; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { ksz8995ma_writereg(dev, KSZ8995MA_PC4_BASE + KSZ8995MA_PORT_SIZE * p->es_port, p->es_pvid & 0xff); portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC3_BASE + KSZ8995MA_PORT_SIZE * p->es_port); ksz8995ma_writereg(dev, KSZ8995MA_PC3_BASE + KSZ8995MA_PORT_SIZE * p->es_port, (portreg & 0xf0) | ((p->es_pvid >> 8) & 0x0f)); portreg = 
ksz8995ma_readreg(dev, KSZ8995MA_PC0_BASE + KSZ8995MA_PORT_SIZE * p->es_port); if (p->es_flags & ETHERSWITCH_PORT_ADDTAG) portreg |= KSZ8995MA_TAG_INS; else portreg &= ~KSZ8995MA_TAG_INS; if (p->es_flags & ETHERSWITCH_PORT_STRIPTAG) portreg |= KSZ8995MA_TAG_RM; else portreg &= ~KSZ8995MA_TAG_RM; ksz8995ma_writereg(dev, KSZ8995MA_PC0_BASE + KSZ8995MA_PORT_SIZE * p->es_port, portreg); portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC2_BASE + KSZ8995MA_PORT_SIZE * p->es_port); if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) portreg |= KSZ8995MA_DROP_NONPVID; else portreg &= ~KSZ8995MA_DROP_NONPVID; if (p->es_flags & ETHERSWITCH_PORT_INGRESS) portreg |= KSZ8995MA_INGR_FILT; else portreg &= ~KSZ8995MA_INGR_FILT; ksz8995ma_writereg(dev, KSZ8995MA_PC2_BASE + KSZ8995MA_PORT_SIZE * p->es_port, portreg); } phy = sc->portphy[p->es_port]; mii = ksz8995ma_miiforport(sc, p->es_port); if (phy != sc->cpuport) { if (mii == NULL) return (ENXIO); ifp = ksz8995ma_ifpforport(sc, p->es_port); ifm = &mii->mii_media; err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA); } return (0); } static int ksz8995ma_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int data0, data1, data2; int vlantab; struct ksz8995ma_softc *sc; sc = device_get_softc(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { if (vg->es_vlangroup < sc->numports) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= vg->es_vlangroup; data0 = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE + KSZ8995MA_PORT_SIZE * vg->es_vlangroup); vg->es_member_ports = data0 & 0x1f; vg->es_untagged_ports = vg->es_member_ports; vg->es_fid = 0; } else { vg->es_vid = 0; } } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { ksz8995ma_writereg(dev, KSZ8995MA_IAC0, KSZ8995MA_VLAN_TABLE_READ); ksz8995ma_writereg(dev, KSZ8995MA_IAC1, vg->es_vlangroup); data2 = ksz8995ma_readreg(dev, KSZ8995MA_IDR2); data1 = ksz8995ma_readreg(dev, KSZ8995MA_IDR1); data0 = ksz8995ma_readreg(dev, KSZ8995MA_IDR0); vlantab = data2 << 16 | data1 << 8 | data0; if (data2 & KSZ8995MA_VLAN_TABLE_VALID) { vg->es_vid = ETHERSWITCH_VID_VALID; vg->es_vid |= vlantab & 0xfff; vg->es_member_ports = (vlantab >> 16) & 0x1f; vg->es_untagged_ports = vg->es_member_ports; vg->es_fid = (vlantab >> 12) & 0x0f; } else { vg->es_fid = 0; } } return (0); } static int ksz8995ma_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct ksz8995ma_softc *sc; int data0; sc = device_get_softc(dev); if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { data0 = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE + KSZ8995MA_PORT_SIZE * vg->es_vlangroup); ksz8995ma_writereg(dev, KSZ8995MA_PC1_BASE + KSZ8995MA_PORT_SIZE * vg->es_vlangroup, (data0 & 0xe0) | (vg->es_member_ports & 0x1f)); } else if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { if (vg->es_member_ports != 0) { ksz8995ma_writereg(dev, KSZ8995MA_IDR2, KSZ8995MA_VLAN_TABLE_VALID | (vg->es_member_ports & 0x1f)); ksz8995ma_writereg(dev, KSZ8995MA_IDR1, vg->es_fid << 4 | vg->es_vid >> 8); ksz8995ma_writereg(dev, KSZ8995MA_IDR0, vg->es_vid & 0xff); } else { ksz8995ma_writereg(dev, KSZ8995MA_IDR2, 0); ksz8995ma_writereg(dev, KSZ8995MA_IDR1, 0); ksz8995ma_writereg(dev, KSZ8995MA_IDR0, 0); } ksz8995ma_writereg(dev, KSZ8995MA_IAC0, KSZ8995MA_VLAN_TABLE_WRITE); ksz8995ma_writereg(dev, KSZ8995MA_IAC1, vg->es_vlangroup); } return (0); } static int ksz8995ma_getconf(device_t dev, etherswitch_conf_t *conf) { struct ksz8995ma_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. 
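 * The cmd bitmask tells userland which fields were filled in.  Reading it
 * back might look like this (editor's illustrative sketch, assuming the
 * stock etherswitch(4) ioctl names):
 *
 *	etherswitch_conf_t conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (ioctl(fd, IOETHERSWITCHGETCONF, &conf) == 0 &&
 *	    (conf.cmd & ETHERSWITCH_CONF_VLAN_MODE) != 0)
 *		printf("vlan mode: %u\n", conf.vlan_mode);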
*/ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static void ksz8995ma_portvlanreset(device_t dev) { int i, data; struct ksz8995ma_softc *sc; sc = device_get_softc(dev); for (i = 0; i < sc->numports; ++i) { data = ksz8995ma_readreg(dev, KSZ8995MA_PC1_BASE + KSZ8995MA_PORT_SIZE * i); ksz8995ma_writereg(dev, KSZ8995MA_PC1_BASE + KSZ8995MA_PORT_SIZE * i, (data & 0xe0) | 0x1f); } } static int ksz8995ma_setconf(device_t dev, etherswitch_conf_t *conf) { int reg; struct ksz8995ma_softc *sc; sc = device_get_softc(dev); if ((conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) == 0) return (0); if (conf->vlan_mode == ETHERSWITCH_VLAN_PORT) { sc->vlan_mode = ETHERSWITCH_VLAN_PORT; reg = ksz8995ma_readreg(dev, KSZ8995MA_GC3); ksz8995ma_writereg(dev, KSZ8995MA_GC3, reg & ~KSZ8995MA_VLAN_ENABLE); ksz8995ma_portvlanreset(dev); } else if (conf->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { sc->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; reg = ksz8995ma_readreg(dev, KSZ8995MA_GC3); ksz8995ma_writereg(dev, KSZ8995MA_GC3, reg | KSZ8995MA_VLAN_ENABLE); } else { sc->vlan_mode = 0; reg = ksz8995ma_readreg(dev, KSZ8995MA_GC3); ksz8995ma_writereg(dev, KSZ8995MA_GC3, reg & ~KSZ8995MA_VLAN_ENABLE); ksz8995ma_portvlanreset(dev); } return (0); } static void ksz8995ma_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int ksz8995ma_ifmedia_upd(if_t ifp) { struct ksz8995ma_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = ksz8995ma_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void ksz8995ma_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct ksz8995ma_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = ksz8995ma_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int ksz8995ma_readphy(device_t dev, int phy, int reg) { int portreg; /* * This is no mdio/mdc connection code. * simulate MIIM Registers via the SPI interface */ if (reg == MII_BMSR) { portreg = ksz8995ma_readreg(dev, KSZ8995MA_PS0_BASE + KSZ8995MA_PORT_SIZE * phy); return (KSZ8995MA_MII_STAT | (portreg & 0x20 ? BMSR_LINK : 0x00) | (portreg & 0x40 ? BMSR_ACOMP : 0x00)); } else if (reg == MII_PHYIDR1) { return (KSZ8995MA_MII_PHYID_H); } else if (reg == MII_PHYIDR2) { return (KSZ8995MA_MII_PHYID_L); } else if (reg == MII_ANAR) { portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC12_BASE + KSZ8995MA_PORT_SIZE * phy); return (KSZ8995MA_MII_AA | (portreg & 0x0f) << 5); } else if (reg == MII_ANLPAR) { portreg = ksz8995ma_readreg(dev, KSZ8995MA_PS0_BASE + KSZ8995MA_PORT_SIZE * phy); return (((portreg & 0x0f) << 5) | 0x01); } return (0); } static int ksz8995ma_writephy(device_t dev, int phy, int reg, int data) { int portreg; /* * This is no mdio/mdc connection code. 
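 * Editor's gloss: no MDC/MDIO pair is wired to the host on this design,
 * yet miibus still issues PHY register reads and writes, so, just as in
 * ksz8995ma_readphy() above, we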
* simulate MIIM Registers via the SPI interface */ if (reg == MII_BMCR) { portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC13_BASE + KSZ8995MA_PORT_SIZE * phy); if (data & BMCR_PDOWN) portreg |= KSZ8995MA_PDOWN; else portreg &= ~KSZ8995MA_PDOWN; if (data & BMCR_STARTNEG) portreg |= KSZ8995MA_STARTNEG; else portreg &= ~KSZ8995MA_STARTNEG; ksz8995ma_writereg(dev, KSZ8995MA_PC13_BASE + KSZ8995MA_PORT_SIZE * phy, portreg); } else if (reg == MII_ANAR) { portreg = ksz8995ma_readreg(dev, KSZ8995MA_PC12_BASE + KSZ8995MA_PORT_SIZE * phy); portreg &= 0xf; portreg |= ((data >> 5) & 0x0f); ksz8995ma_writereg(dev, KSZ8995MA_PC12_BASE + KSZ8995MA_PORT_SIZE * phy, portreg); } return (0); } static int ksz8995ma_readreg(device_t dev, int addr) { uint8_t txBuf[8], rxBuf[8]; struct spi_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); memset(txBuf, 0, sizeof(txBuf)); memset(rxBuf, 0, sizeof(rxBuf)); /* read spi */ txBuf[0] = KSZ8995MA_SPI_READ; txBuf[1] = addr; cmd.tx_cmd = &txBuf; cmd.rx_cmd = &rxBuf; cmd.tx_cmd_sz = 3; cmd.rx_cmd_sz = 3; err = SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd); if (err) return(0); return (rxBuf[2]); } static int ksz8995ma_writereg(device_t dev, int addr, int value) { uint8_t txBuf[8], rxBuf[8]; struct spi_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); memset(txBuf, 0, sizeof(txBuf)); memset(rxBuf, 0, sizeof(rxBuf)); /* write spi */ txBuf[0] = KSZ8995MA_SPI_WRITE; txBuf[1] = addr; txBuf[2] = value; cmd.tx_cmd = &txBuf; cmd.rx_cmd = &rxBuf; cmd.tx_cmd_sz = 3; cmd.rx_cmd_sz = 3; err = SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd); if (err) return(0); return (0); } static device_method_t ksz8995ma_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ksz8995ma_probe), DEVMETHOD(device_attach, ksz8995ma_attach), DEVMETHOD(device_detach, ksz8995ma_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ksz8995ma_readphy), DEVMETHOD(miibus_writereg, ksz8995ma_writephy), DEVMETHOD(miibus_statchg, ksz8995ma_statchg), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ksz8995ma_lock), DEVMETHOD(etherswitch_unlock, ksz8995ma_unlock), DEVMETHOD(etherswitch_getinfo, ksz8995ma_getinfo), DEVMETHOD(etherswitch_readreg, ksz8995ma_readreg), DEVMETHOD(etherswitch_writereg, ksz8995ma_writereg), DEVMETHOD(etherswitch_readphyreg, ksz8995ma_readphy), DEVMETHOD(etherswitch_writephyreg, ksz8995ma_writephy), DEVMETHOD(etherswitch_getport, ksz8995ma_getport), DEVMETHOD(etherswitch_setport, ksz8995ma_setport), DEVMETHOD(etherswitch_getvgroup, ksz8995ma_getvgroup), DEVMETHOD(etherswitch_setvgroup, ksz8995ma_setvgroup), DEVMETHOD(etherswitch_setconf, ksz8995ma_setconf), DEVMETHOD(etherswitch_getconf, ksz8995ma_getconf), DEVMETHOD_END }; DEFINE_CLASS_0(ksz8995ma, ksz8995ma_driver, ksz8995ma_methods, sizeof(struct ksz8995ma_softc)); DRIVER_MODULE(ksz8995ma, spibus, ksz8995ma_driver, 0, 0); DRIVER_MODULE(miibus, ksz8995ma, miibus_driver, 0, 0); DRIVER_MODULE(etherswitch, ksz8995ma, etherswitch_driver, 0, 0); MODULE_VERSION(ksz8995ma, 1); MODULE_DEPEND(ksz8995ma, spibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(ksz8995ma, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(ksz8995ma, etherswitch, 1, 1, 1); /* XXX which versions? 
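 * (Editor's note: the three numbers in MODULE_DEPEND() are the minimum,
 * preferred and maximum accepted version of the dependency, so "1, 1, 1"
 * pins each dependency to exactly version 1; the XXX marks here and in
 * ip17x above are the original author's doubt about whether that pin is
 * right.)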
*/ diff --git a/sys/dev/etherswitch/mtkswitch/mtkswitch.c b/sys/dev/etherswitch/mtkswitch/mtkswitch.c index fa96a81b40ef..837362982223 100644 --- a/sys/dev/etherswitch/mtkswitch/mtkswitch.c +++ b/sys/dev/etherswitch/mtkswitch/mtkswitch.c @@ -1,665 +1,665 @@ /*- * Copyright (c) 2016 Stanislav Galabov. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" #define DEBUG #if defined(DEBUG) static SYSCTL_NODE(_debug, OID_AUTO, mtkswitch, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "mtkswitch"); #endif static inline int mtkswitch_portforphy(int phy); static int mtkswitch_ifmedia_upd(if_t ifp); static void mtkswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr); static void mtkswitch_tick(void *arg); static const struct ofw_compat_data compat_data[] = { { "ralink,rt3050-esw", MTK_SWITCH_RT3050 }, { "ralink,rt3352-esw", MTK_SWITCH_RT3352 }, { "ralink,rt5350-esw", MTK_SWITCH_RT5350 }, { "mediatek,mt7620-gsw", MTK_SWITCH_MT7620 }, { "mediatek,mt7621-gsw", MTK_SWITCH_MT7621 }, { "mediatek,mt7628-esw", MTK_SWITCH_MT7628 }, /* Sentinel */ { NULL, MTK_SWITCH_NONE } }; static int mtkswitch_probe(device_t dev) { struct mtkswitch_softc *sc; mtk_switch_type switch_type; if (!ofw_bus_status_okay(dev)) return (ENXIO); switch_type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (switch_type == MTK_SWITCH_NONE) return (ENXIO); sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->sc_switchtype = switch_type; device_set_desc(dev, "MTK Switch Driver"); return (0); } static int mtkswitch_attach_phys(struct mtkswitch_softc *sc) { int phy, err = 0; char name[IFNAMSIZ]; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < sc->numphys; phy++) { if ((sc->phymap & (1u << phy)) == 0) { sc->ifp[phy] = NULL; sc->ifname[phy] = NULL; sc->miibus[phy] = NULL; continue; } sc->ifp[phy] = if_alloc(IFT_ETHER); sc->ifp[phy]->if_softc = sc; sc->ifp[phy]->if_flags |= IFF_UP | 
IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX; sc->ifname[phy] = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK); bcopy(name, sc->ifname[phy], strlen(name) + 1); if_initname(sc->ifp[phy], sc->ifname[phy], mtkswitch_portforphy(phy)); err = mii_attach(sc->sc_dev, &sc->miibus[phy], sc->ifp[phy], mtkswitch_ifmedia_upd, mtkswitch_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); } else { DPRINTF(sc->sc_dev, "%s attached to pseudo interface " "%s\n", device_get_nameunit(sc->miibus[phy]), sc->ifp[phy]->if_xname); } } return (err); } static int mtkswitch_set_vlan_mode(struct mtkswitch_softc *sc, uint32_t mode) { /* Check for invalid modes. */ if ((mode & sc->info.es_vlan_caps) != mode) return (EINVAL); sc->vlan_mode = mode; /* Reset VLANs. */ sc->hal.mtkswitch_vlan_init_hw(sc); return (0); } static int mtkswitch_attach(device_t dev) { struct mtkswitch_softc *sc; int err = 0; int port, rid; sc = device_get_softc(dev); /* sc->sc_switchtype is already decided in mtkswitch_probe() */ sc->numports = MTKSWITCH_MAX_PORTS; sc->numphys = MTKSWITCH_MAX_PHYS; sc->cpuport = MTKSWITCH_CPU_PORT; sc->sc_dev = dev; /* Attach switch related functions */ if (sc->sc_switchtype == MTK_SWITCH_NONE) { device_printf(dev, "Unknown switch type\n"); return (ENXIO); } if (sc->sc_switchtype == MTK_SWITCH_MT7620 || sc->sc_switchtype == MTK_SWITCH_MT7621) mtk_attach_switch_mt7620(sc); else mtk_attach_switch_rt3050(sc); /* Allocate resources */ rid = 0; sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "could not map memory\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, "mtkswitch", NULL, MTX_DEF); /* Reset the switch */ if (sc->hal.mtkswitch_reset(sc)) { DPRINTF(dev, "%s: mtkswitch_reset: failed\n", __func__); return (ENXIO); } err = sc->hal.mtkswitch_hw_setup(sc); DPRINTF(dev, "%s: hw_setup: err=%d\n", __func__, err); if (err != 0) return (err); err = sc->hal.mtkswitch_hw_global_setup(sc); DPRINTF(dev, "%s: hw_global_setup: err=%d\n", __func__, err); if (err != 0) return (err); /* Initialize the switch ports */ for (port = 0; port < sc->numports; port++) { sc->hal.mtkswitch_port_init(sc, port); } /* Attach the PHYs and complete the bus enumeration */ err = mtkswitch_attach_phys(sc); DPRINTF(dev, "%s: attach_phys: err=%d\n", __func__, err); if (err != 0) return (err); /* Default to ingress filters off. 
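 * The switch is then started in dot1q mode below;
 * mtkswitch_set_vlan_mode() above rejects any mode not advertised in
 * info.es_vlan_caps and re-runs the HAL's VLAN table initialization.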
*/ err = mtkswitch_set_vlan_mode(sc, ETHERSWITCH_VLAN_DOT1Q); DPRINTF(dev, "%s: set_vlan_mode: err=%d\n", __func__, err); if (err != 0) return (err); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); DPRINTF(dev, "%s: bus_generic_attach: err=%d\n", __func__, err); if (err != 0) return (err); callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0); MTKSWITCH_LOCK(sc); mtkswitch_tick(sc); MTKSWITCH_UNLOCK(sc); return (0); } static int mtkswitch_detach(device_t dev) { struct mtkswitch_softc *sc = device_get_softc(dev); int phy; callout_drain(&sc->callout_tick); for (phy = 0; phy < MTKSWITCH_MAX_PHYS; phy++) { if (sc->miibus[phy] != NULL) device_delete_child(dev, sc->miibus[phy]); if (sc->ifp[phy] != NULL) if_free(sc->ifp[phy]); free(sc->ifname[phy], M_DEVBUF); } bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* PHY <-> port mapping is currently 1:1 */ static inline int mtkswitch_portforphy(int phy) { return (phy); } static inline int mtkswitch_phyforport(int port) { return (port); } static inline struct mii_data * mtkswitch_miiforport(struct mtkswitch_softc *sc, int port) { int phy = mtkswitch_phyforport(port); if (phy < 0 || phy >= MTKSWITCH_MAX_PHYS || sc->miibus[phy] == NULL) return (NULL); return (device_get_softc(sc->miibus[phy])); } static inline if_t mtkswitch_ifpforport(struct mtkswitch_softc *sc, int port) { int phy = mtkswitch_phyforport(port); if (phy < 0 || phy >= MTKSWITCH_MAX_PHYS) return (NULL); return (sc->ifp[phy]); } /* * Convert port status to ifmedia. */ static void mtkswitch_update_ifmedia(uint32_t portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & MTKSWITCH_LINK_UP) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & MTKSWITCH_SPEED_MASK) { case MTKSWITCH_SPEED_10: *media_active |= IFM_10_T; break; case MTKSWITCH_SPEED_100: *media_active |= IFM_100_TX; break; case MTKSWITCH_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & MTKSWITCH_DUPLEX) != 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; if ((portstatus & MTKSWITCH_TXFLOW) != 0) *media_active |= IFM_ETH_TXPAUSE; if ((portstatus & MTKSWITCH_RXFLOW) != 0) *media_active |= IFM_ETH_RXPAUSE; } static void mtkswitch_miipollstat(struct mtkswitch_softc *sc) { struct mii_data *mii; struct mii_softc *miisc; uint32_t portstatus; int i, port_flap = 0; MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); for (i = 0; i < sc->numphys; i++) { if (sc->miibus[i] == NULL) continue; mii = device_get_softc(sc->miibus[i]); portstatus = sc->hal.mtkswitch_get_port_status(sc, mtkswitch_portforphy(i)); /* If a port has flapped - mark it so we can flush the ATU */ if (((mii->mii_media_status & IFM_ACTIVE) == 0 && (portstatus & MTKSWITCH_LINK_UP) != 0) || ((mii->mii_media_status & IFM_ACTIVE) != 0 && (portstatus & MTKSWITCH_LINK_UP) == 0)) { port_flap = 1; } mtkswitch_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } if (port_flap) sc->hal.mtkswitch_atu_flush(sc); } static void mtkswitch_tick(void *arg) { struct mtkswitch_softc *sc = arg; mtkswitch_miipollstat(sc); callout_reset(&sc->callout_tick, hz, mtkswitch_tick, sc); } static void mtkswitch_lock(device_t dev) { struct mtkswitch_softc *sc = device_get_softc(dev); 
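	/*
	 * (Editor's note: etherswitch(4) brackets register-level ioctl
	 * work with the etherswitch_lock/etherswitch_unlock methods, so
	 * the "not owned" assertion here catches recursive entry.)
	 */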
MTKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); MTKSWITCH_LOCK(sc); } static void mtkswitch_unlock(device_t dev) { struct mtkswitch_softc *sc = device_get_softc(dev); MTKSWITCH_LOCK_ASSERT(sc, MA_OWNED); MTKSWITCH_UNLOCK(sc); } static etherswitch_info_t * mtkswitch_getinfo(device_t dev) { struct mtkswitch_softc *sc = device_get_softc(dev); return (&sc->info); } static inline int mtkswitch_is_cpuport(struct mtkswitch_softc *sc, int port) { return (sc->cpuport == port); } static int mtkswitch_getport(device_t dev, etherswitch_port_t *p) { struct mtkswitch_softc *sc; struct mii_data *mii; struct ifmediareq *ifmr; int err; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); err = sc->hal.mtkswitch_port_vlan_get(sc, p); if (err != 0) return (err); mii = mtkswitch_miiforport(sc, p->es_port); if (mtkswitch_is_cpuport(sc, p->es_port)) { /* fill in fixed values for CPU port */ /* XXX is this valid in all cases? */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { ifmr = &p->es_ifmr; ifmr->ifm_count = 0; ifmr->ifm_current = ifmr->ifm_active = IFM_NONE; ifmr->ifm_mask = 0; ifmr->ifm_status = 0; } return (0); } static int mtkswitch_setport(device_t dev, etherswitch_port_t *p) { int err; struct mtkswitch_softc *sc; struct ifmedia *ifm; struct mii_data *mii; if_t ifp; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); /* Port flags. */ if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) { err = sc->hal.mtkswitch_port_vlan_setup(sc, p); if (err) return (err); } /* Do not allow media changes on CPU port. */ if (mtkswitch_is_cpuport(sc, p->es_port)) return (0); mii = mtkswitch_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = mtkswitch_ifpforport(sc, p->es_port); ifm = &mii->mii_media; return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static void mtkswitch_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int mtkswitch_ifmedia_upd(if_t ifp) { struct mtkswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = mtkswitch_miiforport(sc, if_getdunit(ifp)); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void mtkswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct mtkswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = mtkswitch_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int mtkswitch_getconf(device_t dev, etherswitch_conf_t *conf) { struct mtkswitch_softc *sc; sc = device_get_softc(dev); /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = sc->vlan_mode; return (0); } static int mtkswitch_setconf(device_t dev, etherswitch_conf_t *conf) { struct mtkswitch_softc *sc; int err; sc = device_get_softc(dev); /* Set the VLAN mode. 
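 * Unlike the drivers above, the mode switch can fail here, since
 * mtkswitch_set_vlan_mode() returns EINVAL for modes missing from
 * es_vlan_caps, and that error is propagated back to the ioctl caller.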
*/ if (conf->cmd & ETHERSWITCH_CONF_VLAN_MODE) { err = mtkswitch_set_vlan_mode(sc, conf->vlan_mode); if (err != 0) return (err); } return (0); } static int mtkswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *e) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_vlan_getvgroup(sc, e)); } static int mtkswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *e) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_vlan_setvgroup(sc, e)); } static int mtkswitch_readphy(device_t dev, int phy, int reg) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_phy_read(dev, phy, reg)); } static int mtkswitch_writephy(device_t dev, int phy, int reg, int val) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_phy_write(dev, phy, reg, val)); } static int mtkswitch_readreg(device_t dev, int addr) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_reg_read(dev, addr)); } static int mtkswitch_writereg(device_t dev, int addr, int value) { struct mtkswitch_softc *sc = device_get_softc(dev); return (sc->hal.mtkswitch_reg_write(dev, addr, value)); } static device_method_t mtkswitch_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mtkswitch_probe), DEVMETHOD(device_attach, mtkswitch_attach), DEVMETHOD(device_detach, mtkswitch_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, mtkswitch_readphy), DEVMETHOD(miibus_writereg, mtkswitch_writephy), DEVMETHOD(miibus_statchg, mtkswitch_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, mtkswitch_readphy), DEVMETHOD(mdio_writereg, mtkswitch_writephy), /* ehterswitch interface */ DEVMETHOD(etherswitch_lock, mtkswitch_lock), DEVMETHOD(etherswitch_unlock, mtkswitch_unlock), DEVMETHOD(etherswitch_getinfo, mtkswitch_getinfo), DEVMETHOD(etherswitch_readreg, mtkswitch_readreg), DEVMETHOD(etherswitch_writereg, mtkswitch_writereg), DEVMETHOD(etherswitch_readphyreg, mtkswitch_readphy), DEVMETHOD(etherswitch_writephyreg, mtkswitch_writephy), DEVMETHOD(etherswitch_getport, mtkswitch_getport), DEVMETHOD(etherswitch_setport, mtkswitch_setport), DEVMETHOD(etherswitch_getvgroup, mtkswitch_getvgroup), DEVMETHOD(etherswitch_setvgroup, mtkswitch_setvgroup), DEVMETHOD(etherswitch_getconf, mtkswitch_getconf), DEVMETHOD(etherswitch_setconf, mtkswitch_setconf), DEVMETHOD_END }; DEFINE_CLASS_0(mtkswitch, mtkswitch_driver, mtkswitch_methods, sizeof(struct mtkswitch_softc)); DRIVER_MODULE(mtkswitch, simplebus, mtkswitch_driver, 0, 0); DRIVER_MODULE(miibus, mtkswitch, miibus_driver, 0, 0); DRIVER_MODULE(mdio, mtkswitch, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, mtkswitch, etherswitch_driver, 0, 0); MODULE_VERSION(mtkswitch, 1); MODULE_DEPEND(mtkswitch, miibus, 1, 1, 1); MODULE_DEPEND(mtkswitch, etherswitch, 1, 1, 1); diff --git a/sys/dev/etherswitch/rtl8366/rtl8366rb.c b/sys/dev/etherswitch/rtl8366/rtl8366rb.c index e57b11f3270f..a2a847bff621 100644 --- a/sys/dev/etherswitch/rtl8366/rtl8366rb.c +++ b/sys/dev/etherswitch/rtl8366/rtl8366rb.c @@ -1,960 +1,960 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2015-2016 Hiroki Mori. * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_etherswitch.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "iicbus_if.h" #include "miibus_if.h" #include "etherswitch_if.h" struct rtl8366rb_softc { struct mtx sc_mtx; /* serialize access to softc */ int smi_acquired; /* serialize access to SMI/I2C bus */ struct mtx callout_mtx; /* serialize callout */ device_t dev; int vid[RTL8366_NUM_VLANS]; char *ifname[RTL8366_NUM_PHYS]; device_t miibus[RTL8366_NUM_PHYS]; if_t ifp[RTL8366_NUM_PHYS]; struct callout callout_tick; etherswitch_info_t info; int chip_type; int phy4cpu; int numphys; }; #define RTL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define RTL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define RTL_LOCK_ASSERT(_sc, _what) mtx_assert(&(_sc)->sc_mtx, (_what)) #define RTL_TRYLOCK(_sc) mtx_trylock(&(_sc)->sc_mtx) #define RTL_WAITOK 0 #define RTL_NOWAIT 1 #define RTL_SMI_ACQUIRED 1 #define RTL_SMI_ACQUIRED_ASSERT(_sc) \ KASSERT((_sc)->smi_acquired == RTL_SMI_ACQUIRED, ("smi must be acquired @%s", __FUNCTION__)) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #define DEVERR(dev, err, fmt, args...) do { \ if (err != 0) device_printf(dev, fmt, err, args); \ } while (0) #define DEBUG_INCRVAR(var) do { \ var++; \ } while (0) static int callout_blocked = 0; static int iic_select_retries = 0; static int phy_access_retries = 0; static SYSCTL_NODE(_debug, OID_AUTO, rtl8366rb, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "rtl8366rb"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, callout_blocked, CTLFLAG_RW, &callout_blocked, 0, "number of times the callout couldn't acquire the bus"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, iic_select_retries, CTLFLAG_RW, &iic_select_retries, 0, "number of times the I2C bus selection had to be retried"); SYSCTL_INT(_debug_rtl8366rb, OID_AUTO, phy_access_retries, CTLFLAG_RW, &phy_access_retries, 0, "number of times PHY register access had to be retried"); #else #define DPRINTF(dev, args...) #define DEVERR(dev, err, fmt, args...)
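/*
 * (Editor's note: in DEBUG builds the three counters above are exported
 * under the debug.rtl8366rb sysctl tree; the do { ... } while (0)
 * wrappers keep the multi-statement macros safe to use as a single
 * statement, for instance in an unbraced if/else.)
 */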
#define DEBUG_INCRVAR(var) #endif static int smi_probe(device_t dev); static int smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep); static int smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep); static int smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep); static void rtl8366rb_tick(void *arg); static int rtl8366rb_ifmedia_upd(if_t); static void rtl8366rb_ifmedia_sts(if_t, struct ifmediareq *); static void rtl8366rb_identify(driver_t *driver, device_t parent) { device_t child; struct iicbus_ivar *devi; if (device_find_child(parent, "rtl8366rb", -1) == NULL) { child = BUS_ADD_CHILD(parent, 0, "rtl8366rb", DEVICE_UNIT_ANY); devi = IICBUS_IVAR(child); devi->addr = RTL8366_IIC_ADDR; } } static int rtl8366rb_probe(device_t dev) { struct rtl8366rb_softc *sc; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); if (smi_probe(dev) != 0) return (ENXIO); if (sc->chip_type == RTL8366RB) device_set_desc(dev, "RTL8366RB Ethernet Switch Controller"); else device_set_desc(dev, "RTL8366SR Ethernet Switch Controller"); return (BUS_PROBE_DEFAULT); } static void rtl8366rb_init(device_t dev) { struct rtl8366rb_softc *sc; int i; sc = device_get_softc(dev); /* Initialisation for TL-WR1043ND */ #ifdef RTL8366_SOFT_RESET smi_rmw(dev, RTL8366_RCR, RTL8366_RCR_SOFT_RESET, RTL8366_RCR_SOFT_RESET, RTL_WAITOK); #else smi_rmw(dev, RTL8366_RCR, RTL8366_RCR_HARD_RESET, RTL8366_RCR_HARD_RESET, RTL_WAITOK); #endif /* hard reset not return ack */ DELAY(100000); /* Enable 16 VLAN mode */ smi_rmw(dev, RTL8366_SGCR, RTL8366_SGCR_EN_VLAN | RTL8366_SGCR_EN_VLAN_4KTB, RTL8366_SGCR_EN_VLAN, RTL_WAITOK); /* Initialize our vlan table. */ for (i = 0; i <= 1; i++) sc->vid[i] = (i + 1) | ETHERSWITCH_VID_VALID; /* Remove port 0 from VLAN 1. */ smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 0), (1 << 0), 0, RTL_WAITOK); /* Add port 0 untagged and port 5 tagged to VLAN 2. */ smi_rmw(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, 1), ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT) | ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT), ((1 << 5 | 1 << 0) << RTL8366_VMCR_MU_MEMBER_SHIFT | ((1 << 0) << RTL8366_VMCR_MU_UNTAG_SHIFT)), RTL_WAITOK); /* Set PVID 2 for port 0. */ smi_rmw(dev, RTL8366_PVCR_REG(0), RTL8366_PVCR_VAL(0, RTL8366_PVCR_PORT_MASK), RTL8366_PVCR_VAL(0, 1), RTL_WAITOK); } static int rtl8366rb_attach(device_t dev) { struct rtl8366rb_softc *sc; uint16_t rev = 0; char name[IFNAMSIZ]; int err = 0; int i; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->sc_mtx, "rtl8366rb", NULL, MTX_DEF); sc->smi_acquired = 0; mtx_init(&sc->callout_mtx, "rtl8366rbcallout", NULL, MTX_DEF); rtl8366rb_init(dev); smi_read(dev, RTL8366_CVCR, &rev, RTL_WAITOK); device_printf(dev, "rev. %d\n", rev & 0x000f); sc->phy4cpu = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phy4cpu", &sc->phy4cpu); sc->numphys = sc->phy4cpu ? 
RTL8366_NUM_PHYS - 1 : RTL8366_NUM_PHYS; sc->info.es_nports = sc->numphys + 1; sc->info.es_nvlangroups = RTL8366_NUM_VLANS; sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q; if (sc->chip_type == RTL8366RB) sprintf(sc->info.es_name, "Realtek RTL8366RB"); else sprintf(sc->info.es_name, "Realtek RTL8366SR"); /* attach miibus and phys */ /* PHYs need an interface, so we generate a dummy one */ for (i = 0; i < sc->numphys; i++) { sc->ifp[i] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[i], sc); if_setflagbits(sc->ifp[i], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX, 0); snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(dev)); sc->ifname[i] = malloc(strlen(name)+1, M_DEVBUF, M_WAITOK); bcopy(name, sc->ifname[i], strlen(name)+1); if_initname(sc->ifp[i], sc->ifname[i], i); err = mii_attach(dev, &sc->miibus[i], sc->ifp[i], rtl8366rb_ifmedia_upd, \ rtl8366rb_ifmedia_sts, BMSR_DEFCAPMASK, \ i, MII_OFFSET_ANY, 0); if (err != 0) { device_printf(dev, "attaching PHY %d failed\n", i); return (err); } } - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); callout_init_mtx(&sc->callout_tick, &sc->callout_mtx, 0); rtl8366rb_tick(sc); return (err); } static int rtl8366rb_detach(device_t dev) { struct rtl8366rb_softc *sc; int i; sc = device_get_softc(dev); for (i=0; i < sc->numphys; i++) { if (sc->miibus[i]) device_delete_child(dev, sc->miibus[i]); if (sc->ifp[i] != NULL) if_free(sc->ifp[i]); free(sc->ifname[i], M_DEVBUF); } bus_generic_detach(dev); callout_drain(&sc->callout_tick); mtx_destroy(&sc->callout_mtx); mtx_destroy(&sc->sc_mtx); return (0); } static void rtl8366rb_update_ifmedia(int portstatus, u_int *media_status, u_int *media_active) { *media_active = IFM_ETHER; *media_status = IFM_AVALID; if ((portstatus & RTL8366_PLSR_LINK) != 0) *media_status |= IFM_ACTIVE; else { *media_active |= IFM_NONE; return; } switch (portstatus & RTL8366_PLSR_SPEED_MASK) { case RTL8366_PLSR_SPEED_10: *media_active |= IFM_10_T; break; case RTL8366_PLSR_SPEED_100: *media_active |= IFM_100_TX; break; case RTL8366_PLSR_SPEED_1000: *media_active |= IFM_1000_T; break; } if ((portstatus & RTL8366_PLSR_FULLDUPLEX) != 0) *media_active |= IFM_FDX; else *media_active |= IFM_HDX; if ((portstatus & RTL8366_PLSR_TXPAUSE) != 0) *media_active |= IFM_ETH_TXPAUSE; if ((portstatus & RTL8366_PLSR_RXPAUSE) != 0) *media_active |= IFM_ETH_RXPAUSE; } static void rtl8366rb_miipollstat(struct rtl8366rb_softc *sc) { int i; struct mii_data *mii; struct mii_softc *miisc; uint16_t value; int portstatus; for (i = 0; i < sc->numphys; i++) { mii = device_get_softc(sc->miibus[i]); if ((i % 2) == 0) { if (smi_read(sc->dev, RTL8366_PLSR_BASE + i/2, &value, RTL_NOWAIT) != 0) { DEBUG_INCRVAR(callout_blocked); return; } portstatus = value & 0xff; } else { portstatus = (value >> 8) & 0xff; } rtl8366rb_update_ifmedia(portstatus, &mii->mii_media_status, &mii->mii_media_active); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; mii_phy_update(miisc, MII_POLLSTAT); } } } static void rtl8366rb_tick(void *arg) { struct rtl8366rb_softc *sc; sc = arg; rtl8366rb_miipollstat(sc); callout_reset(&sc->callout_tick, hz, rtl8366rb_tick, sc); } static int smi_probe(device_t dev) { struct rtl8366rb_softc *sc; device_t iicbus, iicha; int err, i, j; uint16_t chipid; char bytes[2]; int xferd; sc = device_get_softc(dev); iicbus = device_get_parent(dev); iicha = device_get_parent(iicbus); for (i = 0; i
< 2; ++i) { iicbus_reset(iicbus, IIC_FASTEST, RTL8366_IIC_ADDR, NULL); for (j=3; j--; ) { IICBUS_STOP(iicha); /* * we go directly to the host adapter because iicbus.c * only issues a stop on a bus that was successfully started. */ } err = iicbus_request_bus(iicbus, dev, IIC_WAIT); if (err != 0) goto out; err = iicbus_start(iicbus, RTL8366_IIC_ADDR | RTL_IICBUS_READ, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; if (i == 0) { bytes[0] = RTL8366RB_CIR & 0xff; bytes[1] = (RTL8366RB_CIR >> 8) & 0xff; } else { bytes[0] = RTL8366SR_CIR & 0xff; bytes[1] = (RTL8366SR_CIR >> 8) & 0xff; } err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0); if (err != 0) goto out; chipid = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff); if (i == 0 && chipid == RTL8366RB_CIR_ID8366RB) { DPRINTF(dev, "chip id 0x%04x\n", chipid); sc->chip_type = RTL8366RB; err = 0; break; } if (i == 1 && chipid == RTL8366SR_CIR_ID8366SR) { DPRINTF(dev, "chip id 0x%04x\n", chipid); sc->chip_type = RTL8366SR; err = 0; break; } if (i == 0) { iicbus_stop(iicbus); iicbus_release_bus(iicbus, dev); } } if (i == 2) err = ENXIO; out: iicbus_stop(iicbus); iicbus_release_bus(iicbus, dev); return (err == 0 ? 0 : ENXIO); } static int smi_acquire(struct rtl8366rb_softc *sc, int sleep) { int r = 0; if (sleep == RTL_WAITOK) RTL_LOCK(sc); else if (RTL_TRYLOCK(sc) == 0) return (EWOULDBLOCK); if (sc->smi_acquired == RTL_SMI_ACQUIRED) r = EBUSY; else { r = iicbus_request_bus(device_get_parent(sc->dev), sc->dev, \ sleep == RTL_WAITOK ? IIC_WAIT : IIC_DONTWAIT); if (r == 0) sc->smi_acquired = RTL_SMI_ACQUIRED; } RTL_UNLOCK(sc); return (r); } static int smi_release(struct rtl8366rb_softc *sc, int sleep) { if (sleep == RTL_WAITOK) RTL_LOCK(sc); else if (RTL_TRYLOCK(sc) == 0) return (EWOULDBLOCK); RTL_SMI_ACQUIRED_ASSERT(sc); iicbus_release_bus(device_get_parent(sc->dev), sc->dev); sc->smi_acquired = 0; RTL_UNLOCK(sc); return (0); } static int smi_select(device_t dev, int op, int sleep) { struct rtl8366rb_softc *sc; int err, i; device_t iicbus; struct iicbus_ivar *devi; int slave; sc = device_get_softc(dev); iicbus = device_get_parent(dev); devi = IICBUS_IVAR(dev); slave = devi->addr; RTL_SMI_ACQUIRED_ASSERT((struct rtl8366rb_softc *)device_get_softc(dev)); if (sc->chip_type == RTL8366SR) { // RTL8366SR work around // this is same work around at probe for (int i=3; i--; ) IICBUS_STOP(device_get_parent(device_get_parent(dev))); } /* * The chip does not use clock stretching when it is busy, * instead ignoring the command. Retry a few times. 
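 * (Editor's note: the loop below pauses between attempts only for
 * RTL_WAITOK callers; a non-sleepable caller sees the IIC_ENOACK after a
 * single try, and iic_select_retries counts how often the sleepable path
 * had to spin.)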
*/ for (i = RTL_IICBUS_RETRIES; i--; ) { err = iicbus_start(iicbus, slave | op, RTL_IICBUS_TIMEOUT); if (err != IIC_ENOACK) break; if (sleep == RTL_WAITOK) { DEBUG_INCRVAR(iic_select_retries); pause("smi_select", RTL_IICBUS_RETRY_SLEEP); } else break; } return (err); } static int smi_read_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t *data, int sleep) { int err; device_t iicbus; char bytes[2]; int xferd; iicbus = device_get_parent(sc->dev); RTL_SMI_ACQUIRED_ASSERT(sc); bytes[0] = addr & 0xff; bytes[1] = (addr >> 8) & 0xff; err = smi_select(sc->dev, RTL_IICBUS_READ, sleep); if (err != 0) goto out; err = iicbus_write(iicbus, bytes, 2, &xferd, RTL_IICBUS_TIMEOUT); if (err != 0) goto out; err = iicbus_read(iicbus, bytes, 2, &xferd, IIC_LAST_READ, 0); if (err != 0) goto out; *data = ((bytes[1] & 0xff) << 8) | (bytes[0] & 0xff); out: iicbus_stop(iicbus); return (err); } static int smi_write_locked(struct rtl8366rb_softc *sc, uint16_t addr, uint16_t data, int sleep) { int err; device_t iicbus; char bytes[4]; int xferd; iicbus = device_get_parent(sc->dev); RTL_SMI_ACQUIRED_ASSERT(sc); bytes[0] = addr & 0xff; bytes[1] = (addr >> 8) & 0xff; bytes[2] = data & 0xff; bytes[3] = (data >> 8) & 0xff; err = smi_select(sc->dev, RTL_IICBUS_WRITE, sleep); if (err == 0) err = iicbus_write(iicbus, bytes, 4, &xferd, RTL_IICBUS_TIMEOUT); iicbus_stop(iicbus); return (err); } static int smi_read(device_t dev, uint16_t addr, uint16_t *data, int sleep) { struct rtl8366rb_softc *sc; int err; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); err = smi_read_locked(sc, addr, data, sleep); smi_release(sc, sleep); DEVERR(dev, err, "smi_read()=%d: addr=%04x\n", addr); return (err == 0 ? 0 : EIO); } static int smi_write(device_t dev, uint16_t addr, uint16_t data, int sleep) { struct rtl8366rb_softc *sc; int err; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); err = smi_write_locked(sc, addr, data, sleep); smi_release(sc, sleep); DEVERR(dev, err, "smi_write()=%d: addr=%04x\n", addr); return (err == 0 ? 0 : EIO); } static int smi_rmw(device_t dev, uint16_t addr, uint16_t mask, uint16_t data, int sleep) { struct rtl8366rb_softc *sc; int err; uint16_t oldv, newv; sc = device_get_softc(dev); err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); if (err == 0) { err = smi_read_locked(sc, addr, &oldv, sleep); if (err == 0) { newv = oldv & ~mask; newv |= data & mask; if (newv != oldv) err = smi_write_locked(sc, addr, newv, sleep); } } smi_release(sc, sleep); DEVERR(dev, err, "smi_rmw()=%d: addr=%04x\n", addr); return (err == 0 ? 
0 : EIO); } static etherswitch_info_t * rtl_getinfo(device_t dev) { struct rtl8366rb_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static int rtl_readreg(device_t dev, int reg) { uint16_t data; data = 0; smi_read(dev, reg, &data, RTL_WAITOK); return (data); } static int rtl_writereg(device_t dev, int reg, int value) { return (smi_write(dev, reg, value, RTL_WAITOK)); } static int rtl_getport(device_t dev, etherswitch_port_t *p) { struct rtl8366rb_softc *sc; struct ifmedia *ifm; struct mii_data *mii; struct ifmediareq *ifmr; uint16_t v; int err, vlangroup; sc = device_get_softc(dev); ifmr = &p->es_ifmr; if (p->es_port < 0 || p->es_port >= (sc->numphys + 1)) return (ENXIO); if (sc->phy4cpu && p->es_port == sc->numphys) { vlangroup = RTL8366_PVCR_GET(p->es_port + 1, rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port + 1))); } else { vlangroup = RTL8366_PVCR_GET(p->es_port, rtl_readreg(dev, RTL8366_PVCR_REG(p->es_port))); } p->es_pvid = sc->vid[vlangroup] & ETHERSWITCH_VID_MASK; if (p->es_port < sc->numphys) { mii = device_get_softc(sc->miibus[p->es_port]); ifm = &mii->mii_media; err = ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCGIFMEDIA); if (err) return (err); } else { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; smi_read(dev, RTL8366_PLSR_BASE + (RTL8366_NUM_PHYS)/2, &v, RTL_WAITOK); v = v >> (8 * ((RTL8366_NUM_PHYS) % 2)); rtl8366rb_update_ifmedia(v, &ifmr->ifm_status, &ifmr->ifm_active); ifmr->ifm_current = ifmr->ifm_active; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; /* Return our static media list. */ if (ifmr->ifm_count > 0) { ifmr->ifm_count = 1; ifmr->ifm_ulist[0] = IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, 0); } else ifmr->ifm_count = 0; } return (0); } static int rtl_setport(device_t dev, etherswitch_port_t *p) { struct rtl8366rb_softc *sc; int i, err, vlangroup; struct ifmedia *ifm; struct mii_data *mii; int port; sc = device_get_softc(dev); if (p->es_port < 0 || p->es_port >= (sc->numphys + 1)) return (ENXIO); vlangroup = -1; for (i = 0; i < RTL8366_NUM_VLANS; i++) { if ((sc->vid[i] & ETHERSWITCH_VID_MASK) == p->es_pvid) { vlangroup = i; break; } } if (vlangroup == -1) return (ENXIO); if (sc->phy4cpu && p->es_port == sc->numphys) { port = p->es_port + 1; } else { port = p->es_port; } err = smi_rmw(dev, RTL8366_PVCR_REG(port), RTL8366_PVCR_VAL(port, RTL8366_PVCR_PORT_MASK), RTL8366_PVCR_VAL(port, vlangroup), RTL_WAITOK); if (err) return (err); /* CPU Port */ if (p->es_port == sc->numphys) return (0); mii = device_get_softc(sc->miibus[p->es_port]); ifm = &mii->mii_media; err = ifmedia_ioctl(sc->ifp[p->es_port], &p->es_ifr, ifm, SIOCSIFMEDIA); return (err); } static int rtl_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct rtl8366rb_softc *sc; uint16_t vmcr[3]; int i; int member, untagged; sc = device_get_softc(dev); for (i=0; i<RTL8366_VMCR_MULT; i++) vmcr[i] = rtl_readreg(dev, RTL8366_VMCR(i, vg->es_vlangroup)); vg->es_vid = sc->vid[vg->es_vlangroup]; member = RTL8366_VMCR_MEMBER(vmcr); untagged = RTL8366_VMCR_UNTAG(vmcr); if (sc->phy4cpu) { vg->es_member_ports = ((member & 0x20) >> 1) | (member & 0x0f); vg->es_untagged_ports = ((untagged & 0x20) >> 1) | (untagged & 0x0f); } else { vg->es_member_ports = member; vg->es_untagged_ports = untagged; } vg->es_fid = RTL8366_VMCR_FID(vmcr); return (0); } static int rtl_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { struct rtl8366rb_softc *sc; int g; int member, untagged; sc = device_get_softc(dev); g = vg->es_vlangroup; sc->vid[g] = vg->es_vid; /* VLAN group disabled ?
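 * A group with no member ports, no untagged ports and VID 0 is treated as
 * unused: we return early without setting ETHERSWITCH_VID_VALID or touching
 * the VMCR registers.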
*/ if (vg->es_member_ports == 0 && vg->es_untagged_ports == 0 && vg->es_vid == 0) return (0); sc->vid[g] |= ETHERSWITCH_VID_VALID; rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_DOT1Q_REG, g), (vg->es_vid << RTL8366_VMCR_DOT1Q_VID_SHIFT) & RTL8366_VMCR_DOT1Q_VID_MASK); if (sc->phy4cpu) { /* add space at phy4 */ member = (vg->es_member_ports & 0x0f) | ((vg->es_member_ports & 0x10) << 1); untagged = (vg->es_untagged_ports & 0x0f) | ((vg->es_untagged_ports & 0x10) << 1); } else { member = vg->es_member_ports; untagged = vg->es_untagged_ports; } if (sc->chip_type == RTL8366RB) { rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g), ((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) | ((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK)); rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_FID_REG, g), vg->es_fid); } else { rtl_writereg(dev, RTL8366_VMCR(RTL8366_VMCR_MU_REG, g), ((member << RTL8366_VMCR_MU_MEMBER_SHIFT) & RTL8366_VMCR_MU_MEMBER_MASK) | ((untagged << RTL8366_VMCR_MU_UNTAG_SHIFT) & RTL8366_VMCR_MU_UNTAG_MASK) | ((vg->es_fid << RTL8366_VMCR_FID_FID_SHIFT) & RTL8366_VMCR_FID_FID_MASK)); } return (0); } static int rtl_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode. */ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int rtl_readphy(device_t dev, int phy, int reg) { struct rtl8366rb_softc *sc; uint16_t data; int err, i, sleep; sc = device_get_softc(dev); data = 0; if (phy < 0 || phy >= RTL8366_NUM_PHYS) return (ENXIO); if (reg < 0 || reg >= RTL8366_NUM_PHY_REG) return (ENXIO); sleep = RTL_WAITOK; err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); for (i = RTL_IICBUS_RETRIES; i--; ) { err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_READ, sleep); if (err == 0) err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), 0, sleep); if (err == 0) { err = smi_read_locked(sc, RTL8366_PADR, &data, sleep); break; } DEBUG_INCRVAR(phy_access_retries); DPRINTF(dev, "rtl_readphy(): chip not responsive, retrying %d more times\n", i); pause("rtl_readphy", RTL_IICBUS_RETRY_SLEEP); } smi_release(sc, sleep); DEVERR(dev, err, "rtl_readphy()=%d: phy=%d.%02x\n", phy, reg); return (data); } static int rtl_writephy(device_t dev, int phy, int reg, int data) { struct rtl8366rb_softc *sc; int err, i, sleep; sc = device_get_softc(dev); if (phy < 0 || phy >= RTL8366_NUM_PHYS) return (ENXIO); if (reg < 0 || reg >= RTL8366_NUM_PHY_REG) return (ENXIO); sleep = RTL_WAITOK; err = smi_acquire(sc, sleep); if (err != 0) return (EBUSY); for (i = RTL_IICBUS_RETRIES; i--; ) { err = smi_write_locked(sc, RTL8366_PACR, RTL8366_PACR_WRITE, sleep); if (err == 0) err = smi_write_locked(sc, RTL8366_PHYREG(phy, 0, reg), data, sleep); if (err == 0) { break; } DEBUG_INCRVAR(phy_access_retries); DPRINTF(dev, "rtl_writephy(): chip not responsive, retrying %d more times\n", i); pause("rtl_writephy", RTL_IICBUS_RETRY_SLEEP); } smi_release(sc, sleep); DEVERR(dev, err, "rtl_writephy()=%d: phy=%d.%02x\n", phy, reg); return (err == 0 ?
0 : EIO); } static int rtl8366rb_ifmedia_upd(if_t ifp) { struct rtl8366rb_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus[if_getdunit(ifp)]); mii_mediachg(mii); return (0); } static void rtl8366rb_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct rtl8366rb_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus[if_getdunit(ifp)]); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static device_method_t rtl8366rb_methods[] = { /* Device interface */ DEVMETHOD(device_identify, rtl8366rb_identify), DEVMETHOD(device_probe, rtl8366rb_probe), DEVMETHOD(device_attach, rtl8366rb_attach), DEVMETHOD(device_detach, rtl8366rb_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, rtl_readphy), DEVMETHOD(miibus_writereg, rtl_writephy), /* MDIO interface */ DEVMETHOD(mdio_readreg, rtl_readphy), DEVMETHOD(mdio_writereg, rtl_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_getconf, rtl_getconf), DEVMETHOD(etherswitch_getinfo, rtl_getinfo), DEVMETHOD(etherswitch_readreg, rtl_readreg), DEVMETHOD(etherswitch_writereg, rtl_writereg), DEVMETHOD(etherswitch_readphyreg, rtl_readphy), DEVMETHOD(etherswitch_writephyreg, rtl_writephy), DEVMETHOD(etherswitch_getport, rtl_getport), DEVMETHOD(etherswitch_setport, rtl_setport), DEVMETHOD(etherswitch_getvgroup, rtl_getvgroup), DEVMETHOD(etherswitch_setvgroup, rtl_setvgroup), DEVMETHOD_END }; DEFINE_CLASS_0(rtl8366rb, rtl8366rb_driver, rtl8366rb_methods, sizeof(struct rtl8366rb_softc)); DRIVER_MODULE(rtl8366rb, iicbus, rtl8366rb_driver, 0, 0); DRIVER_MODULE(miibus, rtl8366rb, miibus_driver, 0, 0); DRIVER_MODULE(mdio, rtl8366rb, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, rtl8366rb, etherswitch_driver, 0, 0); MODULE_VERSION(rtl8366rb, 1); MODULE_DEPEND(rtl8366rb, iicbus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(rtl8366rb, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(rtl8366rb, etherswitch, 1, 1, 1); /* XXX which versions? */ diff --git a/sys/dev/etherswitch/ukswitch/ukswitch.c b/sys/dev/etherswitch/ukswitch/ukswitch.c index f2cf6ef74e90..c0f67310196f 100644 --- a/sys/dev/etherswitch/ukswitch/ukswitch.c +++ b/sys/dev/etherswitch/ukswitch/ukswitch.c @@ -1,575 +1,575 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2013 Luiz Otavio O Souza. * Copyright (c) 2011-2012 Stefan Bethke. * Copyright (c) 2012 Adrian Chadd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mdio_if.h" #include "miibus_if.h" #include "etherswitch_if.h" MALLOC_DECLARE(M_UKSWITCH); MALLOC_DEFINE(M_UKSWITCH, "ukswitch", "ukswitch data structures"); struct ukswitch_softc { struct mtx sc_mtx; /* serialize access to softc */ device_t sc_dev; int media; /* cpu port media */ int cpuport; /* which PHY is connected to the CPU */ int phymask; /* PHYs we manage */ int phyoffset; /* PHYs register offset */ int numports; /* number of ports */ int ifpport[MII_NPHY]; int *portphy; char **ifname; device_t **miibus; if_t *ifp; struct callout callout_tick; etherswitch_info_t info; }; #define UKSWITCH_LOCK(_sc) \ mtx_lock(&(_sc)->sc_mtx) #define UKSWITCH_UNLOCK(_sc) \ mtx_unlock(&(_sc)->sc_mtx) #define UKSWITCH_LOCK_ASSERT(_sc, _what) \ mtx_assert(&(_sc)->sc_mtx, (_what)) #define UKSWITCH_TRYLOCK(_sc) \ mtx_trylock(&(_sc)->sc_mtx) #if defined(DEBUG) #define DPRINTF(dev, args...) device_printf(dev, args) #else #define DPRINTF(dev, args...) #endif static inline int ukswitch_portforphy(struct ukswitch_softc *, int); static void ukswitch_tick(void *); static int ukswitch_ifmedia_upd(if_t); static void ukswitch_ifmedia_sts(if_t, struct ifmediareq *); static int ukswitch_probe(device_t dev) { struct ukswitch_softc *sc; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); device_set_desc(dev, "Generic MDIO switch driver"); return (BUS_PROBE_DEFAULT); } static int ukswitch_attach_phys(struct ukswitch_softc *sc) { int phy, port = 0, err = 0; char name[IFNAMSIZ]; /* PHYs need an interface, so we generate a dummy one */ snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev)); for (phy = 0; phy < MII_NPHY; phy++) { if (((1 << phy) & sc->phymask) == 0) continue; sc->ifpport[phy] = port; sc->portphy[port] = phy; sc->ifp[port] = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp[port], sc); if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING | IFF_SIMPLEX); sc->ifname[port] = malloc(strlen(name)+1, M_UKSWITCH, M_WAITOK); bcopy(name, sc->ifname[port], strlen(name)+1); if_initname(sc->ifp[port], sc->ifname[port], port); sc->miibus[port] = malloc(sizeof(device_t), M_UKSWITCH, M_WAITOK | M_ZERO); err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port], ukswitch_ifmedia_upd, ukswitch_ifmedia_sts, \ BMSR_DEFCAPMASK, phy + sc->phyoffset, MII_OFFSET_ANY, 0); DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(*sc->miibus[port]), if_name(sc->ifp[port])); if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", phy); break; } sc->info.es_nports = port + 1; if (++port >= sc->numports) break; } return (err); } static int ukswitch_attach(device_t dev) { struct ukswitch_softc *sc; int err = 0; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "ukswitch", NULL, MTX_DEF); strlcpy(sc->info.es_name, device_get_desc(dev), 
sizeof(sc->info.es_name)); /* XXX Defaults */ sc->numports = 6; sc->phymask = 0x0f; sc->phyoffset = 0; sc->cpuport = -1; sc->media = 100; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "numports", &sc->numports); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phymask", &sc->phymask); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "phyoffset", &sc->phyoffset); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "cpuport", &sc->cpuport); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "media", &sc->media); /* Support only fast and giga ethernet. */ if (sc->media != 100 && sc->media != 1000) sc->media = 100; if (sc->cpuport != -1) /* Always attach the cpu port. */ sc->phymask |= (1 << sc->cpuport); /* We do not support any vlan groups. */ sc->info.es_nvlangroups = 0; sc->ifp = malloc(sizeof(if_t) * sc->numports, M_UKSWITCH, M_WAITOK | M_ZERO); sc->ifname = malloc(sizeof(char *) * sc->numports, M_UKSWITCH, M_WAITOK | M_ZERO); sc->miibus = malloc(sizeof(device_t *) * sc->numports, M_UKSWITCH, M_WAITOK | M_ZERO); sc->portphy = malloc(sizeof(int) * sc->numports, M_UKSWITCH, M_WAITOK | M_ZERO); /* * Attach the PHYs and complete the bus enumeration. */ err = ukswitch_attach_phys(sc); if (err != 0) return (err); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); if (err != 0) return (err); callout_init(&sc->callout_tick, 0); ukswitch_tick(sc); return (err); } static int ukswitch_detach(device_t dev) { struct ukswitch_softc *sc = device_get_softc(dev); int i, port; callout_drain(&sc->callout_tick); for (i=0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = ukswitch_portforphy(sc, i); if (sc->miibus[port] != NULL) device_delete_child(dev, (*sc->miibus[port])); if (sc->ifp[port] != NULL) if_free(sc->ifp[port]); free(sc->ifname[port], M_UKSWITCH); free(sc->miibus[port], M_UKSWITCH); } free(sc->portphy, M_UKSWITCH); free(sc->miibus, M_UKSWITCH); free(sc->ifname, M_UKSWITCH); free(sc->ifp, M_UKSWITCH); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return (0); } /* * Convert PHY number to port number. */ static inline int ukswitch_portforphy(struct ukswitch_softc *sc, int phy) { return (sc->ifpport[phy]); } static inline struct mii_data * ukswitch_miiforport(struct ukswitch_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (device_get_softc(*sc->miibus[port])); } static inline if_t ukswitch_ifpforport(struct ukswitch_softc *sc, int port) { if (port < 0 || port > sc->numports) return (NULL); return (sc->ifp[port]); } /* * Poll the status for all PHYs. 
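 * For every PHY enabled in phymask this maps the PHY back to its port,
 * refreshes the attached mii_softc via ukphy_status(), and pushes the
 * result to the selected PHY instance with mii_phy_update(MII_POLLSTAT).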
*/ static void ukswitch_miipollstat(struct ukswitch_softc *sc) { int i, port; struct mii_data *mii; struct mii_softc *miisc; UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); for (i = 0; i < MII_NPHY; i++) { if (((1 << i) & sc->phymask) == 0) continue; port = ukswitch_portforphy(sc, i); if ((*sc->miibus[port]) == NULL) continue; mii = device_get_softc(*sc->miibus[port]); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { if (IFM_INST(mii->mii_media.ifm_cur->ifm_media) != miisc->mii_inst) continue; ukphy_status(miisc); mii_phy_update(miisc, MII_POLLSTAT); } } } static void ukswitch_tick(void *arg) { struct ukswitch_softc *sc = arg; ukswitch_miipollstat(sc); callout_reset(&sc->callout_tick, hz, ukswitch_tick, sc); } static void ukswitch_lock(device_t dev) { struct ukswitch_softc *sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); UKSWITCH_LOCK(sc); } static void ukswitch_unlock(device_t dev) { struct ukswitch_softc *sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_OWNED); UKSWITCH_UNLOCK(sc); } static etherswitch_info_t * ukswitch_getinfo(device_t dev) { struct ukswitch_softc *sc = device_get_softc(dev); return (&sc->info); } static int ukswitch_getport(device_t dev, etherswitch_port_t *p) { struct ukswitch_softc *sc = device_get_softc(dev); struct mii_data *mii; struct ifmediareq *ifmr = &p->es_ifmr; int err, phy; if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); p->es_pvid = 0; phy = sc->portphy[p->es_port]; mii = ukswitch_miiforport(sc, p->es_port); if (sc->cpuport != -1 && phy == sc->cpuport) { /* fill in fixed values for CPU port */ p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr->ifm_count = 0; if (sc->media == 100) ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; } else if (mii != NULL) { err = ifmedia_ioctl(mii->mii_ifp, &p->es_ifr, &mii->mii_media, SIOCGIFMEDIA); if (err) return (err); } else { return (ENXIO); } return (0); } static int ukswitch_setport(device_t dev, etherswitch_port_t *p) { struct ukswitch_softc *sc = device_get_softc(dev); struct ifmedia *ifm; struct mii_data *mii; if_t ifp; int err; if (p->es_port < 0 || p->es_port >= sc->numports) return (ENXIO); if (sc->portphy[p->es_port] == sc->cpuport) return (ENXIO); mii = ukswitch_miiforport(sc, p->es_port); if (mii == NULL) return (ENXIO); ifp = ukswitch_ifpforport(sc, p->es_port); ifm = &mii->mii_media; err = ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA); return (err); } static int ukswitch_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { /* Not supported. */ vg->es_vid = 0; vg->es_member_ports = 0; vg->es_untagged_ports = 0; vg->es_fid = 0; return (0); } static int ukswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { /* Not supported. 
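 * The switch runs unmanaged here (es_nvlangroups is 0 in attach), so
 * vlan-group writes are accepted and silently ignored.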
*/ return (0); } static void ukswitch_statchg(device_t dev) { DPRINTF(dev, "%s\n", __func__); } static int ukswitch_ifmedia_upd(if_t ifp) { struct ukswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = ukswitch_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return (ENXIO); mii_mediachg(mii); return (0); } static void ukswitch_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct ukswitch_softc *sc = if_getsoftc(ifp); struct mii_data *mii = ukswitch_miiforport(sc, if_getdunit(ifp)); DPRINTF(sc->sc_dev, "%s\n", __func__); if (mii == NULL) return; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int ukswitch_readphy(device_t dev, int phy, int reg) { struct ukswitch_softc *sc; int data; sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); UKSWITCH_LOCK(sc); data = MDIO_READREG(device_get_parent(dev), phy, reg); UKSWITCH_UNLOCK(sc); return (data); } static int ukswitch_writephy(device_t dev, int phy, int reg, int data) { struct ukswitch_softc *sc; int err; sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); if (phy < 0 || phy >= 32) return (ENXIO); if (reg < 0 || reg >= 32) return (ENXIO); UKSWITCH_LOCK(sc); err = MDIO_WRITEREG(device_get_parent(dev), phy, reg, data); UKSWITCH_UNLOCK(sc); return (err); } static int ukswitch_readreg(device_t dev, int addr) { struct ukswitch_softc *sc __diagused; sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static int ukswitch_writereg(device_t dev, int addr, int value) { struct ukswitch_softc *sc __diagused; sc = device_get_softc(dev); UKSWITCH_LOCK_ASSERT(sc, MA_OWNED); /* Not supported. */ return (0); } static device_method_t ukswitch_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ukswitch_probe), DEVMETHOD(device_attach, ukswitch_attach), DEVMETHOD(device_detach, ukswitch_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MII interface */ DEVMETHOD(miibus_readreg, ukswitch_readphy), DEVMETHOD(miibus_writereg, ukswitch_writephy), DEVMETHOD(miibus_statchg, ukswitch_statchg), /* MDIO interface */ DEVMETHOD(mdio_readreg, ukswitch_readphy), DEVMETHOD(mdio_writereg, ukswitch_writephy), /* etherswitch interface */ DEVMETHOD(etherswitch_lock, ukswitch_lock), DEVMETHOD(etherswitch_unlock, ukswitch_unlock), DEVMETHOD(etherswitch_getinfo, ukswitch_getinfo), DEVMETHOD(etherswitch_readreg, ukswitch_readreg), DEVMETHOD(etherswitch_writereg, ukswitch_writereg), DEVMETHOD(etherswitch_readphyreg, ukswitch_readphy), DEVMETHOD(etherswitch_writephyreg, ukswitch_writephy), DEVMETHOD(etherswitch_getport, ukswitch_getport), DEVMETHOD(etherswitch_setport, ukswitch_setport), DEVMETHOD(etherswitch_getvgroup, ukswitch_getvgroup), DEVMETHOD(etherswitch_setvgroup, ukswitch_setvgroup), DEVMETHOD_END }; DEFINE_CLASS_0(ukswitch, ukswitch_driver, ukswitch_methods, sizeof(struct ukswitch_softc)); DRIVER_MODULE(ukswitch, mdio, ukswitch_driver, 0, 0); DRIVER_MODULE(miibus, ukswitch, miibus_driver, 0, 0); DRIVER_MODULE(mdio, ukswitch, mdio_driver, 0, 0); DRIVER_MODULE(etherswitch, ukswitch, etherswitch_driver, 0, 0); MODULE_VERSION(ukswitch, 1); MODULE_DEPEND(ukswitch, miibus, 1, 1, 1); /* XXX which versions? */ MODULE_DEPEND(ukswitch, etherswitch, 1, 1, 1); /* XXX which versions? 
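 *
 * Not part of the original source: a hypothetical /boot/device.hints
 * fragment showing the knobs ukswitch_attach() reads through
 * resource_int_value() (unit 0 hanging off mdio0; all values are examples):
 *
 *	hint.ukswitch.0.at="mdio0"
 *	hint.ukswitch.0.numports="6"
 *	hint.ukswitch.0.phymask="0x0f"
 *	hint.ukswitch.0.phyoffset="0"
 *	hint.ukswitch.0.cpuport="5"
 *	hint.ukswitch.0.media="1000"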
*/ diff --git a/sys/dev/firewire/firewire.c b/sys/dev/firewire/firewire.c index 93a414405c17..1346cb255580 100644 --- a/sys/dev/firewire/firewire.c +++ b/sys/dev/firewire/firewire.c @@ -1,2386 +1,2386 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. Shimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include /* used by smbus and newbus */ #include #include #include #include #include #include struct crom_src_buf { struct crom_src src; struct crom_chunk root; struct crom_chunk vendor; struct crom_chunk hw; }; int firewire_debug = 0, try_bmr = 1, hold_count = 0; SYSCTL_INT(_debug, OID_AUTO, firewire_debug, CTLFLAG_RW, &firewire_debug, 0, "FireWire driver debug flag"); SYSCTL_NODE(_hw, OID_AUTO, firewire, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "FireWire Subsystem"); SYSCTL_INT(_hw_firewire, OID_AUTO, try_bmr, CTLFLAG_RW, &try_bmr, 0, "Try to be a bus manager"); SYSCTL_INT(_hw_firewire, OID_AUTO, hold_count, CTLFLAG_RW, &hold_count, 0, "Number of bus resets to tolerate before removing lost device information"); MALLOC_DEFINE(M_FW, "firewire", "FireWire"); MALLOC_DEFINE(M_FWXFER, "fw_xfer", "XFER/FireWire"); #define FW_MAXASYRTY 4 devclass_t firewire_devclass; static void firewire_identify(driver_t *, device_t); static int firewire_probe(device_t); static int firewire_attach(device_t); static int firewire_detach(device_t); static int firewire_resume(device_t); static void firewire_xfer_timeout(void *, int); static device_t firewire_add_child(device_t, u_int, const char *, int); static void fw_try_bmr(void *); static void fw_try_bmr_callback(struct fw_xfer *); static void fw_asystart(struct fw_xfer *); static int fw_get_tlabel(struct firewire_comm *, struct fw_xfer *); static void fw_bus_probe(void *); static void fw_attach_dev(struct firewire_comm *); static void fw_bus_probe_thread(void *); #ifdef FW_VMACCESS static void fw_vmaccess (struct fw_xfer *); #endif static int fw_bmr (struct firewire_comm *); static void fw_dump_hdr(struct fw_pkt *, char *); static device_method_t firewire_methods[] = { /* Device interface */ DEVMETHOD(device_identify, firewire_identify), DEVMETHOD(device_probe, firewire_probe), DEVMETHOD(device_attach, firewire_attach), DEVMETHOD(device_detach, firewire_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, firewire_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, firewire_add_child), DEVMETHOD_END }; char *linkspeed[] = { "S100", "S200", "S400", "S800", "S1600", "S3200", "undef", "undef" }; static char *tcode_str[] = { "WREQQ", "WREQB", "WRES", "undef", "RREQQ", "RREQB", "RRESQ", "RRESB", "CYCS", "LREQ", "STREAM", "LRES", "undef", "undef", "PHY", "undef" }; /* IEEE-1394a Table C-2 Gap count as a function of hops */ #define MAX_GAPHOP 15 u_int gap_cnt[] = { 5, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40}; static driver_t firewire_driver = { "firewire", firewire_methods, sizeof(struct firewire_softc), }; /* * Lookup fwdev by node id. */ struct fw_device * fw_noderesolve_nodeid(struct firewire_comm *fc, int dst) { struct fw_device *fwdev; FW_GLOCK(fc); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->dst == dst && fwdev->status != FWDEVINVAL) break; FW_GUNLOCK(fc); return fwdev; } /* * Lookup fwdev by EUI64. */ struct fw_device * fw_noderesolve_eui64(struct firewire_comm *fc, struct fw_eui64 *eui) { struct fw_device *fwdev; FW_GLOCK(fc); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, *eui)) break; FW_GUNLOCK(fc); if (fwdev == NULL) return NULL; if (fwdev->status == FWDEVINVAL) return NULL; return fwdev; } /* * Async. request procedure for userland application.
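 * Before queueing, the request is sanity checked: the handler and tcode
 * must be set, only config-ROM quadlet reads (dest_lo in
 * 0xf0000000-0xf0000fff) are allowed while the bus is still being
 * explored, the payload length must match the header, the target queue
 * must have room, and a transaction label is allocated when the tcode
 * needs one.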
*/ int fw_asyreq(struct firewire_comm *fc, int sub, struct fw_xfer *xfer) { int err = 0; struct fw_xferq *xferq; int len; struct fw_pkt *fp; int tcode; struct tcode_info *info; if (xfer == NULL) return EINVAL; if (xfer->hand == NULL) { printf("hand == NULL\n"); return EINVAL; } fp = &xfer->send.hdr; tcode = fp->mode.common.tcode & 0xf; info = &fc->tcode[tcode]; if (info->flag == 0) { printf("invalid tcode=%x\n", tcode); return EINVAL; } /* XXX allow bus explore packets only after bus reset */ if ((fc->status < FWBUSEXPLORE) && ((tcode != FWTCODE_RREQQ) || (fp->mode.rreqq.dest_hi != 0xffff) || (fp->mode.rreqq.dest_lo < 0xf0000000) || (fp->mode.rreqq.dest_lo >= 0xf0001000))) { xfer->resp = EAGAIN; xfer->flag = FWXF_BUSY; return (EAGAIN); } if (info->flag & FWTI_REQ) xferq = fc->atq; else xferq = fc->ats; len = info->hdr_len; if (xfer->send.pay_len > MAXREC(fc->maxrec)) { printf("send.pay_len > maxrec\n"); return EINVAL; } if (info->flag & FWTI_BLOCK_STR) len = fp->mode.stream.len; else if (info->flag & FWTI_BLOCK_ASY) len = fp->mode.rresb.len; else len = 0; if (len != xfer->send.pay_len) { printf("len(%d) != send.pay_len(%d) %s(%x)\n", len, xfer->send.pay_len, tcode_str[tcode], tcode); return EINVAL; } if (xferq->start == NULL) { printf("xferq->start == NULL\n"); return EINVAL; } if (!(xferq->queued < xferq->maxq)) { device_printf(fc->bdev, "Discard a packet (queued=%d)\n", xferq->queued); return EAGAIN; } xfer->tl = -1; if (info->flag & FWTI_TLABEL) { if (fw_get_tlabel(fc, xfer) < 0) return EAGAIN; } xfer->resp = 0; xfer->fc = fc; xfer->q = xferq; fw_asystart(xfer); return err; } /* * Wake up a blocked process. */ void fw_xferwake(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; mtx_lock(lock); xfer->flag |= FWXF_WAKE; mtx_unlock(lock); wakeup(xfer); return; } int fw_xferwait(struct fw_xfer *xfer) { struct mtx *lock = &xfer->fc->wait_lock; int err = 0; mtx_lock(lock); while ((xfer->flag & FWXF_WAKE) == 0) err = msleep(xfer, lock, PWAIT|PCATCH, "fw_xferwait", 0); mtx_unlock(lock); return (err); } /* * Async. request with a given xfer structure. */ static void fw_asystart(struct fw_xfer *xfer) { struct firewire_comm *fc = xfer->fc; /* Protect from interrupt/timeout */ FW_GLOCK(fc); xfer->flag = FWXF_INQ; STAILQ_INSERT_TAIL(&xfer->q->q, xfer, link); #if 0 xfer->q->queued++; #endif FW_GUNLOCK(fc); /* XXX just queue for mbuf */ if (xfer->mbuf == NULL) xfer->q->start(fc); return; } static void firewire_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "firewire", DEVICE_UNIT_ANY); } static int firewire_probe(device_t dev) { device_set_desc(dev, "IEEE1394(FireWire) bus"); return (0); } /* Just use a per-packet callout?
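 * Instead, one task walks all transaction labels: xfers that were sent
 * more than the 200 msec split timeout ago are collected on a local list
 * under tlabel_lock, marked ETIMEDOUT, and completed through their hand()
 * callbacks only after the lock is dropped.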
*/ static void firewire_xfer_timeout(void *arg, int pending) { struct firewire_comm *fc = (struct firewire_comm *)arg; struct fw_xfer *xfer, *txfer; struct timeval tv; struct timeval split_timeout; STAILQ_HEAD(, fw_xfer) xfer_timeout; int i; split_timeout.tv_sec = 0; split_timeout.tv_usec = 200 * 1000; /* 200 msec */ microtime(&tv); timevalsub(&tv, &split_timeout); STAILQ_INIT(&xfer_timeout); mtx_lock(&fc->tlabel_lock); for (i = 0; i < nitems(fc->tlabels); i++) { while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) { if ((xfer->flag & FWXF_SENT) == 0) /* not sent yet */ break; if (timevalcmp(&xfer->tv, &tv, >)) /* the rest are newer than this */ break; device_printf(fc->bdev, "split transaction timeout: tl=0x%x flag=0x%02x\n", i, xfer->flag); fw_dump_hdr(&xfer->send.hdr, "send"); xfer->resp = ETIMEDOUT; xfer->tl = -1; STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel); STAILQ_INSERT_TAIL(&xfer_timeout, xfer, tlabel); } } mtx_unlock(&fc->tlabel_lock); fc->timeout(fc); STAILQ_FOREACH_SAFE(xfer, &xfer_timeout, tlabel, txfer) xfer->hand(xfer); } #define WATCHDOG_HZ 10 static void firewire_watchdog(void *arg) { struct firewire_comm *fc; static int watchdog_clock = 0; fc = arg; /* * At boot stage, the device interrupt is disabled and * we encounter a timeout easily. To avoid this, * ignore the clock interrupt for a while. */ if (watchdog_clock > WATCHDOG_HZ * 15) taskqueue_enqueue(fc->taskqueue, &fc->task_timeout); else watchdog_clock++; callout_reset(&fc->timeout_callout, hz / WATCHDOG_HZ, firewire_watchdog, fc); } /* * The attach routine. */ static int firewire_attach(device_t dev) { int unit; struct firewire_softc *sc = device_get_softc(dev); device_t pa = device_get_parent(dev); struct firewire_comm *fc; fc = device_get_softc(pa); sc->fc = fc; fc->status = FWBUSNOTREADY; unit = device_get_unit(dev); if (fc->nisodma > FWMAXNDMA) fc->nisodma = FWMAXNDMA; fwdev_makedev(sc); fc->crom_src_buf = malloc(sizeof(struct crom_src_buf), M_FW, M_NOWAIT | M_ZERO); if (fc->crom_src_buf == NULL) { device_printf(fc->dev, "%s: unable to allocate crom src buffer\n", __func__); return ENOMEM; } fc->topology_map = malloc(sizeof(struct fw_topology_map), M_FW, M_NOWAIT | M_ZERO); if (fc->topology_map == NULL) { device_printf(fc->dev, "%s: unable to allocate topology map\n", __func__); free(fc->crom_src_buf, M_FW); return ENOMEM; } fc->speed_map = malloc(sizeof(struct fw_speed_map), M_FW, M_NOWAIT | M_ZERO); if (fc->speed_map == NULL) { device_printf(fc->dev, "%s: unable to allocate speed map\n", __func__); free(fc->crom_src_buf, M_FW); free(fc->topology_map, M_FW); return ENOMEM; } mtx_init(&fc->wait_lock, "fwwait", NULL, MTX_DEF); mtx_init(&fc->tlabel_lock, "fwtlabel", NULL, MTX_DEF); CALLOUT_INIT(&fc->timeout_callout); CALLOUT_INIT(&fc->bmr_callout); CALLOUT_INIT(&fc->busprobe_callout); TASK_INIT(&fc->task_timeout, 0, firewire_xfer_timeout, fc); callout_reset(&sc->fc->timeout_callout, hz, firewire_watchdog, sc->fc); /* create thread */ kproc_create(fw_bus_probe_thread, fc, &fc->probe_thread, 0, 0, "fw%d_probe", unit); /* Locate our children */ - bus_generic_probe(dev); + bus_identify_children(dev); /* launch attachment of the added children */ bus_generic_attach(dev); /* bus_reset */ FW_GLOCK(fc); fw_busreset(fc, FWBUSNOTREADY); FW_GUNLOCK(fc); fc->ibr(fc); return 0; } /* * Attach it as a child.
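 * The child gets our firewire_comm as its ivars and is probed and attached
 * immediately, so devices added after boot do not need a separate rescan.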
*/ static device_t firewire_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct firewire_softc *sc; sc = device_get_softc(dev); child = device_add_child(dev, name, unit); if (child) { device_set_ivars(child, sc->fc); device_probe_and_attach(child); } return child; } static int firewire_resume(device_t dev) { struct firewire_softc *sc; sc = device_get_softc(dev); sc->fc->status = FWBUSNOTREADY; bus_generic_resume(dev); return (0); } /* * Detach it. */ static int firewire_detach(device_t dev) { struct firewire_softc *sc; struct firewire_comm *fc; struct fw_device *fwdev, *fwdev_next; int err; sc = device_get_softc(dev); fc = sc->fc; mtx_lock(&fc->wait_lock); fc->status = FWBUSDETACH; wakeup(fc); if (msleep(fc->probe_thread, &fc->wait_lock, PWAIT, "fwthr", hz * 60)) printf("firewire probe thread didn't die\n"); mtx_unlock(&fc->wait_lock); if (fc->arq != 0 && fc->arq->maxq > 0) fw_drain_txq(fc); if ((err = fwdev_destroydev(sc)) != 0) return err; if ((err = bus_generic_detach(dev)) != 0) return err; callout_stop(&fc->timeout_callout); callout_stop(&fc->bmr_callout); callout_stop(&fc->busprobe_callout); /* XXX xfer_free and untimeout on all xfers */ for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL; fwdev = fwdev_next) { fwdev_next = STAILQ_NEXT(fwdev, link); free(fwdev, M_FW); } free(fc->topology_map, M_FW); free(fc->speed_map, M_FW); free(fc->crom_src_buf, M_FW); mtx_destroy(&fc->tlabel_lock); mtx_destroy(&fc->wait_lock); return (0); } static void fw_xferq_drain(struct fw_xferq *xferq) { struct fw_xfer *xfer; while ((xfer = STAILQ_FIRST(&xferq->q)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->q, link); #if 0 xferq->queued--; #endif xfer->resp = EAGAIN; xfer->flag = FWXF_SENTERR; fw_xfer_done(xfer); } } void fw_drain_txq(struct firewire_comm *fc) { struct fw_xfer *xfer, *txfer; STAILQ_HEAD(, fw_xfer) xfer_drain; int i; STAILQ_INIT(&xfer_drain); FW_GLOCK(fc); fw_xferq_drain(fc->atq); fw_xferq_drain(fc->ats); for (i = 0; i < fc->nisodma; i++) fw_xferq_drain(fc->it[i]); FW_GUNLOCK(fc); mtx_lock(&fc->tlabel_lock); for (i = 0; i < 0x40; i++) while ((xfer = STAILQ_FIRST(&fc->tlabels[i])) != NULL) { if (firewire_debug) printf("tl=%d flag=%d\n", i, xfer->flag); xfer->tl = -1; xfer->resp = EAGAIN; STAILQ_REMOVE_HEAD(&fc->tlabels[i], tlabel); STAILQ_INSERT_TAIL(&xfer_drain, xfer, tlabel); } mtx_unlock(&fc->tlabel_lock); STAILQ_FOREACH_SAFE(xfer, &xfer_drain, tlabel, txfer) xfer->hand(xfer); } static void fw_reset_csr(struct firewire_comm *fc) { int i; CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, NODE_IDS) = 0x3f; CSRARC(fc, TOPO_MAP + 8) = 0; fc->irm = -1; fc->max_node = -1; for (i = 2; i < 0x100 / 4 - 2; i++) { CSRARC(fc, SPED_MAP + i * 4) = 0; } CSRARC(fc, STATE_CLEAR) = 1 << 23 | 0 << 17 | 1 << 16 | 1 << 15 | 1 << 14; CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); CSRARC(fc, RESET_START) = 0; CSRARC(fc, SPLIT_TIMEOUT_HI) = 0; CSRARC(fc, SPLIT_TIMEOUT_LO) = 800 << 19; CSRARC(fc, CYCLE_TIME) = 0x0; CSRARC(fc, BUS_TIME) = 0x0; CSRARC(fc, BUS_MGR_ID) = 0x3f; CSRARC(fc, BANDWIDTH_AV) = 4915; CSRARC(fc, CHANNELS_AV_HI) = 0xffffffff; CSRARC(fc, CHANNELS_AV_LO) = 0xffffffff; CSRARC(fc, IP_CHANNELS) = (1U << 31); CSRARC(fc, CONF_ROM) = 0x04 << 24; CSRARC(fc, CONF_ROM + 4) = 0x31333934; /* means strings 1394 */ CSRARC(fc, CONF_ROM + 8) = 1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 0xff << 16 | 0x09 << 8; CSRARC(fc, CONF_ROM + 0xc) = 0; /* DV depend CSRs see blue book */ CSRARC(fc, oPCR) &= 
~DV_BROADCAST_ON; CSRARC(fc, iPCR) &= ~DV_BROADCAST_ON; CSRARC(fc, STATE_CLEAR) &= ~(1 << 23 | 1 << 15 | 1 << 14); CSRARC(fc, STATE_SET) = CSRARC(fc, STATE_CLEAR); } static void fw_init_crom(struct firewire_comm *fc) { struct crom_src *src; src = &fc->crom_src_buf->src; bzero(src, sizeof(struct crom_src)); /* BUS info sample */ src->hdr.info_len = 4; src->businfo.bus_name = CSR_BUS_NAME_IEEE1394; src->businfo.irmc = 1; src->businfo.cmc = 1; src->businfo.isc = 1; src->businfo.bmc = 1; src->businfo.pmc = 0; src->businfo.cyc_clk_acc = 100; src->businfo.max_rec = fc->maxrec; src->businfo.max_rom = MAXROM_4; #define FW_GENERATION_CHANGEABLE 2 src->businfo.generation = FW_GENERATION_CHANGEABLE; src->businfo.link_spd = fc->speed; src->businfo.eui64.hi = fc->eui.hi; src->businfo.eui64.lo = fc->eui.lo; STAILQ_INIT(&src->chunk_list); fc->crom_src = src; fc->crom_root = &fc->crom_src_buf->root; } static void fw_reset_crom(struct firewire_comm *fc) { struct crom_src_buf *buf; struct crom_src *src; struct crom_chunk *root; buf = fc->crom_src_buf; src = fc->crom_src; root = fc->crom_root; STAILQ_INIT(&src->chunk_list); bzero(root, sizeof(struct crom_chunk)); crom_add_chunk(src, NULL, root, 0); crom_add_entry(root, CSRKEY_NCAP, 0x0083c0); /* XXX */ /* private company_id */ crom_add_entry(root, CSRKEY_VENDOR, CSRVAL_VENDOR_PRIVATE); crom_add_simple_text(src, root, &buf->vendor, "FreeBSD Project"); crom_add_entry(root, CSRKEY_HW, __FreeBSD_version); mtx_lock(&prison0.pr_mtx); crom_add_simple_text(src, root, &buf->hw, prison0.pr_hostname); mtx_unlock(&prison0.pr_mtx); } /* * Called after bus reset. */ void fw_busreset(struct firewire_comm *fc, uint32_t new_status) { struct firewire_dev_comm *fdc; struct crom_src *src; device_t *devlistp; uint32_t *newrom; int i, devcnt; FW_GLOCK_ASSERT(fc); if (fc->status == FWBUSMGRELECT) callout_stop(&fc->bmr_callout); fc->status = new_status; fw_reset_csr(fc); if (fc->status == FWBUSNOTREADY) fw_init_crom(fc); fw_reset_crom(fc); if (device_get_children(fc->bdev, &devlistp, &devcnt) == 0) { for (i = 0; i < devcnt; i++) if (device_get_state(devlistp[i]) >= DS_ATTACHED) { fdc = device_get_softc(devlistp[i]); if (fdc->post_busreset != NULL) fdc->post_busreset(fdc); } free(devlistp, M_TEMP); } src = &fc->crom_src_buf->src; /* * If the old config rom needs to be overwritten, * bump the businfo.generation field to * indicate that we need to be reprobed. * See 1394a-2000 8.3.2.5.4 for more details. * generation starts at 2 and rolls over at 0xF * back to 2. * * A generation of 0 indicates a device * that is not 1394a-2000 compliant. * A generation of 1 indicates a device that * does not change its Bus Info Block or * Configuration ROM. */ #define FW_MAX_GENERATION 0xF newrom = malloc(CROMSIZE, M_FW, M_NOWAIT | M_ZERO); src = &fc->crom_src_buf->src; crom_load(src, newrom, CROMSIZE); if (bcmp(newrom, fc->config_rom, CROMSIZE) != 0) { /* Bump generation and reload. */ src->businfo.generation++; /* Handle generation count wraps. */ if (src->businfo.generation < FW_GENERATION_CHANGEABLE) src->businfo.generation = FW_GENERATION_CHANGEABLE; /* Recalculate CRC to account for generation change.
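 * crom_load() re-serializes the chunk list and recomputes the ROM CRCs,
 * so it has to run again after the generation bump; the bumped value
 * wraps after FW_MAX_GENERATION (0xF) and the check above clamps it
 * back to FW_GENERATION_CHANGEABLE (2).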
*/ crom_load(src, newrom, CROMSIZE); bcopy(newrom, fc->config_rom, CROMSIZE); } free(newrom, M_FW); } /* Call once after reboot */ void fw_init(struct firewire_comm *fc) { int i; #ifdef FW_VMACCESS struct fw_xfer *xfer; struct fw_bind *fwb; #endif fc->arq->queued = 0; fc->ars->queued = 0; fc->atq->queued = 0; fc->ats->queued = 0; fc->arq->buf = NULL; fc->ars->buf = NULL; fc->atq->buf = NULL; fc->ats->buf = NULL; fc->arq->flag = 0; fc->ars->flag = 0; fc->atq->flag = 0; fc->ats->flag = 0; STAILQ_INIT(&fc->atq->q); STAILQ_INIT(&fc->ats->q); for (i = 0; i < fc->nisodma; i++) { fc->it[i]->queued = 0; fc->ir[i]->queued = 0; fc->it[i]->start = NULL; fc->ir[i]->start = NULL; fc->it[i]->buf = NULL; fc->ir[i]->buf = NULL; fc->it[i]->flag = FWXFERQ_STREAM; fc->ir[i]->flag = FWXFERQ_STREAM; STAILQ_INIT(&fc->it[i]->q); STAILQ_INIT(&fc->ir[i]->q); } fc->arq->maxq = FWMAXQUEUE; fc->ars->maxq = FWMAXQUEUE; fc->atq->maxq = FWMAXQUEUE; fc->ats->maxq = FWMAXQUEUE; for (i = 0; i < fc->nisodma; i++) { fc->ir[i]->maxq = FWMAXQUEUE; fc->it[i]->maxq = FWMAXQUEUE; } CSRARC(fc, TOPO_MAP) = 0x3f1 << 16; CSRARC(fc, TOPO_MAP + 4) = 1; CSRARC(fc, SPED_MAP) = 0x3f1 << 16; CSRARC(fc, SPED_MAP + 4) = 1; STAILQ_INIT(&fc->devices); /* Initialize Async handlers */ STAILQ_INIT(&fc->binds); for (i = 0; i < 0x40; i++) { STAILQ_INIT(&fc->tlabels[i]); } /* DV depend CSRs see blue book */ #if 0 CSRARC(fc, oMPR) = 0x3fff0001; /* # output channel = 1 */ CSRARC(fc, oPCR) = 0x8000007a; for (i = 4; i < 0x7c/4; i += 4) { CSRARC(fc, i + oPCR) = 0x8000007a; } CSRARC(fc, iMPR) = 0x00ff0001; /* # input channel = 1 */ CSRARC(fc, iPCR) = 0x803f0000; for (i = 4; i < 0x7c/4; i += 4) { CSRARC(fc, i + iPCR) = 0x0; } #endif fc->crom_src_buf = NULL; #ifdef FW_VMACCESS xfer = fw_xfer_alloc(); if (xfer == NULL) return; fwb = malloc(sizeof(struct fw_bind), M_FW, M_NOWAIT); if (fwb == NULL) { fw_xfer_free(xfer); return; } xfer->hand = fw_vmaccess; xfer->fc = fc; xfer->sc = NULL; fwb->start_hi = 0x2; fwb->start_lo = 0; fwb->addrlen = 0xffffffff; fwb->xfer = xfer; fw_bindadd(fc, fwb); #endif } #define BIND_CMP(addr, fwb) (((addr) < (fwb)->start)? -1 : \ ((fwb)->end < (addr)) ? 1 : 0) /* * To lookup bound process from IEEE1394 address. */ struct fw_bind * fw_bindlookup(struct firewire_comm *fc, uint16_t dest_hi, uint32_t dest_lo) { u_int64_t addr; struct fw_bind *tfw, *r = NULL; addr = ((u_int64_t)dest_hi << 32) | dest_lo; FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) if (BIND_CMP(addr, tfw) == 0) { r = tfw; break; } FW_GUNLOCK(fc); return (r); } /* * To bind IEEE1394 address block to process. */ int fw_bindadd(struct firewire_comm *fc, struct fw_bind *fwb) { struct fw_bind *tfw, *prev = NULL; int r = 0; if (fwb->start > fwb->end) { printf("%s: invalid range\n", __func__); return EINVAL; } FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) { if (fwb->end < tfw->start) break; prev = tfw; } if (prev == NULL) STAILQ_INSERT_HEAD(&fc->binds, fwb, fclist); else if (prev->end < fwb->start) STAILQ_INSERT_AFTER(&fc->binds, prev, fwb, fclist); else { printf("%s: bind failed\n", __func__); r = EBUSY; } FW_GUNLOCK(fc); return (r); } /* * To free IEEE1394 address block. 
*/ int fw_bindremove(struct firewire_comm *fc, struct fw_bind *fwb) { #if 0 struct fw_xfer *xfer, *next; #endif struct fw_bind *tfw; int s; s = splfw(); FW_GLOCK(fc); STAILQ_FOREACH(tfw, &fc->binds, fclist) if (tfw == fwb) { STAILQ_REMOVE(&fc->binds, fwb, fw_bind, fclist); goto found; } printf("%s: no such binding\n", __func__); FW_GUNLOCK(fc); splx(s); return (1); found: #if 0 /* shall we do this? */ for (xfer = STAILQ_FIRST(&fwb->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwb->xferlist); #endif FW_GUNLOCK(fc); splx(s); return 0; } int fw_xferlist_add(struct fw_xferlist *q, struct malloc_type *type, int slen, int rlen, int n, struct firewire_comm *fc, void *sc, void (*hand)(struct fw_xfer *)) { int i, s; struct fw_xfer *xfer; for (i = 0; i < n; i++) { xfer = fw_xfer_alloc_buf(type, slen, rlen); if (xfer == NULL) return (i); xfer->fc = fc; xfer->sc = sc; xfer->hand = hand; s = splfw(); STAILQ_INSERT_TAIL(q, xfer, link); splx(s); } return (n); } void fw_xferlist_remove(struct fw_xferlist *q) { struct fw_xfer *xfer, *next; for (xfer = STAILQ_FIRST(q); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free_buf(xfer); } STAILQ_INIT(q); } /* * dump packet header */ static void fw_dump_hdr(struct fw_pkt *fp, char *prefix) { printf("%s: dst=0x%02x tl=0x%02x rt=%d tcode=0x%x pri=0x%x " "src=0x%03x\n", prefix, fp->mode.hdr.dst & 0x3f, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3, fp->mode.hdr.tcode, fp->mode.hdr.pri, fp->mode.hdr.src); } /* * To free transaction label. */ static void fw_tl_free(struct firewire_comm *fc, struct fw_xfer *xfer) { struct fw_xfer *txfer; mtx_lock(&fc->tlabel_lock); if (xfer->tl < 0) { mtx_unlock(&fc->tlabel_lock); return; } /* make sure the label is allocated */ STAILQ_FOREACH(txfer, &fc->tlabels[xfer->tl], tlabel) if (txfer == xfer) break; if (txfer == NULL) { printf("%s: the xfer is not in the queue " "(tlabel=%d, flag=0x%x)\n", __FUNCTION__, xfer->tl, xfer->flag); fw_dump_hdr(&xfer->send.hdr, "send"); fw_dump_hdr(&xfer->recv.hdr, "recv"); kdb_backtrace(); mtx_unlock(&fc->tlabel_lock); return; } STAILQ_REMOVE(&fc->tlabels[xfer->tl], xfer, fw_xfer, tlabel); xfer->tl = -1; mtx_unlock(&fc->tlabel_lock); return; } /* * To obtain XFER structure by transaction label. */ static struct fw_xfer * fw_tl2xfer(struct firewire_comm *fc, int node, int tlabel, int tcode) { struct fw_xfer *xfer; int s = splfw(); int req; mtx_lock(&fc->tlabel_lock); STAILQ_FOREACH(xfer, &fc->tlabels[tlabel], tlabel) if (xfer->send.hdr.mode.hdr.dst == node) { mtx_unlock(&fc->tlabel_lock); splx(s); KASSERT(xfer->tl == tlabel, ("xfer->tl 0x%x != 0x%x", xfer->tl, tlabel)); /* extra sanity check */ req = xfer->send.hdr.mode.hdr.tcode; if (xfer->fc->tcode[req].valid_res != tcode) { printf("%s: invalid response tcode " "(0x%x for 0x%x)\n", __FUNCTION__, tcode, req); return (NULL); } if (firewire_debug > 2) printf("fw_tl2xfer: found tl=%d\n", tlabel); return (xfer); } mtx_unlock(&fc->tlabel_lock); if (firewire_debug > 1) printf("fw_tl2xfer: not found tl=%d\n", tlabel); splx(s); return (NULL); } /* * To allocate IEEE1394 XFER structure. 
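 * A minimal usage sketch (not from the original; the buffer lengths are
 * made-up examples, M_FWXFER is this file's malloc type):
 *
 *	struct fw_xfer *xfer;
 *
 *	xfer = fw_xfer_alloc_buf(M_FWXFER, 16, 16);
 *	if (xfer != NULL) {
 *		... fill xfer->send.hdr, set xfer->hand ...
 *		fw_xfer_free_buf(xfer);	(frees both payloads and the xfer)
 *	}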
*/ struct fw_xfer * fw_xfer_alloc(struct malloc_type *type) { struct fw_xfer *xfer; xfer = malloc(sizeof(struct fw_xfer), type, M_NOWAIT | M_ZERO); if (xfer == NULL) return xfer; xfer->malloc = type; xfer->tl = -1; return xfer; } struct fw_xfer * fw_xfer_alloc_buf(struct malloc_type *type, int send_len, int recv_len) { struct fw_xfer *xfer; xfer = fw_xfer_alloc(type); if (xfer == NULL) return (NULL); xfer->send.pay_len = send_len; xfer->recv.pay_len = recv_len; if (send_len > 0) { xfer->send.payload = malloc(send_len, type, M_NOWAIT | M_ZERO); if (xfer->send.payload == NULL) { fw_xfer_free(xfer); return (NULL); } } if (recv_len > 0) { xfer->recv.payload = malloc(recv_len, type, M_NOWAIT); if (xfer->recv.payload == NULL) { if (xfer->send.payload != NULL) free(xfer->send.payload, type); fw_xfer_free(xfer); return (NULL); } } return (xfer); } /* * IEEE1394 XFER post process. */ void fw_xfer_done(struct fw_xfer *xfer) { if (xfer->hand == NULL) { printf("hand == NULL\n"); return; } if (xfer->fc == NULL) panic("fw_xfer_done: why xfer->fc is NULL?"); fw_tl_free(xfer->fc, xfer); xfer->hand(xfer); } void fw_xfer_unload(struct fw_xfer *xfer) { if (xfer == NULL) return; if (xfer->fc != NULL) { FW_GLOCK(xfer->fc); if (xfer->flag & FWXF_INQ) { STAILQ_REMOVE(&xfer->q->q, xfer, fw_xfer, link); xfer->flag &= ~FWXF_INQ; #if 0 xfer->q->queued--; #endif } FW_GUNLOCK(xfer->fc); /* * Ensure that any tlabel owner can't access this * xfer after it's freed. */ fw_tl_free(xfer->fc, xfer); #if 1 if (xfer->flag & FWXF_START) /* * This could happen if: * 1. We call fwohci_arcv() before fwohci_txd(). * 2. firewire_watch() is called. */ printf("fw_xfer_free FWXF_START\n"); #endif } xfer->flag = FWXF_INIT; xfer->resp = 0; } /* * To free IEEE1394 XFER structure. */ void fw_xfer_free_buf(struct fw_xfer *xfer) { if (xfer == NULL) { printf("%s: xfer == NULL\n", __func__); return; } fw_xfer_unload(xfer); if (xfer->send.payload != NULL) free(xfer->send.payload, xfer->malloc); if (xfer->recv.payload != NULL) free(xfer->recv.payload, xfer->malloc); free(xfer, xfer->malloc); } void fw_xfer_free(struct fw_xfer *xfer) { if (xfer == NULL) { printf("%s: xfer == NULL\n", __func__); return; } fw_xfer_unload(xfer); free(xfer, xfer->malloc); } void fw_asy_callback_free(struct fw_xfer *xfer) { #if 0 printf("asyreq done flag=0x%02x resp=%d\n", xfer->flag, xfer->resp); #endif fw_xfer_free(xfer); } /* * To configure PHY. */ static void fw_phy_config(struct firewire_comm *fc, int root_node, int gap_count) { struct fw_xfer *xfer; struct fw_pkt *fp; fc->status = FWBUSPHYCONF; xfer = fw_xfer_alloc(M_FWXFER); if (xfer == NULL) return; xfer->fc = fc; xfer->hand = fw_asy_callback_free; fp = &xfer->send.hdr; fp->mode.ld[1] = 0; if (root_node >= 0) fp->mode.ld[1] |= (1 << 23) | (root_node & 0x3f) << 24; if (gap_count >= 0) fp->mode.ld[1] |= (1 << 22) | (gap_count & 0x3f) << 16; fp->mode.ld[2] = ~fp->mode.ld[1]; /* XXX Dangerous, how to pass PHY packet to device driver */ fp->mode.common.tcode |= FWTCODE_PHY; if (firewire_debug) device_printf(fc->bdev, "%s: root_node=%d gap_count=%d\n", __func__, root_node, gap_count); fw_asyreq(fc, -1, xfer); } /* * Dump self ID. 
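 * A node's base self-ID packet carries link/gap/speed/power state and
 * ports 0-2; sequel page 0 adds ports 3-10 and sequel page 1 ports 11-15.
 * All three layouts are decoded below.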
*/ static void fw_print_sid(uint32_t sid) { union fw_self_id *s; s = (union fw_self_id *) &sid; if (s->p0.sequel) { if (s->p1.sequence_num == FW_SELF_ID_PAGE0) { printf("node:%d p3:%d p4:%d p5:%d p6:%d p7:%d" "p8:%d p9:%d p10:%d\n", s->p1.phy_id, s->p1.port3, s->p1.port4, s->p1.port5, s->p1.port6, s->p1.port7, s->p1.port8, s->p1.port9, s->p1.port10); } else if (s->p2.sequence_num == FW_SELF_ID_PAGE1) { printf("node:%d p11:%d p12:%d p13:%d p14:%d p15:%d\n", s->p2.phy_id, s->p2.port11, s->p2.port12, s->p2.port13, s->p2.port14, s->p2.port15); } else { printf("node:%d Unknown Self ID Page number %d\n", s->p1.phy_id, s->p1.sequence_num); } } else { printf("node:%d link:%d gap:%d spd:%d con:%d pwr:%d" " p0:%d p1:%d p2:%d i:%d m:%d\n", s->p0.phy_id, s->p0.link_active, s->p0.gap_count, s->p0.phy_speed, s->p0.contender, s->p0.power_class, s->p0.port0, s->p0.port1, s->p0.port2, s->p0.initiated_reset, s->p0.more_packets); } } /* * To receive self ID. */ void fw_sidrcv(struct firewire_comm *fc, uint32_t *sid, u_int len) { uint32_t *p; union fw_self_id *self_id; u_int i, j, node, c_port = 0, i_branch = 0; fc->sid_cnt = len / (sizeof(uint32_t) * 2); fc->max_node = fc->nodeid & 0x3f; CSRARC(fc, NODE_IDS) = ((uint32_t)fc->nodeid) << 16; fc->status = FWBUSCYMELECT; fc->topology_map->crc_len = 2; fc->topology_map->generation++; fc->topology_map->self_id_count = 0; fc->topology_map->node_count= 0; fc->speed_map->generation++; fc->speed_map->crc_len = 1 + (64 * 64 + 3) / 4; self_id = &fc->topology_map->self_id[0]; for (i = 0; i < fc->sid_cnt; i++) { if (sid[1] != ~sid[0]) { device_printf(fc->bdev, "%s: ERROR invalid self-id packet\n", __func__); sid += 2; continue; } *self_id = *((union fw_self_id *)sid); fc->topology_map->crc_len++; if (self_id->p0.sequel == 0) { fc->topology_map->node_count++; c_port = 0; if (firewire_debug) fw_print_sid(sid[0]); node = self_id->p0.phy_id; if (fc->max_node < node) fc->max_node = self_id->p0.phy_id; /* XXX I'm not sure this is the right speed_map */ fc->speed_map->speed[node][node] = self_id->p0.phy_speed; for (j = 0; j < node; j++) { fc->speed_map->speed[j][node] = fc->speed_map->speed[node][j] = min(fc->speed_map->speed[j][j], self_id->p0.phy_speed); } if ((fc->irm == -1 || self_id->p0.phy_id > fc->irm) && (self_id->p0.link_active && self_id->p0.contender)) fc->irm = self_id->p0.phy_id; if (self_id->p0.port0 >= 0x2) c_port++; if (self_id->p0.port1 >= 0x2) c_port++; if (self_id->p0.port2 >= 0x2) c_port++; } if (c_port > 2) i_branch += (c_port - 2); sid += 2; self_id++; fc->topology_map->self_id_count++; } /* CRC */ fc->topology_map->crc = fw_crc16( (uint32_t *)&fc->topology_map->generation, fc->topology_map->crc_len * 4); fc->speed_map->crc = fw_crc16( (uint32_t *)&fc->speed_map->generation, fc->speed_map->crc_len * 4); /* byteswap and copy to CSR */ p = (uint32_t *)fc->topology_map; for (i = 0; i <= fc->topology_map->crc_len; i++) CSRARC(fc, TOPO_MAP + i * 4) = htonl(*p++); p = (uint32_t *)fc->speed_map; CSRARC(fc, SPED_MAP) = htonl(*p++); CSRARC(fc, SPED_MAP + 4) = htonl(*p++); /* don't byte-swap uint8_t array */ bcopy(p, &CSRARC(fc, SPED_MAP + 8), (fc->speed_map->crc_len - 1) * 4); fc->max_hop = fc->max_node - i_branch; device_printf(fc->bdev, "%d nodes, maxhop <= %d %s irm(%d) %s\n", fc->max_node + 1, fc->max_hop, (fc->irm == -1) ? "Not IRM capable" : "cable IRM", fc->irm, (fc->irm == fc->nodeid) ? 
" (me) " : ""); if (try_bmr && (fc->irm != -1) && (CSRARC(fc, BUS_MGR_ID) == 0x3f)) { if (fc->irm == fc->nodeid) { fc->status = FWBUSMGRDONE; CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, fc->irm); fw_bmr(fc); } else { fc->status = FWBUSMGRELECT; callout_reset(&fc->bmr_callout, hz / 8, fw_try_bmr, fc); } } else fc->status = FWBUSMGRDONE; callout_reset(&fc->busprobe_callout, hz / 4, fw_bus_probe, fc); } /* * To probe devices on the IEEE1394 bus. */ static void fw_bus_probe(void *arg) { struct firewire_comm *fc; struct fw_device *fwdev; int s; s = splfw(); fc = arg; fc->status = FWBUSEXPLORE; /* Invalidate all devices, just after bus reset. */ if (firewire_debug) device_printf(fc->bdev, "%s:" "iterate and invalidate all nodes\n", __func__); STAILQ_FOREACH(fwdev, &fc->devices, link) if (fwdev->status != FWDEVINVAL) { fwdev->status = FWDEVINVAL; fwdev->rcnt = 0; if (firewire_debug) device_printf(fc->bdev, "%s:" "Invalidate Dev ID: %08x%08x\n", __func__, fwdev->eui.hi, fwdev->eui.lo); } else { if (firewire_debug) device_printf(fc->bdev, "%s:" "Dev ID: %08x%08x already invalid\n", __func__, fwdev->eui.hi, fwdev->eui.lo); } splx(s); wakeup(fc); } static int fw_explore_read_quads(struct fw_device *fwdev, int offset, uint32_t *quad, int length) { struct fw_xfer *xfer; uint32_t tmp; int i, error; for (i = 0; i < length; i++, offset += sizeof(uint32_t)) { xfer = fwmem_read_quad(fwdev, NULL, -1, 0xffff, 0xf0000000 | offset, &tmp, fw_xferwake); if (xfer == NULL) return (-1); fw_xferwait(xfer); if (xfer->resp == 0) quad[i] = ntohl(tmp); error = xfer->resp; fw_xfer_free(xfer); if (error) return (error); } return (0); } static int fw_explore_csrblock(struct fw_device *fwdev, int offset, int recur) { int err, i, off; struct csrdirectory *dir; struct csrreg *reg; dir = (struct csrdirectory *)&fwdev->csrrom[offset / sizeof(uint32_t)]; err = fw_explore_read_quads(fwdev, CSRROMOFF + offset, (uint32_t *)dir, 1); if (err) return (-1); offset += sizeof(uint32_t); reg = (struct csrreg *)&fwdev->csrrom[offset / sizeof(uint32_t)]; err = fw_explore_read_quads(fwdev, CSRROMOFF + offset, (uint32_t *)reg, dir->crc_len); if (err) return (-1); /* XXX check CRC */ off = CSRROMOFF + offset + sizeof(uint32_t) * (dir->crc_len - 1); if (fwdev->rommax < off) fwdev->rommax = off; if (recur == 0) return (0); for (i = 0; i < dir->crc_len; i++, offset += sizeof(uint32_t)) { if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_D) recur = 1; else if ((reg[i].key & CSRTYPE_MASK) == CSRTYPE_L) recur = 0; else continue; off = offset + reg[i].val * sizeof(uint32_t); if (off > CROMSIZE) { printf("%s: invalid offset %d\n", __FUNCTION__, off); return (-1); } err = fw_explore_csrblock(fwdev, off, recur); if (err) return (-1); } return (0); } static int fw_explore_node(struct fw_device *dfwdev) { struct firewire_comm *fc; struct fw_device *fwdev, *pfwdev, *tfwdev; uint32_t *csr; struct csrhdr *hdr; struct bus_info *binfo; int err, node; uint32_t speed_test = 0; fc = dfwdev->fc; csr = dfwdev->csrrom; node = dfwdev->dst; /* First quad */ err = fw_explore_read_quads(dfwdev, CSRROMOFF, &csr[0], 1); if (err) { dfwdev->status = FWDEVINVAL; return (-1); } hdr = (struct csrhdr *)&csr[0]; if (hdr->info_len != 4) { if (firewire_debug) device_printf(fc->bdev, "%s: node%d: wrong bus info len(%d)\n", __func__, node, hdr->info_len); dfwdev->status = FWDEVINVAL; return (-1); } /* bus info */ err = fw_explore_read_quads(dfwdev, CSRROMOFF + 0x04, &csr[1], 4); if (err) { dfwdev->status = FWDEVINVAL; return (-1); } binfo = (struct bus_info *)&csr[1]; if (binfo->bus_name != 
CSR_BUS_NAME_IEEE1394) { dfwdev->status = FWDEVINVAL; return (-1); } if (firewire_debug) device_printf(fc->bdev, "%s: node(%d) BUS INFO BLOCK:\n" "irmc(%d) cmc(%d) isc(%d) bmc(%d) pmc(%d) " "cyc_clk_acc(%d) max_rec(%d) max_rom(%d) " "generation(%d) link_spd(%d)\n", __func__, node, binfo->irmc, binfo->cmc, binfo->isc, binfo->bmc, binfo->pmc, binfo->cyc_clk_acc, binfo->max_rec, binfo->max_rom, binfo->generation, binfo->link_spd); STAILQ_FOREACH(fwdev, &fc->devices, link) if (FW_EUI64_EQUAL(fwdev->eui, binfo->eui64)) break; if (fwdev == NULL) { /* new device */ fwdev = malloc(sizeof(struct fw_device), M_FW, M_NOWAIT | M_ZERO); if (fwdev == NULL) { device_printf(fc->bdev, "%s: node%d: no memory\n", __func__, node); return (-1); } fwdev->fc = fc; fwdev->eui = binfo->eui64; fwdev->dst = dfwdev->dst; fwdev->maxrec = dfwdev->maxrec; fwdev->status = dfwdev->status; /* * Pre-1394a-2000 didn't have link_spd in * the Bus Info block, so try and use the * speed map value. * 1394a-2000 compliant devices only use * the Bus Info Block link spd value, so * ignore the speed map altogether. SWB */ if (binfo->link_spd == FWSPD_S100 /* 0 */) { device_printf(fc->bdev, "%s: " "Pre 1394a-2000 detected\n", __func__); fwdev->speed = fc->speed_map->speed[fc->nodeid][node]; } else fwdev->speed = binfo->link_spd; /* * Test this speed with a read to the CSRROM. * If it fails, slow down the speed and retry. */ while (fwdev->speed > FWSPD_S100 /* 0 */) { err = fw_explore_read_quads(fwdev, CSRROMOFF, &speed_test, 1); if (err) { device_printf(fc->bdev, "%s: fwdev->speed(%s) decremented due to negotiation\n", __func__, linkspeed[fwdev->speed]); fwdev->speed--; } else break; } /* * If the fwdev is not found in the * fc->devices TAILQ, then we will add it. */ pfwdev = NULL; STAILQ_FOREACH(tfwdev, &fc->devices, link) { if (tfwdev->eui.hi > fwdev->eui.hi || (tfwdev->eui.hi == fwdev->eui.hi && tfwdev->eui.lo > fwdev->eui.lo)) break; pfwdev = tfwdev; } if (pfwdev == NULL) STAILQ_INSERT_HEAD(&fc->devices, fwdev, link); else STAILQ_INSERT_AFTER(&fc->devices, pfwdev, fwdev, link); } else { fwdev->dst = node; fwdev->status = FWDEVINIT; /* unchanged ? */ if (bcmp(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5) == 0) { if (firewire_debug) device_printf(fc->dev, "node%d: crom unchanged\n", node); return (0); } } bzero(&fwdev->csrrom[0], CROMSIZE); /* copy first quad and bus info block */ bcopy(&csr[0], &fwdev->csrrom[0], sizeof(uint32_t) * 5); fwdev->rommax = CSRROMOFF + sizeof(uint32_t) * 4; err = fw_explore_csrblock(fwdev, 0x14, 1); /* root directory */ if (err) { if (firewire_debug) device_printf(fc->dev, "%s: explore csrblock failed err(%d)\n", __func__, err); fwdev->status = FWDEVINVAL; fwdev->csrrom[0] = 0; } return (err); } /* * Find the self_id packet for a node, ignoring sequels. 
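 * A physical node reports one to three self-ID quadlets: packet 0 carries
 * the link state, gap count, speed, power class and ports 0-2, while
 * packets with the sequel bit set extend the port list (page 0 covers
 * ports 3-10, page 1 ports 11-15, matching fw_print_sid() above).  This
 * helper returns only the base packet.  A caller that wants to know
 * whether a node can answer async requests can do what fw_explore()
 * below does:
 *
 *	fwsid = fw_find_self_id(fc, node);
 *	if (fwsid != NULL && fwsid->p0.link_active)
 *		(node is worth probing)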
*/ static union fw_self_id * fw_find_self_id(struct firewire_comm *fc, int node) { uint32_t i; union fw_self_id *s; for (i = 0; i < fc->topology_map->self_id_count; i++) { s = &fc->topology_map->self_id[i]; if (s->p0.sequel) continue; if (s->p0.phy_id == node) return s; } return 0; } static void fw_explore(struct firewire_comm *fc) { int node, err, s, i, todo, todo2, trys; char nodes[63]; struct fw_device dfwdev; union fw_self_id *fwsid; todo = 0; /* setup dummy fwdev */ dfwdev.fc = fc; dfwdev.speed = 0; dfwdev.maxrec = 8; /* 512 */ dfwdev.status = FWDEVINIT; for (node = 0; node <= fc->max_node; node++) { /* We don't probe myself and linkdown nodes */ if (node == fc->nodeid) { if (firewire_debug) device_printf(fc->bdev, "%s:" "found myself node(%d) fc->nodeid(%d) fc->max_node(%d)\n", __func__, node, fc->nodeid, fc->max_node); continue; } else if (firewire_debug) { device_printf(fc->bdev, "%s:" "node(%d) fc->max_node(%d) found\n", __func__, node, fc->max_node); } fwsid = fw_find_self_id(fc, node); if (!fwsid || !fwsid->p0.link_active) { if (firewire_debug) device_printf(fc->bdev, "%s: node%d: link down\n", __func__, node); continue; } nodes[todo++] = node; } s = splfw(); for (trys = 0; todo > 0 && trys < 3; trys++) { todo2 = 0; for (i = 0; i < todo; i++) { dfwdev.dst = nodes[i]; err = fw_explore_node(&dfwdev); if (err) nodes[todo2++] = nodes[i]; if (firewire_debug) device_printf(fc->bdev, "%s: node %d, err = %d\n", __func__, node, err); } todo = todo2; } splx(s); } static void fw_bus_probe_thread(void *arg) { struct firewire_comm *fc; fc = arg; mtx_lock(&fc->wait_lock); while (fc->status != FWBUSDETACH) { if (fc->status == FWBUSEXPLORE) { mtx_unlock(&fc->wait_lock); fw_explore(fc); fc->status = FWBUSEXPDONE; if (firewire_debug) printf("bus_explore done\n"); fw_attach_dev(fc); mtx_lock(&fc->wait_lock); } msleep((void *)fc, &fc->wait_lock, PWAIT|PCATCH, "-", 0); } mtx_unlock(&fc->wait_lock); kproc_exit(0); } /* * To attach sub-devices layer onto IEEE1394 bus. */ static void fw_attach_dev(struct firewire_comm *fc) { struct fw_device *fwdev, *next; int i, err; device_t *devlistp; int devcnt; struct firewire_dev_comm *fdc; for (fwdev = STAILQ_FIRST(&fc->devices); fwdev != NULL; fwdev = next) { next = STAILQ_NEXT(fwdev, link); if (fwdev->status == FWDEVINIT) { fwdev->status = FWDEVATTACHED; } else if (fwdev->status == FWDEVINVAL) { fwdev->rcnt++; if (firewire_debug) device_printf(fc->bdev, "%s:" "fwdev->rcnt(%d), hold_count(%d)\n", __func__, fwdev->rcnt, hold_count); if (fwdev->rcnt > hold_count) { /* * Remove devices which have not been seen * for a while. */ STAILQ_REMOVE(&fc->devices, fwdev, fw_device, link); free(fwdev, M_FW); } } } err = device_get_children(fc->bdev, &devlistp, &devcnt); if (err == 0) { for (i = 0; i < devcnt; i++) { if (device_get_state(devlistp[i]) >= DS_ATTACHED) { fdc = device_get_softc(devlistp[i]); if (fdc->post_explore != NULL) fdc->post_explore(fdc); } } free(devlistp, M_TEMP); } return; } /* * To allocate unique transaction label. 
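 * IEEE 1394 async transactions carry a 6-bit transaction label that pairs
 * a response with its request, so a label must not be reused towards the
 * same destination while a transfer still holds it.  The allocator below
 * tries only the label following the last one issued for that destination
 * (fc->last_tlabel[dst]) and fails with -1 if a transfer to the same node
 * is still queued under it.  The label is packed together with the 2-bit
 * retry code into the tlrt header field, the encoding assumed throughout
 * this file:
 *
 *	xfer->send.hdr.mode.hdr.tlrt = tl << 2;	(tl in bits 7:2, rt in 1:0)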
*/ static int fw_get_tlabel(struct firewire_comm *fc, struct fw_xfer *xfer) { u_int dst, new_tlabel; struct fw_xfer *txfer; int s; dst = xfer->send.hdr.mode.hdr.dst & 0x3f; s = splfw(); mtx_lock(&fc->tlabel_lock); new_tlabel = (fc->last_tlabel[dst] + 1) & 0x3f; STAILQ_FOREACH(txfer, &fc->tlabels[new_tlabel], tlabel) if ((txfer->send.hdr.mode.hdr.dst & 0x3f) == dst) break; if (txfer == NULL) { fc->last_tlabel[dst] = new_tlabel; STAILQ_INSERT_TAIL(&fc->tlabels[new_tlabel], xfer, tlabel); mtx_unlock(&fc->tlabel_lock); splx(s); xfer->tl = new_tlabel; xfer->send.hdr.mode.hdr.tlrt = new_tlabel << 2; if (firewire_debug > 1) printf("fw_get_tlabel: dst=%d tl=%d\n", dst, new_tlabel); return (new_tlabel); } mtx_unlock(&fc->tlabel_lock); splx(s); if (firewire_debug > 1) printf("fw_get_tlabel: no free tlabel\n"); return (-1); } static void fw_rcv_copy(struct fw_rcv_buf *rb) { struct fw_pkt *pkt; u_char *p; struct tcode_info *tinfo; u_int res, i, len, plen; rb->xfer->recv.spd = rb->spd; pkt = (struct fw_pkt *)rb->vec->iov_base; tinfo = &rb->fc->tcode[pkt->mode.hdr.tcode]; /* Copy header */ p = (u_char *)&rb->xfer->recv.hdr; bcopy(rb->vec->iov_base, p, tinfo->hdr_len); rb->vec->iov_base = (u_char *)rb->vec->iov_base + tinfo->hdr_len; rb->vec->iov_len -= tinfo->hdr_len; /* Copy payload */ p = (u_char *)rb->xfer->recv.payload; res = rb->xfer->recv.pay_len; /* special handling for RRESQ */ if (pkt->mode.hdr.tcode == FWTCODE_RRESQ && p != NULL && res >= sizeof(uint32_t)) { *(uint32_t *)p = pkt->mode.rresq.data; rb->xfer->recv.pay_len = sizeof(uint32_t); return; } if ((tinfo->flag & FWTI_BLOCK_ASY) == 0) return; plen = pkt->mode.rresb.len; for (i = 0; i < rb->nvec; i++, rb->vec++) { len = MIN(rb->vec->iov_len, plen); if (res < len) { device_printf(rb->fc->bdev, "%s:" " rcv buffer(%d) is %d bytes short.\n", __func__, rb->xfer->recv.pay_len, len - res); len = res; } bcopy(rb->vec->iov_base, p, len); p += len; res -= len; plen -= len; if (res == 0 || plen == 0) break; } rb->xfer->recv.pay_len -= res; } /* * Generic packet receiving process. 
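 * Dispatch is keyed on the transaction code of the incoming packet:
 * response tcodes (WRES/RRESQ/RRESB/LRES) are matched back to their
 * originating fw_xfer with fw_tl2xfer(), using the sender's node ID and
 * transaction label, while request tcodes (WREQQ/WREQB/RREQQ/RREQB/LREQ)
 * are routed via fw_bindlookup() to whoever bound the target CSR address.
 * A request for an address nobody is bound to is answered with an
 * RESP_ADDRESS_ERROR response built inline below.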
*/ void fw_rcv(struct fw_rcv_buf *rb) { struct fw_pkt *fp, *resfp; struct fw_bind *bind; int tcode; int oldstate; #if 0 int i, len; { uint32_t *qld; int i; qld = (uint32_t *)buf; printf("spd %d len:%d\n", spd, len); for (i = 0; i <= len && i < 32; i+= 4) { printf("0x%08x ", ntohl(qld[i/4])); if ((i % 16) == 15) printf("\n"); } if ((i % 16) != 15) printf("\n"); } #endif fp = (struct fw_pkt *)rb->vec[0].iov_base; tcode = fp->mode.common.tcode; switch (tcode) { case FWTCODE_WRES: case FWTCODE_RRESQ: case FWTCODE_RRESB: case FWTCODE_LRES: rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tcode); if (rb->xfer == NULL) { device_printf(rb->fc->bdev, "%s: unknown response " "%s(%x) src=0x%x tl=0x%x rt=%d data=0x%x\n", __func__, tcode_str[tcode], tcode, fp->mode.hdr.src, fp->mode.hdr.tlrt >> 2, fp->mode.hdr.tlrt & 3, fp->mode.rresq.data); #if 0 printf("try ad-hoc work around!!\n"); rb->xfer = fw_tl2xfer(rb->fc, fp->mode.hdr.src, (fp->mode.hdr.tlrt >> 2)^3); if (rb->xfer == NULL) { printf("no use...\n"); return; } #else return; #endif } fw_rcv_copy(rb); if (rb->xfer->recv.hdr.mode.wres.rtcode != RESP_CMP) rb->xfer->resp = EIO; else rb->xfer->resp = 0; /* make sure the packet is drained in AT queue */ oldstate = rb->xfer->flag; rb->xfer->flag = FWXF_RCVD; switch (oldstate) { case FWXF_SENT: fw_xfer_done(rb->xfer); break; case FWXF_START: #if 0 if (firewire_debug) printf("not sent yet tl=%x\n", rb->xfer->tl); #endif break; default: device_printf(rb->fc->bdev, "%s: " "unexpected flag 0x%02x\n", __func__, rb->xfer->flag); } return; case FWTCODE_WREQQ: case FWTCODE_WREQB: case FWTCODE_RREQQ: case FWTCODE_RREQB: case FWTCODE_LREQ: bind = fw_bindlookup(rb->fc, fp->mode.rreqq.dest_hi, fp->mode.rreqq.dest_lo); if (bind == NULL) { device_printf(rb->fc->bdev, "%s: " "Unknown service addr 0x%04x:0x%08x %s(%x)" " src=0x%x data=%x\n", __func__, fp->mode.wreqq.dest_hi, fp->mode.wreqq.dest_lo, tcode_str[tcode], tcode, fp->mode.hdr.src, ntohl(fp->mode.wreqq.data)); if (rb->fc->status == FWBUSINIT) { device_printf(rb->fc->bdev, "%s: cannot respond(bus reset)!\n", __func__); return; } rb->xfer = fw_xfer_alloc(M_FWXFER); if (rb->xfer == NULL) { return; } rb->xfer->send.spd = rb->spd; rb->xfer->send.pay_len = 0; resfp = &rb->xfer->send.hdr; switch (tcode) { case FWTCODE_WREQQ: case FWTCODE_WREQB: resfp->mode.hdr.tcode = FWTCODE_WRES; break; case FWTCODE_RREQQ: resfp->mode.hdr.tcode = FWTCODE_RRESQ; break; case FWTCODE_RREQB: resfp->mode.hdr.tcode = FWTCODE_RRESB; break; case FWTCODE_LREQ: resfp->mode.hdr.tcode = FWTCODE_LRES; break; } resfp->mode.hdr.dst = fp->mode.hdr.src; resfp->mode.hdr.tlrt = fp->mode.hdr.tlrt; resfp->mode.hdr.pri = fp->mode.hdr.pri; resfp->mode.rresb.rtcode = RESP_ADDRESS_ERROR; resfp->mode.rresb.extcode = 0; resfp->mode.rresb.len = 0; /* rb->xfer->hand = fw_xferwake; */ rb->xfer->hand = fw_xfer_free; if (fw_asyreq(rb->fc, -1, rb->xfer)) fw_xfer_free(rb->xfer); return; } #if 0 len = 0; for (i = 0; i < rb->nvec; i++) len += rb->vec[i].iov_len; #endif rb->xfer = STAILQ_FIRST(&bind->xferlist); if (rb->xfer == NULL) { device_printf(rb->fc->bdev, "%s: " "Discard a packet for this bind.\n", __func__); return; } STAILQ_REMOVE_HEAD(&bind->xferlist, link); fw_rcv_copy(rb); rb->xfer->hand(rb->xfer); return; #if 0 /* shouldn't happen ?? 
or for GASP */
	case FWTCODE_STREAM:
	{
		struct fw_xferq *xferq;

		/*
		 * XXX bitrotted: sub, vec, spd, s and sc are no longer
		 * declared here; this block stays disabled under #if 0.
		 */
		xferq = rb->fc->ir[sub];
#if 0
		printf("stream rcv dma %d len %d off %d spd %d\n",
		    sub, len, off, spd);
#endif
		if (xferq->queued >= xferq->maxq) {
			printf("receive queue is full\n");
			return;
		}
		/* XXX get xfer from xfer queue, we don't need copy for
			per packet mode */
		rb->xfer = fw_xfer_alloc_buf(M_FWXFER, 0, /* XXX */
		    vec[0].iov_len);
		if (rb->xfer == NULL)
			return;
		fw_rcv_copy(rb);
		s = splfw();
		xferq->queued++;
		STAILQ_INSERT_TAIL(&xferq->q, rb->xfer, link);
		splx(s);
		sc = device_get_softc(rb->fc->bdev);
		if (SEL_WAITING(&xferq->rsel))
			selwakeuppri(&xferq->rsel, FWPRI);
		if (xferq->flag & FWXFERQ_WAKEUP) {
			xferq->flag &= ~FWXFERQ_WAKEUP;
			wakeup((caddr_t)xferq);
		}
		if (xferq->flag & FWXFERQ_HANDLER) {
			xferq->hand(xferq);
		}
		return;
		break;
	}
#endif
	default:
		device_printf(rb->fc->bdev, "%s: unknown tcode %d\n",
		    __func__, tcode);
		break;
	}
}

/*
 * Post-process the bus manager election.
 */
static void
fw_try_bmr_callback(struct fw_xfer *xfer)
{
	struct firewire_comm *fc;
	int bmr;

	if (xfer == NULL)
		return;
	fc = xfer->fc;
	if (xfer->resp != 0)
		goto error;
	if (xfer->recv.payload == NULL)
		goto error;
	if (xfer->recv.hdr.mode.lres.rtcode != FWRCODE_COMPLETE)
		goto error;

	bmr = ntohl(xfer->recv.payload[0]);
	if (bmr == 0x3f)
		bmr = fc->nodeid;

	CSRARC(fc, BUS_MGR_ID) = fc->set_bmr(fc, bmr & 0x3f);
	fw_xfer_free_buf(xfer);
	fw_bmr(fc);
	return;

error:
	device_printf(fc->bdev, "bus manager election failed\n");
	fw_xfer_free_buf(xfer);
}

/*
 * Become a candidate in the bus manager election.
 */
static void
fw_try_bmr(void *arg)
{
	struct fw_xfer *xfer;
	struct firewire_comm *fc = arg;
	struct fw_pkt *fp;
	int err = 0;

	xfer = fw_xfer_alloc_buf(M_FWXFER, 8, 4);
	if (xfer == NULL)
		return;
	xfer->send.spd = 0;
	fc->status = FWBUSMGRELECT;

	fp = &xfer->send.hdr;
	fp->mode.lreq.dest_hi = 0xffff;
	fp->mode.lreq.tlrt = 0;
	fp->mode.lreq.tcode = FWTCODE_LREQ;
	fp->mode.lreq.pri = 0;
	fp->mode.lreq.src = 0;
	fp->mode.lreq.len = 8;
	fp->mode.lreq.extcode = EXTCODE_CMP_SWAP;
	fp->mode.lreq.dst = FWLOCALBUS | fc->irm;
	fp->mode.lreq.dest_lo = 0xf0000000 | BUS_MGR_ID;
	xfer->send.payload[0] = htonl(0x3f);
	xfer->send.payload[1] = htonl(fc->nodeid);
	xfer->hand = fw_try_bmr_callback;

	err = fw_asyreq(fc, -1, xfer);
	if (err) {
		fw_xfer_free_buf(xfer);
		return;
	}
	return;
}

#ifdef FW_VMACCESS
/*
 * Software implementation for physical memory block access.
 * XXX: Too slow, useful for debug purposes only.
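 * To be reachable this handler must be registered as an address-space
 * bind whose queued receive xfers complete into fw_vmaccess; a rough
 * sketch only, with a hypothetical address window (not part of this
 * diff):
 *
 *	static struct fw_bind vmb;
 *	vmb.start = 0x0;		(hypothetical window start)
 *	vmb.end = 0xffffffff;		(hypothetical window end)
 *	STAILQ_INIT(&vmb.xferlist);	(queue xfers whose hand is
 *	fw_bindadd(fc, &vmb);		 fw_vmaccess before binding)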
 */
static void
fw_vmaccess(struct fw_xfer *xfer)
{
	struct fw_pkt *rfp, *sfp = NULL;
	uint32_t *ld = (uint32_t *)xfer->recv.buf;

	printf("vmaccess spd:%2x len:%03x data:%08x %08x %08x %08x\n",
	    xfer->spd, xfer->recv.len, ntohl(ld[0]), ntohl(ld[1]),
	    ntohl(ld[2]), ntohl(ld[3]));
	printf("vmaccess data:%08x %08x %08x %08x\n",
	    ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
	if (xfer->resp != 0) {
		fw_xfer_free(xfer);
		return;
	}
	if (xfer->recv.buf == NULL) {
		fw_xfer_free(xfer);
		return;
	}
	rfp = (struct fw_pkt *)xfer->recv.buf;
	switch (rfp->mode.hdr.tcode) {
	/* XXX need fix for 64bit arch */
	case FWTCODE_WREQB:
		xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
		xfer->send.len = 12;
		sfp = (struct fw_pkt *)xfer->send.buf;
		bcopy(rfp->mode.wreqb.payload,
		    (caddr_t)ntohl(rfp->mode.wreqb.dest_lo),
		    ntohs(rfp->mode.wreqb.len));
		sfp->mode.wres.tcode = FWTCODE_WRES;
		sfp->mode.wres.rtcode = 0;
		break;
	case FWTCODE_WREQQ:
		xfer->send.buf = malloc(12, M_FW, M_NOWAIT);
		xfer->send.len = 12;
		sfp = (struct fw_pkt *)xfer->send.buf;
		sfp->mode.wres.tcode = FWTCODE_WRES;
		*((uint32_t *)(ntohl(rfp->mode.wreqq.dest_lo))) =
		    rfp->mode.wreqq.data;
		sfp->mode.wres.rtcode = 0;
		break;
	case FWTCODE_RREQB:
		xfer->send.buf = malloc(16 + rfp->mode.rreqb.len,
		    M_FW, M_NOWAIT);
		xfer->send.len = 16 + ntohs(rfp->mode.rreqb.len);
		sfp = (struct fw_pkt *)xfer->send.buf;
		bcopy((caddr_t)ntohl(rfp->mode.rreqb.dest_lo),
		    sfp->mode.rresb.payload, ntohs(rfp->mode.rreqb.len));
		sfp->mode.rresb.tcode = FWTCODE_RRESB;
		sfp->mode.rresb.len = rfp->mode.rreqb.len;
		sfp->mode.rresb.rtcode = 0;
		sfp->mode.rresb.extcode = 0;
		break;
	case FWTCODE_RREQQ:
		xfer->send.buf = malloc(16, M_FW, M_NOWAIT);
		xfer->send.len = 16;
		sfp = (struct fw_pkt *)xfer->send.buf;
		sfp->mode.rresq.data =
		    *(uint32_t *)(ntohl(rfp->mode.rreqq.dest_lo));
		sfp->mode.wres.tcode = FWTCODE_RRESQ;
		sfp->mode.rresb.rtcode = 0;
		break;
	default:
		fw_xfer_free(xfer);
		return;
	}
	sfp->mode.hdr.dst = rfp->mode.hdr.src;
	xfer->dst = ntohs(rfp->mode.hdr.src);
	xfer->hand = fw_xfer_free;
	sfp->mode.hdr.tlrt = rfp->mode.hdr.tlrt;
	sfp->mode.hdr.pri = 0;

	fw_asyreq(xfer->fc, -1, xfer);
	return;
}
#endif

/*
 * CRC16 checksum for IEEE 1394 register blocks.
 */
uint16_t
fw_crc16(uint32_t *ptr, uint32_t len)
{
	uint32_t i, sum, crc = 0;
	int shift;

	len = (len + 3) & ~3;
	for (i = 0; i < len; i += 4) {
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (ptr[i/4] >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
		}
		crc &= 0xffff;
	}
	return ((uint16_t)crc);
}

/*
 * Find the root node.  If it is not cycle master capable,
 * then we should override this and become the cycle master.
 */
static int
fw_bmr(struct firewire_comm *fc)
{
	struct fw_device fwdev;
	union fw_self_id *self_id;
	int cmstr;
	uint32_t quad;

	/* Check to see if the current root node is cycle master capable */
	self_id = fw_find_self_id(fc, fc->max_node);
	if (fc->max_node > 0) {
		/* XXX check cmc bit of businfo block rather than contender */
		if (self_id->p0.link_active && self_id->p0.contender)
			cmstr = fc->max_node;
		else {
			device_printf(fc->bdev,
			    "root node is not cycle master capable\n");
			/* XXX shall we be the cycle master? */
			cmstr = fc->nodeid;
			/* XXX need bus reset */
		}
	} else
		cmstr = -1;

	device_printf(fc->bdev, "bus manager %d %s\n",
	    CSRARC(fc, BUS_MGR_ID),
	    (CSRARC(fc, BUS_MGR_ID) == fc->nodeid) ?
"(me)" : ""); if (CSRARC(fc, BUS_MGR_ID) != fc->nodeid) { /* We are not the bus manager */ return (0); } /* Optimize gapcount */ if (fc->max_hop <= MAX_GAPHOP) fw_phy_config(fc, cmstr, gap_cnt[fc->max_hop]); /* If we are the cycle master, nothing to do */ if (cmstr == fc->nodeid || cmstr == -1) return 0; /* Bus probe has not finished, make dummy fwdev for cmstr */ bzero(&fwdev, sizeof(fwdev)); fwdev.fc = fc; fwdev.dst = cmstr; fwdev.speed = 0; fwdev.maxrec = 8; /* 512 */ fwdev.status = FWDEVINIT; /* Set cmstr bit on the cycle master */ quad = htonl(1 << 8); fwmem_write_quad(&fwdev, NULL, 0/*spd*/, 0xffff, 0xf0000000 | STATE_SET, &quad, fw_asy_callback_free); return 0; } int fw_open_isodma(struct firewire_comm *fc, int tx) { struct fw_xferq **xferqa; struct fw_xferq *xferq; int i; if (tx) xferqa = &fc->it[0]; else xferqa = &fc->ir[0]; FW_GLOCK(fc); for (i = 0; i < fc->nisodma; i++) { xferq = xferqa[i]; if ((xferq->flag & FWXFERQ_OPEN) == 0) { xferq->flag |= FWXFERQ_OPEN; break; } } if (i == fc->nisodma) { printf("no free dma channel (tx=%d)\n", tx); i = -1; } FW_GUNLOCK(fc); return (i); } static int fw_modevent(module_t mode, int type, void *data) { int err = 0; static eventhandler_tag fwdev_ehtag = NULL; switch (type) { case MOD_LOAD: firewire_devclass = devclass_create("firewire"); fwdev_ehtag = EVENTHANDLER_REGISTER(dev_clone, fwdev_clone, 0, 1000); break; case MOD_UNLOAD: if (fwdev_ehtag != NULL) EVENTHANDLER_DEREGISTER(dev_clone, fwdev_ehtag); break; case MOD_SHUTDOWN: break; default: return (EOPNOTSUPP); } return (err); } DRIVER_MODULE(firewire, fwohci, firewire_driver, fw_modevent, NULL); MODULE_VERSION(firewire, 1); diff --git a/sys/dev/firewire/fwohci_pci.c b/sys/dev/firewire/fwohci_pci.c index f16ce1e26565..603915a1e637 100644 --- a/sys/dev/firewire/fwohci_pci.c +++ b/sys/dev/firewire/fwohci_pci.c @@ -1,470 +1,470 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 Hidetoshi Shimokawa * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the acknowledgement as bellow: * * This product includes software developed by K. Kobayashi and H. SHimokawa * * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include #define BOUNCE_BUFFER_TEST 0 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int fwohci_pci_attach(device_t self); static int fwohci_pci_detach(device_t self); /* * The probe routine. */ static int fwohci_pci_probe(device_t dev) { uint32_t id; id = pci_get_devid(dev); if (id == (FW_VENDORID_NATSEMI | FW_DEVICE_CS4210)) { device_set_desc(dev, "National Semiconductor CS4210"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD861)) { device_set_desc(dev, "NEC uPD72861"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD871)) { device_set_desc(dev, "NEC uPD72871/2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72870)) { device_set_desc(dev, "NEC uPD72870"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72873)) { device_set_desc(dev, "NEC uPD72873"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_NEC | FW_DEVICE_UPD72874)) { device_set_desc(dev, "NEC uPD72874"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_SIS | FW_DEVICE_7007)) { /* It has no real identifier, using device id. */ device_set_desc(dev, "SiS 7007"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB22)) { device_set_desc(dev, "Texas Instruments TSB12LV22"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB23)) { device_set_desc(dev, "Texas Instruments TSB12LV23"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB26)) { device_set_desc(dev, "Texas Instruments TSB12LV26"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43)) { device_set_desc(dev, "Texas Instruments TSB43AA22"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43A)) { device_set_desc(dev, "Texas Instruments TSB43AB22/A"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43AB21)) { device_set_desc(dev, "Texas Instruments TSB43AB21/A/AI/A-EP"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB43AB23)) { device_set_desc(dev, "Texas Instruments TSB43AB23"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TITSB82AA2)) { device_set_desc(dev, "Texas Instruments TSB82AA2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4450)) { device_set_desc(dev, "Texas Instruments PCI4450"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4410A)) { device_set_desc(dev, "Texas Instruments PCI4410A"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_TI | FW_DEVICE_TIPCI4451)) { device_set_desc(dev, "Texas Instruments PCI4451"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_SONY | FW_DEVICE_CXD1947)) { device_printf(dev, "Sony i.LINK (CXD1947) not supported\n"); return ENXIO; } if (id == (FW_VENDORID_SONY | FW_DEVICE_CXD3222)) { device_set_desc(dev, "Sony i.LINK (CXD3222)"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_VIA | 
FW_DEVICE_VT6306)) { device_set_desc(dev, "VIA Fire II (VT6306)"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_RICOH | FW_DEVICE_R5C551)) { device_set_desc(dev, "Ricoh R5C551"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_RICOH | FW_DEVICE_R5C552)) { device_set_desc(dev, "Ricoh R5C552"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_APPLE | FW_DEVICE_PANGEA)) { device_set_desc(dev, "Apple Pangea"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_APPLE | FW_DEVICE_UNINORTH2)) { device_set_desc(dev, "Apple UniNorth 2"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_LUCENT | FW_DEVICE_FW322)) { device_set_desc(dev, "Lucent FW322/323"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_INTEL | FW_DEVICE_82372FB)) { device_set_desc(dev, "Intel 82372FB"); return BUS_PROBE_DEFAULT; } if (id == (FW_VENDORID_ADAPTEC | FW_DEVICE_AIC5800)) { device_set_desc(dev, "Adaptec AHA-894x/AIC-5800"); return BUS_PROBE_DEFAULT; } if (pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_FW && pci_get_progif(dev) == PCI_INTERFACE_OHCI) { if (bootverbose) device_printf(dev, "vendor=%x, dev=%x\n", pci_get_vendor(dev), pci_get_device(dev)); device_set_desc(dev, "1394 Open Host Controller Interface"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int fwohci_pci_init(device_t self) { int olatency, latency, ocache_line, cache_line; uint16_t cmd; cmd = pci_read_config(self, PCIR_COMMAND, 2); cmd |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; #if 1 /* for broken hardware */ cmd &= ~PCIM_CMD_MWRICEN; #endif pci_write_config(self, PCIR_COMMAND, cmd, 2); latency = olatency = pci_read_config(self, PCIR_LATTIMER, 1); #define DEF_LATENCY 0x20 if (olatency < DEF_LATENCY) { latency = DEF_LATENCY; pci_write_config(self, PCIR_LATTIMER, latency, 1); } cache_line = ocache_line = pci_read_config(self, PCIR_CACHELNSZ, 1); #define DEF_CACHE_LINE 8 if (ocache_line < DEF_CACHE_LINE) { cache_line = DEF_CACHE_LINE; pci_write_config(self, PCIR_CACHELNSZ, cache_line, 1); } if (firewire_debug) { device_printf(self, "latency timer %d -> %d.\n", olatency, latency); device_printf(self, "cache size %d -> %d.\n", ocache_line, cache_line); } return 0; } static int fwohci_pci_attach(device_t self) { fwohci_softc_t *sc = device_get_softc(self); int err; int rid; #if 0 if (bootverbose) firewire_debug = bootverbose; #endif mtx_init(FW_GMTX(&sc->fc), "firewire", NULL, MTX_DEF); fwohci_pci_init(self); rid = PCI_CBMEM; sc->bsr = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->bsr) { device_printf(self, "Could not map memory\n"); return ENXIO; } sc->bst = rman_get_bustag(sc->bsr); sc->bsh = rman_get_bushandle(sc->bsr); rid = 0; sc->irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(self, "Could not allocate irq\n"); fwohci_pci_detach(self); return ENXIO; } err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, (driver_intr_t *) fwohci_intr, sc, &sc->ih); if (err) { device_printf(self, "Could not setup irq, %d\n", err); fwohci_pci_detach(self); return ENXIO; } err = bus_dma_tag_create( /*parent*/bus_get_dma_tag(self), /*alignment*/1, /*boundary*/0, #if BOUNCE_BUFFER_TEST /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, #else /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, #endif /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/0x100000, /*nsegments*/0x20, /*maxsegsz*/0x8000, /*flags*/BUS_DMA_ALLOCNOW, /*lockfunc*/busdma_lock_mutex, /*lockarg*/FW_GMTX(&sc->fc), &sc->fc.dmat); if 
(err != 0) { device_printf(self, "fwohci_pci_attach: Could not allocate DMA " "tag - error %d\n", err); fwohci_pci_detach(self); return (ENOMEM); } err = fwohci_init(sc, self); if (err != 0) { device_printf(self, "fwohci_init failed with err=%d\n", err); fwohci_pci_detach(self); return EIO; } /* probe and attach a child device(firewire) */ - bus_generic_probe(self); + bus_identify_children(self); bus_generic_attach(self); return 0; } static int fwohci_pci_detach(device_t self) { fwohci_softc_t *sc = device_get_softc(self); int s; s = splfw(); if (sc->bsr) fwohci_stop(sc, self); bus_generic_detach(self); if (sc->fc.bdev) { device_delete_child(self, sc->fc.bdev); sc->fc.bdev = NULL; } /* disable interrupts that might have been switched on */ if (sc->bst && sc->bsh) bus_space_write_4(sc->bst, sc->bsh, FWOHCI_INTMASKCLR, OHCI_INT_EN); if (sc->irq_res) { int err; if (sc->ih) { err = bus_teardown_intr(self, sc->irq_res, sc->ih); if (err) device_printf(self, "Could not tear down irq, %d\n", err); sc->ih = NULL; } bus_release_resource(self, SYS_RES_IRQ, 0, sc->irq_res); sc->irq_res = NULL; } if (sc->bsr) { bus_release_resource(self, SYS_RES_MEMORY, PCI_CBMEM, sc->bsr); sc->bsr = NULL; sc->bst = 0; sc->bsh = 0; } fwohci_detach(sc, self); mtx_destroy(FW_GMTX(&sc->fc)); splx(s); return 0; } static int fwohci_pci_suspend(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); int err; device_printf(dev, "fwohci_pci_suspend\n"); err = bus_generic_suspend(dev); if (err) return err; fwohci_stop(sc, dev); return 0; } static int fwohci_pci_resume(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); fwohci_pci_init(dev); fwohci_resume(sc, dev); return 0; } static int fwohci_pci_shutdown(device_t dev) { fwohci_softc_t *sc = device_get_softc(dev); bus_generic_shutdown(dev); fwohci_stop(sc, dev); return 0; } static device_t fwohci_pci_add_child(device_t dev, u_int order, const char *name, int unit) { struct fwohci_softc *sc; device_t child; int err = 0; sc = (struct fwohci_softc *)device_get_softc(dev); child = device_add_child(dev, name, unit); if (child == NULL) return (child); sc->fc.bdev = child; device_set_ivars(child, &sc->fc); err = device_probe_and_attach(child); if (err) { device_printf(dev, "probe_and_attach failed with err=%d\n", err); fwohci_pci_detach(dev); device_delete_child(dev, child); return NULL; } /* XXX * Clear the bus reset event flag to start transactions even when * interrupt is disabled during the boot process. 
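 * While cold is set interrupts are not delivered yet, so any pending OHCI
 * events (notably the initial bus reset) are drained synchronously with
 * fwohci_poll().  The DELAY(250) waits out roughly two 125us isochronous
 * cycle periods, the "2 cycles" noted below, before polling.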
*/ if (cold) { int s; DELAY(250); /* 2 cycles */ s = splfw(); fwohci_poll(&sc->fc, 0, -1); splx(s); } return (child); } static device_method_t fwohci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fwohci_pci_probe), DEVMETHOD(device_attach, fwohci_pci_attach), DEVMETHOD(device_detach, fwohci_pci_detach), DEVMETHOD(device_suspend, fwohci_pci_suspend), DEVMETHOD(device_resume, fwohci_pci_resume), DEVMETHOD(device_shutdown, fwohci_pci_shutdown), /* Bus interface */ DEVMETHOD(bus_add_child, fwohci_pci_add_child), DEVMETHOD_END }; static driver_t fwohci_driver = { "fwohci", fwohci_methods, sizeof(fwohci_softc_t), }; #ifdef FWOHCI_MODULE MODULE_DEPEND(fwohci, firewire, 1, 1, 1); #endif DRIVER_MODULE(fwohci, pci, fwohci_driver, 0, 0); diff --git a/sys/dev/firmware/arm/scmi.c b/sys/dev/firmware/arm/scmi.c index ef4bcbf13996..afabdcf9917b 100644 --- a/sys/dev/firmware/arm/scmi.c +++ b/sys/dev/firmware/arm/scmi.c @@ -1,651 +1,651 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Ruslan Bukin * Copyright (c) 2023 Arm Ltd * * This work was supported by Innovate UK project 105694, "Digital Security * by Design (DSbD) Technology Platform Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "scmi.h" #include "scmi_protocols.h" #define SCMI_MAX_TOKEN 1024 #define SCMI_HDR_TOKEN_S 18 #define SCMI_HDR_TOKEN_BF (0x3fff) #define SCMI_HDR_TOKEN_M (SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S) #define SCMI_HDR_PROTOCOL_ID_S 10 #define SCMI_HDR_PROTOCOL_ID_BF (0xff) #define SCMI_HDR_PROTOCOL_ID_M \ (SCMI_HDR_PROTOCOL_ID_BF << SCMI_HDR_PROTOCOL_ID_S) #define SCMI_HDR_MESSAGE_TYPE_S 8 #define SCMI_HDR_MESSAGE_TYPE_BF (0x3) #define SCMI_HDR_MESSAGE_TYPE_M \ (SCMI_HDR_MESSAGE_TYPE_BF << SCMI_HDR_MESSAGE_TYPE_S) #define SCMI_HDR_MESSAGE_ID_S 0 #define SCMI_HDR_MESSAGE_ID_BF (0xff) #define SCMI_HDR_MESSAGE_ID_M \ (SCMI_HDR_MESSAGE_ID_BF << SCMI_HDR_MESSAGE_ID_S) #define SCMI_MSG_TYPE_CMD 0 #define SCMI_MSG_TYPE_DRESP 2 #define SCMI_MSG_TYPE_NOTIF 3 #define SCMI_MSG_TYPE_CHECK(_h, _t) \ ((((_h) & SCMI_HDR_MESSAGE_TYPE_M) >> SCMI_HDR_MESSAGE_TYPE_S) == (_t)) #define SCMI_IS_MSG_TYPE_NOTIF(h) \ SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_NOTIF) #define SCMI_IS_MSG_TYPE_DRESP(h) \ SCMI_MSG_TYPE_CHECK((h), SCMI_MSG_TYPE_DRESP) #define SCMI_MSG_TOKEN(_hdr) \ (((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S) struct scmi_req { int cnt; bool timed_out; bool use_polling; bool done; struct mtx mtx; LIST_ENTRY(scmi_req) next; int protocol_id; int message_id; int token; uint32_t header; struct scmi_msg msg; }; #define buf_to_msg(b) __containerof((b), struct scmi_msg, payld) #define msg_to_req(m) __containerof((m), struct scmi_req, msg) #define buf_to_req(b) msg_to_req(buf_to_msg(b)) LIST_HEAD(reqs_head, scmi_req); struct scmi_reqs_pool { struct mtx mtx; struct reqs_head head; }; BITSET_DEFINE(_scmi_tokens, SCMI_MAX_TOKEN); LIST_HEAD(inflight_head, scmi_req); #define REQHASH(_sc, _tk) \ (&((_sc)->trs->inflight_ht[(_tk) & (_sc)->trs->inflight_mask])) struct scmi_transport { unsigned long next_id; struct _scmi_tokens avail_tokens; struct inflight_head *inflight_ht; unsigned long inflight_mask; struct scmi_reqs_pool *chans[SCMI_CHAN_MAX]; struct mtx mtx; }; static int scmi_transport_init(struct scmi_softc *); static void scmi_transport_cleanup(struct scmi_softc *); static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int); static void scmi_reqs_pool_free(struct scmi_reqs_pool *); static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan); static void scmi_req_free_unlocked(struct scmi_softc *, enum scmi_chan, struct scmi_req *); static void scmi_req_get(struct scmi_softc *, struct scmi_req *); static void scmi_req_put(struct scmi_softc *, struct scmi_req *); static int scmi_token_pick(struct scmi_softc *); static void scmi_token_release_unlocked(struct scmi_softc *, int); static int scmi_req_track_inflight(struct scmi_softc *, struct scmi_req *); static int scmi_req_drop_inflight(struct scmi_softc *, struct scmi_req *); static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t); static int scmi_wait_for_response(struct scmi_softc *, struct scmi_req *, void **); static void scmi_process_response(struct scmi_softc *, uint32_t); int scmi_attach(device_t dev) { struct scmi_softc *sc; phandle_t node; int error; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (node == -1) return (ENXIO); simplebus_init(dev, node); error = scmi_transport_init(sc); if (error != 0) return (error); device_printf(dev, "Transport reply timeout initialized to %dms\n", 
sc->trs_desc.reply_timo_ms); /* * Allow devices to identify. */ - bus_generic_probe(dev); + bus_identify_children(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); error = bus_generic_attach(dev); return (error); } static int scmi_detach(device_t dev) { struct scmi_softc *sc; sc = device_get_softc(dev); scmi_transport_cleanup(sc); return (0); } static device_method_t scmi_methods[] = { DEVMETHOD(device_attach, scmi_attach), DEVMETHOD(device_detach, scmi_detach), DEVMETHOD_END }; DEFINE_CLASS_1(scmi, scmi_driver, scmi_methods, sizeof(struct scmi_softc), simplebus_driver); DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0); MODULE_VERSION(scmi, 1); static struct scmi_reqs_pool * scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz) { struct scmi_reqs_pool *rp; struct scmi_req *req; rp = malloc(sizeof(*rp), M_DEVBUF, M_ZERO | M_WAITOK); LIST_INIT(&rp->head); for (int i = 0; i < max_msg; i++) { req = malloc(sizeof(*req) + max_payld_sz, M_DEVBUF, M_ZERO | M_WAITOK); mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN); LIST_INSERT_HEAD(&rp->head, req, next); } mtx_init(&rp->mtx, "reqs_pool", "SCMI", MTX_SPIN); return (rp); } static void scmi_reqs_pool_free(struct scmi_reqs_pool *rp) { struct scmi_req *req; LIST_FOREACH(req, &rp->head, next) { mtx_destroy(&req->mtx); free(req, M_DEVBUF); } mtx_destroy(&rp->mtx); free(rp, M_DEVBUF); } static int scmi_transport_init(struct scmi_softc *sc) { struct scmi_transport *trs; int ret; trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK); BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens); mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN); trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF, &trs->inflight_mask); trs->chans[SCMI_CHAN_A2P] = scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE); if (trs->chans[SCMI_CHAN_A2P] == NULL) { free(trs, M_DEVBUF); return (ENOMEM); } trs->chans[SCMI_CHAN_P2A] = scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE); if (trs->chans[SCMI_CHAN_P2A] == NULL) { scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]); free(trs, M_DEVBUF); return (ENOMEM); } sc->trs = trs; ret = SCMI_TRANSPORT_INIT(sc->dev); if (ret != 0) { scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]); scmi_reqs_pool_free(trs->chans[SCMI_CHAN_P2A]); free(trs, M_DEVBUF); return (ret); } return (0); } static void scmi_transport_cleanup(struct scmi_softc *sc) { SCMI_TRANSPORT_CLEANUP(sc->dev); mtx_destroy(&sc->trs->mtx); hashdestroy(sc->trs->inflight_ht, M_DEVBUF, sc->trs->inflight_mask); scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_A2P]); scmi_reqs_pool_free(sc->trs->chans[SCMI_CHAN_P2A]); free(sc->trs, M_DEVBUF); } static struct scmi_req * scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx) { struct scmi_reqs_pool *rp; struct scmi_req *req = NULL; rp = sc->trs->chans[ch_idx]; mtx_lock_spin(&rp->mtx); if (!LIST_EMPTY(&rp->head)) { req = LIST_FIRST(&rp->head); LIST_REMOVE_HEAD(&rp->head, next); } mtx_unlock_spin(&rp->mtx); if (req != NULL) refcount_init(&req->cnt, 1); return (req); } static void scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx, struct scmi_req *req) { struct scmi_reqs_pool *rp; rp = sc->trs->chans[ch_idx]; mtx_lock_spin(&rp->mtx); req->timed_out = false; req->done = false; refcount_init(&req->cnt, 0); LIST_INSERT_HEAD(&rp->head, req, next); mtx_unlock_spin(&rp->mtx); } static void scmi_req_get(struct scmi_softc *sc, struct scmi_req *req) { bool ok; mtx_lock_spin(&req->mtx); ok = 
refcount_acquire_if_not_zero(&req->cnt); mtx_unlock_spin(&req->mtx); if (!ok) device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__); return; } static void scmi_req_put(struct scmi_softc *sc, struct scmi_req *req) { mtx_lock_spin(&req->mtx); if (!refcount_release_if_not_last(&req->cnt)) { bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE); scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req); } mtx_unlock_spin(&req->mtx); } static int scmi_token_pick(struct scmi_softc *sc) { unsigned long next_msg_id, token; mtx_lock_spin(&sc->trs->mtx); /* * next_id is a monotonically increasing unsigned long that can be used * for tracing purposes; next_msg_id is a 10-bit sequence number derived * from it. */ next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF; token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id); /* TODO Account for wrap-arounds and holes */ if (token != 0) BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens); mtx_unlock_spin(&sc->trs->mtx); /* * BIT_FFS_AT returns 1-indexed values, so 0 means failure to find a * free slot: all possible SCMI messages are in-flight using all of the * SCMI_MAX_TOKEN sequence numbers. */ if (!token) return (-EBUSY); return ((int)(token - 1)); } static void scmi_token_release_unlocked(struct scmi_softc *sc, int token) { BIT_SET(SCMI_MAX_TOKEN, token, &sc->trs->avail_tokens); } static int scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req) { uint32_t header = 0; req->token = scmi_token_pick(sc); if (req->token < 0) return (EBUSY); header = req->message_id; header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S; header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S; header |= req->token << SCMI_HDR_TOKEN_S; req->header = htole32(header); req->msg.hdr = htole32(header); return (0); } static int scmi_req_track_inflight(struct scmi_softc *sc, struct scmi_req *req) { int error; /* build hdr, pick token */ error = scmi_finalize_req(sc, req); if (error != 0) return (error); /* Bump refcount to get hold of this in-flight transaction */ scmi_req_get(sc, req); /* Register in the inflight hashtable */ mtx_lock_spin(&sc->trs->mtx); LIST_INSERT_HEAD(REQHASH(sc, req->token), req, next); mtx_unlock_spin(&sc->trs->mtx); return (0); } static int scmi_req_drop_inflight(struct scmi_softc *sc, struct scmi_req *req) { /* Remove from inflight hashtable at first ... */ mtx_lock_spin(&sc->trs->mtx); LIST_REMOVE(req, next); scmi_token_release_unlocked(sc, req->token); mtx_unlock_spin(&sc->trs->mtx); /* ...and drop refcount..potentially releasing *req */ scmi_req_put(sc, req); return (0); } static struct scmi_req * scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr) { struct scmi_req *req = NULL; unsigned int token; token = SCMI_MSG_TOKEN(hdr); mtx_lock_spin(&sc->trs->mtx); LIST_FOREACH(req, REQHASH(sc, token), next) { if (req->token == token) break; } mtx_unlock_spin(&sc->trs->mtx); return (req); } static void scmi_process_response(struct scmi_softc *sc, uint32_t hdr) { bool timed_out = false; struct scmi_req *req; req = scmi_req_lookup_inflight(sc, hdr); if (req == NULL) { device_printf(sc->dev, "Unexpected reply with header |%X| - token: 0x%X Drop.\n", hdr, SCMI_MSG_TOKEN(hdr)); return; } mtx_lock_spin(&req->mtx); req->done = true; if (!req->timed_out) { /* * Consider the case in which a polled message is picked * by chance on the IRQ path on another CPU: setting poll_done * will terminate the other poll loop. 
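 * The transport's poll loop is expected to spin on msg->poll_done with
 * acquire semantics, along these lines (a sketch only; the real loop
 * lives behind SCMI_POLL_MSG, and timed_out is a stand-in):
 *
 *	while (!atomic_load_acq_int(&msg->poll_done) && !timed_out)
 *		cpu_spinwait();
 *
 * so the release-store below is what publishes the finished reply to it.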
*/ if (!req->msg.polling) wakeup(req); else atomic_store_rel_int(&req->msg.poll_done, 1); } else { timed_out = true; } mtx_unlock_spin(&req->mtx); if (timed_out) device_printf(sc->dev, "Late reply for timed-out request - token: 0x%X. Ignore.\n", req->token); /* * In case of a late reply to a timed-out transaction this will * finally free the pending scmi_req */ scmi_req_drop_inflight(sc, req); } void scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr) { struct scmi_softc *sc; sc = device_get_softc(dev); if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) { device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n"); SCMI_CLEAR_CHANNEL(dev, chan); return; } scmi_process_response(sc, hdr); } static int scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out) { int ret; if (req->msg.polling) { bool needs_drop; ret = SCMI_POLL_MSG(sc->dev, &req->msg, sc->trs_desc.reply_timo_ms); /* * Drop reference to successfully polled req unless it had * already also been processed on the IRQ path. * Addresses a possible race-condition between polling and * interrupt reception paths. */ mtx_lock_spin(&req->mtx); needs_drop = (ret == 0) && !req->done; mtx_unlock_spin(&req->mtx); if (needs_drop) scmi_req_drop_inflight(sc, req); if (ret == 0 && req->msg.hdr != req->header) { device_printf(sc->dev, "Malformed reply with header |%08X|. Expected: |%08X|Drop.\n", le32toh(req->msg.hdr), le32toh(req->header)); } } else { ret = tsleep(req, 0, "scmi_wait4", (sc->trs_desc.reply_timo_ms * hz) / 1000); /* Check for lost wakeups since there is no associated lock */ mtx_lock_spin(&req->mtx); if (ret != 0 && req->done) ret = 0; mtx_unlock_spin(&req->mtx); } if (ret == 0) { SCMI_COLLECT_REPLY(sc->dev, &req->msg); if (req->msg.payld[0] != 0) ret = req->msg.payld[0]; *out = &req->msg.payld[SCMI_MSG_HDR_SIZE]; } else { mtx_lock_spin(&req->mtx); req->timed_out = true; mtx_unlock_spin(&req->mtx); device_printf(sc->dev, "Request for token 0x%X timed-out.\n", req->token); } SCMI_TX_COMPLETE(sc->dev, NULL); return (ret); } void * scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id, int tx_payld_sz, int rx_payld_sz) { struct scmi_softc *sc; struct scmi_req *req; sc = device_get_softc(dev); if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE || rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE) { device_printf(dev, "Unsupported payload size. Drop.\n"); return (NULL); } /* Pick one from free list */ req = scmi_req_alloc(sc, SCMI_CHAN_A2P); if (req == NULL) return (NULL); req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF; req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF; req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz; req->msg.rx_len = rx_payld_sz ? 
rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE; return (&req->msg.payld[0]); } void scmi_buf_put(device_t dev, void *buf) { struct scmi_softc *sc; struct scmi_req *req; sc = device_get_softc(dev); req = buf_to_req(buf); scmi_req_put(sc, req); } int scmi_request(device_t dev, void *in, void **out) { struct scmi_softc *sc; struct scmi_req *req; int error; sc = device_get_softc(dev); req = buf_to_req(in); req->msg.polling = (cold || sc->trs_desc.no_completion_irq || req->use_polling); /* Set inflight and send using transport specific method - refc-2 */ error = scmi_req_track_inflight(sc, req); if (error != 0) return (error); error = SCMI_XFER_MSG(sc->dev, &req->msg); if (error != 0) { scmi_req_drop_inflight(sc, req); return (error); } return (scmi_wait_for_response(sc, req, out)); } diff --git a/sys/dev/gpio/dwgpio/dwgpio_bus.c b/sys/dev/gpio/dwgpio/dwgpio_bus.c index 48de7dc327eb..7aa681d7aaad 100644 --- a/sys/dev/gpio/dwgpio/dwgpio_bus.c +++ b/sys/dev/gpio/dwgpio/dwgpio_bus.c @@ -1,161 +1,161 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Ruslan Bukin * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory (Department of Computer Science and * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the * DARPA SSITH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include "dwgpio_if.h" struct dwgpiobus_softc { struct simplebus_softc simplebus_sc; device_t dev; struct resource *res[1]; }; static struct resource_spec dwgpio_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static int dwgpiobus_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "snps,dw-apb-gpio")) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); device_set_desc(dev, "Synopsys® DesignWare® APB GPIO BUS"); return (BUS_PROBE_DEFAULT); } static int dwgpiobus_attach(device_t dev) { struct dwgpiobus_softc *sc; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(dev); if (node == -1) return (ENXIO); if (bus_alloc_resources(dev, dwgpio_spec, sc->res)) { device_printf(dev, "Could not allocate resources.\n"); return (ENXIO); } simplebus_init(dev, node); /* * Allow devices to identify. 
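 * bus_identify_children() (the renamed bus_generic_probe()) runs each
 * driver's DEVICE_IDENTIFY method so that drivers can add their own
 * children before the OFW walk below attaches the top-level nodes.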
*/ - bus_generic_probe(dev); + bus_identify_children(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); } static int dwgpiobus_detach(device_t dev) { struct dwgpiobus_softc *sc; sc = device_get_softc(dev); bus_release_resources(dev, dwgpio_spec, sc->res); return (0); } static int dwgpiobus_write(device_t dev, bus_size_t offset, int val) { struct dwgpiobus_softc *sc; sc = device_get_softc(dev); bus_write_4(sc->res[0], offset, val); return (0); }; static int dwgpiobus_read(device_t dev, bus_size_t offset) { struct dwgpiobus_softc *sc; int val; sc = device_get_softc(dev); val = bus_read_4(sc->res[0], offset); return (val); }; static device_method_t dwgpiobus_methods[] = { DEVMETHOD(device_probe, dwgpiobus_probe), DEVMETHOD(device_attach, dwgpiobus_attach), DEVMETHOD(device_detach, dwgpiobus_detach), DEVMETHOD(dwgpio_write, dwgpiobus_write), DEVMETHOD(dwgpio_read, dwgpiobus_read), DEVMETHOD_END }; DEFINE_CLASS_1(dwgpiobus, dwgpiobus_driver, dwgpiobus_methods, sizeof(struct dwgpiobus_softc), simplebus_driver); EARLY_DRIVER_MODULE(dwgpiobus, simplebus, dwgpiobus_driver, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(dwgpiobus, 1); diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index 6b0f8e5ed3ff..4632f0dcd911 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -1,1077 +1,1077 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #ifdef INTRNG #include #endif #include #include #include #include #include #include "gpiobus_if.h" #undef GPIOBUS_DEBUG #ifdef GPIOBUS_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif static void gpiobus_print_pins(struct gpiobus_ivar *, struct sbuf *); static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int); static int gpiobus_probe(device_t); static int gpiobus_suspend(device_t); static int gpiobus_resume(device_t); static void gpiobus_probe_nomatch(device_t, device_t); static int gpiobus_print_child(device_t, device_t); static int gpiobus_child_location(device_t, device_t, struct sbuf *); static device_t gpiobus_add_child(device_t, u_int, const char *, int); static void gpiobus_hinted_child(device_t, const char *, int); /* * GPIOBUS interface */ static int gpiobus_acquire_bus(device_t, device_t, int); static void gpiobus_release_bus(device_t, device_t); static int gpiobus_pin_setflags(device_t, device_t, uint32_t, uint32_t); static int gpiobus_pin_getflags(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_getcaps(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_set(device_t, device_t, uint32_t, unsigned int); static int gpiobus_pin_get(device_t, device_t, uint32_t, unsigned int*); static int gpiobus_pin_toggle(device_t, device_t, uint32_t); /* * gpiobus_pin flags * The flags in struct gpiobus_pin are not related to the flags used by the * low-level controller driver in struct gpio_pin. Currently, only pins * acquired via FDT data have gpiobus_pin.flags set, sourced from the flags in * the FDT properties. In theory, these flags are defined per-platform. In * practice they are always the flags from the dt-bindings/gpio/gpio.h file. * The only one of those flags we currently support is for handling active-low * pins, so we just define that flag here instead of including a GPL'd header. */ #define GPIO_ACTIVE_LOW 1 /* * XXX -> Move me to better place - gpio_subr.c? * Also, this function must be changed when interrupt configuration * data will be moved into struct resource. */ #ifdef INTRNG struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { u_int irq; struct intr_map_data_gpio *gpio_data; struct resource *res; gpio_data = (struct intr_map_data_gpio *)intr_alloc_map_data( INTR_MAP_DATA_GPIO, sizeof(*gpio_data), M_WAITOK | M_ZERO); gpio_data->gpio_pin_num = pin->pin; gpio_data->gpio_pin_flags = pin->flags; gpio_data->gpio_intr_mode = intr_mode; irq = intr_map_irq(pin->dev, 0, (struct intr_map_data *)gpio_data); res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1, alloc_flags); if (res == NULL) { intr_free_intr_map_data((struct intr_map_data *)gpio_data); return (NULL); } rman_set_virtual(res, gpio_data); return (res); } #else struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { return (NULL); } #endif int gpio_check_flags(uint32_t caps, uint32_t flags) { /* Filter unwanted flags. */ flags &= caps; /* Cannot mix input/output together. */ if (flags & GPIO_PIN_INPUT && flags & GPIO_PIN_OUTPUT) return (EINVAL); /* Cannot mix pull-up/pull-down together. 
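 * A combination is valid only if it picks at most one flag from each
 * mutually exclusive group: for example GPIO_PIN_INPUT | GPIO_PIN_PULLUP
 * passes (given matching caps), while GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN
 * is rejected just below.  The one-interrupt-flag rule further down uses
 * the x & (x - 1) trick, which is non-zero iff x has more than one bit
 * set.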
*/ if (flags & GPIO_PIN_PULLUP && flags & GPIO_PIN_PULLDOWN) return (EINVAL); /* Cannot mix output and interrupt flags together */ if (flags & GPIO_PIN_OUTPUT && flags & GPIO_INTR_MASK) return (EINVAL); /* Only one interrupt flag can be defined at once */ if ((flags & GPIO_INTR_MASK) & ((flags & GPIO_INTR_MASK) - 1)) return (EINVAL); /* The interrupt attached flag cannot be set */ if (flags & GPIO_INTR_ATTACHED) return (EINVAL); return (0); } int gpio_pin_get_by_bus_pinnum(device_t busdev, uint32_t pinnum, gpio_pin_t *ppin) { gpio_pin_t pin; int err; err = gpiobus_acquire_pin(busdev, pinnum); if (err != 0) return (EBUSY); pin = malloc(sizeof(*pin), M_DEVBUF, M_WAITOK | M_ZERO); pin->dev = device_get_parent(busdev); pin->pin = pinnum; pin->flags = 0; *ppin = pin; return (0); } int gpio_pin_get_by_child_index(device_t childdev, uint32_t idx, gpio_pin_t *ppin) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(childdev); if (idx >= devi->npins) return (EINVAL); return (gpio_pin_get_by_bus_pinnum(device_get_parent(childdev), devi->pins[idx], ppin)); } int gpio_pin_getcaps(gpio_pin_t pin, uint32_t *caps) { KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); return (GPIO_PIN_GETCAPS(pin->dev, pin->pin, caps)); } int gpio_pin_is_active(gpio_pin_t pin, bool *active) { int rv; uint32_t tmp; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_GET(pin->dev, pin->pin, &tmp); if (rv != 0) { return (rv); } if (pin->flags & GPIO_ACTIVE_LOW) *active = tmp == 0; else *active = tmp != 0; return (0); } void gpio_pin_release(gpio_pin_t gpio) { device_t busdev; if (gpio == NULL) return; KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL.")); busdev = GPIO_GET_BUS(gpio->dev); if (busdev != NULL) gpiobus_release_pin(busdev, gpio->pin); free(gpio, M_DEVBUF); } int gpio_pin_set_active(gpio_pin_t pin, bool active) { int rv; uint32_t tmp; if (pin->flags & GPIO_ACTIVE_LOW) tmp = active ? 0 : 1; else tmp = active ? 
1 : 0; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_SET(pin->dev, pin->pin, tmp); return (rv); } int gpio_pin_setflags(gpio_pin_t pin, uint32_t flags) { int rv; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_SETFLAGS(pin->dev, pin->pin, flags); return (rv); } static void gpiobus_print_pins(struct gpiobus_ivar *devi, struct sbuf *sb) { int i, range_start, range_stop, need_comma; if (devi->npins == 0) return; need_comma = 0; range_start = range_stop = devi->pins[0]; for (i = 1; i < devi->npins; i++) { if (devi->pins[i] != (range_stop + 1)) { if (need_comma) sbuf_cat(sb, ","); if (range_start != range_stop) sbuf_printf(sb, "%d-%d", range_start, range_stop); else sbuf_printf(sb, "%d", range_start); range_start = range_stop = devi->pins[i]; need_comma = 1; } else range_stop++; } if (need_comma) sbuf_cat(sb, ","); if (range_start != range_stop) sbuf_printf(sb, "%d-%d", range_start, range_stop); else sbuf_printf(sb, "%d", range_start); } device_t gpiobus_attach_bus(device_t dev) { device_t busdev; busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY); if (busdev == NULL) return (NULL); if (device_add_child(dev, "gpioc", -1) == NULL) { device_delete_child(dev, busdev); return (NULL); } #ifdef FDT ofw_gpiobus_register_provider(dev); #endif bus_generic_attach(dev); return (busdev); } int gpiobus_detach_bus(device_t dev) { int err; #ifdef FDT ofw_gpiobus_unregister_provider(dev); #endif err = bus_generic_detach(dev); if (err != 0) return (err); return (device_delete_children(dev)); } int gpiobus_init_softc(device_t dev) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); sc->sc_busdev = dev; sc->sc_dev = device_get_parent(dev); sc->sc_intr_rman.rm_type = RMAN_ARRAY; sc->sc_intr_rman.rm_descr = "GPIO Interrupts"; if (rman_init(&sc->sc_intr_rman) != 0 || rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0) panic("%s: failed to set up rman.", __func__); if (GPIO_PIN_MAX(sc->sc_dev, &sc->sc_npins) != 0) return (ENXIO); KASSERT(sc->sc_npins >= 0, ("GPIO device with no pins")); /* Pins = GPIO_PIN_MAX() + 1 */ sc->sc_npins++; sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_pins == NULL) return (ENOMEM); /* Initialize the bus lock. */ GPIOBUS_LOCK_INIT(sc); return (0); } int gpiobus_alloc_ivars(struct gpiobus_ivar *devi) { /* Allocate pins and flags memory. */ devi->pins = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (devi->pins == NULL) return (ENOMEM); return (0); } void gpiobus_free_ivars(struct gpiobus_ivar *devi) { if (devi->pins) { free(devi->pins, M_DEVBUF); devi->pins = NULL; } devi->npins = 0; } int gpiobus_acquire_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check. */ if (pin >= sc->sc_npins) { device_printf(bus, "invalid pin %d, max: %d\n", pin, sc->sc_npins - 1); return (-1); } /* Mark pin as mapped and give warning if it's already mapped. */ if (sc->sc_pins[pin].mapped) { device_printf(bus, "warning: pin %d is already mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 1; return (0); } /* Release a mapped pin. */ int gpiobus_release_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check.
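* sc_npins is a count (GPIO_PIN_MAX() + 1), so valid pin numbers are * 0 .. sc_npins - 1; e.g. with GPIO_PIN_MAX() == 31 a request for pin 32 fails this check.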
*/ if (pin >= sc->sc_npins) { device_printf(bus, "invalid pin %d, max=%d\n", pin, sc->sc_npins - 1); return (-1); } if (!sc->sc_pins[pin].mapped) { device_printf(bus, "pin %d is not mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 0; return (0); } static int gpiobus_acquire_child_pins(device_t dev, device_t child) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); int i; for (i = 0; i < devi->npins; i++) { /* Reserve the GPIO pin. */ if (gpiobus_acquire_pin(dev, devi->pins[i]) != 0) { device_printf(child, "cannot acquire pin %d\n", devi->pins[i]); while (--i >= 0) { (void)gpiobus_release_pin(dev, devi->pins[i]); } gpiobus_free_ivars(devi); return (EBUSY); } } for (i = 0; i < devi->npins; i++) { /* Use the child name as pin name. */ GPIOBUS_PIN_SETNAME(dev, devi->pins[i], device_get_nameunit(child)); } return (0); } static int gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); int i, npins; npins = 0; for (i = 0; i < 32; i++) { if (mask & (1 << i)) npins++; } if (npins == 0) { device_printf(child, "empty pin mask\n"); return (EINVAL); } devi->npins = npins; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); return (EINVAL); } npins = 0; for (i = 0; i < 32; i++) { if ((mask & (1 << i)) == 0) continue; devi->pins[npins++] = i; } return (0); } static int gpiobus_parse_pin_list(struct gpiobus_softc *sc, device_t child, const char *pins) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); const char *p; char *endp; unsigned long pin; int i, npins; npins = 0; p = pins; for (;;) { pin = strtoul(p, &endp, 0); if (endp == p) break; npins++; if (*endp == '\0') break; p = endp + 1; } if (*endp != '\0') { device_printf(child, "garbage in the pin list: %s\n", endp); return (EINVAL); } if (npins == 0) { device_printf(child, "empty pin list\n"); return (EINVAL); } devi->npins = npins; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); return (EINVAL); } i = 0; p = pins; for (;;) { pin = strtoul(p, &endp, 0); devi->pins[i] = pin; if (*endp == '\0') break; i++; p = endp + 1; } return (0); } static int gpiobus_probe(device_t dev) { device_set_desc(dev, "GPIO bus"); return (BUS_PROBE_GENERIC); } int gpiobus_attach(device_t dev) { int err; err = gpiobus_init_softc(dev); if (err != 0) return (err); /* * Get parent's pins and mark them as unmapped */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } /* * Since this is not a self-enumerating bus, and since we always add * children in attach, we have to always delete children here. 
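* Otherwise the child devices, and the ivars and pin names that back them, * would linger after the softc state below has been torn down.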
*/ int gpiobus_detach(device_t dev) { struct gpiobus_softc *sc; int i, err; sc = GPIOBUS_SOFTC(dev); KASSERT(mtx_initialized(&sc->sc_mtx), ("gpiobus mutex not initialized")); GPIOBUS_LOCK_DESTROY(sc); if ((err = bus_generic_detach(dev)) != 0) return (err); if ((err = device_delete_children(dev)) != 0) return (err); rman_fini(&sc->sc_intr_rman); if (sc->sc_pins) { for (i = 0; i < sc->sc_npins; i++) { if (sc->sc_pins[i].name != NULL) free(sc->sc_pins[i].name, M_DEVBUF); sc->sc_pins[i].name = NULL; } free(sc->sc_pins, M_DEVBUF); sc->sc_pins = NULL; } return (0); } static int gpiobus_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int gpiobus_resume(device_t dev) { return (bus_generic_resume(dev)); } static void gpiobus_probe_nomatch(device_t dev, device_t child) { char pins[128]; struct sbuf sb; struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); sbuf_new(&sb, pins, sizeof(pins), SBUF_FIXEDLEN); gpiobus_print_pins(devi, &sb); sbuf_finish(&sb); device_printf(dev, " at pin%s %s", devi->npins > 1 ? "s" : "", sbuf_data(&sb)); resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); printf("\n"); } static int gpiobus_print_child(device_t dev, device_t child) { char pins[128]; struct sbuf sb; int retval = 0; struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); retval += bus_print_child_header(dev, child); if (devi->npins > 0) { if (devi->npins > 1) retval += printf(" at pins "); else retval += printf(" at pin "); sbuf_new(&sb, pins, sizeof(pins), SBUF_FIXEDLEN); gpiobus_print_pins(devi, &sb); sbuf_finish(&sb); retval += printf("%s", sbuf_data(&sb)); } resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static int gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); sbuf_printf(sb, "pins="); gpiobus_print_pins(devi, sb); return (0); } static device_t gpiobus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct gpiobus_ivar *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&devi->rl); device_set_ivars(child, devi); return (child); } static void gpiobus_child_deleted(device_t dev, device_t child) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); if (devi == NULL) return; gpiobus_free_ivars(devi); resource_list_free(&devi->rl); free(devi, M_DEVBUF); } static int gpiobus_rescan(device_t dev) { /* * Re-scan is supposed to remove and add children, but if someone has * deleted the hints for a child we attached earlier, we have no easy * way to handle that. So this just attaches new children for whom new * hints or drivers have arrived since we last tried. 
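* For example (with a hypothetical "mydev" driver), setting * hint.mydev.0.at="gpiobus0" * hint.mydev.0.pin_list="2,7" * via kenv(1) and then running "devctl rescan gpiobus0" attaches a new * mydev0 child while leaving already-attached children untouched.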
*/ bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static void gpiobus_hinted_child(device_t bus, const char *dname, int dunit) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(bus); device_t child; const char *pins; int irq, pinmask; if (device_find_child(bus, dname, dunit) != NULL) { return; } child = BUS_ADD_CHILD(bus, 0, dname, dunit); if (resource_int_value(dname, dunit, "pins", &pinmask) == 0) { if (gpiobus_parse_pins(sc, child, pinmask)) { device_delete_child(bus, child); return; } } else if (resource_string_value(dname, dunit, "pin_list", &pins) == 0) { if (gpiobus_parse_pin_list(sc, child, pins)) { device_delete_child(bus, child); return; } } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } int gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); switch (which) { case GPIOBUS_IVAR_NPINS: *result = devi->npins; break; case GPIOBUS_IVAR_PINS: /* Children do not ever need to directly examine this. */ return (ENOTSUP); default: return (ENOENT); } return (0); } static int gpiobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct gpiobus_ivar *devi; const uint32_t *ptr; int i; devi = GPIOBUS_IVAR(child); switch (which) { case GPIOBUS_IVAR_NPINS: /* GPIO ivars are set once. */ if (devi->npins != 0) { return (EBUSY); } devi->npins = value; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); devi->npins = 0; return (ENOMEM); } break; case GPIOBUS_IVAR_PINS: ptr = (const uint32_t *)value; for (i = 0; i < devi->npins; i++) devi->pins[i] = ptr[i]; if (gpiobus_acquire_child_pins(dev, child) != 0) return (EBUSY); break; default: return (ENOENT); } return (0); } static struct rman * gpiobus_get_rman(device_t bus, int type, u_int flags) { struct gpiobus_softc *sc; sc = device_get_softc(bus); switch (type) { case SYS_RES_IRQ: return (&sc->sc_intr_rman); default: return (NULL); } } static struct resource * gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list *rl; struct resource_list_entry *rle; int isdefault; isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); if (isdefault) { rl = BUS_GET_RESOURCE_LIST(bus, child); if (rl == NULL) return (NULL); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); start = rle->start; count = rle->count; end = rle->end; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static struct resource_list * gpiobus_get_resource_list(device_t bus __unused, device_t child) { struct gpiobus_ivar *ivar; ivar = GPIOBUS_IVAR(child); return (&ivar->rl); } static int gpiobus_acquire_bus(device_t busdev, device_t child, int how) { struct gpiobus_softc *sc; sc = device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner != NULL) { if (sc->sc_owner == child) panic("%s: %s still owns the bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (how == GPIOBUS_DONTWAIT) { GPIOBUS_UNLOCK(sc); return (EWOULDBLOCK); } while (sc->sc_owner != NULL) mtx_sleep(sc, &sc->sc_mtx, 0, "gpiobuswait", 0); } sc->sc_owner = child; GPIOBUS_UNLOCK(sc); return (0); } static void gpiobus_release_bus(device_t busdev, device_t child) { struct gpiobus_softc *sc; sc = 
device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner == NULL) panic("%s: %s releasing unowned bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (sc->sc_owner != child) panic("%s: %s trying to release bus owned by %s", device_get_nameunit(busdev), device_get_nameunit(child), device_get_nameunit(sc->sc_owner)); sc->sc_owner = NULL; wakeup(sc); GPIOBUS_UNLOCK(sc); } static int gpiobus_pin_setflags(device_t dev, device_t child, uint32_t pin, uint32_t flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); uint32_t caps; if (pin >= devi->npins) return (EINVAL); if (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], &caps) != 0) return (EINVAL); if (gpio_check_flags(caps, flags) != 0) return (EINVAL); return (GPIO_PIN_SETFLAGS(sc->sc_dev, devi->pins[pin], flags)); } static int gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin, uint32_t *flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags); } static int gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin, uint32_t *caps) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps); } static int gpiobus_pin_set(device_t dev, device_t child, uint32_t pin, unsigned int value) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value); } static int gpiobus_pin_get(device_t dev, device_t child, uint32_t pin, unsigned int *value) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value); } static int gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]); } static int gpiobus_pin_getname(device_t dev, uint32_t pin, char *name) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); if (pin >= sc->sc_npins) return (EINVAL); /* Do we have a name for this pin? */ if (sc->sc_pins[pin].name != NULL) { memcpy(name, sc->sc_pins[pin].name, GPIOMAXNAME); return (0); } /* Return the default pin name. */ return (GPIO_PIN_GETNAME(device_get_parent(dev), pin, name)); } static int gpiobus_pin_setname(device_t dev, uint32_t pin, const char *name) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); if (pin >= sc->sc_npins) return (EINVAL); if (name == NULL) return (EINVAL); /* Save the pin name.
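* The name buffer is allocated lazily on the first rename and reused * afterwards, so subsequent renames only cost the strlcpy() below.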
*/ if (sc->sc_pins[pin].name == NULL) sc->sc_pins[pin].name = malloc(GPIOMAXNAME, M_DEVBUF, M_WAITOK | M_ZERO); strlcpy(sc->sc_pins[pin].name, name, GPIOMAXNAME); return (0); } static device_method_t gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gpiobus_probe), DEVMETHOD(device_attach, gpiobus_attach), DEVMETHOD(device_detach, gpiobus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, gpiobus_suspend), DEVMETHOD(device_resume, gpiobus_resume), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource), DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list), DEVMETHOD(bus_get_rman, gpiobus_get_rman), DEVMETHOD(bus_add_child, gpiobus_add_child), DEVMETHOD(bus_child_deleted, gpiobus_child_deleted), DEVMETHOD(bus_rescan, gpiobus_rescan), DEVMETHOD(bus_probe_nomatch, gpiobus_probe_nomatch), DEVMETHOD(bus_print_child, gpiobus_print_child), DEVMETHOD(bus_child_location, gpiobus_child_location), DEVMETHOD(bus_hinted_child, gpiobus_hinted_child), DEVMETHOD(bus_read_ivar, gpiobus_read_ivar), DEVMETHOD(bus_write_ivar, gpiobus_write_ivar), /* GPIO protocol */ DEVMETHOD(gpiobus_acquire_bus, gpiobus_acquire_bus), DEVMETHOD(gpiobus_release_bus, gpiobus_release_bus), DEVMETHOD(gpiobus_pin_getflags, gpiobus_pin_getflags), DEVMETHOD(gpiobus_pin_getcaps, gpiobus_pin_getcaps), DEVMETHOD(gpiobus_pin_setflags, gpiobus_pin_setflags), DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get), DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set), DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle), DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname), DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname), DEVMETHOD_END }; driver_t gpiobus_driver = { "gpiobus", gpiobus_methods, sizeof(struct gpiobus_softc) }; EARLY_DRIVER_MODULE(gpiobus, gpio, gpiobus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(gpiobus, 1); diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c index 8cadc12934e0..ee52748a8c6b 100644 --- a/sys/dev/gpio/ofw_gpiobus.c +++ b/sys/dev/gpio/ofw_gpiobus.c @@ -1,511 +1,511 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009, Nathan Whitehorn * Copyright (c) 2013, Luiz Otavio O Souza * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include "gpiobus_if.h" static struct ofw_gpiobus_devinfo *ofw_gpiobus_setup_devinfo(device_t, device_t, phandle_t); static void ofw_gpiobus_destroy_devinfo(device_t, struct ofw_gpiobus_devinfo *); static int ofw_gpiobus_parse_gpios_impl(device_t, phandle_t, char *, struct gpiobus_softc *, struct gpiobus_pin **); /* * Utility functions for easier handling of OFW GPIO pins. * * !!! BEWARE !!! * GPIOBUS uses children's IVARs, so we cannot use this interface for * cross-tree consumers. * */ int gpio_pin_get_by_ofw_propidx(device_t consumer, phandle_t cnode, char *prop_name, int idx, gpio_pin_t *out_pin) { phandle_t xref; pcell_t *cells; device_t busdev; struct gpiobus_pin pin; int ncells, rv; KASSERT(consumer != NULL && cnode > 0, ("both consumer and cnode required")); rv = ofw_bus_parse_xref_list_alloc(cnode, prop_name, "#gpio-cells", idx, &xref, &ncells, &cells); if (rv != 0) return (rv); /* Translate provider to device. */ pin.dev = OF_device_from_xref(xref); if (pin.dev == NULL) { OF_prop_free(cells); return (ENODEV); } /* Test if the GPIO bus already exists. */ busdev = GPIO_GET_BUS(pin.dev); if (busdev == NULL) { OF_prop_free(cells); return (ENODEV); } /* Map GPIO pin. */ rv = gpio_map_gpios(pin.dev, cnode, OF_node_from_xref(xref), ncells, cells, &pin.pin, &pin.flags); OF_prop_free(cells); if (rv != 0) return (ENXIO); /* Reserve GPIO pin. */ rv = gpiobus_acquire_pin(busdev, pin.pin); if (rv != 0) return (EBUSY); *out_pin = malloc(sizeof(struct gpiobus_pin), M_DEVBUF, M_WAITOK | M_ZERO); **out_pin = pin; return (0); } int gpio_pin_get_by_ofw_idx(device_t consumer, phandle_t node, int idx, gpio_pin_t *pin) { return (gpio_pin_get_by_ofw_propidx(consumer, node, "gpios", idx, pin)); } int gpio_pin_get_by_ofw_property(device_t consumer, phandle_t node, char *name, gpio_pin_t *pin) { return (gpio_pin_get_by_ofw_propidx(consumer, node, name, 0, pin)); } int gpio_pin_get_by_ofw_name(device_t consumer, phandle_t node, char *name, gpio_pin_t *pin) { int rv, idx; KASSERT(consumer != NULL && node > 0, ("both consumer and node required")); rv = ofw_bus_find_string_index(node, "gpio-names", name, &idx); if (rv != 0) return (rv); return (gpio_pin_get_by_ofw_idx(consumer, node, idx, pin)); } /* * OFW_GPIOBUS driver. */ device_t ofw_gpiobus_add_fdt_child(device_t bus, const char *drvname, phandle_t child) { device_t childdev; int i; struct gpiobus_ivar *devi; struct ofw_gpiobus_devinfo *dinfo; /* * Check to see if we already have a child for @p child, and if so * return it. */ childdev = ofw_bus_find_child_device_by_phandle(bus, child); if (childdev != NULL) return (childdev); /* * Set up the GPIO child and OFW bus layer devinfo and add it to bus.
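* The devinfo set up below carries both the gpiobus ivars (pin and * resource lists) and the OFW bus data, so any failure in the steps * that follow must also unwind the freshly added child.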
*/ childdev = device_add_child(bus, drvname, DEVICE_UNIT_ANY); if (childdev == NULL) return (NULL); dinfo = ofw_gpiobus_setup_devinfo(bus, childdev, child); if (dinfo == NULL) { device_delete_child(bus, childdev); return (NULL); } if (device_probe_and_attach(childdev) != 0) { ofw_gpiobus_destroy_devinfo(bus, dinfo); device_delete_child(bus, childdev); return (NULL); } /* Use the child name as pin name. */ devi = &dinfo->opd_dinfo; for (i = 0; i < devi->npins; i++) GPIOBUS_PIN_SETNAME(bus, devi->pins[i], device_get_nameunit(childdev)); return (childdev); } int ofw_gpiobus_parse_gpios(device_t consumer, char *pname, struct gpiobus_pin **pins) { return (ofw_gpiobus_parse_gpios_impl(consumer, ofw_bus_get_node(consumer), pname, NULL, pins)); } void ofw_gpiobus_register_provider(device_t provider) { phandle_t node; node = ofw_bus_get_node(provider); if (node != -1) OF_device_register_xref(OF_xref_from_node(node), provider); } void ofw_gpiobus_unregister_provider(device_t provider) { phandle_t node; node = ofw_bus_get_node(provider); if (node != -1) OF_device_register_xref(OF_xref_from_node(node), NULL); } static struct ofw_gpiobus_devinfo * ofw_gpiobus_setup_devinfo(device_t bus, device_t child, phandle_t node) { int i, npins; struct gpiobus_ivar *devi; struct gpiobus_pin *pins; struct gpiobus_softc *sc; struct ofw_gpiobus_devinfo *dinfo; sc = device_get_softc(bus); dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) return (NULL); if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, node) != 0) { free(dinfo, M_DEVBUF); return (NULL); } /* Parse the gpios property for the child. */ npins = ofw_gpiobus_parse_gpios_impl(child, node, "gpios", sc, &pins); if (npins <= 0) { ofw_bus_gen_destroy_devinfo(&dinfo->opd_obdinfo); free(dinfo, M_DEVBUF); return (NULL); } /* Initialize the irq resource list. */ resource_list_init(&dinfo->opd_dinfo.rl); /* Allocate the child ivars and copy the parsed pin data. */ devi = &dinfo->opd_dinfo; devi->npins = (uint32_t)npins; if (gpiobus_alloc_ivars(devi) != 0) { free(pins, M_DEVBUF); ofw_gpiobus_destroy_devinfo(bus, dinfo); return (NULL); } for (i = 0; i < devi->npins; i++) devi->pins[i] = pins[i].pin; free(pins, M_DEVBUF); /* Parse the interrupt resources. */ if (ofw_bus_intr_to_rl(bus, node, &dinfo->opd_dinfo.rl, NULL) != 0) { ofw_gpiobus_destroy_devinfo(bus, dinfo); return (NULL); } device_set_ivars(child, dinfo); return (dinfo); } static void ofw_gpiobus_destroy_devinfo(device_t bus, struct ofw_gpiobus_devinfo *dinfo) { int i; struct gpiobus_ivar *devi; struct gpiobus_softc *sc; sc = device_get_softc(bus); devi = &dinfo->opd_dinfo; for (i = 0; i < devi->npins; i++) { if (devi->pins[i] >= sc->sc_npins) continue; sc->sc_pins[devi->pins[i]].mapped = 0; } gpiobus_free_ivars(devi); resource_list_free(&dinfo->opd_dinfo.rl); ofw_bus_gen_destroy_devinfo(&dinfo->opd_obdinfo); free(dinfo, M_DEVBUF); } static int ofw_gpiobus_parse_gpios_impl(device_t consumer, phandle_t cnode, char *pname, struct gpiobus_softc *bussc, struct gpiobus_pin **pins) { int gpiocells, i, j, ncells, npins; pcell_t *gpios; phandle_t gpio; ncells = OF_getencprop_alloc_multi(cnode, pname, sizeof(*gpios), (void **)&gpios); if (ncells == -1) { device_printf(consumer, "Warning: No %s specified in fdt data; " "device may not function.\n", pname); return (-1); } /* * The gpio-specifier is controller-independent; the first pcell has * the reference to the GPIO controller phandle. * Count the number of encoded gpio-specifiers on the first pass.
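* An illustrative, made-up property for a controller with * #gpio-cells = <2> might read: * gpios = <&gpioc0 4 GPIO_ACTIVE_LOW>, <&gpioc0 7 0>; * which encodes as six pcells: xref, pin, flags, xref, pin, flags.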
*/ i = 0; npins = 0; while (i < ncells) { /* Allow NULL specifiers. */ if (gpios[i] == 0) { npins++; i++; continue; } gpio = OF_node_from_xref(gpios[i]); /* If we have bussc, ignore devices from other gpios. */ if (bussc != NULL) if (ofw_bus_get_node(bussc->sc_dev) != gpio) return (0); /* * Check for gpio-controller property and read the #gpio-cells * for this GPIO controller. */ if (!OF_hasprop(gpio, "gpio-controller") || OF_getencprop(gpio, "#gpio-cells", &gpiocells, sizeof(gpiocells)) < 0) { device_printf(consumer, "gpio reference is not a gpio-controller.\n"); OF_prop_free(gpios); return (-1); } if (ncells - i < gpiocells + 1) { device_printf(consumer, "%s cells doesn't match #gpio-cells.\n", pname); return (-1); } npins++; i += gpiocells + 1; } if (npins == 0 || pins == NULL) { if (npins == 0) device_printf(consumer, "no pin specified in %s.\n", pname); OF_prop_free(gpios); return (npins); } *pins = malloc(sizeof(struct gpiobus_pin) * npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (*pins == NULL) { OF_prop_free(gpios); return (-1); } /* Decode the gpio specifier on the second pass. */ i = 0; j = 0; while (i < ncells) { /* Allow NULL specifiers. */ if (gpios[i] == 0) { j++; i++; continue; } gpio = OF_node_from_xref(gpios[i]); /* Read gpio-cells property for this GPIO controller. */ if (OF_getencprop(gpio, "#gpio-cells", &gpiocells, sizeof(gpiocells)) < 0) { device_printf(consumer, "gpio does not have the #gpio-cells property.\n"); goto fail; } /* Return the device reference for the GPIO controller. */ (*pins)[j].dev = OF_device_from_xref(gpios[i]); if ((*pins)[j].dev == NULL) { device_printf(consumer, "no device registered for the gpio controller.\n"); goto fail; } /* * If the gpiobus softc is NULL we use the GPIO_GET_BUS() to * retrieve it. The GPIO_GET_BUS() method is only valid after * the child is probed and attached. */ if (bussc == NULL) { if (GPIO_GET_BUS((*pins)[j].dev) == NULL) { device_printf(consumer, "no gpiobus reference for %s.\n", device_get_nameunit((*pins)[j].dev)); goto fail; } bussc = device_get_softc(GPIO_GET_BUS((*pins)[j].dev)); } /* Get the GPIO pin number and flags. */ if (gpio_map_gpios((*pins)[j].dev, cnode, gpio, gpiocells, &gpios[i + 1], &(*pins)[j].pin, &(*pins)[j].flags) != 0) { device_printf(consumer, "cannot map the gpios specifier.\n"); goto fail; } /* Reserve the GPIO pin. */ if (gpiobus_acquire_pin(bussc->sc_busdev, (*pins)[j].pin) != 0) goto fail; j++; i += gpiocells + 1; } OF_prop_free(gpios); return (npins); fail: OF_prop_free(gpios); free(*pins, M_DEVBUF); return (-1); } static int ofw_gpiobus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW GPIO bus"); return (BUS_PROBE_DEFAULT); } static int ofw_gpiobus_attach(device_t dev) { int err; phandle_t child; err = gpiobus_init_softc(dev); if (err != 0) return (err); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); /* * Attach the children represented in the device tree. 
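* Nodes marked "gpio-hog" are claimed by the controller itself and * nodes without a "gpios" property carry nothing to parse, so the * loop below skips both.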
*/ for (child = OF_child(ofw_bus_get_node(dev)); child != 0; child = OF_peer(child)) { if (OF_hasprop(child, "gpio-hog")) continue; if (!OF_hasprop(child, "gpios")) continue; if (ofw_gpiobus_add_fdt_child(dev, NULL, child) == NULL) continue; } return (bus_generic_attach(dev)); } static device_t ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_gpiobus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. */ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static const struct ofw_bus_devinfo * ofw_gpiobus_get_devinfo(device_t bus, device_t dev) { struct ofw_gpiobus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } static device_method_t ofw_gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_gpiobus_probe), DEVMETHOD(device_attach, ofw_gpiobus_attach), /* Bus interface */ DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_gpiobus_add_child), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_gpiobus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(gpiobus, ofw_gpiobus_driver, ofw_gpiobus_methods, sizeof(struct gpiobus_softc), gpiobus_driver); EARLY_DRIVER_MODULE(ofw_gpiobus, gpio, ofw_gpiobus_driver, 0, 0, BUS_PASS_BUS); MODULE_VERSION(ofw_gpiobus, 1); MODULE_DEPEND(ofw_gpiobus, gpiobus, 1, 1, 1); diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c index 548027b0320a..596f9d2e6cd4 100644 --- a/sys/dev/hid/hidbus.c +++ b/sys/dev/hid/hidbus.c @@ -1,976 +1,976 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019-2020 Vladimir Kondratyev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define HID_DEBUG_VAR hid_debug #include #include #include #include "hid_if.h" #define INPUT_EPOCH global_epoch_preempt #define HID_RSIZE_MAX 1024 static hid_intr_t hidbus_intr; static device_probe_t hidbus_probe; static device_attach_t hidbus_attach; static device_detach_t hidbus_detach; struct hidbus_ivars { int32_t usage; uint8_t index; uint32_t flags; uintptr_t driver_info; /* for internal use */ struct mtx *mtx; /* child intr mtx */ hid_intr_t *intr_handler; /* executed under mtx*/ void *intr_ctx; unsigned int refcnt; /* protected by mtx */ struct epoch_context epoch_ctx; CK_STAILQ_ENTRY(hidbus_ivars) link; }; struct hidbus_softc { device_t dev; struct sx sx; struct mtx mtx; bool nowrite; struct hid_rdesc_info rdesc; bool overloaded; int nest; /* Child attach nesting lvl */ int nauto; /* Number of autochildren */ CK_STAILQ_HEAD(, hidbus_ivars) tlcs; }; static int hidbus_fill_rdesc_info(struct hid_rdesc_info *hri, const void *data, hid_size_t len) { int error = 0; hri->data = __DECONST(void *, data); hri->len = len; /* * If report descriptor is not available yet, set maximal * report sizes high enough to allow hidraw to work. */ hri->isize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_input, &hri->iid); hri->osize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_output, &hri->oid); hri->fsize = len == 0 ? 
HID_RSIZE_MAX : hid_report_size_max(data, len, hid_feature, &hri->fid); if (hri->isize > HID_RSIZE_MAX) { DPRINTF("input size is too large, %u bytes (truncating)\n", hri->isize); hri->isize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->osize > HID_RSIZE_MAX) { DPRINTF("output size is too large, %u bytes (truncating)\n", hri->osize); hri->osize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->fsize > HID_RSIZE_MAX) { DPRINTF("feature size is too large, %u bytes (truncating)\n", hri->fsize); hri->fsize = HID_RSIZE_MAX; error = EOVERFLOW; } return (error); } int hidbus_locate(const void *desc, hid_size_t size, int32_t u, enum hid_kind k, uint8_t tlc_index, uint8_t index, struct hid_location *loc, uint32_t *flags, uint8_t *id, struct hid_absinfo *ai) { struct hid_data *d; struct hid_item h; int i; d = hid_start_parse(desc, size, 1 << k); HIDBUS_FOREACH_ITEM(d, &h, tlc_index) { for (i = 0; i < h.nusages; i++) { if (h.kind == k && h.usages[i] == u) { if (index--) break; if (loc != NULL) *loc = h.loc; if (flags != NULL) *flags = h.flags; if (id != NULL) *id = h.report_ID; if (ai != NULL && (h.flags&HIO_RELATIVE) == 0) *ai = (struct hid_absinfo) { .max = h.logical_maximum, .min = h.logical_minimum, .res = hid_item_resolution(&h), }; hid_end_parse(d); return (1); } } } if (loc != NULL) loc->size = 0; if (flags != NULL) *flags = 0; if (id != NULL) *id = 0; hid_end_parse(d); return (0); } bool hidbus_is_collection(const void *desc, hid_size_t size, int32_t usage, uint8_t tlc_index) { struct hid_data *d; struct hid_item h; bool ret = false; d = hid_start_parse(desc, size, 0); HIDBUS_FOREACH_ITEM(d, &h, tlc_index) { if (h.kind == hid_collection && h.usage == usage) { ret = true; break; } } hid_end_parse(d); return (ret); } static device_t hidbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct hidbus_softc *sc = device_get_softc(dev); struct hidbus_ivars *tlc; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); tlc = malloc(sizeof(struct hidbus_ivars), M_DEVBUF, M_WAITOK | M_ZERO); tlc->mtx = &sc->mtx; device_set_ivars(child, tlc); sx_xlock(&sc->sx); CK_STAILQ_INSERT_TAIL(&sc->tlcs, tlc, link); sx_unlock(&sc->sx); return (child); } static int hidbus_enumerate_children(device_t dev, const void* data, hid_size_t len) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_data *hd; struct hid_item hi; device_t child; uint8_t index = 0; if (data == NULL || len == 0) return (ENXIO); /* Add a child for each top level collection */ hd = hid_start_parse(data, len, 1 << hid_input); while (hid_get_item(hd, &hi)) { if (hi.kind != hid_collection || hi.collevel != 1) continue; child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "Could not add HID device\n"); continue; } hidbus_set_index(child, index); hidbus_set_usage(child, hi.usage); hidbus_set_flags(child, HIDBUS_FLAG_AUTOCHILD); index++; DPRINTF("Add child TLC: 0x%04x:0x%04x\n", HID_GET_USAGE_PAGE(hi.usage), HID_GET_USAGE(hi.usage)); } hid_end_parse(hd); if (index == 0) return (ENXIO); sc->nauto = index; return (0); } static int hidbus_attach_children(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); int error; HID_INTR_SETUP(device_get_parent(dev), dev, hidbus_intr, sc, &sc->rdesc); error = hidbus_enumerate_children(dev, sc->rdesc.data, sc->rdesc.len); if (error != 0) DPRINTF("failed to enumerate children: error %d\n", error); /* * hidbus_attach_children() can recurse through device_identify-> * hid_set_report_descr() 
call sequence. Do not attach the * children twice in that case. */ sc->nest++; - bus_generic_probe(dev); + bus_identify_children(dev); sc->nest--; if (sc->nest != 0) return (0); if (hid_is_keyboard(sc->rdesc.data, sc->rdesc.len) != 0) error = bus_generic_attach(dev); else error = bus_delayed_attach_children(dev); if (error != 0) device_printf(dev, "failed to attach child: error %d\n", error); return (error); } static int hidbus_detach_children(device_t dev) { device_t *children, bus; bool is_bus; int i, error; error = 0; is_bus = device_get_devclass(dev) == devclass_find("hidbus"); bus = is_bus ? dev : device_get_parent(dev); KASSERT(device_get_devclass(bus) == devclass_find("hidbus"), ("Device is not hidbus or its child")); if (is_bus) { /* If hidbus is passed, delete all children. */ bus_generic_detach(bus); device_delete_children(bus); } else { /* * If a hidbus child is passed, delete all hidbus children * except the caller. Deleting the caller may result in deadlock. */ error = device_get_children(bus, &children, &i); if (error != 0) return (error); while (i-- > 0) { if (children[i] == dev) continue; DPRINTF("Delete child. index=%d (%s)\n", hidbus_get_index(children[i]), device_get_nameunit(children[i])); error = device_delete_child(bus, children[i]); if (error) { DPRINTF("Failed deleting %s\n", device_get_nameunit(children[i])); break; } } free(children, M_TEMP); } HID_INTR_UNSETUP(device_get_parent(bus), bus); return (error); } static int hidbus_probe(device_t dev) { device_set_desc(dev, "HID bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } static int hidbus_attach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_device_info *devinfo = device_get_ivars(dev); void *d_ptr = NULL; hid_size_t d_len; int error; sc->dev = dev; CK_STAILQ_INIT(&sc->tlcs); mtx_init(&sc->mtx, "hidbus ivar lock", NULL, MTX_DEF); sx_init(&sc->sx, "hidbus ivar list lock"); /* * Ignore the error. It is possible for a non-HID device, e.g. an * XBox360 gamepad, to emulate HID by overloading the report descriptor.
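* In that case rdescsize is 0 here, hidbus_fill_rdesc_info() falls back * to HID_RSIZE_MAX report sizes, and the real descriptor is expected to * be supplied later via hid_set_report_descr().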
*/ d_len = devinfo->rdescsize; if (d_len != 0) { d_ptr = malloc(d_len, M_DEVBUF, M_ZERO | M_WAITOK); error = hid_get_rdesc(dev, d_ptr, d_len); if (error != 0) { free(d_ptr, M_DEVBUF); d_len = 0; d_ptr = NULL; } } hidbus_fill_rdesc_info(&sc->rdesc, d_ptr, d_len); sc->nowrite = hid_test_quirk(devinfo, HQ_NOWRITE); error = hidbus_attach_children(dev); if (error != 0) { hidbus_detach(dev); return (ENXIO); } return (0); } static int hidbus_detach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); hidbus_detach_children(dev); sx_destroy(&sc->sx); mtx_destroy(&sc->mtx); free(sc->rdesc.data, M_DEVBUF); return (0); } static void hidbus_child_detached(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); KASSERT(tlc->refcnt == 0, ("Child device is running")); tlc->mtx = &sc->mtx; tlc->intr_handler = NULL; tlc->flags &= ~HIDBUS_FLAG_CAN_POLL; } /* * Epoch callback indicating tlc is safe to destroy */ static void hidbus_ivar_dtor(epoch_context_t ctx) { struct hidbus_ivars *tlc; tlc = __containerof(ctx, struct hidbus_ivars, epoch_ctx); free(tlc, M_DEVBUF); } static void hidbus_child_deleted(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); sx_xlock(&sc->sx); KASSERT(tlc->refcnt == 0, ("Child device is running")); CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link); sx_unlock(&sc->sx); epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx); } static int hidbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: *result = tlc->index; break; case HIDBUS_IVAR_USAGE: *result = tlc->usage; break; case HIDBUS_IVAR_FLAGS: *result = tlc->flags; break; case HIDBUS_IVAR_DRIVER_INFO: *result = tlc->driver_info; break; case HIDBUS_IVAR_LOCK: *result = (uintptr_t)(tlc->mtx == &sc->mtx ? NULL : tlc->mtx); break; default: return (EINVAL); } return (0); } static int hidbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: tlc->index = value; break; case HIDBUS_IVAR_USAGE: tlc->usage = value; break; case HIDBUS_IVAR_FLAGS: tlc->flags = value; if ((value & HIDBUS_FLAG_CAN_POLL) != 0) HID_INTR_SETUP( device_get_parent(bus), bus, NULL, NULL, NULL); break; case HIDBUS_IVAR_DRIVER_INFO: tlc->driver_info = value; break; case HIDBUS_IVAR_LOCK: tlc->mtx = (struct mtx *)value == NULL ? &sc->mtx : (struct mtx *)value; break; default: return (EINVAL); } return (0); } /* Location hint for devctl(8) */ static int hidbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct hidbus_ivars *tlc = device_get_ivars(child); sbuf_printf(sb, "index=%hhu", tlc->index); return (0); } /* PnP information for devctl(8) */ static int hidbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct hidbus_ivars *tlc = device_get_ivars(child); struct hid_device_info *devinfo = device_get_ivars(bus); sbuf_printf(sb, "page=0x%04x usage=0x%04x bus=0x%02hx " "vendor=0x%04hx product=0x%04hx version=0x%04hx%s%s", HID_GET_USAGE_PAGE(tlc->usage), HID_GET_USAGE(tlc->usage), devinfo->idBus, devinfo->idVendor, devinfo->idProduct, devinfo->idVersion, devinfo->idPnP[0] == '\0' ? "" : " _HID=", devinfo->idPnP[0] == '\0' ? 
"" : devinfo->idPnP); return (0); } void hidbus_set_desc(device_t child, const char *suffix) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); struct hid_device_info *devinfo = device_get_ivars(bus); struct hidbus_ivars *tlc = device_get_ivars(child); /* Do not add NULL suffix or if device name already contains it. */ if (suffix != NULL && strcasestr(devinfo->name, suffix) == NULL && (sc->nauto > 1 || (tlc->flags & HIDBUS_FLAG_AUTOCHILD) == 0)) device_set_descf(child, "%s %s", devinfo->name, suffix); else device_set_desc(child, devinfo->name); } device_t hidbus_find_child(device_t bus, int32_t usage) { device_t *children, child; int ccount, i; bus_topo_assert(); /* Get a list of all hidbus children */ if (device_get_children(bus, &children, &ccount) != 0) return (NULL); /* Scan through to find required TLC */ for (i = 0, child = NULL; i < ccount; i++) { if (hidbus_get_usage(children[i]) == usage) { child = children[i]; break; } } free(children, M_TEMP); return (child); } void hidbus_intr(void *context, void *buf, hid_size_t len) { struct hidbus_softc *sc = context; struct hidbus_ivars *tlc; struct epoch_tracker et; /* * Broadcast input report to all subscribers. * TODO: Add check for input report ID. * * Relock mutex on every TLC item as we can't hold any locks over whole * TLC list here due to LOR with open()/close() handlers. */ if (!HID_IN_POLLING_MODE()) epoch_enter_preempt(INPUT_EPOCH, &et); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc->refcnt == 0 || tlc->intr_handler == NULL) continue; if (HID_IN_POLLING_MODE()) { if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0) tlc->intr_handler(tlc->intr_ctx, buf, len); } else { mtx_lock(tlc->mtx); tlc->intr_handler(tlc->intr_ctx, buf, len); mtx_unlock(tlc->mtx); } } if (!HID_IN_POLLING_MODE()) epoch_exit_preempt(INPUT_EPOCH, &et); } void hidbus_set_intr(device_t child, hid_intr_t *handler, void *context) { struct hidbus_ivars *tlc = device_get_ivars(child); tlc->intr_handler = handler; tlc->intr_ctx = context; } static int hidbus_intr_start(device_t bus, device_t child) { MPASS(bus == device_get_parent(child)); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; bool refcnted = false; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { refcnted |= (tlc->refcnt != 0); if (tlc == ivar) { mtx_lock(tlc->mtx); ++tlc->refcnt; mtx_unlock(tlc->mtx); } } error = refcnted ? 0 : hid_intr_start(bus); sx_unlock(&sc->sx); return (error); } static int hidbus_intr_stop(device_t bus, device_t child) { MPASS(bus == device_get_parent(child)); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; bool refcnted = false; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc == ivar) { mtx_lock(tlc->mtx); MPASS(tlc->refcnt != 0); --tlc->refcnt; mtx_unlock(tlc->mtx); } refcnted |= (tlc->refcnt != 0); } error = refcnted ? 0 : hid_intr_stop(bus); sx_unlock(&sc->sx); return (error); } static void hidbus_intr_poll(device_t bus, device_t child __unused) { hid_intr_poll(bus); } struct hid_rdesc_info * hidbus_get_rdesc_info(device_t child) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); return (&sc->rdesc); } /* * HID interface. * * Hidbus as well as any hidbus child can be passed as first arg. 
*/ /* Read cached report descriptor */ int hid_get_report_descr(device_t dev, void **data, hid_size_t *len) { device_t bus; struct hidbus_softc *sc; bus = device_get_devclass(dev) == devclass_find("hidbus") ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not send a request to the transport backend; * use the cached report descriptor instead. */ if (sc->rdesc.data == NULL || sc->rdesc.len == 0) return (ENXIO); if (data != NULL) *data = sc->rdesc.data; if (len != NULL) *len = sc->rdesc.len; return (0); } /* * Replace the cached report descriptor with one provided by a top level * driver. * * It deletes all hidbus children except the caller and enumerates them * again after the new descriptor has been registered. Currently it cannot * be called from an autoenumerated (by report's TLC) child device context * as that results in child duplication. To overcome this limitation, * hid_set_report_descr() should be called from a device_identify driver's * handler with hidbus itself passed as the 'device_t dev' parameter. */ int hid_set_report_descr(device_t dev, const void *data, hid_size_t len) { struct hid_rdesc_info rdesc; device_t bus; struct hidbus_softc *sc; bool is_bus; int error; bus_topo_assert(); is_bus = device_get_devclass(dev) == devclass_find("hidbus"); bus = is_bus ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not overload an already overloaded report descriptor in the * device_identify handler. It causes an infinite recursion loop. */ if (is_bus && sc->overloaded) return (0); DPRINTFN(5, "len=%d\n", len); DPRINTFN(5, "data = %*D\n", len, data, " "); error = hidbus_fill_rdesc_info(&rdesc, data, len); if (error != 0) return (error); error = hidbus_detach_children(dev); if (error != 0) return (error); /* Make a private copy to handle the case of dynamically allocated data. */ rdesc.data = malloc(len, M_DEVBUF, M_ZERO | M_WAITOK); bcopy(data, rdesc.data, len); sc->overloaded = true; free(sc->rdesc.data, M_DEVBUF); bcopy(&rdesc, &sc->rdesc, sizeof(struct hid_rdesc_info)); error = hidbus_attach_children(bus); return (error); } static int hidbus_get_rdesc(device_t dev, device_t child __unused, void *data, hid_size_t len) { return (hid_get_rdesc(dev, data, len)); } static int hidbus_read(device_t dev, device_t child __unused, void *data, hid_size_t maxlen, hid_size_t *actlen) { return (hid_read(dev, data, maxlen, actlen)); } static int hidbus_write(device_t dev, device_t child __unused, const void *data, hid_size_t len) { struct hidbus_softc *sc; uint8_t id; sc = device_get_softc(dev); /* * The output interrupt endpoint is often optional. If the HID device * does not provide it, send reports via the control pipe. */ if (sc->nowrite) { /* Try to extract the report ID byte. */ id = (sc->rdesc.oid & (len > 0)) ?
*(const uint8_t*)data : 0; return (hid_set_report(dev, data, len, HID_OUTPUT_REPORT, id)); } return (hid_write(dev, data, len)); } static int hidbus_get_report(device_t dev, device_t child __unused, void *data, hid_size_t maxlen, hid_size_t *actlen, uint8_t type, uint8_t id) { return (hid_get_report(dev, data, maxlen, actlen, type, id)); } static int hidbus_set_report(device_t dev, device_t child __unused, const void *data, hid_size_t len, uint8_t type, uint8_t id) { return (hid_set_report(dev, data, len, type, id)); } static int hidbus_set_idle(device_t dev, device_t child __unused, uint16_t duration, uint8_t id) { return (hid_set_idle(dev, duration, id)); } static int hidbus_set_protocol(device_t dev, device_t child __unused, uint16_t protocol) { return (hid_set_protocol(dev, protocol)); } static int hidbus_ioctl(device_t dev, device_t child __unused, unsigned long cmd, uintptr_t data) { return (hid_ioctl(dev, cmd, data)); } /*------------------------------------------------------------------------* * hidbus_lookup_id * * This function takes an array of "struct hid_device_id" and tries * to match the entries with the information in "struct hid_device_info". * * Return values: * NULL: No match found. * Else: Pointer to matching entry. *------------------------------------------------------------------------*/ const struct hid_device_id * hidbus_lookup_id(device_t dev, const struct hid_device_id *id, int nitems_id) { const struct hid_device_id *id_end; const struct hid_device_info *info; int32_t usage; bool is_child; if (id == NULL) { goto done; } id_end = id + nitems_id; info = hid_get_device_info(dev); is_child = device_get_devclass(dev) != devclass_find("hidbus"); if (is_child) usage = hidbus_get_usage(dev); /* * Keep on matching array entries until we find a match or * until we reach the end of the matching array: */ for (; id != id_end; id++) { if (is_child && (id->match_flag_page) && (id->page != HID_GET_USAGE_PAGE(usage))) { continue; } if (is_child && (id->match_flag_usage) && (id->usage != HID_GET_USAGE(usage))) { continue; } if ((id->match_flag_bus) && (id->idBus != info->idBus)) { continue; } if ((id->match_flag_vendor) && (id->idVendor != info->idVendor)) { continue; } if ((id->match_flag_product) && (id->idProduct != info->idProduct)) { continue; } if ((id->match_flag_ver_lo) && (id->idVersion_lo > info->idVersion)) { continue; } if ((id->match_flag_ver_hi) && (id->idVersion_hi < info->idVersion)) { continue; } if (id->match_flag_pnp && strncmp(id->idPnP, info->idPnP, HID_PNP_ID_SIZE) != 0) { continue; } /* We found a match! */ return (id); } done: return (NULL); } /*------------------------------------------------------------------------* * hidbus_lookup_driver_info - factored out code * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ int hidbus_lookup_driver_info(device_t child, const struct hid_device_id *id, int nitems_id) { id = hidbus_lookup_id(child, id, nitems_id); if (id) { /* copy driver info */ hidbus_set_driver_info(child, id->driver_info); return (0); } return (ENXIO); } const struct hid_device_info * hid_get_device_info(device_t dev) { device_t bus; bus = device_get_devclass(dev) == devclass_find("hidbus") ?
dev : device_get_parent(dev); return (device_get_ivars(bus)); } static device_method_t hidbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, hidbus_probe), DEVMETHOD(device_attach, hidbus_attach), DEVMETHOD(device_detach, hidbus_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus interface */ DEVMETHOD(bus_add_child, hidbus_add_child), DEVMETHOD(bus_child_detached, hidbus_child_detached), DEVMETHOD(bus_child_deleted, hidbus_child_deleted), DEVMETHOD(bus_read_ivar, hidbus_read_ivar), DEVMETHOD(bus_write_ivar, hidbus_write_ivar), DEVMETHOD(bus_child_pnpinfo, hidbus_child_pnpinfo), DEVMETHOD(bus_child_location, hidbus_child_location), /* hid interface */ DEVMETHOD(hid_intr_start, hidbus_intr_start), DEVMETHOD(hid_intr_stop, hidbus_intr_stop), DEVMETHOD(hid_intr_poll, hidbus_intr_poll), DEVMETHOD(hid_get_rdesc, hidbus_get_rdesc), DEVMETHOD(hid_read, hidbus_read), DEVMETHOD(hid_write, hidbus_write), DEVMETHOD(hid_get_report, hidbus_get_report), DEVMETHOD(hid_set_report, hidbus_set_report), DEVMETHOD(hid_set_idle, hidbus_set_idle), DEVMETHOD(hid_set_protocol, hidbus_set_protocol), DEVMETHOD(hid_ioctl, hidbus_ioctl), DEVMETHOD_END }; driver_t hidbus_driver = { "hidbus", hidbus_methods, sizeof(struct hidbus_softc), }; MODULE_DEPEND(hidbus, hid, 1, 1, 1); MODULE_VERSION(hidbus, 1); DRIVER_MODULE(hidbus, atopcase, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, hvhid, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, iichid, hidbus_driver, 0, 0); DRIVER_MODULE(hidbus, usbhid, hidbus_driver, 0, 0); diff --git a/sys/dev/hyperv/vmbus/vmbus.c b/sys/dev/hyperv/vmbus/vmbus.c index 5d0b23a09213..672f94f5fd85 100644 --- a/sys/dev/hyperv/vmbus/vmbus.c +++ b/sys/dev/hyperv/vmbus/vmbus.c @@ -1,1703 +1,1703 @@ /*- * Copyright (c) 2009-2012,2016-2017 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * VM Bus Driver Implementation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__aarch64__) #include #include #include #else #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_if.h" #include "pcib_if.h" #include "vmbus_if.h" #define VMBUS_GPADL_START 0xe1e10 struct vmbus_msghc { struct vmbus_xact *mh_xact; struct hypercall_postmsg_in mh_inprm_save; }; static void vmbus_identify(driver_t *, device_t); static int vmbus_probe(device_t); static int vmbus_attach(device_t); static int vmbus_detach(device_t); static int vmbus_read_ivar(device_t, device_t, int, uintptr_t *); static int vmbus_child_pnpinfo(device_t, device_t, struct sbuf *); static struct resource *vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs); static int vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs); static int vmbus_alloc_msix(device_t bus, device_t dev, int *irq); static int vmbus_release_msix(device_t bus, device_t dev, int irq); static int vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data); static uint32_t vmbus_get_version_method(device_t, device_t); static int vmbus_probe_guid_method(device_t, device_t, const struct hyperv_guid *); static uint32_t vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu); static struct taskqueue *vmbus_get_eventtq_method(device_t, device_t, int); #if defined(EARLY_AP_STARTUP) static void vmbus_intrhook(void *); #endif static int vmbus_init(struct vmbus_softc *); static int vmbus_connect(struct vmbus_softc *, uint32_t); static int vmbus_req_channels(struct vmbus_softc *sc); static void vmbus_disconnect(struct vmbus_softc *); static int vmbus_scan(struct vmbus_softc *); static void vmbus_scan_teardown(struct vmbus_softc *); static void vmbus_scan_done(struct vmbus_softc *, const struct vmbus_message *); static void vmbus_chanmsg_handle(struct vmbus_softc *, const struct vmbus_message *); static void vmbus_msg_task(void *, int); static void vmbus_synic_setup(void *); static void vmbus_synic_teardown(void *); static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS); static int vmbus_dma_alloc(struct vmbus_softc *); static void vmbus_dma_free(struct vmbus_softc *); static int vmbus_intr_setup(struct vmbus_softc *); static void vmbus_intr_teardown(struct vmbus_softc *); static int vmbus_doattach(struct vmbus_softc *); static void vmbus_event_proc_dummy(struct vmbus_softc *, int); static bus_dma_tag_t vmbus_get_dma_tag(device_t parent, device_t child); static struct vmbus_softc *vmbus_sc; #if defined(__x86_64__) static int vmbus_alloc_cpu_mem(struct vmbus_softc *sc); static void vmbus_free_cpu_mem(struct vmbus_softc *sc); #endif SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Hyper-V vmbus"); static int vmbus_pin_evttask = 1; SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN, &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU"); #if defined(__x86_64__) static int hv_tlb_hcall = 1; SYSCTL_INT(_hw_vmbus, OID_AUTO, tlb_hcall, CTLFLAG_RDTUN, &hv_tlb_hcall, 0, "Use Hyper-V hypercall for TLB flush"); #endif uint32_t vmbus_current_version; static const uint32_t vmbus_version[] = {
VMBUS_VERSION_WIN10, VMBUS_VERSION_WIN8_1, VMBUS_VERSION_WIN8, VMBUS_VERSION_WIN7, VMBUS_VERSION_WS2008 }; static const vmbus_chanmsg_proc_t vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = { VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done), VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP) }; static device_method_t vmbus_methods[] = { /* Device interface */ DEVMETHOD(device_identify, vmbus_identify), DEVMETHOD(device_probe, vmbus_probe), DEVMETHOD(device_attach, vmbus_attach), DEVMETHOD(device_detach, vmbus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, vmbus_read_ivar), DEVMETHOD(bus_child_pnpinfo, vmbus_child_pnpinfo), DEVMETHOD(bus_alloc_resource, vmbus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_cpus, bus_generic_get_cpus), DEVMETHOD(bus_get_dma_tag, vmbus_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_alloc_msi, vmbus_alloc_msi), DEVMETHOD(pcib_release_msi, vmbus_release_msi), DEVMETHOD(pcib_alloc_msix, vmbus_alloc_msix), DEVMETHOD(pcib_release_msix, vmbus_release_msix), DEVMETHOD(pcib_map_msi, vmbus_map_msi), /* Vmbus interface */ DEVMETHOD(vmbus_get_version, vmbus_get_version_method), DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method), DEVMETHOD(vmbus_get_vcpu_id, vmbus_get_vcpu_id_method), DEVMETHOD(vmbus_get_event_taskq, vmbus_get_eventtq_method), DEVMETHOD_END }; static driver_t vmbus_driver = { "vmbus", vmbus_methods, sizeof(struct vmbus_softc) }; uint32_t hv_max_vp_index; DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL); DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL); MODULE_DEPEND(vmbus, acpi, 1, 1, 1); MODULE_DEPEND(vmbus, pci, 1, 1, 1); MODULE_VERSION(vmbus, 1); static __inline struct vmbus_softc * vmbus_get_softc(void) { return vmbus_sc; } static bus_dma_tag_t vmbus_get_dma_tag(device_t dev, device_t child) { struct vmbus_softc *sc = vmbus_get_softc(); return (sc->dmat); } void vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize) { struct hypercall_postmsg_in *inprm; if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) panic("invalid data size %zu", dsize); inprm = vmbus_xact_req_data(mh->mh_xact); memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE); inprm->hc_connid = VMBUS_CONNID_MESSAGE; inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL; inprm->hc_dsize = dsize; } struct vmbus_msghc * vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize) { struct vmbus_msghc *mh; struct vmbus_xact *xact; if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) panic("invalid data size %zu", dsize); xact = vmbus_xact_get(sc->vmbus_xc, dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0])); if (xact == NULL) return (NULL); mh = vmbus_xact_priv(xact, sizeof(*mh)); mh->mh_xact = xact; vmbus_msghc_reset(mh, dsize); return (mh); } void vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { vmbus_xact_put(mh->mh_xact); } void * vmbus_msghc_dataptr(struct vmbus_msghc *mh) { struct hypercall_postmsg_in *inprm; inprm = vmbus_xact_req_data(mh->mh_xact); return (inprm->hc_data); } int vmbus_msghc_exec_noresult(struct vmbus_msghc *mh) { sbintime_t time 
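/* A sketch of the retry policy implemented below: the pause starts at 1ms and doubles after every failed hypercall, capping at ~2s. With HC_RETRY_MAX = 20 the pause sequence is roughly 1, 2, 4, ..., 1024, 2048, 2048, ... ms, i.e. about 20 seconds of accumulated sleep in the worst case before giving up with EIO. */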
= SBT_1MS; struct hypercall_postmsg_in *inprm; bus_addr_t inprm_paddr; int i; inprm = vmbus_xact_req_data(mh->mh_xact); inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact); /* * Save the input parameter so that we could restore the input * parameter if the Hypercall failed. * * XXX * Is this really necessary?! i.e. Will the Hypercall ever * overwrite the input parameter? */ memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE); /* * In order to cope with transient failures, e.g. insufficient * resources on host side, we retry the post message Hypercall * several times. 20 retries seem sufficient. */ #define HC_RETRY_MAX 20 for (i = 0; i < HC_RETRY_MAX; ++i) { uint64_t status; status = hypercall_post_message(inprm_paddr); if (status == HYPERCALL_STATUS_SUCCESS) return 0; pause_sbt("hcpmsg", time, 0, C_HARDCLOCK); if (time < SBT_1S * 2) time *= 2; /* Restore input parameter and try again */ memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE); } #undef HC_RETRY_MAX return EIO; } int vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { int error; vmbus_xact_activate(mh->mh_xact); error = vmbus_msghc_exec_noresult(mh); if (error) vmbus_xact_deactivate(mh->mh_xact); return error; } void vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { vmbus_xact_deactivate(mh->mh_xact); } const struct vmbus_message * vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { size_t resp_len; return (vmbus_xact_wait(mh->mh_xact, &resp_len)); } const struct vmbus_message * vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { size_t resp_len; return (vmbus_xact_poll(mh->mh_xact, &resp_len)); } void vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg) { vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg)); } uint32_t vmbus_gpadl_alloc(struct vmbus_softc *sc) { uint32_t gpadl; again: gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1); if (gpadl == 0) goto again; return (gpadl); } /* Used for Hyper-V socket when guest client connects to host */ int vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id, struct hyperv_guid *host_srv_id) { struct vmbus_softc *sc = vmbus_get_softc(); struct vmbus_chanmsg_tl_connect *req; struct vmbus_msghc *mh; int error; if (!sc) return ENXIO; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for tl connect\n"); return ENXIO; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN; req->guest_endpoint_id = *guest_srv_id; req->host_service_id = *host_srv_id; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); if (error) { device_printf(sc->vmbus_dev, "tl connect msg hypercall failed\n"); } return error; } static int vmbus_connect(struct vmbus_softc *sc, uint32_t version) { struct vmbus_chanmsg_connect *req; const struct vmbus_message *msg; struct vmbus_msghc *mh; int error, done = 0; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) return ENXIO; req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT; req->chm_ver = version; req->chm_evtflags = pmap_kextract((vm_offset_t)sc->vmbus_evtflags); req->chm_mnf1 = pmap_kextract((vm_offset_t)sc->vmbus_mnf1); req->chm_mnf2 = pmap_kextract((vm_offset_t)sc->vmbus_mnf2); error = vmbus_msghc_exec(sc, mh); if (error) { vmbus_msghc_put(sc, mh); return error; } msg = vmbus_msghc_wait_result(sc, mh); done = ((const struct vmbus_chanmsg_connect_resp *) msg->msg_data)->chm_done; 
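/* The sequence above is the typical life cycle of the message hypercall API: vmbus_msghc_get() borrows a request buffer, vmbus_msghc_dataptr() exposes the payload to fill in, vmbus_msghc_exec() posts it, vmbus_msghc_wait_result() sleeps for the reply, and vmbus_msghc_put() returns the context. Fire-and-forget callers, e.g. vmbus_req_channels() below, use vmbus_msghc_exec_noresult() and skip the wait. */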
vmbus_msghc_put(sc, mh); return (done ? 0 : EOPNOTSUPP); } static int vmbus_init(struct vmbus_softc *sc) { int i; for (i = 0; i < nitems(vmbus_version); ++i) { int error; error = vmbus_connect(sc, vmbus_version[i]); if (!error) { vmbus_current_version = vmbus_version[i]; sc->vmbus_version = vmbus_version[i]; device_printf(sc->vmbus_dev, "version %u.%u\n", VMBUS_VERSION_MAJOR(sc->vmbus_version), VMBUS_VERSION_MINOR(sc->vmbus_version)); return 0; } } return ENXIO; } static void vmbus_disconnect(struct vmbus_softc *sc) { struct vmbus_chanmsg_disconnect *req; struct vmbus_msghc *mh; int error; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for disconnect\n"); return; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); if (error) { device_printf(sc->vmbus_dev, "disconnect msg hypercall failed\n"); } } static int vmbus_req_channels(struct vmbus_softc *sc) { struct vmbus_chanmsg_chrequest *req; struct vmbus_msghc *mh; int error; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) return ENXIO; req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); return error; } static void vmbus_scan_done_task(void *xsc, int pending __unused) { struct vmbus_softc *sc = xsc; bus_topo_lock(); sc->vmbus_scandone = true; bus_topo_unlock(); wakeup(&sc->vmbus_scandone); } static void vmbus_scan_done(struct vmbus_softc *sc, const struct vmbus_message *msg __unused) { taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task); } static int vmbus_scan(struct vmbus_softc *sc) { int error; /* * Identify, probe and attach for non-channel devices. */ - bus_generic_probe(sc->vmbus_dev); + bus_identify_children(sc->vmbus_dev); bus_generic_attach(sc->vmbus_dev); /* * This taskqueue serializes vmbus devices' attach and detach * for channel offer and rescind messages. */ sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK, taskqueue_thread_enqueue, &sc->vmbus_devtq); taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev"); TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc); /* * This taskqueue handles sub-channel detach, so that vmbus * device's detach running in vmbus_devtq can drain its sub- * channels. */ sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK, taskqueue_thread_enqueue, &sc->vmbus_subchtq); taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch"); /* * Start vmbus scanning. */ error = vmbus_req_channels(sc); if (error) { device_printf(sc->vmbus_dev, "channel request failed: %d\n", error); return (error); } /* * Wait for all vmbus devices from the initial channel offers to be * attached. 
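* The handshake, roughly: vmbus_req_channels() posts CHREQUEST, the hypervisor answers with one CHOFFER message per channel and a final CHOFFER_DONE, which vmbus_scan_done() turns into vmbus_scandone_task; that task sets vmbus_scandone and wakes up the mtx_sleep() below.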
*/ bus_topo_assert(); while (!sc->vmbus_scandone) mtx_sleep(&sc->vmbus_scandone, bus_topo_mtx(), 0, "vmbusdev", 0); if (bootverbose) { device_printf(sc->vmbus_dev, "device scan, probe and attach " "done\n"); } return (0); } static void vmbus_scan_teardown(struct vmbus_softc *sc) { bus_topo_assert(); if (sc->vmbus_devtq != NULL) { bus_topo_unlock(); taskqueue_free(sc->vmbus_devtq); bus_topo_lock(); sc->vmbus_devtq = NULL; } if (sc->vmbus_subchtq != NULL) { bus_topo_unlock(); taskqueue_free(sc->vmbus_subchtq); bus_topo_lock(); sc->vmbus_subchtq = NULL; } } static void vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg) { vmbus_chanmsg_proc_t msg_proc; uint32_t msg_type; msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type; if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) { device_printf(sc->vmbus_dev, "unknown message type 0x%x\n", msg_type); return; } msg_proc = vmbus_chanmsg_handlers[msg_type]; if (msg_proc != NULL) msg_proc(sc, msg); /* Channel-specific processing */ vmbus_chan_msgproc(sc, msg); } static void vmbus_msg_task(void *xsc, int pending __unused) { struct vmbus_softc *sc = xsc; volatile struct vmbus_message *msg; msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE; for (;;) { if (msg->msg_type == HYPERV_MSGTYPE_NONE) { /* No message */ break; } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) { /* Channel message */ vmbus_chanmsg_handle(sc, __DEVOLATILE(const struct vmbus_message *, msg)); } msg->msg_type = HYPERV_MSGTYPE_NONE; /* * Make sure the write to msg_type (i.e. setting it to * HYPERV_MSGTYPE_NONE) happens before we read msg_flags and * write the EOM MSR. Otherwise, the EOM will not deliver any * more messages, since there is no empty slot. * * NOTE: * mb() is used here, since atomic_thread_fence_seq_cst() * degrades to a compiler fence on UP kernels. */ mb(); if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { /* * This will cause a message queue rescan, possibly delivering * another message from the hypervisor. */ WRMSR(MSR_HV_EOM, 0); } } } static __inline int vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu) { volatile struct vmbus_message *msg; struct vmbus_message *msg_base; msg_base = VMBUS_PCPU_GET(sc, message, cpu); /* * Check the event timer. * * TODO: move this to an independent IDT vector. */ vmbus_handle_timer_intr1(msg_base, frame); /* * Check events. Hot path for network and storage I/O data; high rate. * * NOTE: * As recommended by the Windows guest fellows, we check events before * checking messages. */ sc->vmbus_event_proc(sc, cpu); /* * Check messages. Mainly management stuff; ultra low rate. */ msg = msg_base + VMBUS_SINT_MESSAGE; if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu), VMBUS_PCPU_PTR(sc, message_task, cpu)); } return (FILTER_HANDLED); } void vmbus_handle_intr(struct trapframe *trap_frame) { struct vmbus_softc *sc = vmbus_get_softc(); int cpu = curcpu; /* * Disable preemption. */ critical_enter(); /* * Do a little interrupt counting. This uses the x86-specific * intrcnt_add() function, hence the !__aarch64__ guard. */ #if !defined(__aarch64__) (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++; #endif /* not for aarch64 */ vmbus_handle_intr1(sc, trap_frame, cpu); /* * Enable preemption. */ critical_exit(); } static void vmbus_synic_setup(void *xsc) { struct vmbus_softc *sc = xsc; int cpu = curcpu; uint64_t val, orig; uint32_t sint; if (hyperv_features & CPUID_HV_MSR_VP_INDEX) { /* Save the virtual processor id.
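* MSR_HV_VP_INDEX is the hypervisor's numbering of this virtual processor, which need not match FreeBSD's cpu id; anything sent to the host that names a CPU, e.g. the channel-to-CPU bindings served by vmbus_get_vcpu_id_method(), must use this value, hence it is cached per CPU here.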
*/ VMBUS_PCPU_GET(sc, vcpuid, cpu) = RDMSR(MSR_HV_VP_INDEX); } else { /* Set virtual processor id to 0 for compatibility. */ VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0; } if (VMBUS_PCPU_GET(sc, vcpuid, cpu) > hv_max_vp_index) hv_max_vp_index = VMBUS_PCPU_GET(sc, vcpuid, cpu); /* * Setup the SynIC message. */ orig = RDMSR(MSR_HV_SIMP); val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, message, cpu)) & MSR_HV_SIMP_PGMASK; val |= MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK); WRMSR(MSR_HV_SIMP, val); /* * Setup the SynIC event flags. */ orig = RDMSR(MSR_HV_SIEFP); val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, event_flags, cpu)) & MSR_HV_SIMP_PGMASK; val |= MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK); WRMSR(MSR_HV_SIEFP, val); /* * Configure and unmask SINT for message and event flags. */ sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; orig = RDMSR(sint); val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | (orig & MSR_HV_SINT_RSVD_MASK); WRMSR(sint, val); /* * Configure and unmask SINT for timer. */ vmbus_synic_setup1(sc); /* * All done; enable SynIC. */ orig = RDMSR(MSR_HV_SCONTROL); val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK); WRMSR(MSR_HV_SCONTROL, val); } #if defined(__x86_64__) void hyperv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2, smp_invl_local_cb_t curcpu_cb, enum invl_op_codes op) { struct vmbus_softc *sc = vmbus_get_softc(); return hv_vm_tlb_flush(pmap, addr1, addr2, op, sc, curcpu_cb); } #endif /*__x86_64__*/ static void vmbus_synic_teardown(void *arg) { uint64_t orig; uint32_t sint; /* * Disable SynIC. */ orig = RDMSR(MSR_HV_SCONTROL); WRMSR(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK)); /* * Mask message and event flags SINT. */ sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; orig = RDMSR(sint); WRMSR(sint, orig | MSR_HV_SINT_MASKED); /* * Mask timer SINT. */ vmbus_synic_teardown1(); /* * Teardown SynIC message. */ orig = RDMSR(MSR_HV_SIMP); WRMSR(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK)); /* * Teardown SynIC event flags. */ orig = RDMSR(MSR_HV_SIEFP); WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); } static int vmbus_dma_alloc(struct vmbus_softc *sc) { uint8_t *evtflags; int cpu; CPU_FOREACH(cpu) { void *ptr; /* * Per-cpu messages and event flags. 
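* One page per CPU for each: the SynIC message page that vmbus_synic_setup() programs into MSR_HV_SIMP and the event flags page it programs into MSR_HV_SIEFP. Those MSRs consume a physical page frame, which is why each allocation below asks contigmalloc() for PAGE_SIZE alignment.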
*/ ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (ptr == NULL) return ENOMEM; VMBUS_PCPU_GET(sc, message, cpu) = ptr; ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (ptr == NULL) return ENOMEM; VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr; } evtflags = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (evtflags == NULL) return ENOMEM; sc->vmbus_rx_evtflags = (u_long *)evtflags; sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2)); sc->vmbus_evtflags = evtflags; sc->vmbus_mnf1 = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (sc->vmbus_mnf1 == NULL) return ENOMEM; sc->vmbus_mnf2 = contigmalloc(sizeof(struct vmbus_mnf), M_DEVBUF, M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (sc->vmbus_mnf2 == NULL) return ENOMEM; return 0; } static void vmbus_dma_free(struct vmbus_softc *sc) { int cpu; if (sc->vmbus_evtflags != NULL) { free(sc->vmbus_evtflags, M_DEVBUF); sc->vmbus_evtflags = NULL; sc->vmbus_rx_evtflags = NULL; sc->vmbus_tx_evtflags = NULL; } if (sc->vmbus_mnf1 != NULL) { free(sc->vmbus_mnf1, M_DEVBUF); sc->vmbus_mnf1 = NULL; } if (sc->vmbus_mnf2 != NULL) { free(sc->vmbus_mnf2, M_DEVBUF); sc->vmbus_mnf2 = NULL; } CPU_FOREACH(cpu) { if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) { free(VMBUS_PCPU_GET(sc, message, cpu), M_DEVBUF); VMBUS_PCPU_GET(sc, message, cpu) = NULL; } if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) { free(VMBUS_PCPU_GET(sc, event_flags, cpu), M_DEVBUF); VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL; } } } static int vmbus_intr_setup(struct vmbus_softc *sc) { int cpu; CPU_FOREACH(cpu) { char buf[MAXCOMLEN + 1]; cpuset_t cpu_mask; /* Allocate an interrupt counter for Hyper-V interrupt */ snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu); #if !defined(__aarch64__) intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu)); #endif /* not for aarch64 */ /* * Setup taskqueue to handle events. Task will be per- * channel. */ VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast( "hyperv event", M_WAITOK, taskqueue_thread_enqueue, VMBUS_PCPU_PTR(sc, event_tq, cpu)); if (vmbus_pin_evttask) { CPU_SETOF(cpu, &cpu_mask); taskqueue_start_threads_cpuset( VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask, "hvevent%d", cpu); } else { taskqueue_start_threads( VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, "hvevent%d", cpu); } /* * Setup tasks and taskqueues to handle messages. 
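* Unlike the event taskqueues, the message taskqueues are always pinned to their CPU (note the unconditional CPU_SETOF() below): vmbus_msg_task() drains the SynIC message page of the CPU it runs on, so a migrated thread would drain the wrong page.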
*/ VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast( "hyperv msg", M_WAITOK, taskqueue_thread_enqueue, VMBUS_PCPU_PTR(sc, message_tq, cpu)); CPU_SETOF(cpu, &cpu_mask); taskqueue_start_threads_cpuset( VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask, "hvmsg%d", cpu); TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0, vmbus_msg_task, sc); } return (vmbus_setup_intr1(sc)); } static void vmbus_intr_teardown(struct vmbus_softc *sc) { vmbus_intr_teardown1(sc); } static int vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } static int vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { const struct vmbus_channel *chan; char guidbuf[HYPERV_GUID_STRLEN]; chan = vmbus_get_channel(child); if (chan == NULL) { /* Event timer device, which does not belong to a channel */ return (0); } hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf)); sbuf_printf(sb, "classid=%s", guidbuf); hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf)); sbuf_printf(sb, " deviceid=%s", guidbuf); return (0); } int vmbus_add_child(struct vmbus_channel *chan) { struct vmbus_softc *sc = chan->ch_vmbus; device_t parent = sc->vmbus_dev; bus_topo_lock(); chan->ch_dev = device_add_child(parent, NULL, DEVICE_UNIT_ANY); if (chan->ch_dev == NULL) { bus_topo_unlock(); device_printf(parent, "device_add_child for chan%u failed\n", chan->ch_id); return (ENXIO); } device_set_ivars(chan->ch_dev, chan); device_probe_and_attach(chan->ch_dev); bus_topo_unlock(); return (0); } int vmbus_delete_child(struct vmbus_channel *chan) { int error = 0; bus_topo_lock(); if (chan->ch_dev != NULL) { error = device_delete_child(chan->ch_vmbus->vmbus_dev, chan->ch_dev); chan->ch_dev = NULL; } bus_topo_unlock(); return (error); } static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS) { struct vmbus_softc *sc = arg1; char verstr[16]; snprintf(verstr, sizeof(verstr), "%u.%u", VMBUS_VERSION_MAJOR(sc->vmbus_version), VMBUS_VERSION_MINOR(sc->vmbus_version)); return sysctl_handle_string(oidp, verstr, sizeof(verstr), req); } /* * We need the function to make sure the MMIO resource is allocated from the * ranges found in _CRS. * * For the release function, we can use bus_generic_release_resource(). 
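* For illustration: when a device under vmbus, e.g. a passed-through PCI function, asks for a memory resource, the request lands in vmbus_alloc_resource() below and is carved out of sc->vmbus_mmio_res, i.e. out of the windows harvested from ACPI _CRS by vmbus_get_mmio_res(), instead of being forwarded blindly to the parent.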
*/ static struct resource * vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { device_t parent = device_get_parent(dev); struct resource *res; if (type == SYS_RES_MEMORY) { struct vmbus_softc *sc = device_get_softc(dev); res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type, rid, start, end, count, flags); } else { res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start, end, count, flags); } return (res); } static int vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs) { return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs) { return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } static int vmbus_alloc_msix(device_t bus, device_t dev, int *irq) { return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int vmbus_release_msix(device_t bus, device_t dev, int irq) { return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } static int vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data)); } static uint32_t vmbus_get_version_method(device_t bus, device_t dev) { struct vmbus_softc *sc = device_get_softc(bus); return sc->vmbus_version; } static int vmbus_probe_guid_method(device_t bus, device_t dev, const struct hyperv_guid *guid) { const struct vmbus_channel *chan = vmbus_get_channel(dev); if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0) return 0; return ENXIO; } static uint32_t vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu) { const struct vmbus_softc *sc = device_get_softc(bus); return (VMBUS_PCPU_GET(sc, vcpuid, cpu)); } static struct taskqueue * vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu) { const struct vmbus_softc *sc = device_get_softc(bus); KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu)); return (VMBUS_PCPU_GET(sc, event_tq, cpu)); } #define VTPM_BASE_ADDR 0xfed40000 #define FOUR_GB (1ULL << 32) enum parse_pass { parse_64, parse_32 }; struct parse_context { device_t vmbus_dev; enum parse_pass pass; }; static ACPI_STATUS parse_crs(ACPI_RESOURCE *res, void *ctx) { const struct parse_context *pc = ctx; device_t vmbus_dev = pc->vmbus_dev; struct vmbus_softc *sc = device_get_softc(vmbus_dev); UINT64 start, end; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS32: start = res->Data.Address32.Address.Minimum; end = res->Data.Address32.Address.Maximum; break; case ACPI_RESOURCE_TYPE_ADDRESS64: start = res->Data.Address64.Address.Minimum; end = res->Data.Address64.Address.Maximum; break; default: /* Unused types. */ return (AE_OK); } /* * We don't use <1MB addresses. */ if (end < 0x100000) return (AE_OK); /* Don't conflict with vTPM. 
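* If a decoded window straddles the vTPM range, clip it to end just below VTPM_BASE_ADDR. A worked example with made-up numbers: a _CRS window of 0xfe000000-0xffffffff is recorded as 0xfe000000-0xfed3ffff, keeping 0xfed40000 and up out of that window.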
*/ if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR) end = VTPM_BASE_ADDR - 1; if ((pc->pass == parse_32 && start < FOUR_GB) || (pc->pass == parse_64 && start >= FOUR_GB)) pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY, start, end, 0); return (AE_OK); } static void vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass) { struct parse_context pc; ACPI_STATUS status; if (bootverbose) device_printf(dev, "walking _CRS, pass=%d\n", pass); pc.vmbus_dev = vmbus_dev; pc.pass = pass; status = AcpiWalkResources(acpi_get_handle(dev), "_CRS", parse_crs, &pc); if (bootverbose && ACPI_FAILURE(status)) device_printf(dev, "_CRS: not found, pass=%d\n", pass); } static void vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass) { device_t acpi0, parent; parent = device_get_parent(dev); acpi0 = device_get_parent(parent); if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) { device_t *children; int count; /* * Try to locate VMBUS resources and find _CRS on them. */ if (device_get_children(acpi0, &children, &count) == 0) { int i; for (i = 0; i < count; ++i) { if (!device_is_attached(children[i])) continue; if (strcmp("vmbus_res", device_get_name(children[i])) == 0) vmbus_get_crs(children[i], dev, pass); } free(children, M_TEMP); } /* * Try to find _CRS on acpi. */ vmbus_get_crs(acpi0, dev, pass); } else { device_printf(dev, "not grandchild of acpi\n"); } /* * Try to find _CRS on the parent. */ vmbus_get_crs(parent, dev, pass); } static void vmbus_get_mmio_res(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); /* * We walk the resources twice so that, in the resource list, the * 32-bit resources appear after the 64-bit resources. * NB: resource_list_add() uses INSERT_TAIL. This way, when we * iterate through the list to find a range for a 64-bit BAR in * vmbus_alloc_resource(), we can make sure we try to use >4GB * ranges first. */ pcib_host_res_init(dev, &sc->vmbus_mmio_res); vmbus_get_mmio_res_pass(dev, parse_64); vmbus_get_mmio_res_pass(dev, parse_32); } /* * On Gen2 VMs, Hyper-V provides MMIO space for the framebuffer. * This MMIO address range is not usable by other PCI devices. * Currently only the efifb and vbefb drivers use this range, without * reserving it from the system. * Therefore, the vmbus driver reserves it before any other PCI device * drivers start to request MMIO addresses. */ static struct resource *hv_fb_res; static void vmbus_fb_mmio_res(device_t dev) { struct efi_fb *efifb; #if !defined(__aarch64__) struct vbe_fb *vbefb; #endif /* !__aarch64__ */ rman_res_t fb_start, fb_end, fb_count; int fb_height, fb_width; caddr_t kmdp; struct vmbus_softc *sc = device_get_softc(dev); int rid = 0; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); #if !defined(__aarch64__) vbefb = (struct vbe_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_VBE_FB); #endif /* !__aarch64__ */ if (efifb != NULL) { fb_start = efifb->fb_addr; fb_end = efifb->fb_addr + efifb->fb_size; fb_count = efifb->fb_size; fb_height = efifb->fb_height; fb_width = efifb->fb_width; } #if !defined(__aarch64__) else if (vbefb != NULL) { fb_start = vbefb->fb_addr; fb_end = vbefb->fb_addr + vbefb->fb_size; fb_count = vbefb->fb_size; fb_height = vbefb->fb_height; fb_width = vbefb->fb_width; } #endif /* !__aarch64__ */ else { if (bootverbose) device_printf(dev, "no preloaded kernel fb information\n"); /* We are on a Gen1 VM, just return.
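* (Gen1 VMs emulate a legacy VGA adapter rather than exposing an EFI framebuffer, so the loader records no framebuffer metadata and there is nothing to reserve.)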
*/ return; } if (bootverbose) device_printf(dev, "fb: fb_addr: %#jx, size: %#jx, " "actual size needed: 0x%x\n", fb_start, fb_count, fb_height * fb_width); hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev, SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count, RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE)); if (hv_fb_res && bootverbose) device_printf(dev, "successfully reserved memory for framebuffer " "starting at %#jx, size %#jx\n", fb_start, fb_count); } static void vmbus_free_mmio_res(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); pcib_host_res_free(dev, &sc->vmbus_mmio_res); if (hv_fb_res) hv_fb_res = NULL; } static void vmbus_identify(driver_t *driver, device_t parent) { if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV || (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) return; device_add_child(parent, "vmbus", DEVICE_UNIT_ANY); } static int vmbus_probe(device_t dev) { if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV || (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) return (ENXIO); device_set_desc(dev, "Hyper-V Vmbus"); return (BUS_PROBE_DEFAULT); } #if defined(__x86_64__) static int vmbus_alloc_cpu_mem(struct vmbus_softc *sc) { int cpu; CPU_FOREACH(cpu) { void **hv_cpu_mem; hv_cpu_mem = VMBUS_PCPU_PTR(sc, cpu_mem, cpu); *hv_cpu_mem = contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0); if (*hv_cpu_mem == NULL) return ENOMEM; } return 0; } static void vmbus_free_cpu_mem(struct vmbus_softc *sc) { int cpu; CPU_FOREACH(cpu) { void **hv_cpu_mem; hv_cpu_mem = VMBUS_PCPU_PTR(sc, cpu_mem, cpu); if(*hv_cpu_mem != NULL) { free(*hv_cpu_mem, M_DEVBUF); *hv_cpu_mem = NULL; } } } #endif /** * @brief Main vmbus driver initialization routine. * * Here, we * - initialize the vmbus driver context * - setup various driver entry points * - invoke the vmbus hv main init routine * - get the irq resource * - invoke the vmbus to add the vmbus root device * - setup the vmbus root device * - retrieve the channel offers */ static int vmbus_doattach(struct vmbus_softc *sc) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; int ret; device_t dev_res; ACPI_HANDLE handle; unsigned int coherent = 0; if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED) return (0); vmbus_get_mmio_res(sc->vmbus_dev); vmbus_fb_mmio_res(sc->vmbus_dev); sc->vmbus_flags |= VMBUS_FLAG_ATTACHED; sc->vmbus_gpadl = VMBUS_GPADL_START; mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF); TAILQ_INIT(&sc->vmbus_prichans); mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF); TAILQ_INIT(&sc->vmbus_chans); sc->vmbus_chmap = malloc( sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF, M_WAITOK | M_ZERO); /* Coherency attribute */ dev_res = devclass_get_device(devclass_find("vmbus_res"), 0); if (dev_res != NULL) { handle = acpi_get_handle(dev_res); if (ACPI_FAILURE(acpi_GetInteger(handle, "_CCA", &coherent))) coherent = 0; } if (bootverbose) device_printf(sc->vmbus_dev, "Bus is%s cache-coherent\n", coherent ? "" : " not"); bus_dma_tag_create(bus_get_dma_tag(sc->vmbus_dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, &sc->dmat); /* * Create context for "post message" Hypercalls */ sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev), HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE, sizeof(struct vmbus_msghc)); if (sc->vmbus_xc == NULL) { ret = ENXIO; goto cleanup; } /* * Allocate DMA stuffs. 
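* (The per-CPU SynIC message and event-flag pages, the shared event-flag page, and the two monitor notification pages; see vmbus_dma_alloc() above.)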
*/ ret = vmbus_dma_alloc(sc); if (ret != 0) goto cleanup; /* * Setup interrupt. */ ret = vmbus_intr_setup(sc); if (ret != 0) goto cleanup; #if defined(__x86_64__) /* * Alloc per cpu memory for tlb flush hypercall */ if (hv_tlb_hcall) { ret = vmbus_alloc_cpu_mem(sc); if (ret != 0) { hv_tlb_hcall = 0; if (bootverbose) device_printf(sc->vmbus_dev, "cannot alloc contig memory for " "cpu_mem, use system provided " "tlb flush call.\n"); vmbus_free_cpu_mem(sc); } } #endif /* * Setup SynIC. */ if (bootverbose) device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started); smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc); sc->vmbus_flags |= VMBUS_FLAG_SYNIC; #if defined(__x86_64__) if (hv_tlb_hcall) smp_targeted_tlb_shootdown = &hyperv_vm_tlb_flush; #endif /* * Initialize vmbus, e.g. connect to Hypervisor. */ ret = vmbus_init(sc); if (ret != 0) goto cleanup; if (sc->vmbus_version == VMBUS_VERSION_WS2008 || sc->vmbus_version == VMBUS_VERSION_WIN7) sc->vmbus_event_proc = vmbus_event_proc_compat; else sc->vmbus_event_proc = vmbus_event_proc; ret = vmbus_scan(sc); if (ret != 0) goto cleanup; ctx = device_get_sysctl_ctx(sc->vmbus_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, vmbus_sysctl_version, "A", "vmbus version"); return (ret); cleanup: vmbus_scan_teardown(sc); vmbus_intr_teardown(sc); vmbus_dma_free(sc); if (sc->vmbus_xc != NULL) { vmbus_xact_ctx_destroy(sc->vmbus_xc); sc->vmbus_xc = NULL; } free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); mtx_destroy(&sc->vmbus_prichan_lock); mtx_destroy(&sc->vmbus_chan_lock); return (ret); } static void vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused) { } #if defined(EARLY_AP_STARTUP) static void vmbus_intrhook(void *xsc) { struct vmbus_softc *sc = xsc; if (bootverbose) device_printf(sc->vmbus_dev, "intrhook\n"); vmbus_doattach(sc); config_intrhook_disestablish(&sc->vmbus_intrhook); } #endif /* EARLY_AP_STARTUP */ static int vmbus_attach(device_t dev) { vmbus_sc = device_get_softc(dev); vmbus_sc->vmbus_dev = dev; vmbus_sc->vmbus_idtvec = -1; /* * Event processing logic will be configured: * - After the vmbus protocol version negotiation. * - Before we request channel offers. */ vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy; #if defined(EARLY_AP_STARTUP) /* * Defer the real attach until the pause(9) works as expected. 
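* The message-hypercall retry loop in vmbus_msghc_exec_noresult() depends on pause_sbt() actually sleeping, which is not yet the case at this point in boot; config_intrhook runs us again once interrupts are enabled, which is late enough.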
*/ vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook; vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc; config_intrhook_establish(&vmbus_sc->vmbus_intrhook); #endif /* EARLY_AP_STARTUP */ return (0); } static int vmbus_detach(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); bus_generic_detach(dev); vmbus_chan_destroy_all(sc); vmbus_scan_teardown(sc); vmbus_disconnect(sc); if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) { sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC; smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL); } #if defined(__x86_64__) /* * Restore the TLB flush to the native call. */ if (hv_tlb_hcall) { smp_targeted_tlb_shootdown = &smp_targeted_tlb_shootdown_native; vmbus_free_cpu_mem(sc); } #endif vmbus_intr_teardown(sc); vmbus_dma_free(sc); if (sc->vmbus_xc != NULL) { vmbus_xact_ctx_destroy(sc->vmbus_xc); sc->vmbus_xc = NULL; } free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); mtx_destroy(&sc->vmbus_prichan_lock); mtx_destroy(&sc->vmbus_chan_lock); vmbus_free_mmio_res(dev); #if defined(__aarch64__) bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector, sc->ires); #endif return (0); } #if !defined(EARLY_AP_STARTUP) static void vmbus_sysinit(void *arg __unused) { struct vmbus_softc *sc = vmbus_get_softc(); if (vm_guest != VM_GUEST_HV || sc == NULL) return; vmbus_doattach(sc); } /* * NOTE: * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is * initialized. */ SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL); #endif /* !EARLY_AP_STARTUP */ diff --git a/sys/dev/iicbus/iicbus.c b/sys/dev/iicbus/iicbus.c index f48338865e0a..bfc86ebe28a9 100644 --- a/sys/dev/iicbus/iicbus.c +++ b/sys/dev/iicbus/iicbus.c @@ -1,398 +1,398 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Autoconfiguration and support routines for the Philips serial I2C bus */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" /* See comments below for why auto-scanning is a bad idea.
*/ #define SCAN_IICBUS 0 SYSCTL_NODE(_hw, OID_AUTO, i2c, CTLFLAG_RW, 0, "i2c controls"); static int iicbus_probe(device_t dev) { device_set_desc(dev, "Philips I2C bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } #if SCAN_IICBUS static int iic_probe_device(device_t dev, u_char addr) { int count; char byte; if ((addr & 1) == 0) { /* is the device writable? */ if (!iicbus_start(dev, (u_char)addr, 0)) { iicbus_stop(dev); return (1); } } else { /* is the device readable? */ if (!iicbus_block_read(dev, (u_char)addr, &byte, 1, &count)) return (1); } return (0); } #endif /* * We add all the devices which we know about. * The generic attach routine will attach them if they are alive. */ int iicbus_attach_common(device_t dev, u_int bus_freq) { #if SCAN_IICBUS unsigned char addr; #endif struct iicbus_softc *sc = IICBUS_SOFTC(dev); int strict; sc->dev = dev; mtx_init(&sc->lock, "iicbus", NULL, MTX_DEF); iicbus_init_frequency(dev, bus_freq); iicbus_reset(dev, IIC_FASTEST, 0, NULL); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "strict", &strict) == 0) sc->strict = strict; else sc->strict = 1; /* Device probing is meaningless, since the bus is supposed to be * hot-pluggable. Moreover, some I2C chips do not appreciate random * accesses, such as a stop too soon after a start, or reads of less * than x bytes... */ #if SCAN_IICBUS printf("Probing for devices on iicbus%d:", device_get_unit(dev)); /* probe any devices */ for (addr = 16; addr < 240; addr++) { if (iic_probe_device(dev, (u_char)addr)) { printf(" <%x>", addr); } } printf("\n"); #endif - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int iicbus_attach(device_t dev) { return (iicbus_attach_common(dev, 0)); } int iicbus_detach(device_t dev) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); int err; if ((err = device_delete_children(dev)) != 0) return (err); iicbus_reset(dev, IIC_FASTEST, 0, NULL); mtx_destroy(&sc->lock); return (0); } static int iicbus_print_child(device_t dev, device_t child) { struct iicbus_ivar *devi = IICBUS_IVAR(child); int retval = 0; retval += bus_print_child_header(dev, child); if (devi->addr != 0) retval += printf(" at addr %#x", devi->addr); resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } void iicbus_probe_nomatch(device_t bus, device_t child) { struct iicbus_ivar *devi = IICBUS_IVAR(child); device_printf(bus, " at addr %#x\n", devi->addr); } int iicbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct iicbus_ivar *devi = IICBUS_IVAR(child); sbuf_printf(sb, "addr=%#x", devi->addr); return (0); } int iicbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { return (0); } int iicbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct iicbus_ivar *devi = IICBUS_IVAR(child); switch (which) { default: return (EINVAL); case IICBUS_IVAR_ADDR: *result = devi->addr; break; } return (0); } int iicbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct iicbus_ivar *devi = IICBUS_IVAR(child); switch (which) { default: return (EINVAL); case IICBUS_IVAR_ADDR: if (devi->addr != 0) return (EINVAL); devi->addr = value; } return (0); } device_t iicbus_add_child_common(device_t dev, u_int order, const char *name, int unit, size_t ivars_size) { device_t child; struct iicbus_ivar *devi; child = device_add_child_ordered(dev, order, name, unit); if (child ==
NULL) return (child); devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } resource_list_init(&devi->rl); device_set_ivars(child, devi); return (child); } static device_t iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { return (iicbus_add_child_common( dev, order, name, unit, sizeof(struct iicbus_ivar))); } static void iicbus_child_deleted(device_t dev, device_t child) { struct iicbus_ivar *devi; devi = device_get_ivars(child); if (devi == NULL) return; resource_list_free(&devi->rl); free(devi, M_DEVBUF); } static void iicbus_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; int irq; struct iicbus_ivar *devi; child = BUS_ADD_CHILD(bus, 0, dname, dunit); devi = IICBUS_IVAR(child); resource_int_value(dname, dunit, "addr", &devi->addr); if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static struct resource_list * iicbus_get_resource_list(device_t bus __unused, device_t child) { struct iicbus_ivar *devi; devi = IICBUS_IVAR(child); return (&devi->rl); } int iicbus_generic_intr(device_t dev, int event, char *buf) { return (0); } int iicbus_null_callback(device_t dev, int index, caddr_t data) { return (0); } int iicbus_null_repeated_start(device_t dev, u_char addr) { return (IIC_ENOTSUPP); } void iicbus_init_frequency(device_t dev, u_int bus_freq) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); /* * If a bus frequency value was passed in, use it. Otherwise initialize * it first to the standard i2c 100KHz frequency, then override that * from a hint if one exists. */ if (bus_freq > 0) sc->bus_freq = bus_freq; else { sc->bus_freq = 100000; resource_int_value(device_get_name(dev), device_get_unit(dev), "frequency", (int *)&sc->bus_freq); } /* * Set up the sysctl that allows the bus frequency to be changed. * It is flagged as a tunable so that the user can set the value in * loader(8), and that will override any other setting from any source. * The sysctl tunable/value is the one most directly controlled by the * user and thus the one that always takes precedence. */ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "frequency", CTLFLAG_RWTUN, &sc->bus_freq, sc->bus_freq, "Bus frequency in Hz"); } static u_int iicbus_get_frequency(device_t dev, u_char speed) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); /* * If the frequency has not been configured for the bus, or the request * is specifically for SLOW speed, use the standard 100KHz rate, else * use the configured bus speed. 
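* For example, a loader.conf line such as hint.iicbus.0.frequency="400000" (the unit number is hypothetical) makes iicbus_init_frequency() record 400kHz, and every later iicbus_get_frequency() call for a speed other than IIC_SLOW returns that value.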
*/ if (sc->bus_freq == 0 || speed == IIC_SLOW) return (100000); return (sc->bus_freq); } static device_method_t iicbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, iicbus_probe), DEVMETHOD(device_attach, iicbus_attach), DEVMETHOD(device_detach, iicbus_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource_list, iicbus_get_resource_list), DEVMETHOD(bus_add_child, iicbus_add_child), DEVMETHOD(bus_child_deleted, iicbus_child_deleted), DEVMETHOD(bus_print_child, iicbus_print_child), DEVMETHOD(bus_probe_nomatch, iicbus_probe_nomatch), DEVMETHOD(bus_read_ivar, iicbus_read_ivar), DEVMETHOD(bus_write_ivar, iicbus_write_ivar), DEVMETHOD(bus_child_pnpinfo, iicbus_child_pnpinfo), DEVMETHOD(bus_child_location, iicbus_child_location), DEVMETHOD(bus_hinted_child, iicbus_hinted_child), /* iicbus interface */ DEVMETHOD(iicbus_transfer, iicbus_transfer), DEVMETHOD(iicbus_get_frequency, iicbus_get_frequency), DEVMETHOD_END }; driver_t iicbus_driver = { "iicbus", iicbus_methods, sizeof(struct iicbus_softc), }; MODULE_VERSION(iicbus, IICBUS_MODVER); DRIVER_MODULE(iicbus, iichb, iicbus_driver, 0, 0); diff --git a/sys/dev/iicbus/ofw_iicbus.c b/sys/dev/iicbus/ofw_iicbus.c index 04fc611d37cf..4b0494f8fcc4 100644 --- a/sys/dev/iicbus/ofw_iicbus.c +++ b/sys/dev/iicbus/ofw_iicbus.c @@ -1,261 +1,261 @@ /*- * Copyright (c) 2009, Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include "ofw_iicbus_if.h" /* Methods */ static device_probe_t ofw_iicbus_probe; static device_attach_t ofw_iicbus_attach; static device_t ofw_iicbus_add_child(device_t dev, u_int order, const char *name, int unit); static const struct ofw_bus_devinfo *ofw_iicbus_get_devinfo(device_t bus, device_t dev); static int ofw_iicbus_set_devinfo(device_t bus, device_t dev, phandle_t ofw_node, char *ofw_name, char *ofw_compat, uint32_t i2c_addr); static device_method_t ofw_iicbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_iicbus_probe), DEVMETHOD(device_attach, ofw_iicbus_attach), /* Bus interface */ DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_iicbus_add_child), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_iicbus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* ofw_iicbus interface */ DEVMETHOD(ofw_iicbus_set_devinfo, ofw_iicbus_set_devinfo), DEVMETHOD_END }; struct ofw_iicbus_devinfo { struct iicbus_ivar opd_dinfo; /* Must be the first. */ struct ofw_bus_devinfo opd_obdinfo; }; DEFINE_CLASS_1(iicbus, ofw_iicbus_driver, ofw_iicbus_methods, sizeof(struct iicbus_softc), iicbus_driver); EARLY_DRIVER_MODULE(ofw_iicbus, iicbb, ofw_iicbus_driver, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(ofw_iicbus, iichb, ofw_iicbus_driver, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(ofw_iicbus, twsi, ofw_iicbus_driver, 0, 0, BUS_PASS_BUS); MODULE_VERSION(ofw_iicbus, 1); MODULE_DEPEND(ofw_iicbus, iicbus, 1, 1, 1); static int ofw_iicbus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW I2C bus"); return (0); } static int ofw_iicbus_attach(device_t dev) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); struct ofw_iicbus_devinfo *dinfo; phandle_t child, node, root; pcell_t freq, paddr; device_t childdev; ssize_t compatlen; char compat[255]; char *curstr; u_int iic_addr_8bit = 0; sc->dev = dev; mtx_init(&sc->lock, "iicbus", NULL, MTX_DEF); /* * If there is a clock-frequency property for the device node, use it as * the starting value for the bus frequency. Then call the common * routine that handles the tunable/sysctl which allows the FDT value to * be overridden by the user. */ node = ofw_bus_get_node(dev); freq = 0; OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)); iicbus_init_frequency(dev, freq); iicbus_reset(dev, IIC_FASTEST, 0, NULL); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); /* * Check if we're running on a PowerMac, needed for the I2C * address below. */ root = OF_peer(0); compatlen = OF_getprop(root, "compatible", compat, sizeof(compat)); if (compatlen != -1) { for (curstr = compat; curstr < compat + compatlen; curstr += strlen(curstr) + 1) { if (strncmp(curstr, "MacRISC", 7) == 0) iic_addr_8bit = 1; } } /* * Attach those children represented in the device tree. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { /* * Try to get the I2C address first from the i2c-address * property, then try the reg property. It moves around * on different systems. 
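* A typical FDT fragment (illustrative only) is rtc@68 { compatible = "dallas,ds1307"; reg = <0x68>; }; the 0x68 is a 7-bit address, and the shift to FreeBSD's 8-bit convention happens below.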
*/ if (OF_getencprop(child, "i2c-address", &paddr, sizeof(paddr)) == -1) if (OF_getencprop(child, "reg", &paddr, sizeof(paddr)) == -1) continue; /* * Now set up the I2C and OFW bus layer devinfo and add it * to the bus. */ dinfo = malloc(sizeof(struct ofw_iicbus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) continue; /* * FreeBSD drivers expect I2C addresses to be expressed as * 8-bit values. Apple OFW data contains 8-bit values, but * Linux FDT data contains 7-bit values, so shift them up to * 8-bit format. */ if (iic_addr_8bit) dinfo->opd_dinfo.addr = paddr; else dinfo->opd_dinfo.addr = paddr << 1; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } childdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY); resource_list_init(&dinfo->opd_dinfo.rl); ofw_bus_intr_to_rl(childdev, child, &dinfo->opd_dinfo.rl, NULL); device_set_ivars(childdev, dinfo); } /* Register the bus */ OF_device_register_xref(OF_xref_from_node(node), dev); return (bus_generic_attach(dev)); } static device_t ofw_iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_iicbus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_iicbus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. */ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static const struct ofw_bus_devinfo * ofw_iicbus_get_devinfo(device_t bus, device_t dev) { struct ofw_iicbus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } static int ofw_iicbus_set_devinfo(device_t bus, device_t dev, phandle_t ofw_node, char *ofw_name, char *ofw_compat, uint32_t i2c_addr) { struct ofw_iicbus_devinfo *devi; /* * Set up the OFW-related parts of the ivars for manually * created ofw_iicbus children. */ devi = device_get_ivars(dev); if (devi == NULL) return (ENXIO); devi->opd_obdinfo.obd_node = ofw_node; if (ofw_name != NULL) devi->opd_obdinfo.obd_name = strdup(ofw_name, M_OFWPROP); if (ofw_compat != NULL) devi->opd_obdinfo.obd_compat = strdup(ofw_compat, M_OFWPROP); devi->opd_dinfo.addr = i2c_addr; return (0); } diff --git a/sys/dev/mdio/mdio.c b/sys/dev/mdio/mdio.c index 64d3b23c2372..827165ccd349 100644 --- a/sys/dev/mdio/mdio.c +++ b/sys/dev/mdio/mdio.c @@ -1,125 +1,125 @@ /*- * Copyright (c) 2011-2012 Stefan Bethke. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include "mdio_if.h" static void mdio_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, mdio_driver.name, -1) == NULL) BUS_ADD_CHILD(parent, 0, mdio_driver.name, DEVICE_UNIT_ANY); } static int mdio_probe(device_t dev) { device_set_desc(dev, "MDIO"); return (BUS_PROBE_SPECIFIC); } static int mdio_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } static int mdio_readreg(device_t dev, int phy, int reg) { return (MDIO_READREG(device_get_parent(dev), phy, reg)); } static int mdio_writereg(device_t dev, int phy, int reg, int val) { return (MDIO_WRITEREG(device_get_parent(dev), phy, reg, val)); } static int mdio_readextreg(device_t dev, int phy, int devad, int reg) { return (MDIO_READEXTREG(device_get_parent(dev), phy, devad, reg)); } static int mdio_writeextreg(device_t dev, int phy, int devad, int reg, int val) { return (MDIO_WRITEEXTREG(device_get_parent(dev), phy, devad, reg, val)); } static void mdio_hinted_child(device_t dev, const char *name, int unit) { device_add_child(dev, name, unit); } static device_method_t mdio_methods[] = { /* device interface */ DEVMETHOD(device_identify, mdio_identify), DEVMETHOD(device_probe, mdio_probe), DEVMETHOD(device_attach, mdio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_hinted_child, mdio_hinted_child), /* MDIO access */ DEVMETHOD(mdio_readreg, mdio_readreg), DEVMETHOD(mdio_writereg, mdio_writereg), DEVMETHOD(mdio_readextreg, mdio_readextreg), DEVMETHOD(mdio_writeextreg, mdio_writeextreg), DEVMETHOD_END }; driver_t mdio_driver = { "mdio", mdio_methods, 0 }; MODULE_VERSION(mdio, 1); diff --git a/sys/dev/ofw/ofw_cpu.c b/sys/dev/ofw/ofw_cpu.c index a79413157a68..ae4863b45c0f 100644 --- a/sys/dev/ofw/ofw_cpu.c +++ b/sys/dev/ofw/ofw_cpu.c @@ -1,401 +1,401 @@ /*- * Copyright (C) 2009 Nathan Whitehorn * Copyright (C) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__arm__) || defined(__arm64__) || defined(__riscv__) #include #endif static int ofw_cpulist_probe(device_t); static int ofw_cpulist_attach(device_t); static const struct ofw_bus_devinfo *ofw_cpulist_get_devinfo(device_t dev, device_t child); static MALLOC_DEFINE(M_OFWCPU, "ofwcpu", "OFW CPU device information"); struct ofw_cpulist_softc { pcell_t sc_addr_cells; }; static device_method_t ofw_cpulist_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_cpulist_probe), DEVMETHOD(device_attach, ofw_cpulist_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_get_device_path, ofw_bus_gen_get_device_path), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_cpulist_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t ofw_cpulist_driver = { "cpulist", ofw_cpulist_methods, sizeof(struct ofw_cpulist_softc) }; DRIVER_MODULE(ofw_cpulist, ofwbus, ofw_cpulist_driver, 0, 0); static int ofw_cpulist_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL || strcmp(name, "cpus") != 0) return (ENXIO); device_set_desc(dev, "Open Firmware CPU Group"); return (0); } static int ofw_cpulist_attach(device_t dev) { struct ofw_cpulist_softc *sc; phandle_t root, child; device_t cdev; struct ofw_bus_devinfo *dinfo; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); sc->sc_addr_cells = 1; OF_getencprop(root, "#address-cells", &sc->sc_addr_cells, sizeof(sc->sc_addr_cells)); for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_OFWCPU, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(dinfo, child) != 0) { free(dinfo, M_OFWCPU); continue; } cdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->obd_name); ofw_bus_gen_destroy_devinfo(dinfo); free(dinfo, M_OFWCPU); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static const struct ofw_bus_devinfo * ofw_cpulist_get_devinfo(device_t dev, device_t child) { return (device_get_ivars(child)); } static int ofw_cpu_probe(device_t); static int ofw_cpu_attach(device_t); static int ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); struct ofw_cpu_softc { struct pcpu *sc_cpu_pcpu; uint32_t sc_nominal_mhz; bool sc_reg_valid; pcell_t sc_reg[2]; }; static device_method_t ofw_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_cpu_probe), DEVMETHOD(device_attach, ofw_cpu_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), 
DEVMETHOD(bus_read_ivar, ofw_cpu_read_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,bus_generic_activate_resource), DEVMETHOD_END }; static driver_t ofw_cpu_driver = { "cpu", ofw_cpu_methods, sizeof(struct ofw_cpu_softc) }; DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, 0, 0); static int ofw_cpu_probe(device_t dev) { const char *type = ofw_bus_get_type(dev); if (type == NULL || strcmp(type, "cpu") != 0) return (ENXIO); device_set_desc(dev, "Open Firmware CPU"); if (!bootverbose && device_get_unit(dev) != 0) { device_quiet(dev); device_quiet_children(dev); } return (0); } static int ofw_cpu_attach(device_t dev) { struct ofw_cpulist_softc *psc; struct ofw_cpu_softc *sc; phandle_t node; pcell_t cell; int rv; #if defined(__arm__) || defined(__aarch64__) || defined(__riscv) clk_t cpuclk; uint64_t freq; #endif sc = device_get_softc(dev); psc = device_get_softc(device_get_parent(dev)); if (nitems(sc->sc_reg) < psc->sc_addr_cells) { if (bootverbose) device_printf(dev, "Too many address cells\n"); return (EINVAL); } node = ofw_bus_get_node(dev); /* Read and validate the reg property for use later */ sc->sc_reg_valid = false; rv = OF_getencprop(node, "reg", sc->sc_reg, sizeof(sc->sc_reg)); if (rv < 0) device_printf(dev, "missing 'reg' property\n"); else if ((rv % 4) != 0) { if (bootverbose) device_printf(dev, "Malformed reg property\n"); } else if ((rv / 4) != psc->sc_addr_cells) { if (bootverbose) device_printf(dev, "Invalid reg size %u\n", rv); } else sc->sc_reg_valid = true; #ifdef __powerpc__ /* * On powerpc, "interrupt-servers" denotes an SMT CPU. Look for any * thread on this CPU, and assign that. 
*/ if (OF_hasprop(node, "ibm,ppc-interrupt-server#s")) { struct cpuref cpuref; cell_t *servers; int i, nservers, rv; if ((nservers = OF_getencprop_alloc(node, "ibm,ppc-interrupt-server#s", (void **)&servers)) < 0) return (ENXIO); nservers /= sizeof(cell_t); for (i = 0; i < nservers; i++) { for (rv = platform_smp_first_cpu(&cpuref); rv == 0; rv = platform_smp_next_cpu(&cpuref)) { if (cpuref.cr_hwref == servers[i]) { sc->sc_cpu_pcpu = pcpu_find(cpuref.cr_cpuid); if (sc->sc_cpu_pcpu == NULL) { OF_prop_free(servers); return (ENXIO); } break; } } if (rv != ENOENT) break; } OF_prop_free(servers); if (sc->sc_cpu_pcpu == NULL) { device_printf(dev, "No CPU found for this device.\n"); return (ENXIO); } } else #endif sc->sc_cpu_pcpu = pcpu_find(device_get_unit(dev)); if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) < 0) { #if defined(__arm__) || defined(__aarch64__) || defined(__riscv) rv = clk_get_by_ofw_index(dev, 0, 0, &cpuclk); if (rv == 0) { rv = clk_get_freq(cpuclk, &freq); if (rv != 0 && bootverbose) device_printf(dev, "Cannot get freq of property clocks\n"); else sc->sc_nominal_mhz = freq / 1000000; } else #endif { if (bootverbose) device_printf(dev, "missing 'clock-frequency' property\n"); } } else sc->sc_nominal_mhz = cell / 1000000; /* convert to MHz */ if (sc->sc_nominal_mhz != 0 && bootverbose) device_printf(dev, "Nominal frequency %dMhz\n", sc->sc_nominal_mhz); - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); } static int ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct ofw_cpulist_softc *psc; struct ofw_cpu_softc *sc; sc = device_get_softc(dev); switch (index) { case CPU_IVAR_PCPU: *result = (uintptr_t)sc->sc_cpu_pcpu; return (0); case CPU_IVAR_NOMINAL_MHZ: if (sc->sc_nominal_mhz > 0) { *result = (uintptr_t)sc->sc_nominal_mhz; return (0); } break; case CPU_IVAR_CPUID_SIZE: psc = device_get_softc(device_get_parent(dev)); *result = psc->sc_addr_cells; return (0); case CPU_IVAR_CPUID: if (sc->sc_reg_valid) { *result = (uintptr_t)sc->sc_reg; return (0); } break; } return (ENOENT); } int ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, bool only_runnable) { phandle_t node, child; pcell_t addr_cells, reg[2]; char status[16]; char device_type[16]; u_int id, next_id; int count, rv; count = 0; id = 0; next_id = 0; node = OF_finddevice("/cpus"); if (node == -1) return (-1); /* Find the number of cells in the cpu register */ if (OF_getencprop(node, "#address-cells", &addr_cells, sizeof(addr_cells)) < 0) return (-1); for (child = OF_child(node); child != 0; child = OF_peer(child), id = next_id) { /* Check if child is a CPU */ memset(device_type, 0, sizeof(device_type)); rv = OF_getprop(child, "device_type", device_type, sizeof(device_type) - 1); if (rv < 0) continue; if (strcmp(device_type, "cpu") != 0) continue; /* We're processing a CPU; update next_id used in the next iteration */ next_id++; /* * If we are filtering by runnable then limit to only * those that have been enabled, or provide a method * to enable them. 
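 * As a hedged illustration (the callback name and body here are invented,
 * not part of this change), an early-boot caller counts the usable CPUs
 * described in the device tree like this:
 *
 *	static bool
 *	example_cpu_cb(u_int id, phandle_t node, u_int addr_cells,
 *	    pcell_t *reg)
 *	{
 *		return (true);
 *	}
 *
 *	ncpu = ofw_cpu_early_foreach(example_cpu_cb, true);
 *
 * Each callback invocation receives the logical id, the OF node, and the
 * raw "reg" cells; returning true counts the CPU. With only_runnable set
 * the function returns that count, otherwise it returns the total number
 * of CPU nodes visited.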
*/ if (only_runnable) { status[0] = '\0'; OF_getprop(child, "status", status, sizeof(status)); if (status[0] != '\0' && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0 && !OF_hasprop(child, "enable-method")) continue; } /* * Check we have a register to identify the cpu */ rv = OF_getencprop(child, "reg", reg, addr_cells * sizeof(cell_t)); if (rv != addr_cells * sizeof(cell_t)) continue; if (callback == NULL || callback(id, child, addr_cells, reg)) count++; } return (only_runnable ? count : id); } diff --git a/sys/dev/ofw/ofwbus.c b/sys/dev/ofw/ofwbus.c index 51e6072ad4ba..af1a85fecc9e 100644 --- a/sys/dev/ofw/ofwbus.c +++ b/sys/dev/ofw/ofwbus.c @@ -1,189 +1,189 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * Copyright 2001 by Thomas Moestl . * Copyright 2006 by Marius Strobl . * All rights reserved. * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/i386/nexus.c,v 1.43 2001/02/09 */ #include #include #include #include #include #include #include #include #include #include #include #include /* * The ofwbus (which is a pseudo-bus actually) iterates over the nodes that * hang from the Open Firmware root node and adds them as devices to this bus * (except some special nodes which are excluded) so that drivers can be * attached to them. There should be only one ofwbus in the system, added * directly as a child of nexus0. 
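 * As context for the mechanical change in this file (and throughout this
 * series): bus_identify_children() is the new name for the old
 * bus_generic_probe(). It invokes DEVICE_IDENTIFY() for every driver in
 * the bus's devclass so drivers can add their own children before the
 * bus probes and attaches them. A minimal sketch of the resulting
 * canonical attach idiom, with a hypothetical driver name:
 *
 *	static int
 *	example_bus_attach(device_t dev)
 *	{
 *		bus_identify_children(dev);
 *		return (bus_generic_attach(dev));
 *	}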
*/ static device_probe_t ofwbus_probe; static device_attach_t ofwbus_attach; static bus_alloc_resource_t ofwbus_alloc_resource; static bus_release_resource_t ofwbus_release_resource; static device_method_t ofwbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofwbus_probe), DEVMETHOD(device_attach, ofwbus_attach), /* Bus interface */ DEVMETHOD(bus_alloc_resource, ofwbus_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, ofwbus_release_resource), DEVMETHOD_END }; DEFINE_CLASS_1(ofwbus, ofwbus_driver, ofwbus_methods, sizeof(struct simplebus_softc), simplebus_driver); EARLY_DRIVER_MODULE(ofwbus, nexus, ofwbus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ofwbus, 1); static int ofwbus_probe(device_t dev) { if (OF_peer(0) == 0) return (ENXIO); /* Only one instance of ofwbus. */ if (device_get_unit(dev) != 0) panic("ofwbus added with non-zero unit number: %d\n", device_get_unit(dev)); device_set_desc(dev, "Open Firmware Device Tree"); return (BUS_PROBE_NOWILDCARD); } static int ofwbus_attach(device_t dev) { phandle_t node; node = OF_peer(0); /* * If no Open Firmware, bail early */ if (node == -1) return (ENXIO); /* * The ofwbus starts on an unnamed node in the FDT, so we cannot make * an ofw_bus_devinfo from it. Pass node to simplebus_init directly. */ simplebus_init(dev, node); /* * Allow devices to identify. */ - bus_generic_probe(dev); + bus_identify_children(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); } static struct resource * ofwbus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *rv; struct resource_list_entry *rle; bool isdefault, passthrough; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); rle = NULL; if (!passthrough && isdefault) { rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; count = ummax(count, rle->count); end = ummax(rle->end, start + count - 1); } /* Let nexus handle the allocation. */ rv = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); if (rv == NULL) return (NULL); if (!passthrough && rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rle->end - rle->start + 1; } return (rv); } static int ofwbus_release_resource(device_t bus, device_t child, struct resource *r) { struct resource_list_entry *rle; bool passthrough; passthrough = (device_get_parent(child) != bus); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), rman_get_type(r), rman_get_rid(r)); if (rle != NULL) rle->res = NULL; } /* Let nexus handle the release. 
*/ return (bus_generic_release_resource(bus, child, r)); } diff --git a/sys/dev/pci/hostb_pci.c b/sys/dev/pci/hostb_pci.c index 8b3e29c00d5f..2d9e35e07bab 100644 --- a/sys/dev/pci/hostb_pci.c +++ b/sys/dev/pci/hostb_pci.c @@ -1,287 +1,287 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 1997, Stefan Esser * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include /* * Provide a device to "eat" the host->pci bridge devices that show up * on PCI buses and stop them showing up twice on the probes. This also * stops them showing up as 'none' in pciconf -l. If the host bridge * provides an AGP capability then we create a child agp device for the * agp GART driver to attach to. */ static int pci_hostb_probe(device_t dev) { u_int32_t id; id = pci_get_devid(dev); switch (id) { /* VIA VT82C596 Power Management Function */ case 0x30501106: return (ENXIO); default: break; } if (pci_get_class(dev) == PCIC_BRIDGE && pci_get_subclass(dev) == PCIS_BRIDGE_HOST) { device_set_desc(dev, "Host to PCI bridge"); device_quiet(dev); return (-10000); } return (ENXIO); } static int pci_hostb_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); /* * If AGP capabilities are present on this device, then create * an AGP child. */ if (pci_find_cap(dev, PCIY_AGP, NULL) == 0) device_add_child(dev, "agp", DEVICE_UNIT_ANY); bus_generic_attach(dev); return (0); } /* Bus interface. */ static int pci_hostb_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } static int pci_hostb_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct resource * pci_hostb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (bus_alloc_resource(dev, type, rid, start, end, count, flags)); } static int pci_hostb_release_resource(device_t dev, device_t child, struct resource *r) { return (bus_release_resource(dev, r)); } /* PCI interface. 
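 * A hedged sketch of what the forwarding methods below buy: a child such
 * as agp(4) issuing
 *
 *	val = pci_read_config(agpdev, PCIR_COMMAND, 2);
 *
 * resolves through hostb's pci_hostb_read_config(), which reissues the
 * access for the bridge device itself, so the child transparently sees
 * its parent's PCI config space. (The agpdev variable is illustrative,
 * not taken from this diff.)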
*/ static uint32_t pci_hostb_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } static void pci_hostb_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } static int pci_hostb_enable_busmaster(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_enable_busmaster\n", device_get_nameunit(child)); return (pci_enable_busmaster(dev)); } static int pci_hostb_disable_busmaster(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_disable_busmaster\n", device_get_nameunit(child)); return (pci_disable_busmaster(dev)); } static int pci_hostb_enable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_enable_io\n", device_get_nameunit(child)); return (pci_enable_io(dev, space)); } static int pci_hostb_disable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_disable_io\n", device_get_nameunit(child)); return (pci_disable_io(dev, space)); } static int pci_hostb_set_powerstate(device_t dev, device_t child, int state) { device_printf(dev, "child %s requested pci_set_powerstate\n", device_get_nameunit(child)); return (pci_set_powerstate(dev, state)); } static int pci_hostb_get_powerstate(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_get_powerstate\n", device_get_nameunit(child)); return (pci_get_powerstate(dev)); } static int pci_hostb_assign_interrupt(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_assign_interrupt\n", device_get_nameunit(child)); return (PCI_ASSIGN_INTERRUPT(device_get_parent(dev), dev)); } static int pci_hostb_find_cap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_cap(dev, capability, capreg)); } static int pci_hostb_find_next_cap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_cap(dev, capability, start, capreg)); } static int pci_hostb_find_extcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_extcap(dev, capability, capreg)); } static int pci_hostb_find_next_extcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_extcap(dev, capability, start, capreg)); } static int pci_hostb_find_htcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_htcap(dev, capability, capreg)); } static int pci_hostb_find_next_htcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_htcap(dev, capability, start, capreg)); } static device_method_t pci_hostb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_hostb_probe), DEVMETHOD(device_attach, pci_hostb_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, pci_hostb_read_ivar), DEVMETHOD(bus_write_ivar, pci_hostb_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, pci_hostb_alloc_resource), DEVMETHOD(bus_release_resource, pci_hostb_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), /* PCI interface */ DEVMETHOD(pci_read_config, pci_hostb_read_config), DEVMETHOD(pci_write_config, 
pci_hostb_write_config), DEVMETHOD(pci_enable_busmaster, pci_hostb_enable_busmaster), DEVMETHOD(pci_disable_busmaster, pci_hostb_disable_busmaster), DEVMETHOD(pci_enable_io, pci_hostb_enable_io), DEVMETHOD(pci_disable_io, pci_hostb_disable_io), DEVMETHOD(pci_get_powerstate, pci_hostb_get_powerstate), DEVMETHOD(pci_set_powerstate, pci_hostb_set_powerstate), DEVMETHOD(pci_assign_interrupt, pci_hostb_assign_interrupt), DEVMETHOD(pci_find_cap, pci_hostb_find_cap), DEVMETHOD(pci_find_next_cap, pci_hostb_find_next_cap), DEVMETHOD(pci_find_extcap, pci_hostb_find_extcap), DEVMETHOD(pci_find_next_extcap, pci_hostb_find_next_extcap), DEVMETHOD(pci_find_htcap, pci_hostb_find_htcap), DEVMETHOD(pci_find_next_htcap, pci_hostb_find_next_htcap), { 0, 0 } }; static driver_t pci_hostb_driver = { "hostb", pci_hostb_methods, 1, }; DRIVER_MODULE(hostb, pci, pci_hostb_driver, 0, 0); diff --git a/sys/dev/pci/isa_pci.c b/sys/dev/pci/isa_pci.c index c8ad277edb41..f63c63afc384 100644 --- a/sys/dev/pci/isa_pci.c +++ b/sys/dev/pci/isa_pci.c @@ -1,240 +1,240 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include /* * PCI:ISA bridge support */ #include #include #include #include #include #include #include #include #include #include #include static int isab_pci_probe(device_t dev); static int isab_pci_attach(device_t dev); static struct resource * isab_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int isab_pci_release_resource(device_t dev, device_t child, struct resource *r); static device_method_t isab_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isab_pci_probe), DEVMETHOD(device_attach, isab_pci_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, isab_pci_alloc_resource), DEVMETHOD(bus_release_resource, isab_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; struct isab_pci_resource { struct resource *ip_res; int ip_refs; }; struct isab_pci_softc { struct isab_pci_resource isab_pci_res[PCIR_MAX_BAR_0 + 1]; }; static driver_t isab_driver = { "isab", isab_methods, sizeof(struct isab_pci_softc), }; DRIVER_MODULE(isab, pci, isab_driver, 0, 0); /* * XXX we need to add a quirk list here for bridges that don't correctly * report themselves. */ static int isab_pci_probe(device_t dev) { int matched = 0; /* * Try for a generic match based on class/subclass. */ if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_ISA)) { matched = 1; } else { /* * These are devices that we *know* are PCI:ISA bridges. * Sometimes, however, they don't report themselves as * such. Check in case one of them is pretending to be * something else. 
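 * A note on the probe value used below: newbus selects the driver whose
 * probe routine returns the highest value, with 0 (BUS_PROBE_SPECIFIC)
 * being best. The -10000 returned here is deliberately worse than
 * BUS_PROBE_GENERIC (-100), so any more specific driver wins the device.
 * A hedged sketch of such a driver, with an invented device id:
 *
 *	static int
 *	example_bridge_probe(device_t dev)
 *	{
 *		if (pci_get_devid(dev) != 0x12345678)
 *			return (ENXIO);
 *		device_set_desc(dev, "Example ISA bridge");
 *		return (BUS_PROBE_DEFAULT);
 *	}
 *
 * BUS_PROBE_DEFAULT (-20) outranks -10000, so this driver would claim
 * the device ahead of the generic isab match.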
*/ switch (pci_get_devid(dev)) { case 0x04848086: /* Intel 82378ZB/82378IB */ case 0x122e8086: /* Intel 82371FB */ case 0x70008086: /* Intel 82371SB */ case 0x71108086: /* Intel 82371AB */ case 0x71988086: /* Intel 82443MX */ case 0x24108086: /* Intel 82801AA (ICH) */ case 0x24208086: /* Intel 82801AB (ICH0) */ case 0x24408086: /* Intel 82801BA (ICH2) */ case 0x00061004: /* VLSI 82C593 */ case 0x05861106: /* VIA 82C586 */ case 0x05961106: /* VIA 82C596 */ case 0x06861106: /* VIA 82C686 */ case 0x153310b9: /* AcerLabs M1533 */ case 0x154310b9: /* AcerLabs M1543 */ case 0x00081039: /* SiS 85c503 */ case 0x00001078: /* Cyrix Cx5510 */ case 0x01001078: /* Cyrix Cx5530 */ case 0xc7001045: /* OPTi 82C700 (FireStar) */ case 0x886a1060: /* UMC UM8886 ISA */ case 0x02001166: /* ServerWorks IB6566 PCI */ if (bootverbose) printf("PCI-ISA bridge with incorrect subclass 0x%x\n", pci_get_subclass(dev)); matched = 1; break; default: break; } } if (matched) { device_set_desc(dev, "PCI-ISA bridge"); return(-10000); } return(ENXIO); } static int isab_pci_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); return (isab_attach(dev)); } static struct resource * isab_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct isab_pci_softc *sc; int bar; if (device_get_parent(child) != dev) return bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags); switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we cache the resource so that we only allocate it * from the PCI bus once. */ bar = PCI_RID2BAR(*rid); if (bar < 0 || bar > PCIR_MAX_BAR_0) return (NULL); sc = device_get_softc(dev); if (sc->isab_pci_res[bar].ip_res == NULL) sc->isab_pci_res[bar].ip_res = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (sc->isab_pci_res[bar].ip_res != NULL) sc->isab_pci_res[bar].ip_refs++; return (sc->isab_pci_res[bar].ip_res); } return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); } static int isab_pci_release_resource(device_t dev, device_t child, struct resource *r) { struct isab_pci_softc *sc; int bar, error; if (device_get_parent(child) != dev) return bus_generic_release_resource(dev, child, r); switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we release the resource from the PCI bus * when the last child reference goes away. */ bar = PCI_RID2BAR(rman_get_rid(r)); if (bar < 0 || bar > PCIR_MAX_BAR_0) return (EINVAL); sc = device_get_softc(dev); if (sc->isab_pci_res[bar].ip_res == NULL) return (EINVAL); KASSERT(sc->isab_pci_res[bar].ip_res == r, ("isa_pci resource mismatch")); if (sc->isab_pci_res[bar].ip_refs > 1) { sc->isab_pci_res[bar].ip_refs--; return (0); } KASSERT(sc->isab_pci_res[bar].ip_refs > 0, ("isa_pci resource reference count underflow")); error = bus_release_resource(dev, r); if (error == 0) { sc->isab_pci_res[bar].ip_res = NULL; sc->isab_pci_res[bar].ip_refs = 0; } return (error); } return (bus_generic_release_resource(dev, child, r)); } diff --git a/sys/dev/pci/vga_pci.c b/sys/dev/pci/vga_pci.c index 5a4749933574..2f86171e5ea9 100644 --- a/sys/dev/pci/vga_pci.c +++ b/sys/dev/pci/vga_pci.c @@ -1,770 +1,770 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2005 John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Simple driver for PCI VGA display devices. Drivers such as agp(4) and * drm(4) should attach as children of this device. * * XXX: The vgapci name is a hack until we somehow merge the isa vga driver * in or rename it. */ #include #include #include #include #include #include #include #if defined(__amd64__) || defined(__i386__) #include #include #endif #include #include #include /* To re-POST the card. */ struct vga_resource { struct resource *vr_res; int vr_refs; }; struct vga_pci_softc { device_t vga_msi_child; /* Child driver using MSI. */ struct vga_resource vga_bars[PCIR_MAX_BAR_0 + 1]; struct vga_resource vga_bios; }; SYSCTL_DECL(_hw_pci); static struct vga_resource *lookup_res(struct vga_pci_softc *sc, int rid); static struct resource *vga_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int vga_pci_release_resource(device_t dev, device_t child, struct resource *r); int vga_pci_default_unit = -1; SYSCTL_INT(_hw_pci, OID_AUTO, default_vgapci_unit, CTLFLAG_RDTUN, &vga_pci_default_unit, -1, "Default VGA-compatible display"); int vga_pci_is_boot_display(device_t dev) { int unit; device_t pcib; uint16_t config; /* Check that the given device is a video card */ if ((pci_get_class(dev) != PCIC_DISPLAY && (pci_get_class(dev) != PCIC_OLD || pci_get_subclass(dev) != PCIS_OLD_VGA))) return (0); unit = device_get_unit(dev); if (vga_pci_default_unit >= 0) { /* * The boot display device was determined by a previous * call to this function, or the user forced it using * the hw.pci.default_vgapci_unit tunable. */ return (vga_pci_default_unit == unit); } /* * The primary video card used as a boot display must have the * "I/O" and "Memory Address Space Decoding" bits set in its * Command register. * * Furthermore, if the card is attached to a bridge, instead of * the root PCI bus, the bridge must have the "VGA Enable" bit * set in its Control register. */ pcib = device_get_parent(device_get_parent(dev)); if (device_get_devclass(device_get_parent(pcib)) == devclass_find("pci")) { /* * The parent bridge is a PCI-to-PCI bridge: check the * value of the "VGA Enable" bit. 
*/ config = pci_read_config(pcib, PCIR_BRIDGECTL_1, 2); if ((config & PCIB_BCR_VGA_ENABLE) == 0) return (0); } config = pci_read_config(dev, PCIR_COMMAND, 2); if ((config & (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) == 0) return (0); /* * Disable interrupts until a chipset driver is loaded for * this PCI device. Else unhandled display adapter interrupts * might freeze the CPU. */ pci_write_config(dev, PCIR_COMMAND, config | PCIM_CMD_INTxDIS, 2); /* This video card is the boot display: record its unit number. */ vga_pci_default_unit = unit; device_set_flags(dev, 1); return (1); } static void vga_pci_reset(device_t dev) { int ps; /* * FLR is unsupported on GPUs, so attempt a power-management reset by cycling * the device in/out of D3 state. * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); } void * vga_pci_map_bios(device_t dev, size_t *size) { struct vga_resource *vr; struct resource *res; device_t pcib; uint32_t rom_addr; uint16_t config; volatile unsigned char *bios; int i, rid, found; #if defined(__amd64__) || defined(__i386__) if (vga_pci_is_boot_display(dev)) { /* * On x86, the System BIOS copies the default display * device's Video BIOS to a fixed location in system * memory (0xC0000, 128 kBytes long) at boot time. * * We use this copy for the default boot device, because * the original ROM may not be valid after boot. */ *size = VGA_PCI_BIOS_SHADOW_SIZE; return (pmap_mapbios(VGA_PCI_BIOS_SHADOW_ADDR, *size)); } #endif pcib = device_get_parent(device_get_parent(dev)); if (device_get_devclass(device_get_parent(pcib)) == devclass_find("pci")) { /* * The parent bridge is a PCI-to-PCI bridge: check the * value of the "VGA Enable" bit. 
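 * As a hedged aside, a child driver (a drm(4)-style consumer, say) uses
 * this interface roughly as follows; the function below is invented, not
 * part of this change:
 *
 *	static void
 *	example_inspect_vbios(device_t vgapci)
 *	{
 *		size_t size;
 *		uint8_t *bios;
 *
 *		bios = vga_pci_map_bios(vgapci, &size);
 *		if (bios == NULL)
 *			return;
 *		if (size >= 2 && bios[0] == 0x55 && bios[1] == 0xaa)
 *			device_printf(vgapci, "ROM image, %zu bytes\n",
 *			    size);
 *		vga_pci_unmap_bios(vgapci, bios);
 *	}
 *
 * The pointer handed back must always be returned through
 * vga_pci_unmap_bios() so the cached BAR/BIOS resource is released.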
*/ config = pci_read_config(pcib, PCIR_BRIDGECTL_1, 2); if ((config & PCIB_BCR_VGA_ENABLE) == 0) { config |= PCIB_BCR_VGA_ENABLE; pci_write_config(pcib, PCIR_BRIDGECTL_1, config, 2); } } switch(pci_read_config(dev, PCIR_HDRTYPE, 1)) { case PCIM_HDRTYPE_BRIDGE: rid = PCIR_BIOS_1; break; case PCIM_HDRTYPE_CARDBUS: rid = 0; break; default: rid = PCIR_BIOS; break; } if (rid == 0) return (NULL); res = vga_pci_alloc_resource(dev, NULL, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (res == NULL) { device_printf(dev, "vga_pci_alloc_resource failed\n"); return (NULL); } bios = rman_get_virtual(res); *size = rman_get_size(res); for (found = i = 0; i < hz; i++) { found = (bios[0] == 0x55 && bios[1] == 0xaa); if (found) break; pause("vgabios", 1); } if (found) return (__DEVOLATILE(void *, bios)); if (bootverbose) device_printf(dev, "initial ROM mapping failed -- resetting\n"); /* * Enable ROM decode */ vga_pci_reset(dev); rom_addr = pci_read_config(dev, rid, 4); rom_addr &= 0x7ff; rom_addr |= rman_get_start(res) | 0x1; pci_write_config(dev, rid, rom_addr, 4); vr = lookup_res(device_get_softc(dev), rid); vga_pci_release_resource(dev, NULL, vr->vr_res); /* * re-allocate */ res = vga_pci_alloc_resource(dev, NULL, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (res == NULL) { device_printf(dev, "vga_pci_alloc_resource failed\n"); return (NULL); } bios = rman_get_virtual(res); *size = rman_get_size(res); for (found = i = 0; i < 3*hz; i++) { found = (bios[0] == 0x55 && bios[1] == 0xaa); if (found) break; pause("vgabios", 1); } if (found) return (__DEVOLATILE(void *, bios)); device_printf(dev, "ROM mapping failed\n"); vr = lookup_res(device_get_softc(dev), rid); vga_pci_release_resource(dev, NULL, vr->vr_res); return (NULL); } void vga_pci_unmap_bios(device_t dev, void *bios) { struct vga_resource *vr; int rid; if (bios == NULL) { return; } #if defined(__amd64__) || defined(__i386__) if (vga_pci_is_boot_display(dev)) { /* We mapped the BIOS shadow copy located at 0xC0000. */ pmap_unmapdev(bios, VGA_PCI_BIOS_SHADOW_SIZE); return; } #endif switch(pci_read_config(dev, PCIR_HDRTYPE, 1)) { case PCIM_HDRTYPE_BRIDGE: rid = PCIR_BIOS_1; break; case PCIM_HDRTYPE_CARDBUS: rid = 0; break; default: rid = PCIR_BIOS; break; } if (rid == 0) return; /* * Look up the PCIR_BIOS resource in our softc. It should match * the address we returned previously. */ vr = lookup_res(device_get_softc(dev), rid); KASSERT(vr->vr_res != NULL, ("vga_pci_unmap_bios: bios not mapped")); KASSERT(rman_get_virtual(vr->vr_res) == bios, ("vga_pci_unmap_bios: mismatch")); vga_pci_release_resource(dev, NULL, vr->vr_res); } int vga_pci_repost(device_t dev) { #if defined(__amd64__) || defined(__i386__) x86regs_t regs; if (!vga_pci_is_boot_display(dev)) return (EINVAL); if (x86bios_get_orm(VGA_PCI_BIOS_SHADOW_ADDR) == NULL) return (ENOTSUP); x86bios_init_regs(®s); regs.R_AH = pci_get_bus(dev); regs.R_AL = (pci_get_slot(dev) << 3) | (pci_get_function(dev) & 0x07); regs.R_DL = 0x80; device_printf(dev, "REPOSTing\n"); x86bios_call(®s, X86BIOS_PHYSTOSEG(VGA_PCI_BIOS_SHADOW_ADDR + 3), X86BIOS_PHYSTOOFF(VGA_PCI_BIOS_SHADOW_ADDR + 3)); x86bios_get_intr(0x10); return (0); #else return (ENOTSUP); #endif } static int vga_pci_probe(device_t dev) { switch (pci_get_class(dev)) { case PCIC_DISPLAY: break; case PCIC_OLD: if (pci_get_subclass(dev) != PCIS_OLD_VGA) return (ENXIO); break; default: return (ENXIO); } /* Probe default display. 
*/ vga_pci_is_boot_display(dev); device_set_desc(dev, "VGA-compatible display"); return (BUS_PROBE_GENERIC); } static int vga_pci_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); /* Always create a drmn child for now to make it easier on drm. */ device_add_child(dev, "drmn", DEVICE_UNIT_ANY); bus_generic_attach(dev); if (vga_pci_is_boot_display(dev)) device_printf(dev, "Boot video device\n"); return (0); } static int vga_pci_detach(device_t dev) { int error; error = bus_generic_detach(dev); if (error == 0) error = device_delete_children(dev); return (error); } /* Bus interface. */ static int vga_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } static int vga_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static int vga_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { return (BUS_SETUP_INTR(device_get_parent(dev), dev, irq, flags, filter, intr, arg, cookiep)); } static int vga_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { return (BUS_TEARDOWN_INTR(device_get_parent(dev), dev, irq, cookie)); } static struct vga_resource * lookup_res(struct vga_pci_softc *sc, int rid) { int bar; if (rid == PCIR_BIOS) return (&sc->vga_bios); bar = PCI_RID2BAR(rid); if (bar >= 0 && bar <= PCIR_MAX_BAR_0) return (&sc->vga_bars[bar]); return (NULL); } static struct resource * vga_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct vga_resource *vr; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we cache the resource so that we only allocate it * from the PCI bus once. */ vr = lookup_res(device_get_softc(dev), *rid); if (vr == NULL) return (NULL); if (vr->vr_res == NULL) vr->vr_res = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (vr->vr_res != NULL) vr->vr_refs++; return (vr->vr_res); } return (bus_alloc_resource(dev, type, rid, start, end, count, flags)); } static int vga_pci_release_resource(device_t dev, device_t child, struct resource *r) { struct vga_resource *vr; int error; switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: /* * For BARs, we release the resource from the PCI bus * when the last child reference goes away. */ vr = lookup_res(device_get_softc(dev), rman_get_rid(r)); if (vr == NULL) return (EINVAL); if (vr->vr_res == NULL) return (EINVAL); KASSERT(vr->vr_res == r, ("vga_pci resource mismatch")); if (vr->vr_refs > 1) { vr->vr_refs--; return (0); } KASSERT(vr->vr_refs > 0, ("vga_pci resource reference count underflow")); error = bus_release_resource(dev, r); if (error == 0) { vr->vr_res = NULL; vr->vr_refs = 0; } return (error); } return (bus_release_resource(dev, r)); } /* PCI interface. 
*/ static uint32_t vga_pci_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } static void vga_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } static int vga_pci_enable_busmaster(device_t dev, device_t child) { return (pci_enable_busmaster(dev)); } static int vga_pci_disable_busmaster(device_t dev, device_t child) { return (pci_disable_busmaster(dev)); } static int vga_pci_enable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_enable_io\n", device_get_nameunit(child)); return (pci_enable_io(dev, space)); } static int vga_pci_disable_io(device_t dev, device_t child, int space) { device_printf(dev, "child %s requested pci_disable_io\n", device_get_nameunit(child)); return (pci_disable_io(dev, space)); } static int vga_pci_get_vpd_ident(device_t dev, device_t child, const char **identptr) { return (pci_get_vpd_ident(dev, identptr)); } static int vga_pci_get_vpd_readonly(device_t dev, device_t child, const char *kw, const char **vptr) { return (pci_get_vpd_readonly(dev, kw, vptr)); } static int vga_pci_set_powerstate(device_t dev, device_t child, int state) { device_printf(dev, "child %s requested pci_set_powerstate\n", device_get_nameunit(child)); return (pci_set_powerstate(dev, state)); } static int vga_pci_get_powerstate(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_get_powerstate\n", device_get_nameunit(child)); return (pci_get_powerstate(dev)); } static int vga_pci_assign_interrupt(device_t dev, device_t child) { device_printf(dev, "child %s requested pci_assign_interrupt\n", device_get_nameunit(child)); return (PCI_ASSIGN_INTERRUPT(device_get_parent(dev), dev)); } static int vga_pci_find_cap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_cap(dev, capability, capreg)); } static int vga_pci_find_next_cap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_cap(dev, capability, start, capreg)); } static int vga_pci_find_extcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_extcap(dev, capability, capreg)); } static int vga_pci_find_next_extcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_extcap(dev, capability, start, capreg)); } static int vga_pci_find_htcap(device_t dev, device_t child, int capability, int *capreg) { return (pci_find_htcap(dev, capability, capreg)); } static int vga_pci_find_next_htcap(device_t dev, device_t child, int capability, int start, int *capreg) { return (pci_find_next_htcap(dev, capability, start, capreg)); } static int vga_pci_alloc_msi(device_t dev, device_t child, int *count) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != NULL) return (EBUSY); error = pci_alloc_msi(dev, count); if (error == 0) sc->vga_msi_child = child; return (error); } static int vga_pci_alloc_msix(device_t dev, device_t child, int *count) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != NULL) return (EBUSY); error = pci_alloc_msix(dev, count); if (error == 0) sc->vga_msi_child = child; return (error); } static int vga_pci_remap_msix(device_t dev, device_t child, int count, const u_int *vectors) { struct vga_pci_softc *sc; sc = device_get_softc(dev); if (sc->vga_msi_child != child) return (ENXIO); return (pci_remap_msix(dev, count, vectors)); } static int 
vga_pci_release_msi(device_t dev, device_t child) { struct vga_pci_softc *sc; int error; sc = device_get_softc(dev); if (sc->vga_msi_child != child) return (ENXIO); error = pci_release_msi(dev); if (error == 0) sc->vga_msi_child = NULL; return (error); } static int vga_pci_msi_count(device_t dev, device_t child) { return (pci_msi_count(dev)); } static int vga_pci_msix_count(device_t dev, device_t child) { return (pci_msix_count(dev)); } static bus_dma_tag_t vga_pci_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t vga_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vga_pci_probe), DEVMETHOD(device_attach, vga_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_detach, vga_pci_detach), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, vga_pci_read_ivar), DEVMETHOD(bus_write_ivar, vga_pci_write_ivar), DEVMETHOD(bus_setup_intr, vga_pci_setup_intr), DEVMETHOD(bus_teardown_intr, vga_pci_teardown_intr), DEVMETHOD(bus_alloc_resource, vga_pci_alloc_resource), DEVMETHOD(bus_release_resource, vga_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_dma_tag, vga_pci_get_dma_tag), /* PCI interface */ DEVMETHOD(pci_read_config, vga_pci_read_config), DEVMETHOD(pci_write_config, vga_pci_write_config), DEVMETHOD(pci_enable_busmaster, vga_pci_enable_busmaster), DEVMETHOD(pci_disable_busmaster, vga_pci_disable_busmaster), DEVMETHOD(pci_enable_io, vga_pci_enable_io), DEVMETHOD(pci_disable_io, vga_pci_disable_io), DEVMETHOD(pci_get_vpd_ident, vga_pci_get_vpd_ident), DEVMETHOD(pci_get_vpd_readonly, vga_pci_get_vpd_readonly), DEVMETHOD(pci_get_powerstate, vga_pci_get_powerstate), DEVMETHOD(pci_set_powerstate, vga_pci_set_powerstate), DEVMETHOD(pci_assign_interrupt, vga_pci_assign_interrupt), DEVMETHOD(pci_find_cap, vga_pci_find_cap), DEVMETHOD(pci_find_next_cap, vga_pci_find_next_cap), DEVMETHOD(pci_find_extcap, vga_pci_find_extcap), DEVMETHOD(pci_find_next_extcap, vga_pci_find_next_extcap), DEVMETHOD(pci_find_htcap, vga_pci_find_htcap), DEVMETHOD(pci_find_next_htcap, vga_pci_find_next_htcap), DEVMETHOD(pci_alloc_msi, vga_pci_alloc_msi), DEVMETHOD(pci_alloc_msix, vga_pci_alloc_msix), DEVMETHOD(pci_remap_msix, vga_pci_remap_msix), DEVMETHOD(pci_release_msi, vga_pci_release_msi), DEVMETHOD(pci_msi_count, vga_pci_msi_count), DEVMETHOD(pci_msix_count, vga_pci_msix_count), { 0, 0 } }; static driver_t vga_pci_driver = { "vgapci", vga_pci_methods, sizeof(struct vga_pci_softc), }; DRIVER_MODULE(vgapci, pci, vga_pci_driver, 0, 0); MODULE_DEPEND(vgapci, x86bios, 1, 1, 1); diff --git a/sys/dev/ppbus/ppbconf.c b/sys/dev/ppbus/ppbconf.c index 469b2ecd045f..1f82df001b49 100644 --- a/sys/dev/ppbus/ppbconf.c +++ b/sys/dev/ppbus/ppbconf.c @@ -1,601 +1,601 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997, 1998, 1999 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #include #include "opt_ppb_1284.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "ppbus_if.h" #define DEVTOSOFTC(dev) ((struct ppb_data *)device_get_softc(dev)) static MALLOC_DEFINE(M_PPBUSDEV, "ppbusdev", "Parallel Port bus device"); static int ppbus_intr(void *arg); /* * Device methods */ static int ppbus_print_child(device_t bus, device_t dev) { struct ppb_device *ppbdev; int retval; retval = bus_print_child_header(bus, dev); ppbdev = (struct ppb_device *)device_get_ivars(dev); if (ppbdev->flags != 0) retval += printf(" flags 0x%x", ppbdev->flags); retval += bus_print_child_footer(bus, dev); return (retval); } static int ppbus_probe(device_t dev) { device_set_desc(dev, "Parallel port bus"); return (0); } /* * ppbus_add_child() * * Add a ppbus device, allocate/initialize the ivars */ static device_t ppbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct ppb_device *ppbdev; device_t child; /* allocate ivars for the new ppbus child */ ppbdev = malloc(sizeof(struct ppb_device), M_PPBUSDEV, M_NOWAIT | M_ZERO); if (!ppbdev) return (NULL); /* initialize the ivars */ ppbdev->name = name; /* add the device as a child to the ppbus bus with the allocated * ivars */ child = device_add_child_ordered(dev, order, name, unit); device_set_ivars(child, ppbdev); return (child); } static int ppbus_read_ivar(device_t bus, device_t dev, int index, uintptr_t* val) { switch (index) { case PPBUS_IVAR_MODE: /* XXX yet device mode = ppbus mode = chipset mode */ *val = (u_long)ppb_get_mode(bus); break; default: return (ENOENT); } return (0); } static int ppbus_write_ivar(device_t bus, device_t dev, int index, uintptr_t val) { switch (index) { case PPBUS_IVAR_MODE: /* XXX yet device mode = ppbus mode = chipset mode */ ppb_set_mode(bus, val); break; default: return (ENOENT); } return (0); } #define PPB_PNP_PRINTER 0 #define PPB_PNP_MODEM 1 #define PPB_PNP_NET 2 #define PPB_PNP_HDC 3 #define PPB_PNP_PCMCIA 4 #define PPB_PNP_MEDIA 5 #define PPB_PNP_FDC 6 #define PPB_PNP_PORTS 7 #define PPB_PNP_SCANNER 8 #define PPB_PNP_DIGICAM 9 #ifndef DONTPROBE_1284 static char *pnp_tokens[] = { "PRINTER", "MODEM", "NET", "HDC", "PCMCIA", "MEDIA", "FDC", "PORTS", "SCANNER", "DIGICAM", "", NULL }; #if 0 static char *pnp_classes[] = { "printer", "modem", "network device", "hard disk", "PCMCIA", "multimedia device", "floppy disk", "ports", "scanner", "digital camera", "unknown device", NULL }; #endif /* * search_token() * * Search the first occurrence of a token within a string */ 
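To make the ID-string handling below concrete, here is a small stand-alone sketch (plain userland C, not kernel code, with an invented sample string) of splitting an IEEE 1284 device ID into its KEY:value fields:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char id[] = "MFG:ExampleCo;CMD:PCL;CLS:PRINTER;";	/* invented */
	char *field, *sep;

	/* Split on ';', then report each KEY:value pair. */
	for (field = strtok(id, ";"); field != NULL;
	    field = strtok(NULL, ";")) {
		sep = strchr(field, ':');
		if (sep != NULL)
			printf("key %.*s value %s\n",
			    (int)(sep - field), field, sep + 1);
	}
	return (0);
}

The kernel code that follows does the same job in place: it overwrites each ';' with '\0' and then scans for the MFG/CLS/CMD keys with search_token().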
static char * search_token(char *str, int slen, char *token) { int tlen, i; #define UNKNOWN_LENGTH -1 if (slen == UNKNOWN_LENGTH) /* get string's length */ slen = strlen(str); /* get token's length */ tlen = strlen(token); if (tlen == 0) return (str); for (i = 0; i <= slen-tlen; i++) { if (strncmp(str + i, token, tlen) == 0) return (&str[i]); } return (NULL); } /* * ppb_pnp_detect() * * Returns the class id of the peripheral, -1 otherwise */ static int ppb_pnp_detect(device_t bus) { char *token, *class = NULL; int i, len, error; int class_id = -1; char str[PPB_PnP_STRING_SIZE+1]; device_printf(bus, "Probing for PnP devices:\n"); if ((error = ppb_1284_read_id(bus, PPB_NIBBLE, str, PPB_PnP_STRING_SIZE, &len))) goto end_detect; #ifdef DEBUG_1284 device_printf(bus, " %d characters: ", len); for (i = 0; i < len; i++) printf("%c(0x%x) ", str[i], str[i]); printf("\n"); #endif /* replace ';' characters by '\0' */ for (i = 0; i < len; i++) str[i] = (str[i] == ';') ? '\0' : str[i]; if ((token = search_token(str, len, "MFG")) != NULL || (token = search_token(str, len, "MANUFACTURER")) != NULL) device_printf(bus, "<%s", search_token(token, UNKNOWN_LENGTH, ":") + 1); else device_printf(bus, "<unknown"); if ((token = search_token(str, len, "CLS")) != NULL) { class = search_token(token, UNKNOWN_LENGTH, ":") + 1; printf(" %s", class); } if ((token = search_token(str, len, "CMD")) != NULL || (token = search_token(str, len, "COMMAND")) != NULL) printf(" %s", search_token(token, UNKNOWN_LENGTH, ":") + 1); printf(">\n"); if (class) /* identify class ident */ for (i = 0; pnp_tokens[i] != NULL; i++) { if (search_token(class, len, pnp_tokens[i]) != NULL) { class_id = i; goto end_detect; } } class_id = PPB_PnP_UNKNOWN; end_detect: return (class_id); } /* * ppb_scan_bus() * * Scan the ppbus for IEEE1284 compliant devices */ static int ppb_scan_bus(device_t bus) { struct ppb_data * ppb = (struct ppb_data *)device_get_softc(bus); int error = 0; /* try all IEEE1284 modes, for one device only * * XXX We should implement the IEEE1284.3 standard to detect * daisy chained devices */ error = ppb_1284_negociate(bus, PPB_NIBBLE, PPB_REQUEST_ID); if ((ppb->state == PPB_ERROR) && (ppb->error == PPB_NOT_IEEE1284)) goto end_scan; ppb_1284_terminate(bus); device_printf(bus, "IEEE1284 device found "); if (!(error = ppb_1284_negociate(bus, PPB_NIBBLE, 0))) { printf("/NIBBLE"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_PS2, 0))) { printf("/PS2"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, 0))) { printf("/ECP"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_USE_RLE))) { printf("/ECP_RLE"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_EPP, 0))) { printf("/EPP"); ppb_1284_terminate(bus); } /* try more IEEE1284 modes */ if (bootverbose) { if (!(error = ppb_1284_negociate(bus, PPB_NIBBLE, PPB_REQUEST_ID))) { printf("/NIBBLE_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_PS2, PPB_REQUEST_ID))) { printf("/PS2_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_REQUEST_ID))) { printf("/ECP_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_ECP, PPB_REQUEST_ID | PPB_USE_RLE))) { printf("/ECP_RLE_ID"); ppb_1284_terminate(bus); } if (!(error = ppb_1284_negociate(bus, PPB_COMPATIBLE, PPB_EXTENSIBILITY_LINK))) { printf("/Extensibility Link"); ppb_1284_terminate(bus); } } printf("\n"); /* detect PnP devices */ ppb->class_id = ppb_pnp_detect(bus); return (0); 
end_scan: return (error); } #endif /* !DONTPROBE_1284 */ static int ppbus_attach(device_t dev) { struct ppb_data *ppb = device_get_softc(dev); int error, rid; error = BUS_READ_IVAR(device_get_parent(dev), dev, PPC_IVAR_LOCK, (uintptr_t *)&ppb->ppc_lock); if (error) { device_printf(dev, "Unable to fetch parent's lock\n"); return (error); } rid = 0; ppb->ppc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (ppb->ppc_irq_res != NULL) { mtx_lock(ppb->ppc_lock); error = BUS_WRITE_IVAR(device_get_parent(dev), dev, PPC_IVAR_INTR_HANDLER, (uintptr_t)&ppbus_intr); mtx_unlock(ppb->ppc_lock); if (error) { device_printf(dev, "Unable to set interrupt handler\n"); return (error); } } /* Locate our children */ - bus_generic_probe(dev); + bus_identify_children(dev); #ifndef DONTPROBE_1284 /* detect IEEE1284 compliant devices */ mtx_lock(ppb->ppc_lock); ppb_scan_bus(dev); mtx_unlock(ppb->ppc_lock); #endif /* !DONTPROBE_1284 */ /* launch attachment of the added children */ bus_generic_attach(dev); return (0); } static int ppbus_detach(device_t dev) { int error; error = bus_generic_detach(dev); if (error) return (error); /* detach & delete all children */ device_delete_children(dev); return (0); } static int ppbus_intr(void *arg) { struct ppb_device *ppbdev; struct ppb_data *ppb = arg; mtx_assert(ppb->ppc_lock, MA_OWNED); if (ppb->ppb_owner == NULL) return (ENOENT); ppbdev = device_get_ivars(ppb->ppb_owner); if (ppbdev->intr_hook == NULL) return (ENOENT); ppbdev->intr_hook(ppbdev->intr_arg); return (0); } static int ppbus_setup_intr(device_t bus, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct ppb_device *ppbdev = device_get_ivars(child); struct ppb_data *ppb = DEVTOSOFTC(bus); /* We do not support filters. */ if (filt != NULL || ihand == NULL) return (EINVAL); /* Can only attach handlers to the parent device's resource. */ if (ppb->ppc_irq_res != r) return (EINVAL); mtx_lock(ppb->ppc_lock); ppbdev->intr_hook = ihand; ppbdev->intr_arg = arg; *cookiep = ppbdev; mtx_unlock(ppb->ppc_lock); return (0); } static int ppbus_teardown_intr(device_t bus, device_t child, struct resource *r, void *ih) { struct ppb_device *ppbdev = device_get_ivars(child); struct ppb_data *ppb = DEVTOSOFTC(bus); mtx_lock(ppb->ppc_lock); if (ppbdev != ih || ppb->ppc_irq_res != r) { mtx_unlock(ppb->ppc_lock); return (EINVAL); } ppbdev->intr_hook = NULL; mtx_unlock(ppb->ppc_lock); return (0); } /* * ppb_request_bus() * * Allocate the device to perform transfers. * * how : PPB_WAIT or PPB_DONTWAIT */ int ppb_request_bus(device_t bus, device_t dev, int how) { struct ppb_data *ppb = DEVTOSOFTC(bus); struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev); int error = 0; mtx_assert(ppb->ppc_lock, MA_OWNED); while (!error) { if (ppb->ppb_owner) { switch (how) { case PPB_WAIT | PPB_INTR: error = mtx_sleep(ppb, ppb->ppc_lock, PPBPRI | PCATCH, "ppbreq", 0); break; case PPB_WAIT | PPB_NOINTR: error = mtx_sleep(ppb, ppb->ppc_lock, PPBPRI, "ppbreq", 0); break; default: return (EWOULDBLOCK); } } else { ppb->ppb_owner = dev; /* restore the context of the device * The first time, ctx.valid is certainly false * then do not change anything. 
This is useful for * drivers that do not set their operating mode * during attachment */ if (ppbdev->ctx.valid) ppb_set_mode(bus, ppbdev->ctx.mode); return (0); } } return (error); } /* * ppb_release_bus() * * Release the device allocated with ppb_request_bus() */ int ppb_release_bus(device_t bus, device_t dev) { struct ppb_data *ppb = DEVTOSOFTC(bus); struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev); mtx_assert(ppb->ppc_lock, MA_OWNED); if (ppb->ppb_owner != dev) return (EACCES); /* save the context of the device */ ppbdev->ctx.mode = ppb_get_mode(bus); /* ok, now the context of the device is valid */ ppbdev->ctx.valid = 1; ppb->ppb_owner = 0; /* wakeup waiting processes */ wakeup(ppb); return (0); } static device_method_t ppbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, ppbus_probe), DEVMETHOD(device_attach, ppbus_attach), DEVMETHOD(device_detach, ppbus_detach), /* bus interface */ DEVMETHOD(bus_add_child, ppbus_add_child), DEVMETHOD(bus_print_child, ppbus_print_child), DEVMETHOD(bus_read_ivar, ppbus_read_ivar), DEVMETHOD(bus_write_ivar, ppbus_write_ivar), DEVMETHOD(bus_setup_intr, ppbus_setup_intr), DEVMETHOD(bus_teardown_intr, ppbus_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), { 0, 0 } }; static driver_t ppbus_driver = { "ppbus", ppbus_methods, sizeof(struct ppb_data), }; DRIVER_MODULE(ppbus, ppc, ppbus_driver, 0, 0); diff --git a/sys/dev/pwm/ofw_pwmbus.c b/sys/dev/pwm/ofw_pwmbus.c index 2d2c47291bca..6bc1a63ccde9 100644 --- a/sys/dev/pwm/ofw_pwmbus.c +++ b/sys/dev/pwm/ofw_pwmbus.c @@ -1,218 +1,218 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Ian Lepore * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" struct ofw_pwmbus_ivars { struct pwmbus_ivars base; struct ofw_bus_devinfo devinfo; }; struct ofw_pwmbus_softc { struct pwmbus_softc base; }; /* * bus_if methods... 
*/ static device_t ofw_pwmbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_pwmbus_ivars *ivars; if ((ivars = malloc(sizeof(struct ofw_pwmbus_ivars), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { return (NULL); } if ((child = device_add_child_ordered(dev, order, name, unit)) == NULL) { free(ivars, M_DEVBUF); return (NULL); } ivars->devinfo.obd_node = -1; device_set_ivars(child, ivars); return (child); } static void ofw_pwmbus_child_deleted(device_t dev, device_t child) { struct ofw_pwmbus_ivars *ivars; ivars = device_get_ivars(child); if (ivars != NULL) { ofw_bus_gen_destroy_devinfo(&ivars->devinfo); free(ivars, M_DEVBUF); } } static const struct ofw_bus_devinfo * ofw_pwmbus_get_devinfo(device_t bus, device_t dev) { struct ofw_pwmbus_ivars *ivars; ivars = device_get_ivars(dev); return (&ivars->devinfo); } /* * device_if methods... */ static int ofw_pwmbus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) { return (ENXIO); } device_set_desc(dev, "OFW PWM bus"); return (BUS_PROBE_DEFAULT); } static int ofw_pwmbus_attach(device_t dev) { struct ofw_pwmbus_softc *sc; struct ofw_pwmbus_ivars *ivars; phandle_t node; device_t child, parent; pcell_t chan; bool any_children; sc = device_get_softc(dev); sc->base.dev = dev; parent = device_get_parent(dev); if (PWMBUS_CHANNEL_COUNT(parent, &sc->base.nchannels) != 0 || sc->base.nchannels == 0) { device_printf(dev, "No channels on parent %s\n", device_get_nameunit(parent)); return (ENXIO); } /* * Attach the children found in the fdt node of the hardware controller. * Hardware controllers must implement the ofw_bus_get_node method so * that our call to ofw_bus_get_node() gets back the controller's node. */ any_children = false; node = ofw_bus_get_node(dev); for (node = OF_child(node); node != 0; node = OF_peer(node)) { /* * The child has to have a reg property; its value is the * channel number so range-check it. */ if (OF_getencprop(node, "reg", &chan, sizeof(chan)) == -1) continue; if (chan >= sc->base.nchannels) continue; if ((child = ofw_pwmbus_add_child(dev, 0, NULL, -1)) == NULL) continue; ivars = device_get_ivars(child); ivars->base.pi_channel = chan; /* Set up the standard ofw devinfo. */ if (ofw_bus_gen_setup_devinfo(&ivars->devinfo, node) != 0) { device_delete_child(dev, child); continue; } any_children = true; } /* * If we didn't find any children in the fdt data, add a pwmc(4) child * for each channel, like the base pwmbus does. The idea is that if * there is any fdt data, then we do exactly what it says and nothing * more, otherwise we just provide generic userland access to all the * pwm channels that exist like the base pwmbus's attach code does. 
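* (In either case, hinted children are also enumerated below.)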
*/ if (!any_children) { for (chan = 0; chan < sc->base.nchannels; ++chan) { child = ofw_pwmbus_add_child(dev, 0, "pwmc", -1); if (child == NULL) { device_printf(dev, "failed to add pwmc child " "device for channel %u\n", chan); continue; } ivars = device_get_ivars(child); ivars->base.pi_channel = chan; } } bus_enumerate_hinted_children(dev); - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); } static device_method_t ofw_pwmbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_pwmbus_probe), DEVMETHOD(device_attach, ofw_pwmbus_attach), /* Bus interface */ DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_pwmbus_add_child), DEVMETHOD(bus_child_deleted, ofw_pwmbus_child_deleted), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_pwmbus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(pwmbus, ofw_pwmbus_driver, ofw_pwmbus_methods, sizeof(struct pwmbus_softc), pwmbus_driver); EARLY_DRIVER_MODULE(ofw_pwmbus, pwm, ofw_pwmbus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ofw_pwmbus, 1); MODULE_DEPEND(ofw_pwmbus, pwmbus, 1, 1, 1); diff --git a/sys/dev/pwm/pwmbus.c b/sys/dev/pwm/pwmbus.c index e4feaa95e590..1ead8fd7a65d 100644 --- a/sys/dev/pwm/pwmbus.c +++ b/sys/dev/pwm/pwmbus.c @@ -1,281 +1,281 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" /* * bus_if methods...
*/ static device_t pwmbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct pwmbus_ivars *ivars; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); ivars = malloc(sizeof(struct pwmbus_ivars), M_DEVBUF, M_NOWAIT | M_ZERO); if (ivars == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, ivars); return (child); } static int pwmbus_child_location(device_t dev, device_t child, struct sbuf *sb) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); sbuf_printf(sb, "hwdev=%s channel=%u", device_get_nameunit(device_get_parent(dev)), ivars->pi_channel); return (0); } static void pwmbus_hinted_child(device_t dev, const char *dname, int dunit) { struct pwmbus_ivars *ivars; device_t child; child = pwmbus_add_child(dev, 0, dname, dunit); /* Child creation can fail (M_NOWAIT allocation); bail out if it did. */ if (child == NULL) return; /* * If there is a channel hint, use it. Otherwise pi_channel was * initialized to zero, so that's the channel we'll use. */ ivars = device_get_ivars(child); resource_int_value(dname, dunit, "channel", &ivars->pi_channel); } static int pwmbus_print_child(device_t dev, device_t child) { struct pwmbus_ivars *ivars; int rv; ivars = device_get_ivars(child); rv = bus_print_child_header(dev, child); rv += printf(" channel %u", ivars->pi_channel); rv += bus_print_child_footer(dev, child); return (rv); } static void pwmbus_probe_nomatch(device_t dev, device_t child) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); if (ivars != NULL) device_printf(dev, " on channel %u\n", ivars->pi_channel); return; } static int pwmbus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); switch (which) { case PWMBUS_IVAR_CHANNEL: *(u_int *)result = ivars->pi_channel; break; default: return (EINVAL); } return (0); } /* * device_if methods... */ static int pwmbus_probe(device_t dev) { device_set_desc(dev, "PWM bus"); return (BUS_PROBE_GENERIC); } static int pwmbus_attach(device_t dev) { struct pwmbus_softc *sc; struct pwmbus_ivars *ivars; device_t child, parent; u_int chan; sc = device_get_softc(dev); sc->dev = dev; parent = device_get_parent(dev); if (PWMBUS_CHANNEL_COUNT(parent, &sc->nchannels) != 0 || sc->nchannels == 0) { device_printf(sc->dev, "No channels on parent %s\n", device_get_nameunit(parent)); return (ENXIO); } /* Add a pwmc(4) child for each channel. */ for (chan = 0; chan < sc->nchannels; ++chan) { if ((child = pwmbus_add_child(sc->dev, 0, "pwmc", -1)) == NULL) { device_printf(dev, "failed to add pwmc child device " "for channel %u\n", chan); continue; } ivars = device_get_ivars(child); ivars->pi_channel = chan; } bus_enumerate_hinted_children(dev); - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); } static int pwmbus_detach(device_t dev) { int rv; if ((rv = bus_generic_detach(dev)) == 0) rv = device_delete_children(dev); return (rv); } /* * pwmbus_if methods...
*/ static int pwmbus_channel_config(device_t dev, u_int chan, u_int period, u_int duty) { return (PWMBUS_CHANNEL_CONFIG(device_get_parent(dev), chan, period, duty)); } static int pwmbus_channel_get_config(device_t dev, u_int chan, u_int *period, u_int *duty) { return (PWMBUS_CHANNEL_GET_CONFIG(device_get_parent(dev), chan, period, duty)); } static int pwmbus_channel_get_flags(device_t dev, u_int chan, uint32_t *flags) { return (PWMBUS_CHANNEL_GET_FLAGS(device_get_parent(dev), chan, flags)); } static int pwmbus_channel_enable(device_t dev, u_int chan, bool enable) { return (PWMBUS_CHANNEL_ENABLE(device_get_parent(dev), chan, enable)); } static int pwmbus_channel_set_flags(device_t dev, u_int chan, uint32_t flags) { return (PWMBUS_CHANNEL_SET_FLAGS(device_get_parent(dev), chan, flags)); } static int pwmbus_channel_is_enabled(device_t dev, u_int chan, bool *enable) { return (PWMBUS_CHANNEL_IS_ENABLED(device_get_parent(dev), chan, enable)); } static int pwmbus_channel_count(device_t dev, u_int *nchannel) { return (PWMBUS_CHANNEL_COUNT(device_get_parent(dev), nchannel)); } static device_method_t pwmbus_methods[] = { /* device_if */ DEVMETHOD(device_probe, pwmbus_probe), DEVMETHOD(device_attach, pwmbus_attach), DEVMETHOD(device_detach, pwmbus_detach), /* bus_if */ DEVMETHOD(bus_add_child, pwmbus_add_child), DEVMETHOD(bus_child_location, pwmbus_child_location), DEVMETHOD(bus_hinted_child, pwmbus_hinted_child), DEVMETHOD(bus_print_child, pwmbus_print_child), DEVMETHOD(bus_probe_nomatch, pwmbus_probe_nomatch), DEVMETHOD(bus_read_ivar, pwmbus_read_ivar), /* pwmbus_if */ DEVMETHOD(pwmbus_channel_count, pwmbus_channel_count), DEVMETHOD(pwmbus_channel_config, pwmbus_channel_config), DEVMETHOD(pwmbus_channel_get_config, pwmbus_channel_get_config), DEVMETHOD(pwmbus_channel_set_flags, pwmbus_channel_set_flags), DEVMETHOD(pwmbus_channel_get_flags, pwmbus_channel_get_flags), DEVMETHOD(pwmbus_channel_enable, pwmbus_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, pwmbus_channel_is_enabled), DEVMETHOD_END }; driver_t pwmbus_driver = { "pwmbus", pwmbus_methods, sizeof(struct pwmbus_softc), }; EARLY_DRIVER_MODULE(pwmbus, pwm, pwmbus_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(pwmbus, 1); diff --git a/sys/dev/sdhci/fsl_sdhci.c b/sys/dev/sdhci/fsl_sdhci.c index c3c1b6207a23..91a0daa3e3b4 100644 --- a/sys/dev/sdhci/fsl_sdhci.c +++ b/sys/dev/sdhci/fsl_sdhci.c @@ -1,1002 +1,1002 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include /* * SDHCI driver glue for Freescale i.MX SoC and QorIQ families. * * This supports both eSDHC (earlier SoCs) and uSDHC (more recent SoCs). */ #include "opt_mmccam.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __arm__ #include #include #endif #ifdef __powerpc__ #include #endif #include #include #include #include #include #include #include "mmcbr_if.h" #include "sdhci_if.h" struct fsl_sdhci_softc { device_t dev; struct resource * mem_res; struct resource * irq_res; void * intr_cookie; struct sdhci_slot slot; struct callout r1bfix_callout; sbintime_t r1bfix_timeout_at; struct sdhci_fdt_gpio * gpio; uint32_t baseclk_hz; uint32_t cmd_and_mode; uint32_t r1bfix_intmask; uint16_t sdclockreg_freq_bits; uint8_t r1bfix_type; uint8_t hwtype; bool slot_init_done; }; #define R1BFIX_NONE 0 /* No fix needed at next interrupt. */ #define R1BFIX_NODATA 1 /* Synthesize DATA_END for R1B w/o data. */ #define R1BFIX_AC12 2 /* Wait for busy after auto command 12. */ #define HWTYPE_NONE 0 /* Hardware not recognized/supported. */ #define HWTYPE_ESDHC 1 /* fsl5x and earlier. */ #define HWTYPE_USDHC 2 /* fsl6. */ /* * Freescale-specific registers, or in some cases the layout of bits within the * sdhci-defined register is different on Freescale. These names all begin with * SDHC_ (not SDHCI_). */ #define SDHC_WTMK_LVL 0x44 /* Watermark Level register. */ #define USDHC_MIX_CONTROL 0x48 /* Mix(ed) Control register. */ #define SDHC_VEND_SPEC 0xC0 /* Vendor-specific register. 
*/ #define SDHC_VEND_FRC_SDCLK_ON (1 << 8) #define SDHC_VEND_IPGEN (1 << 11) #define SDHC_VEND_HCKEN (1 << 12) #define SDHC_VEND_PEREN (1 << 13) #define SDHC_PRES_STATE 0x24 #define SDHC_PRES_CIHB (1 << 0) #define SDHC_PRES_CDIHB (1 << 1) #define SDHC_PRES_DLA (1 << 2) #define SDHC_PRES_SDSTB (1 << 3) #define SDHC_PRES_IPGOFF (1 << 4) #define SDHC_PRES_HCKOFF (1 << 5) #define SDHC_PRES_PEROFF (1 << 6) #define SDHC_PRES_SDOFF (1 << 7) #define SDHC_PRES_WTA (1 << 8) #define SDHC_PRES_RTA (1 << 9) #define SDHC_PRES_BWEN (1 << 10) #define SDHC_PRES_BREN (1 << 11) #define SDHC_PRES_RTR (1 << 12) #define SDHC_PRES_CINST (1 << 16) #define SDHC_PRES_CDPL (1 << 18) #define SDHC_PRES_WPSPL (1 << 19) #define SDHC_PRES_CLSL (1 << 23) #define SDHC_PRES_DLSL_SHIFT 24 #define SDHC_PRES_DLSL_MASK (0xffU << SDHC_PRES_DLSL_SHIFT) #define SDHC_PROT_CTRL 0x28 #define SDHC_PROT_LED (1 << 0) #define SDHC_PROT_WIDTH_1BIT (0 << 1) #define SDHC_PROT_WIDTH_4BIT (1 << 1) #define SDHC_PROT_WIDTH_8BIT (2 << 1) #define SDHC_PROT_WIDTH_MASK (3 << 1) #define SDHC_PROT_D3CD (1 << 3) #define SDHC_PROT_EMODE_BIG (0 << 4) #define SDHC_PROT_EMODE_HALF (1 << 4) #define SDHC_PROT_EMODE_LITTLE (2 << 4) #define SDHC_PROT_EMODE_MASK (3 << 4) #define SDHC_PROT_SDMA (0 << 8) #define SDHC_PROT_ADMA1 (1 << 8) #define SDHC_PROT_ADMA2 (2 << 8) #define SDHC_PROT_ADMA264 (3 << 8) #define SDHC_PROT_DMA_MASK (3 << 8) #define SDHC_PROT_CDTL (1 << 6) #define SDHC_PROT_CDSS (1 << 7) #define SDHC_SYS_CTRL 0x2c /* * The clock enable bits exist in different registers for ESDHC vs USDHC, but * they are the same bits in both cases. The divisor values go into the * standard sdhci clock register, but in different bit positions and meanings than the sdhci spec values. */ #define SDHC_CLK_IPGEN (1 << 0) #define SDHC_CLK_HCKEN (1 << 1) #define SDHC_CLK_PEREN (1 << 2) #define SDHC_CLK_SDCLKEN (1 << 3) #define SDHC_CLK_ENABLE_MASK 0x0000000f #define SDHC_CLK_DIVISOR_MASK 0x000000f0 #define SDHC_CLK_DIVISOR_SHIFT 4 #define SDHC_CLK_PRESCALE_MASK 0x0000ff00 #define SDHC_CLK_PRESCALE_SHIFT 8 static struct ofw_compat_data compat_data[] = { {"fsl,imx6q-usdhc", HWTYPE_USDHC}, {"fsl,imx6sl-usdhc", HWTYPE_USDHC}, {"fsl,imx53-esdhc", HWTYPE_ESDHC}, {"fsl,imx51-esdhc", HWTYPE_ESDHC}, {"fsl,esdhc", HWTYPE_ESDHC}, {NULL, HWTYPE_NONE}, }; static uint16_t fsl_sdhc_get_clock(struct fsl_sdhci_softc *sc); static void fsl_sdhc_set_clock(struct fsl_sdhci_softc *sc, uint16_t val); static void fsl_sdhci_r1bfix_func(void *arg); static inline uint32_t RD4(struct fsl_sdhci_softc *sc, bus_size_t off) { return (bus_read_4(sc->mem_res, off)); } static inline void WR4(struct fsl_sdhci_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(sc->mem_res, off, val); } static uint8_t fsl_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct fsl_sdhci_softc *sc = device_get_softc(dev); uint32_t val32, wrk32; /* * Most of the things in the standard host control register are in the * hardware's wider protocol control register, but some of the bits are * moved around. */ if (off == SDHCI_HOST_CONTROL) { wrk32 = RD4(sc, SDHC_PROT_CTRL); val32 = wrk32 & (SDHCI_CTRL_LED | SDHCI_CTRL_CARD_DET | SDHCI_CTRL_FORCE_CARD); switch (wrk32 & SDHC_PROT_WIDTH_MASK) { case SDHC_PROT_WIDTH_1BIT: /* Value is already 0. */ break; case SDHC_PROT_WIDTH_4BIT: val32 |= SDHCI_CTRL_4BITBUS; break; case SDHC_PROT_WIDTH_8BIT: val32 |= SDHCI_CTRL_8BITBUS; break; } switch (wrk32 & SDHC_PROT_DMA_MASK) { case SDHC_PROT_SDMA: /* Value is already 0. 
*/ break; case SDHC_PROT_ADMA1: /* This value is deprecated, should never appear. */ break; case SDHC_PROT_ADMA2: val32 |= SDHCI_CTRL_ADMA2; break; case SDHC_PROT_ADMA264: val32 |= SDHCI_CTRL_ADMA264; break; } return val32; } /* * XXX can't find the bus power on/off knob. For now we have to say the * power is always on and always set to the same voltage. */ if (off == SDHCI_POWER_CONTROL) { return (SDHCI_POWER_ON | SDHCI_POWER_300); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xff); } static uint16_t fsl_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct fsl_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; if (sc->hwtype == HWTYPE_USDHC) { /* * The USDHC hardware has nothing in the version register, but * it's v3 compatible with all our translation code. */ if (off == SDHCI_HOST_VERSION) { return (SDHCI_SPEC_300 << SDHCI_SPEC_VER_SHIFT); } /* * The USDHC hardware moved the transfer mode bits to the mixed * control register, fetch them from there. */ if (off == SDHCI_TRANSFER_MODE) return (RD4(sc, USDHC_MIX_CONTROL) & 0x37); } else if (sc->hwtype == HWTYPE_ESDHC) { /* * The ESDHC hardware has the typical 32-bit combined "command * and mode" register that we have to cache so that command * isn't written until after mode. On a read, just retrieve the * cached values last written. */ if (off == SDHCI_TRANSFER_MODE) { return (sc->cmd_and_mode & 0x0000ffff); } else if (off == SDHCI_COMMAND_FLAGS) { return (sc->cmd_and_mode >> 16); } } /* * This hardware only manages one slot. Synthesize a slot interrupt * status register... if there are any enabled interrupts active they * must be coming from our one and only slot. */ if (off == SDHCI_SLOT_INT_STATUS) { val32 = RD4(sc, SDHCI_INT_STATUS); val32 &= RD4(sc, SDHCI_SIGNAL_ENABLE); return (val32 ? 1 : 0); } /* * Clock bits are scattered into various registers which differ by * hardware type, complex enough to have their own function. */ if (off == SDHCI_CLOCK_CONTROL) { return (fsl_sdhc_get_clock(sc)); } return ((RD4(sc, off & ~3) >> (off & 3) * 8) & 0xffff); } static uint32_t fsl_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off) { struct fsl_sdhci_softc *sc = device_get_softc(dev); uint32_t val32, wrk32; val32 = RD4(sc, off); /* * The hardware leaves the base clock frequency out of the capabilities * register, but we filled it in by setting slot->max_clk at attach time * rather than here, because we can't represent frequencies above 63MHz * in an sdhci 2.0 capabilities register. The timeout clock is the same * as the active output sdclock; we indicate that with a quirk setting * so don't populate the timeout frequency bits. * * XXX Turn off (for now) features the hardware can do but this driver * doesn't yet handle (1.8v, suspend/resume, etc). */ if (off == SDHCI_CAPABILITIES) { val32 &= ~SDHCI_CAN_VDD_180; val32 &= ~SDHCI_CAN_DO_SUSPEND; val32 |= SDHCI_CAN_DO_8BITBUS; return (val32); } /* * The hardware moves bits around in the present state register to make * room for all 8 data line state bits. To translate, mask out all the * bits which are not in the same position in both registers (this also * masks out some Freescale-specific bits in locations defined as * reserved by sdhci), then shift the data line and retune request bits * down to their standard locations.
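* (Concretely, the eight DLSL data-line bits at [31:24] are shifted down * by 4, and the RTR retune-request bit (bit 12) is shifted down by 9, to * land in their standard sdhci positions.)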
*/ if (off == SDHCI_PRESENT_STATE) { wrk32 = val32; val32 &= 0x000F0F07; val32 |= (wrk32 >> 4) & SDHCI_STATE_DAT_MASK; val32 |= (wrk32 >> 9) & SDHCI_RETUNE_REQUEST; return (val32); } /* * fsl_sdhci_intr() can synthesize a DATA_END interrupt following a * command with an R1B response, mix it into the hardware status. */ if (off == SDHCI_INT_STATUS) { return (val32 | sc->r1bfix_intmask); } return val32; } static void fsl_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct fsl_sdhci_softc *sc = device_get_softc(dev); bus_read_multi_4(sc->mem_res, off, data, count); } static void fsl_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint8_t val) { struct fsl_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* * Most of the things in the standard host control register are in the * hardware's wider protocol control register, but some of the bits are * moved around. */ if (off == SDHCI_HOST_CONTROL) { val32 = RD4(sc, SDHC_PROT_CTRL); val32 &= ~(SDHC_PROT_LED | SDHC_PROT_DMA_MASK | SDHC_PROT_WIDTH_MASK | SDHC_PROT_CDTL | SDHC_PROT_CDSS); val32 |= (val & SDHCI_CTRL_LED); if (val & SDHCI_CTRL_8BITBUS) val32 |= SDHC_PROT_WIDTH_8BIT; else val32 |= (val & SDHCI_CTRL_4BITBUS); val32 |= (val & (SDHCI_CTRL_SDMA | SDHCI_CTRL_ADMA2)) << 4; val32 |= (val & (SDHCI_CTRL_CARD_DET | SDHCI_CTRL_FORCE_CARD)); WR4(sc, SDHC_PROT_CTRL, val32); return; } /* XXX I can't find the bus power on/off knob; do nothing. */ if (off == SDHCI_POWER_CONTROL) { return; } #ifdef __powerpc__ /* XXX Reset doesn't seem to work as expected. Do nothing for now. */ if (off == SDHCI_SOFTWARE_RESET) return; #endif val32 = RD4(sc, off & ~3); val32 &= ~(0xff << (off & 3) * 8); val32 |= (val << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void fsl_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint16_t val) { struct fsl_sdhci_softc *sc = device_get_softc(dev); uint32_t val32; /* * The clock control stuff is complex enough to have its own function * that can handle the ESDHC versus USDHC differences. */ if (off == SDHCI_CLOCK_CONTROL) { fsl_sdhc_set_clock(sc, val); return; } /* * Figure out whether we need to check the DAT0 line for busy status at * interrupt time. The controller should be doing this, but for some * reason it doesn't. There are two cases: * - R1B response with no data transfer should generate a DATA_END (aka * TRANSFER_COMPLETE) interrupt after waiting for busy, but if * there's no data transfer there's no DATA_END interrupt. This is * documented; they seem to think it's a feature. * - R1B response after Auto-CMD12 appears to not work, even though * there's a control bit for it (bit 3) in the vendor register. * When we're starting a command that needs a manual DAT0 line check at * interrupt time, we leave ourselves a note in r1bfix_type so that we * can do the extra work in fsl_sdhci_intr(). */ if (off == SDHCI_COMMAND_FLAGS) { if (val & SDHCI_CMD_DATA) { const uint32_t MBAUTOCMD = SDHCI_TRNS_ACMD12 | SDHCI_TRNS_MULTI; val32 = RD4(sc, USDHC_MIX_CONTROL); if ((val32 & MBAUTOCMD) == MBAUTOCMD) sc->r1bfix_type = R1BFIX_AC12; } else { if ((val & SDHCI_CMD_RESP_MASK) == SDHCI_CMD_RESP_SHORT_BUSY) { WR4(sc, SDHCI_INT_ENABLE, slot->intmask | SDHCI_INT_RESPONSE); WR4(sc, SDHCI_SIGNAL_ENABLE, slot->intmask | SDHCI_INT_RESPONSE); sc->r1bfix_type = R1BFIX_NODATA; } } } /* * The USDHC hardware moved the transfer mode bits to mixed control; we * just write them there and we're done. 
The ESDHC hardware has the * typical combined cmd-and-mode register that allows only 32-bit * access, so when writing the mode bits just save them, then later when * writing the command bits, add in the saved mode bits. */ if (sc->hwtype == HWTYPE_USDHC) { if (off == SDHCI_TRANSFER_MODE) { val32 = RD4(sc, USDHC_MIX_CONTROL); val32 &= ~0x3f; val32 |= val & 0x37; /* XXX acmd23 not supported here (or by sdhci driver) */ WR4(sc, USDHC_MIX_CONTROL, val32); return; } } else if (sc->hwtype == HWTYPE_ESDHC) { if (off == SDHCI_TRANSFER_MODE) { sc->cmd_and_mode = (sc->cmd_and_mode & 0xffff0000) | val; return; } else if (off == SDHCI_COMMAND_FLAGS) { sc->cmd_and_mode = (sc->cmd_and_mode & 0xffff) | (val << 16); WR4(sc, SDHCI_TRANSFER_MODE, sc->cmd_and_mode); return; } } val32 = RD4(sc, off & ~3); val32 &= ~(0xffff << (off & 3) * 8); val32 |= ((val & 0xffff) << (off & 3) * 8); WR4(sc, off & ~3, val32); } static void fsl_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t val) { struct fsl_sdhci_softc *sc = device_get_softc(dev); /* Clear synthesized interrupts, then pass the value to the hardware. */ if (off == SDHCI_INT_STATUS) { sc->r1bfix_intmask &= ~val; } WR4(sc, off, val); } static void fsl_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off, uint32_t *data, bus_size_t count) { struct fsl_sdhci_softc *sc = device_get_softc(dev); bus_write_multi_4(sc->mem_res, off, data, count); } static uint16_t fsl_sdhc_get_clock(struct fsl_sdhci_softc *sc) { uint16_t val; /* * Whenever the sdhci driver writes the clock register we save a * snapshot of just the frequency bits, so that we can play them back * here on a register read without recalculating the frequency from the * prescaler and divisor bits in the real register. We'll start with * those bits, and mix in the clock status and enable bits that come * from different places depending on which hardware we've got. */ val = sc->sdclockreg_freq_bits; /* * The internal clock is always enabled (actually, the hardware manages * it). Whether the internal clock is stable yet after a frequency * change comes from the present-state register on both hardware types. */ val |= SDHCI_CLOCK_INT_EN; if (RD4(sc, SDHC_PRES_STATE) & SDHC_PRES_SDSTB) val |= SDHCI_CLOCK_INT_STABLE; /* * On i.MX ESDHC hardware the card bus clock enable is in the usual * sdhci register but it's a different bit, so transcribe it (note the * difference between standard SDHCI_ and Freescale SDHC_ prefixes * here). On USDHC and QorIQ ESDHC hardware there is a force-on bit, but * no force-off for the card bus clock (the hardware runs the clock when * transfers are active no matter what), so we always say the clock is * on. * XXX Maybe we should say it's in whatever state the sdhci driver last * set it to. */ if (sc->hwtype == HWTYPE_ESDHC) { #ifdef __arm__ if (RD4(sc, SDHC_SYS_CTRL) & SDHC_CLK_SDCLKEN) #endif val |= SDHCI_CLOCK_CARD_EN; } else { val |= SDHCI_CLOCK_CARD_EN; } return (val); } static void fsl_sdhc_set_clock(struct fsl_sdhci_softc *sc, uint16_t val) { uint32_t divisor, freq, prescale, val32; val32 = RD4(sc, SDHCI_CLOCK_CONTROL); /* * Save the frequency-setting bits in SDHCI format so that we can play * them back in get_clock without complex decoding of hardware regs, * then deal with the frequency part of the value based on hardware type. */ sc->sdclockreg_freq_bits = val & SDHCI_DIVIDERS_MASK; if (sc->hwtype == HWTYPE_ESDHC) { /* * The i.MX5 ESDHC hardware requires the driver to manually * start and stop the sd bus clock.
If the enable bit is not * set, turn off the clock in hardware and we're done, otherwise * decode the requested frequency. ESDHC hardware is sdhci 2.0; * the sdhci driver will use the original 8-bit divisor field * and the "base / 2^N" divisor scheme. */ if ((val & SDHCI_CLOCK_CARD_EN) == 0) { #ifdef __arm__ /* On QorIQ, this is a reserved bit. */ WR4(sc, SDHCI_CLOCK_CONTROL, val32 & ~SDHC_CLK_SDCLKEN); #endif return; } divisor = (val >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK; freq = sc->baseclk_hz >> ffs(divisor); } else { /* * The USDHC hardware provides only "force always on" control * over the sd bus clock, but no way to turn it off. (If a cmd * or data transfer is in progress the clock is on, otherwise it * is off.) If the clock is being disabled, we can just return * now, otherwise we decode the requested frequency. USDHC * hardware is sdhci 3.0; the sdhci driver will use a 10-bit * divisor using the "base / 2*N" divisor scheme. */ if ((val & SDHCI_CLOCK_CARD_EN) == 0) return; divisor = ((val >> SDHCI_DIVIDER_SHIFT) & SDHCI_DIVIDER_MASK) | ((val >> SDHCI_DIVIDER_HI_SHIFT) & SDHCI_DIVIDER_HI_MASK) << SDHCI_DIVIDER_MASK_LEN; if (divisor == 0) freq = sc->baseclk_hz; else freq = sc->baseclk_hz / (2 * divisor); } /* * Get a prescaler and final divisor to achieve the desired frequency. */ for (prescale = 2; freq < sc->baseclk_hz / (prescale * 16);) prescale <<= 1; for (divisor = 1; freq < sc->baseclk_hz / (prescale * divisor);) ++divisor; #ifdef DEBUG device_printf(sc->dev, "desired SD freq: %d, actual: %d; base %d prescale %d divisor %d\n", freq, sc->baseclk_hz / (prescale * divisor), sc->baseclk_hz, prescale, divisor); #endif /* * Adjust to zero-based values, and store them to the hardware. */ prescale >>= 1; divisor -= 1; val32 &= ~(SDHC_CLK_DIVISOR_MASK | SDHC_CLK_PRESCALE_MASK); val32 |= divisor << SDHC_CLK_DIVISOR_SHIFT; val32 |= prescale << SDHC_CLK_PRESCALE_SHIFT; val32 |= SDHC_CLK_IPGEN; WR4(sc, SDHCI_CLOCK_CONTROL, val32); } static boolean_t fsl_sdhci_r1bfix_is_wait_done(struct fsl_sdhci_softc *sc) { uint32_t inhibit; mtx_assert(&sc->slot.mtx, MA_OWNED); /* * Check the DAT0 line status using both the DLA (data line active) and * CDIHB (data inhibit) bits in the present state register. In theory * just DLA should do the trick, but in practice it takes both. If the * DAT0 line is still being held and we're not yet beyond the timeout * point, just schedule another callout to check again later. */ inhibit = RD4(sc, SDHC_PRES_STATE) & (SDHC_PRES_DLA | SDHC_PRES_CDIHB); if (inhibit && getsbinuptime() < sc->r1bfix_timeout_at) { callout_reset_sbt(&sc->r1bfix_callout, SBT_1MS, 0, fsl_sdhci_r1bfix_func, sc, 0); return (false); } /* * If we reach this point with the inhibit bits still set, we've got a * timeout, synthesize a DATA_TIMEOUT interrupt. Otherwise the DAT0 * line has been released, and we synthesize a DATA_END, and if the type * of fix needed was on a command-without-data we also now add in the * original INT_RESPONSE that we suppressed earlier. 
*/ if (inhibit) sc->r1bfix_intmask |= SDHCI_INT_DATA_TIMEOUT; else { sc->r1bfix_intmask |= SDHCI_INT_DATA_END; if (sc->r1bfix_type == R1BFIX_NODATA) sc->r1bfix_intmask |= SDHCI_INT_RESPONSE; } sc->r1bfix_type = R1BFIX_NONE; return (true); } static void fsl_sdhci_r1bfix_func(void * arg) { struct fsl_sdhci_softc *sc = arg; boolean_t r1bwait_done; mtx_lock(&sc->slot.mtx); r1bwait_done = fsl_sdhci_r1bfix_is_wait_done(sc); mtx_unlock(&sc->slot.mtx); if (r1bwait_done) sdhci_generic_intr(&sc->slot); } static void fsl_sdhci_intr(void *arg) { struct fsl_sdhci_softc *sc = arg; uint32_t intmask; mtx_lock(&sc->slot.mtx); /* * Manually check the DAT0 line for R1B response types that the * controller fails to handle properly. The controller asserts the done * interrupt while the card is still asserting busy with the DAT0 line. * * We check DAT0 immediately because most of the time, especially on a * read, the card will actually be done by time we get here. If it's * not, then the wait_done routine will schedule a callout to re-check * periodically until it is done. In that case we clear the interrupt * out of the hardware now so that we can present it later when the DAT0 * line is released. * * If we need to wait for the DAT0 line to be released, we set up a * timeout point 250ms in the future. This number comes from the SD * spec, which allows a command to take that long. In the real world, * cards tend to take 10-20ms for a long-running command such as a write * or erase that spans two pages. */ switch (sc->r1bfix_type) { case R1BFIX_NODATA: intmask = RD4(sc, SDHCI_INT_STATUS) & SDHCI_INT_RESPONSE; break; case R1BFIX_AC12: intmask = RD4(sc, SDHCI_INT_STATUS) & SDHCI_INT_DATA_END; break; default: intmask = 0; break; } if (intmask) { sc->r1bfix_timeout_at = getsbinuptime() + 250 * SBT_1MS; if (!fsl_sdhci_r1bfix_is_wait_done(sc)) { WR4(sc, SDHCI_INT_STATUS, intmask); bus_barrier(sc->mem_res, SDHCI_INT_STATUS, 4, BUS_SPACE_BARRIER_WRITE); } } mtx_unlock(&sc->slot.mtx); sdhci_generic_intr(&sc->slot); } static int fsl_sdhci_get_ro(device_t bus, device_t child) { struct fsl_sdhci_softc *sc = device_get_softc(bus); return (sdhci_fdt_gpio_get_readonly(sc->gpio)); } static bool fsl_sdhci_get_card_present(device_t dev, struct sdhci_slot *slot) { struct fsl_sdhci_softc *sc = device_get_softc(dev); return (sdhci_fdt_gpio_get_present(sc->gpio)); } #ifdef __powerpc__ static uint32_t fsl_sdhci_get_platform_clock(device_t dev) { phandle_t node; uint32_t clock; node = ofw_bus_get_node(dev); /* Get sdhci node properties */ if((OF_getprop(node, "clock-frequency", (void *)&clock, sizeof(clock)) <= 0) || (clock == 0)) { clock = mpc85xx_get_system_clock(); if (clock == 0) { device_printf(dev,"Cannot acquire correct sdhci " "frequency from DTS.\n"); return (0); } } if (bootverbose) device_printf(dev, "Acquired clock: %d from DTS\n", clock); return (clock); } #endif static int fsl_sdhci_detach(device_t dev) { struct fsl_sdhci_softc *sc = device_get_softc(dev); if (sc->gpio != NULL) sdhci_fdt_gpio_teardown(sc->gpio); callout_drain(&sc->r1bfix_callout); if (sc->slot_init_done) sdhci_cleanup_slot(&sc->slot); if (sc->intr_cookie != NULL) bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); if (sc->irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res), sc->irq_res); if (sc->mem_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); } return (0); } static int fsl_sdhci_attach(device_t dev) { struct fsl_sdhci_softc *sc = device_get_softc(dev); int rid, err; 
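/* * Attach outline: allocate the register window and IRQ, install * fsl_sdhci_intr(), apply quirks and the FIFO watermark, determine the * base clock, set up card-detect/write-protect gpios, initialize the * sdhci slot, then identify and attach children and start the slot. */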
#ifdef __powerpc__ phandle_t node; uint32_t protctl; #endif sc->dev = dev; callout_init(&sc->r1bfix_callout, 1); sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (sc->hwtype == HWTYPE_NONE) panic("Impossible: not compatible in fsl_sdhci_attach()"); rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "cannot allocate memory window\n"); err = ENXIO; goto fail; } rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, NULL, fsl_sdhci_intr, sc, &sc->intr_cookie)) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } sc->slot.quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; /* * DMA is not really broken, I just haven't implemented it yet. */ sc->slot.quirks |= SDHCI_QUIRK_BROKEN_DMA; /* * Set the buffer watermark level to 128 words (512 bytes) for both read * and write. The hardware has a restriction that when the read or * write ready status is asserted, that means you can read exactly the * number of words set in the watermark register before you have to * re-check the status and potentially wait for more data. The main * sdhci driver provides no hook for doing status checking on less than * a full block boundary, so we set the watermark level to be a full * block. Reads and writes where the block size is less than the * watermark size will work correctly too, no need to change the * watermark for different size blocks. However, 128 is the maximum * allowed for the watermark, so PIO is limited to 512 byte blocks * (which works fine for SD cards, may be a problem for SDIO some day). * * XXX need named constants for this stuff. */ /* P1022 has the '*_BRST_LEN' fields as reserved, always reading 0x10 */ if (ofw_bus_is_compatible(dev, "fsl,p1022-esdhc")) WR4(sc, SDHC_WTMK_LVL, 0x10801080); else WR4(sc, SDHC_WTMK_LVL, 0x08800880); /* * We read in native byte order in the main driver, but the register * defaults to little endian. */ #ifdef __powerpc__ sc->baseclk_hz = fsl_sdhci_get_platform_clock(dev); #else sc->baseclk_hz = imx_ccm_sdhci_hz(); #endif sc->slot.max_clk = sc->baseclk_hz; /* * Set up any gpio pin handling described in the FDT data. This cannot * fail; see comments in sdhci_fdt_gpio.h for details.
*/ sc->gpio = sdhci_fdt_gpio_setup(dev, &sc->slot); #ifdef __powerpc__ node = ofw_bus_get_node(dev); /* Default to big-endian on powerpc */ protctl = RD4(sc, SDHC_PROT_CTRL); protctl &= ~SDHC_PROT_EMODE_MASK; if (OF_hasprop(node, "little-endian")) protctl |= SDHC_PROT_EMODE_LITTLE; else protctl |= SDHC_PROT_EMODE_BIG; WR4(sc, SDHC_PROT_CTRL, protctl); #endif sdhci_init_slot(dev, &sc->slot, 0); sc->slot_init_done = true; - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sdhci_start_slot(&sc->slot); return (0); fail: fsl_sdhci_detach(dev); return (err); } static int fsl_sdhci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) { case HWTYPE_ESDHC: device_set_desc(dev, "Freescale eSDHC controller"); return (BUS_PROBE_DEFAULT); case HWTYPE_USDHC: device_set_desc(dev, "Freescale uSDHC controller"); return (BUS_PROBE_DEFAULT); default: break; } return (ENXIO); } static device_method_t fsl_sdhci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fsl_sdhci_probe), DEVMETHOD(device_attach, fsl_sdhci_attach), DEVMETHOD(device_detach, fsl_sdhci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar), DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar), /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios), DEVMETHOD(mmcbr_request, sdhci_generic_request), DEVMETHOD(mmcbr_get_ro, fsl_sdhci_get_ro), DEVMETHOD(mmcbr_acquire_host, sdhci_generic_acquire_host), DEVMETHOD(mmcbr_release_host, sdhci_generic_release_host), /* SDHCI accessors */ DEVMETHOD(sdhci_read_1, fsl_sdhci_read_1), DEVMETHOD(sdhci_read_2, fsl_sdhci_read_2), DEVMETHOD(sdhci_read_4, fsl_sdhci_read_4), DEVMETHOD(sdhci_read_multi_4, fsl_sdhci_read_multi_4), DEVMETHOD(sdhci_write_1, fsl_sdhci_write_1), DEVMETHOD(sdhci_write_2, fsl_sdhci_write_2), DEVMETHOD(sdhci_write_4, fsl_sdhci_write_4), DEVMETHOD(sdhci_write_multi_4, fsl_sdhci_write_multi_4), DEVMETHOD(sdhci_get_card_present,fsl_sdhci_get_card_present), DEVMETHOD_END }; static driver_t fsl_sdhci_driver = { "sdhci_fsl", fsl_sdhci_methods, sizeof(struct fsl_sdhci_softc), }; DRIVER_MODULE(sdhci_fsl, simplebus, fsl_sdhci_driver, NULL, NULL); SDHCI_DEPEND(sdhci_fsl); #ifndef MMCCAM MMC_DECLARE_BRIDGE(sdhci_fsl); #endif diff --git a/sys/dev/smbus/smbus.c b/sys/dev/smbus/smbus.c index b0983fa32c30..1ba824b69b53 100644 --- a/sys/dev/smbus/smbus.c +++ b/sys/dev/smbus/smbus.c @@ -1,248 +1,248 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #include "bus_if.h" struct smbus_ivar { uint8_t addr; }; /* * Autoconfiguration and support routines for System Management bus */ static int smbus_probe(device_t dev) { device_set_desc(dev, "System Management Bus"); return (0); } static int smbus_attach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); mtx_init(&sc->lock, device_get_nameunit(dev), "smbus", MTX_DEF); - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int smbus_detach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); device_delete_children(dev); mtx_destroy(&sc->lock); return (0); } void smbus_generic_intr(device_t dev, u_char devaddr, char low, char high, int err) { } static device_t smbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct smbus_ivar *devi; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct smbus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, devi); return (child); } static void smbus_child_deleted(device_t dev, device_t child) { free(device_get_ivars(child), M_DEVBUF); } static void smbus_hinted_child(device_t bus, const char *dname, int dunit) { struct smbus_ivar *devi; device_t child; int addr; addr = 0; resource_int_value(dname, dunit, "addr", &addr); if (addr > UINT8_MAX) { device_printf(bus, "ignored incorrect slave address hint 0x%x" " for %s%d\n", addr, dname, dunit); return; } child = BUS_ADD_CHILD(bus, SMBUS_ORDER_HINTED, dname, dunit); if (child == NULL) return; devi = device_get_ivars(child); devi->addr = addr; } static int smbus_child_location(device_t parent, device_t child, struct sbuf *sb) { struct smbus_ivar *devi; devi = device_get_ivars(child); if (devi->addr != 0) sbuf_printf(sb, "addr=0x%x", devi->addr); return (0); } static int smbus_print_child(device_t parent, device_t child) { struct smbus_ivar *devi; int retval; devi = device_get_ivars(child); retval = bus_print_child_header(parent, child); if (devi->addr != 0) retval += printf(" at addr 0x%x", devi->addr); retval += bus_print_child_footer(parent, child); return (retval); } static int smbus_read_ivar(device_t parent, device_t child, int which, uintptr_t *result) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: if (devi->addr != 0) *result = devi->addr; else *result = -1; break; default: return (ENOENT); } return (0); } static int smbus_write_ivar(device_t parent, device_t child, int which, uintptr_t value) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: /* Allow setting, but not changing, the slave address.
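* A nonzero address (for example, one set from a hint by * smbus_hinted_child()) is final; subsequent writes fail with EINVAL.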
*/ if (devi->addr != 0) return (EINVAL); devi->addr = value; break; default: return (ENOENT); } return (0); } static void smbus_probe_nomatch(device_t bus, device_t child) { struct smbus_ivar *devi = device_get_ivars(child); /* * Ignore (self-identified) devices without a slave address set. * For example, smb(4). */ if (devi->addr != 0) device_printf(bus, " at addr %#x\n", devi->addr); } /* * Device methods */ static device_method_t smbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, smbus_probe), DEVMETHOD(device_attach, smbus_attach), DEVMETHOD(device_detach, smbus_detach), /* bus interface */ DEVMETHOD(bus_add_child, smbus_add_child), DEVMETHOD(bus_child_deleted, smbus_child_deleted), DEVMETHOD(bus_hinted_child, smbus_hinted_child), DEVMETHOD(bus_probe_nomatch, smbus_probe_nomatch), DEVMETHOD(bus_child_location, smbus_child_location), DEVMETHOD(bus_print_child, smbus_print_child), DEVMETHOD(bus_read_ivar, smbus_read_ivar), DEVMETHOD(bus_write_ivar, smbus_write_ivar), DEVMETHOD_END }; driver_t smbus_driver = { "smbus", smbus_methods, sizeof(struct smbus_softc), }; MODULE_VERSION(smbus, SMBUS_MODVER); diff --git a/sys/dev/spibus/ofw_spibus.c b/sys/dev/spibus/ofw_spibus.c index 57a5f562c9b3..caa36644dc80 100644 --- a/sys/dev/spibus/ofw_spibus.c +++ b/sys/dev/spibus/ofw_spibus.c @@ -1,243 +1,243 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009, Nathan Whitehorn * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" struct ofw_spibus_devinfo { struct spibus_ivar opd_dinfo; struct ofw_bus_devinfo opd_obdinfo; }; /* Methods */ static device_probe_t ofw_spibus_probe; static device_attach_t ofw_spibus_attach; static device_t ofw_spibus_add_child(device_t dev, u_int order, const char *name, int unit); static const struct ofw_bus_devinfo *ofw_spibus_get_devinfo(device_t bus, device_t dev); static int ofw_spibus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW SPI bus"); return (BUS_PROBE_DEFAULT + 1); } static int ofw_spibus_attach(device_t dev) { struct spibus_softc *sc = device_get_softc(dev); struct ofw_spibus_devinfo *dinfo; phandle_t child; pcell_t clock, paddr; device_t childdev; uint32_t mode = SPIBUS_MODE_NONE; sc->dev = dev; - bus_generic_probe(dev); + bus_identify_children(dev); bus_enumerate_hinted_children(dev); /* * Attach those children represented in the device tree. */ for (child = OF_child(ofw_bus_get_node(dev)); child != 0; child = OF_peer(child)) { /* * Try to get the CS number first from the spi-chipselect * property, then try the reg property. */ if (OF_getencprop(child, "spi-chipselect", &paddr, sizeof(paddr)) == -1) { if (OF_getencprop(child, "reg", &paddr, sizeof(paddr)) == -1) continue; } /* * Try to get the cpol/cpha mode */ if (OF_hasprop(child, "spi-cpol")) mode = SPIBUS_MODE_CPOL; if (OF_hasprop(child, "spi-cpha")) { if (mode == SPIBUS_MODE_CPOL) mode = SPIBUS_MODE_CPOL_CPHA; else mode = SPIBUS_MODE_CPHA; } /* * Try to get the CS polarity */ if (OF_hasprop(child, "spi-cs-high")) paddr |= SPIBUS_CS_HIGH; /* * Get the maximum clock frequency for device, zero means * use the default bus speed. * * XXX Note that the current (2018-04-07) dts bindings say that * spi-max-frequency is a required property (but says nothing of * how to interpret a value of zero). */ if (OF_getencprop(child, "spi-max-frequency", &clock, sizeof(clock)) == -1) clock = 0; /* * Now set up the SPI and OFW bus layer devinfo and add it * to the bus. */ dinfo = malloc(sizeof(struct ofw_spibus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) continue; dinfo->opd_dinfo.cs = paddr; dinfo->opd_dinfo.clock = clock; dinfo->opd_dinfo.mode = mode; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } childdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY); resource_list_init(&dinfo->opd_dinfo.rl); ofw_bus_intr_to_rl(childdev, child, &dinfo->opd_dinfo.rl, NULL); device_set_ivars(childdev, dinfo); } return (bus_generic_attach(dev)); } static device_t ofw_spibus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_spibus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_spibus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. 
*/ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static void ofw_spibus_child_deleted(device_t dev, device_t child) { free(device_get_ivars(child), M_DEVBUF); } static const struct ofw_bus_devinfo * ofw_spibus_get_devinfo(device_t bus, device_t dev) { struct ofw_spibus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } static struct resource_list * ofw_spibus_get_resource_list(device_t bus __unused, device_t child) { struct spibus_ivar *devi; devi = SPIBUS_IVAR(child); return (&devi->rl); } static device_method_t ofw_spibus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_spibus_probe), DEVMETHOD(device_attach, ofw_spibus_attach), /* Bus interface */ DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_spibus_add_child), DEVMETHOD(bus_child_deleted, ofw_spibus_child_deleted), DEVMETHOD(bus_get_resource_list, ofw_spibus_get_resource_list), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_spibus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_1(spibus, ofw_spibus_driver, ofw_spibus_methods, sizeof(struct spibus_softc), spibus_driver); DRIVER_MODULE(ofw_spibus, spi, ofw_spibus_driver, 0, 0); MODULE_VERSION(ofw_spibus, 1); MODULE_DEPEND(ofw_spibus, spibus, 1, 1, 1); diff --git a/sys/dev/sram/mmio_sram.c b/sys/dev/sram/mmio_sram.c index c1d024459650..b9a104760194 100644 --- a/sys/dev/sram/mmio_sram.c +++ b/sys/dev/sram/mmio_sram.c @@ -1,161 +1,161 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2022 Ruslan Bukin * * This work was supported by Innovate UK project 105694, "Digital Security * by Design (DSbD) Technology Platform Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include "mmio_sram_if.h" #define dprintf(fmt, ...) 
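/* * dprintf() is compiled out by default. For debugging it can be pointed * at the console, e.g.: * #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__) */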
static struct resource_spec mmio_sram_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; struct mmio_sram_softc { struct simplebus_softc simplebus_sc; struct resource *res[1]; device_t dev; }; static int mmio_sram_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "mmio-sram")) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); device_set_desc(dev, "MMIO SRAM"); return (BUS_PROBE_DEFAULT); } static int mmio_sram_attach(device_t dev) { struct mmio_sram_softc *sc; phandle_t node; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, mmio_sram_spec, sc->res) != 0) { device_printf(dev, "Can't allocate resources for device.\n"); return (ENXIO); } node = ofw_bus_get_node(dev); if (node == -1) return (ENXIO); simplebus_init(dev, node); /* * Allow devices to identify. */ - bus_generic_probe(dev); + bus_identify_children(dev); /* * Now walk the OFW tree and attach top-level devices. */ for (node = OF_child(node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (bus_generic_attach(dev)); } static int mmio_sram_detach(device_t dev) { struct mmio_sram_softc *sc; sc = device_get_softc(dev); bus_release_resources(dev, mmio_sram_spec, sc->res); return (0); } static uint8_t mmio_sram_read_1(device_t dev, bus_size_t offset) { struct mmio_sram_softc *sc; sc = device_get_softc(dev); dprintf("%s: reading from %lx\n", __func__, offset); return (bus_read_1(sc->res[0], offset)); } static void mmio_sram_write_1(device_t dev, bus_size_t offset, uint8_t val) { struct mmio_sram_softc *sc; sc = device_get_softc(dev); dprintf("%s: writing to %lx val %x\n", __func__, offset, val); bus_write_1(sc->res[0], offset, val); } static device_method_t mmio_sram_methods[] = { /* Device Interface */ DEVMETHOD(device_probe, mmio_sram_probe), DEVMETHOD(device_attach, mmio_sram_attach), DEVMETHOD(device_detach, mmio_sram_detach), /* MMIO interface */ DEVMETHOD(mmio_sram_read_1, mmio_sram_read_1), DEVMETHOD(mmio_sram_write_1, mmio_sram_write_1), DEVMETHOD_END }; DEFINE_CLASS_1(mmio_sram, mmio_sram_driver, mmio_sram_methods, sizeof(struct mmio_sram_softc), simplebus_driver); EARLY_DRIVER_MODULE(mmio_sram, simplebus, mmio_sram_driver, 0, 0, BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(mmio_sram, 1); diff --git a/sys/dev/superio/superio.c b/sys/dev/superio/superio.c index 7dbd7d30907a..fe049a5d5419 100644 --- a/sys/dev/superio/superio.c +++ b/sys/dev/superio/superio.c @@ -1,1121 +1,1121 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Andriy Gapon * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "isa_if.h" typedef void (*sio_conf_enter_f)(struct resource*, uint16_t); typedef void (*sio_conf_exit_f)(struct resource*, uint16_t); struct sio_conf_methods { sio_conf_enter_f enter; sio_conf_exit_f exit; superio_vendor_t vendor; }; struct sio_device { uint8_t ldn; superio_dev_type_t type; }; struct superio_devinfo { STAILQ_ENTRY(superio_devinfo) link; struct resource_list resources; device_t dev; uint8_t ldn; superio_dev_type_t type; uint16_t iobase; uint16_t iobase2; uint8_t irq; uint8_t dma; }; struct siosc { struct mtx conf_lock; STAILQ_HEAD(, superio_devinfo) devlist; struct resource* io_res; struct cdev *chardev; int io_rid; uint16_t io_port; const struct sio_conf_methods *methods; const struct sio_device *known_devices; superio_vendor_t vendor; uint16_t devid; uint8_t revid; int extid; uint8_t current_ldn; uint8_t ldn_reg; uint8_t enable_reg; }; static d_ioctl_t superio_ioctl; static struct cdevsw superio_cdevsw = { .d_version = D_VERSION, .d_ioctl = superio_ioctl, .d_name = "superio", }; #define NUMPORTS 2 static uint8_t sio_read(struct resource* res, uint8_t reg) { bus_write_1(res, 0, reg); return (bus_read_1(res, 1)); } /* Read a word from two one-byte registers, big endian. 
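* (For example, sio_readw(res, 0x20) returns register 0x20 in the high * byte and register 0x21 in the low byte.)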
*/ static uint16_t sio_readw(struct resource* res, uint8_t reg) { uint16_t v; v = sio_read(res, reg); v <<= 8; v |= sio_read(res, reg + 1); return (v); } static void sio_write(struct resource* res, uint8_t reg, uint8_t val) { bus_write_1(res, 0, reg); bus_write_1(res, 1, val); } static void sio_ldn_select(struct siosc *sc, uint8_t ldn) { mtx_assert(&sc->conf_lock, MA_OWNED); if (ldn == sc->current_ldn) return; sio_write(sc->io_res, sc->ldn_reg, ldn); sc->current_ldn = ldn; } static uint8_t sio_ldn_read(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_read(sc->io_res, reg)); } static uint16_t sio_ldn_readw(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_readw(sc->io_res, reg)); } static void sio_ldn_write(struct siosc *sc, uint8_t ldn, uint8_t reg, uint8_t val) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg <= sc->ldn_reg) { printf("ignored attempt to write special register 0x%x\n", reg); return; } sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); sio_write(sc->io_res, reg, val); } static void sio_conf_enter(struct siosc *sc) { mtx_lock(&sc->conf_lock); sc->methods->enter(sc->io_res, sc->io_port); } static void sio_conf_exit(struct siosc *sc) { sc->methods->exit(sc->io_res, sc->io_port); sc->current_ldn = 0xff; mtx_unlock(&sc->conf_lock); } static void ite_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x01); bus_write_1(res, 0, 0x55); bus_write_1(res, 0, port == 0x2e ? 
0x55 : 0xaa); } static void ite_conf_exit(struct resource* res, uint16_t port) { sio_write(res, 0x02, 0x02); } static const struct sio_conf_methods ite_conf_methods = { .enter = ite_conf_enter, .exit = ite_conf_exit, .vendor = SUPERIO_VENDOR_ITE }; static void nvt_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void nvt_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods nvt_conf_methods = { .enter = nvt_conf_enter, .exit = nvt_conf_exit, .vendor = SUPERIO_VENDOR_NUVOTON }; static void fintek_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void fintek_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods fintek_conf_methods = { .enter = fintek_conf_enter, .exit = fintek_conf_exit, .vendor = SUPERIO_VENDOR_FINTEK }; static const struct sio_conf_methods * const methods_table[] = { &ite_conf_methods, &nvt_conf_methods, &fintek_conf_methods, NULL }; static const uint16_t ports_table[] = { 0x2e, 0x4e, 0 }; const struct sio_device ite_devices[] = { { .ldn = 4, .type = SUPERIO_DEV_HWM }, { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device w83627_devices[] = { { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .ldn = 9, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nvt_devices[] = { { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct5104_devices[] = { { .ldn = 7, .type = SUPERIO_DEV_GPIO }, { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .ldn = 15, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct5585_devices[] = { { .ldn = 9, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct611x_devices[] = { { .ldn = 0x7, .type = SUPERIO_DEV_GPIO }, { .ldn = 0x8, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct67xx_devices[] = { { .ldn = 0x8, .type = SUPERIO_DEV_WDT }, { .ldn = 0x9, .type = SUPERIO_DEV_GPIO }, { .ldn = 0xb, .type = SUPERIO_DEV_HWM }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device fintek_devices[] = { { .ldn = 6, .type = SUPERIO_DEV_GPIO }, { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; static const struct { superio_vendor_t vendor; uint16_t devid; uint16_t mask; int extid; /* Extra ID: used to handle conflicting devid. */ const char *descr; const struct sio_device *devices; } superio_table[] = { { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8613, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8712, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8716, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8718, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8720, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8721, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8726, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8728, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8771, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x1061, .mask = 0x00, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. 
A)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5200, .mask = 0xff, .descr = "Winbond 83627HF/F/HG/G", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5900, .mask = 0xff, .descr = "Winbond 83627S", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6000, .mask = 0xff, .descr = "Winbond 83697HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6800, .mask = 0xff, .descr = "Winbond 83697UG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x7000, .mask = 0xff, .descr = "Winbond 83637HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8200, .mask = 0xff, .descr = "Winbond 83627THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8500, .mask = 0xff, .descr = "Winbond 83687THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8800, .mask = 0xff, .descr = "Winbond 83627EHF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa000, .mask = 0xff, .descr = "Winbond 83627DHG", .devices = w83627_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa200, .mask = 0xff, .descr = "Winbond 83627UHG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa500, .mask = 0xff, .descr = "Winbond 83667HG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb000, .mask = 0xff, .descr = "Winbond 83627DHG-P", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb300, .mask = 0xff, .descr = "Winbond 83667HG-B", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb400, .mask = 0xff, .descr = "Nuvoton NCT6775", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc300, .mask = 0xff, .descr = "Nuvoton NCT6776", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc400, .mask = 0xff, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. 
B+)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc500, .mask = 0xff, .descr = "Nuvoton NCT6779D", .devices = nct67xx_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd42a, .extid = 1, .descr = "Nuvoton NCT6796D-E", .devices = nct67xx_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd42a, .extid = 2, .descr = "Nuvoton NCT5585D", .devices = nct5585_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc800, .mask = 0xff, .descr = "Nuvoton NCT6791", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc900, .mask = 0xff, .descr = "Nuvoton NCT6792", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd100, .mask = 0xff, .descr = "Nuvoton NCT6793", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd200, .mask = 0xff, .descr = "Nuvoton NCT6112D/NCT6114D/NCT6116D", .devices = nct611x_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd300, .mask = 0xff, .descr = "Nuvoton NCT6795", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_FINTEK, .devid = 0x1210, .mask = 0xff, .descr = "Fintek F81803", .devices = fintek_devices, }, { .vendor = SUPERIO_VENDOR_FINTEK, .devid = 0x0704, .descr = "Fintek F81865", .devices = fintek_devices, }, { 0, 0 } }; static const char * devtype_to_str(superio_dev_type_t type) { switch (type) { case SUPERIO_DEV_NONE: return ("none"); case SUPERIO_DEV_HWM: return ("HWM"); case SUPERIO_DEV_WDT: return ("WDT"); case SUPERIO_DEV_GPIO: return ("GPIO"); case SUPERIO_DEV_MAX: return ("invalid"); } return ("invalid"); } static int superio_detect(device_t dev, bool claim, struct siosc *sc) { struct resource *res; rman_res_t port; rman_res_t count; uint16_t devid; uint8_t revid; int error; int rid; int i, m; int prefer; error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &port, &count); if (error != 0) return (error); if (port > UINT16_MAX || count < NUMPORTS) { device_printf(dev, "unexpected I/O range size\n"); return (ENXIO); } /* * Make a temporary resource reservation for hardware probing. * If we can't get the resources we need then * we need to abort. Possibly this indicates * the resources were used by another device * in which case the probe would have failed anyhow. */ rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { if (claim) device_printf(dev, "failed to allocate I/O resource\n"); return (ENXIO); } prefer = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer", &prefer); if (bootverbose && prefer > 0) device_printf(dev, "prefer extid %d\n", prefer); for (m = 0; methods_table[m] != NULL; m++) { methods_table[m]->enter(res, port); if (methods_table[m]->vendor == SUPERIO_VENDOR_ITE) { devid = sio_readw(res, 0x20); revid = sio_read(res, 0x22); } else if (methods_table[m]->vendor == SUPERIO_VENDOR_NUVOTON) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else if (methods_table[m]->vendor == SUPERIO_VENDOR_FINTEK) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else { continue; } methods_table[m]->exit(res, port); for (i = 0; superio_table[i].vendor != 0; i++) { uint16_t mask; mask = superio_table[i].mask; if (superio_table[i].vendor != methods_table[m]->vendor) continue; if ((superio_table[i].devid & ~mask) != (devid & ~mask)) continue; if (prefer > 0 && prefer != superio_table[i].extid) continue; break; } /* Found a matching SuperIO entry. 
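 */

/*
 * Editor's sketch of the matching rule used in the loop above: `mask'
 * marks low devid bits that encode a chip revision and must be ignored
 * when comparing, while `extid' (driven by the "prefer" hint) breaks
 * ties between different chips that report the same devid.  Stand-alone,
 * with a table trimmed from the one above:
 */
#include <stdint.h>
#include <stdio.h>

struct sio_id {
	uint16_t devid;
	uint16_t mask;		/* don't-care bits, e.g. the revision */
	int extid;		/* tie-breaker for a shared devid */
	const char *descr;
};

static const struct sio_id table[] = {
	{ 0xc400, 0xff, 0, "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. B+)" },
	{ 0xd42a, 0x00, 1, "Nuvoton NCT6796D-E" },
	{ 0xd42a, 0x00, 2, "Nuvoton NCT5585D" },
	{ 0, 0, 0, NULL }
};

static const struct sio_id *
match(uint16_t devid, int prefer)
{
	const struct sio_id *e;

	for (e = table; e->descr != NULL; e++) {
		if ((e->devid & ~e->mask) != (devid & ~e->mask))
			continue;
		if (prefer > 0 && prefer != e->extid)
			continue;
		return (e);
	}
	return (NULL);
}

int
main(void)
{
	/* 0xc452 matches 0xc400: the revision byte is masked off. */
	printf("%s\n", match(0xc452, 0)->descr);
	/* "prefer" selects between the two chips sharing devid 0xd42a. */
	printf("%s\n", match(0xd42a, 2)->descr);
	return (0);
}

/*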
*/ if (superio_table[i].vendor != 0) break; } if (methods_table[m] == NULL) error = ENXIO; else error = 0; if (!claim || error != 0) { bus_release_resource(dev, SYS_RES_IOPORT, rid, res); return (error); } sc->methods = methods_table[m]; sc->vendor = sc->methods->vendor; sc->known_devices = superio_table[i].devices; sc->io_res = res; sc->io_rid = rid; sc->io_port = port; sc->devid = devid; sc->revid = revid; sc->extid = superio_table[i].extid; KASSERT(sc->vendor == SUPERIO_VENDOR_ITE || sc->vendor == SUPERIO_VENDOR_NUVOTON || sc->vendor == SUPERIO_VENDOR_FINTEK, ("Only ITE, Nuvoton and Fintek SuperIO-s are supported")); sc->ldn_reg = 0x07; sc->enable_reg = 0x30; /* FIXME enable_reg not used by nctgpio(4). */ sc->current_ldn = 0xff; /* no device should have this */ if (superio_table[i].descr != NULL) { device_set_desc(dev, superio_table[i].descr); } else if (sc->vendor == SUPERIO_VENDOR_ITE) { device_set_descf(dev, "ITE IT%4x SuperIO (revision 0x%02x)", sc->devid, sc->revid); } return (0); } static void superio_identify(driver_t *driver, device_t parent) { device_t child; int i; /* * Don't create child devices if any already exist. * Those could be created via isa hints or if this * driver is loaded, unloaded and then loaded again. */ if (device_find_child(parent, "superio", -1)) { if (bootverbose) printf("superio: device(s) already created\n"); return; } /* * Create a child for each candidate port. * It would be nice if we could somehow clean up those * that this driver fails to probe. */ for (i = 0; ports_table[i] != 0; i++) { child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "superio", -1); if (child == NULL) { device_printf(parent, "failed to add superio child\n"); continue; } bus_set_resource(child, SYS_RES_IOPORT, 0, ports_table[i], 2); if (superio_detect(child, false, NULL) != 0) device_delete_child(parent, child); } } static int superio_probe(device_t dev) { struct siosc *sc; int error; /* Make sure we do not claim some ISA PNP device. 
*/ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* * XXX We can populate the softc now only because we return * BUS_PROBE_SPECIFIC */ sc = device_get_softc(dev); error = superio_detect(dev, true, sc); if (error != 0) return (error); return (BUS_PROBE_SPECIFIC); } static void superio_add_known_child(device_t dev, superio_dev_type_t type, uint8_t ldn) { struct siosc *sc = device_get_softc(dev); struct superio_devinfo *dinfo; device_t child; child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "failed to add child for ldn %d, type %s\n", ldn, devtype_to_str(type)); return; } dinfo = device_get_ivars(child); dinfo->ldn = ldn; dinfo->type = type; sio_conf_enter(sc); dinfo->iobase = sio_ldn_readw(sc, ldn, 0x60); dinfo->iobase2 = sio_ldn_readw(sc, ldn, 0x62); dinfo->irq = sio_ldn_readw(sc, ldn, 0x70); dinfo->dma = sio_ldn_readw(sc, ldn, 0x74); sio_conf_exit(sc); STAILQ_INSERT_TAIL(&sc->devlist, dinfo, link); } static int superio_attach(device_t dev) { struct siosc *sc = device_get_softc(dev); int i; mtx_init(&sc->conf_lock, device_get_nameunit(dev), "superio", MTX_DEF); STAILQ_INIT(&sc->devlist); for (i = 0; sc->known_devices[i].type != SUPERIO_DEV_NONE; i++) { superio_add_known_child(dev, sc->known_devices[i].type, sc->known_devices[i].ldn); } - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); sc->chardev = make_dev(&superio_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "superio%d", device_get_unit(dev)); if (sc->chardev == NULL) device_printf(dev, "failed to create character device\n"); else sc->chardev->si_drv1 = sc; return (0); } static int superio_detach(device_t dev) { struct siosc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error != 0) return (error); if (sc->chardev != NULL) destroy_dev(sc->chardev); device_delete_children(dev); bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); mtx_destroy(&sc->conf_lock); return (0); } static device_t superio_add_child(device_t dev, u_int order, const char *name, int unit) { struct superio_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->ldn = 0xff; dinfo->type = SUPERIO_DEV_NONE; dinfo->dev = child; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } static void superio_child_deleted(device_t dev, device_t child) { struct superio_devinfo *dinfo; dinfo = device_get_ivars(child); if (dinfo == NULL) return; resource_list_free(&dinfo->resources); free(dinfo, M_DEVBUF); } static int superio_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct superio_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case SUPERIO_IVAR_LDN: *result = dinfo->ldn; break; case SUPERIO_IVAR_TYPE: *result = dinfo->type; break; case SUPERIO_IVAR_IOBASE: *result = dinfo->iobase; break; case SUPERIO_IVAR_IOBASE2: *result = dinfo->iobase2; break; case SUPERIO_IVAR_IRQ: *result = dinfo->irq; break; case SUPERIO_IVAR_DMA: *result = dinfo->dma; break; default: return (ENOENT); } return (0); } static int superio_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case SUPERIO_IVAR_LDN: case SUPERIO_IVAR_TYPE: case SUPERIO_IVAR_IOBASE: case SUPERIO_IVAR_IOBASE2: case SUPERIO_IVAR_IRQ: case SUPERIO_IVAR_DMA: return (EINVAL); default: 
return (ENOENT); } } static struct resource_list * superio_get_resource_list(device_t dev, device_t child) { struct superio_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static int superio_printf(struct superio_devinfo *dinfo, const char *fmt, ...) { va_list ap; int retval; retval = printf("superio:%s@ldn0x%02x: ", devtype_to_str(dinfo->type), dinfo->ldn); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static void superio_child_detached(device_t dev, device_t child) { struct superio_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) superio_printf(dinfo, "Device leaked IRQ resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) superio_printf(dinfo, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) superio_printf(dinfo, "Device leaked I/O resources\n"); } static int superio_child_location(device_t parent, device_t child, struct sbuf *sb) { uint8_t ldn; ldn = superio_get_ldn(child); sbuf_printf(sb, "ldn=0x%02x", ldn); return (0); } static int superio_child_pnp(device_t parent, device_t child, struct sbuf *sb) { superio_dev_type_t type; type = superio_get_type(child); sbuf_printf(sb, "type=%s", devtype_to_str(type)); return (0); } static int superio_print_child(device_t parent, device_t child) { superio_dev_type_t type; uint8_t ldn; int retval; ldn = superio_get_ldn(child); type = superio_get_type(child); retval = bus_print_child_header(parent, child); retval += printf(" at %s ldn 0x%02x", devtype_to_str(type), ldn); retval += bus_print_child_footer(parent, child); return (retval); } superio_vendor_t superio_vendor(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->vendor); } uint16_t superio_devid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->devid); } uint8_t superio_revid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->revid); } int superio_extid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->extid); } uint8_t superio_ldn_read(device_t dev, uint8_t ldn, uint8_t reg) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); uint8_t v; sio_conf_enter(sc); v = sio_ldn_read(sc, ldn, reg); sio_conf_exit(sc); return (v); } uint8_t superio_read(device_t dev, uint8_t reg) { struct superio_devinfo *dinfo = device_get_ivars(dev); return (superio_ldn_read(dev, dinfo->ldn, reg)); } void superio_ldn_write(device_t dev, uint8_t ldn, uint8_t reg, uint8_t val) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); sio_conf_enter(sc); sio_ldn_write(sc, ldn, reg, val); sio_conf_exit(sc); } void superio_write(device_t dev, uint8_t reg, uint8_t val) { struct superio_devinfo *dinfo = device_get_ivars(dev); return (superio_ldn_write(dev, dinfo->ldn, reg, val)); } bool superio_dev_enabled(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips.
*/ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return (true); v = superio_read(dev, sc->enable_reg); /* FIXME enable_reg not used by nctgpio(4). */ return ((v & mask) != 0); } void superio_dev_enable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v |= mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } void superio_dev_disable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v &= ~mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } device_t superio_find_dev(device_t superio, superio_dev_type_t type, int ldn) { struct siosc *sc = device_get_softc(superio); struct superio_devinfo *dinfo; if (ldn < -1 || ldn > UINT8_MAX) return (NULL); /* ERANGE */ if (type == SUPERIO_DEV_NONE && ldn == -1) return (NULL); /* EINVAL */ STAILQ_FOREACH(dinfo, &sc->devlist, link) { if (ldn != -1 && dinfo->ldn != ldn) continue; if (type != SUPERIO_DEV_NONE && dinfo->type != type) continue; return (dinfo->dev); } return (NULL); } static int superio_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct siosc *sc; struct superiocmd *s; sc = dev->si_drv1; s = (struct superiocmd *)data; switch (cmd) { case SUPERIO_CR_READ: sio_conf_enter(sc); s->val = sio_ldn_read(sc, s->ldn, s->cr); sio_conf_exit(sc); return (0); case SUPERIO_CR_WRITE: sio_conf_enter(sc); sio_ldn_write(sc, s->ldn, s->cr, s->val); sio_conf_exit(sc); return (0); default: return (ENOTTY); } } static device_method_t superio_methods[] = { DEVMETHOD(device_identify, superio_identify), DEVMETHOD(device_probe, superio_probe), DEVMETHOD(device_attach, superio_attach), DEVMETHOD(device_detach, superio_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, superio_add_child), DEVMETHOD(bus_child_deleted, superio_child_deleted), DEVMETHOD(bus_child_detached, superio_child_detached), DEVMETHOD(bus_child_location, superio_child_location), DEVMETHOD(bus_child_pnpinfo, superio_child_pnp), DEVMETHOD(bus_print_child, superio_print_child), DEVMETHOD(bus_read_ivar, superio_read_ivar), DEVMETHOD(bus_write_ivar, superio_write_ivar), DEVMETHOD(bus_get_resource_list, superio_get_resource_list), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t superio_driver = { "superio", superio_methods, sizeof(struct 
siosc) }; DRIVER_MODULE(superio, isa, superio_driver, 0, 0); MODULE_VERSION(superio, 1); diff --git a/sys/dev/xen/bus/xenpv.c b/sys/dev/xen/bus/xenpv.c index 5073b93b24f1..cc4b4bea4537 100644 --- a/sys/dev/xen/bus/xenpv.c +++ b/sys/dev/xen/bus/xenpv.c @@ -1,271 +1,269 @@ /* * Copyright (c) 2014 Roger Pau Monné * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenmem_if.h" /* * Allocate unused physical memory above 4GB in order to map memory * from foreign domains. We use memory starting at 4GB in order to * prevent clashes with MMIO/ACPI regions. * * Since this is not possible on i386 just use any available memory * chunk above 1MB and hope we don't clash with anything else. * * Other architectures better document MMIO regions and drivers more * reliably reserve them. As such, allow using any unpopulated memory * region. */ #ifdef __amd64__ #define LOW_MEM_LIMIT 0x100000000ul #elif defined(__i386__) #define LOW_MEM_LIMIT 0x100000ul #else #define LOW_MEM_LIMIT 0 #endif /* * Memory ranges available for creating external mappings (foreign or grant * pages for example). */ static struct rman unpopulated_mem = { .rm_end = ~0, .rm_type = RMAN_ARRAY, .rm_descr = "Xen scratch memory", }; static void xenpv_identify(driver_t *driver, device_t parent) { if (!xen_domain()) return; /* Make sure there's only one xenpv device. */ if (devclass_get_device(devclass_find(driver->name), 0)) return; /* * The xenpv bus should be the last to attach in order * to properly detect if an ISA bus has already been added. */ if (BUS_ADD_CHILD(parent, UINT_MAX, driver->name, 0) == NULL) panic("Unable to attach xenpv bus."); } static int xenpv_probe(device_t dev) { device_set_desc(dev, "Xen PV bus"); return (BUS_PROBE_NOWILDCARD); } /* Dummy init for arches that don't have a specific implementation. */ int __weak_symbol xen_arch_init_physmem(device_t dev, struct rman *mem) { return (0); } static int xenpv_attach(device_t dev) { int error = rman_init(&unpopulated_mem); if (error != 0) return (error); error = xen_arch_init_physmem(dev, &unpopulated_mem); if (error != 0) return (error); /* * Let our child drivers identify any child devices that they * can find. 
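 */

/*
 * Editor's note: this attach routine shows the two-phase newbus pattern
 * that the present change renames, from bus_generic_probe() to the more
 * accurately named bus_identify_children(): first every driver gets a
 * chance to identify (create) children it knows how to find, then the
 * bus attaches whatever children now exist.  A toy stand-alone model of
 * the ordering (names hypothetical, not the newbus API):
 */
#include <stdio.h>

static const char *children[8];
static int nchildren;

/* Phase 1: each driver's identify method may add children it detects. */
static void
identify_phase(void)
{
	children[nchildren++] = "xenstore0";
}

/* Phase 2: probe and attach every child created in phase 1. */
static void
attach_phase(void)
{
	for (int i = 0; i < nchildren; i++)
		printf("attaching %s\n", children[i]);
}

int
main(void)
{
	identify_phase();	/* like bus_identify_children(dev) */
	attach_phase();		/* like bus_generic_attach(dev) */
	return (0);
}

/*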
Once that is done attach any devices that we * found. */ - error = bus_generic_probe(dev); - if (error) - return (error); + bus_identify_children(dev); error = bus_generic_attach(dev); return (error); } static int release_unpopulated_mem(device_t dev, struct resource *res) { return (rman_is_region_manager(res, &unpopulated_mem) ? rman_release_resource(res) : bus_release_resource(dev, res)); } static struct resource * xenpv_alloc_physmem(device_t dev, device_t child, int *res_id, size_t size) { struct resource *res; vm_paddr_t phys_addr; void *virt_addr; int error; const unsigned int flags = RF_ACTIVE | RF_UNMAPPED | RF_ALIGNMENT_LOG2(PAGE_SHIFT); KASSERT((size & PAGE_MASK) == 0, ("unaligned size requested")); size = round_page(size); /* Attempt to allocate from arch resource manager. */ res = rman_reserve_resource(&unpopulated_mem, 0, ~0, size, flags, child); if (res != NULL) { rman_set_rid(res, *res_id); rman_set_type(res, SYS_RES_MEMORY); } else { static bool warned = false; /* Fallback to generic MMIO allocator. */ if (__predict_false(!warned)) { warned = true; device_printf(dev, "unable to allocate from arch specific routine, " "fall back to unused memory areas\n"); } res = bus_alloc_resource(child, SYS_RES_MEMORY, res_id, LOW_MEM_LIMIT, ~0, size, flags); } if (res == NULL) { device_printf(dev, "failed to allocate Xen unpopulated memory\n"); return (NULL); } phys_addr = rman_get_start(res); error = vm_phys_fictitious_reg_range(phys_addr, phys_addr + size, VM_MEMATTR_XEN); if (error) { int error = release_unpopulated_mem(child, res); if (error != 0) device_printf(dev, "failed to release resource: %d\n", error); return (NULL); } virt_addr = pmap_mapdev_attr(phys_addr, size, VM_MEMATTR_XEN); KASSERT(virt_addr != NULL, ("Failed to create linear mappings")); rman_set_virtual(res, virt_addr); return (res); } static int xenpv_free_physmem(device_t dev, device_t child, int res_id, struct resource *res) { vm_paddr_t phys_addr; void *virt_addr; size_t size; phys_addr = rman_get_start(res); size = rman_get_size(res); virt_addr = rman_get_virtual(res); pmap_unmapdev(virt_addr, size); vm_phys_fictitious_unreg_range(phys_addr, phys_addr + size); return (release_unpopulated_mem(child, res)); } static device_method_t xenpv_methods[] = { /* Device interface */ DEVMETHOD(device_identify, xenpv_identify), DEVMETHOD(device_probe, xenpv_probe), DEVMETHOD(device_attach, xenpv_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), /* Interface to allocate memory for foreign mappings */ DEVMETHOD(xenmem_alloc, xenpv_alloc_physmem), DEVMETHOD(xenmem_free, xenpv_free_physmem), DEVMETHOD_END }; static driver_t xenpv_driver = { "xenpv", xenpv_methods, 0, }; DRIVER_MODULE(xenpv, nexus, xenpv_driver, 0, 0); struct resource * xenmem_alloc(device_t dev, int *res_id, size_t size) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (XENMEM_ALLOC(parent, dev, res_id, size)); } int xenmem_free(device_t dev, int res_id, struct resource *res) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (ENXIO); return (XENMEM_FREE(parent, dev, res_id, res)); } diff --git 
a/sys/dev/xen/xenstore/xenstore.c b/sys/dev/xen/xenstore/xenstore.c index 811aa1859d41..ddb064843227 100644 --- a/sys/dev/xen/xenstore/xenstore.c +++ b/sys/dev/xen/xenstore/xenstore.c @@ -1,1657 +1,1657 @@ /****************************************************************************** * xenstore.c * * Low-level kernel interface to the XenStore. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2009,2010 Spectra Logic Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * \file xenstore.c * \brief XenStore interface * * The XenStore interface is a simple storage system that is a means of * communicating state and configuration data between the Xen Domain 0 * and the various guest domains. All configuration data other than * a small amount of essential information required during the early * boot process of launching a Xen aware guest, is managed using the * XenStore. * * The XenStore is ASCII string based, and has a structure and semantics * similar to a filesystem. There are files and directories, the directories * able to contain files or other directories. The depth of the hierarchy * is only limited by the XenStore's maximum path length. * * The communication channel between the XenStore service and other * domains is via two, guest specific, ring buffers in a shared memory * area. One ring buffer is used for communicating in each direction. * The grant table references for this shared memory are given to the * guest either via the xen_start_info structure for a fully para- * virtualized guest, or via HVM hypercalls for a hardware virtualized * guest. * * The XenStore communication relies on an event channel and thus * interrupts. For this reason, the attachment of the XenStore * relies on an interrupt driven configuration hook to hold off * boot processing until communication with the XenStore service * can be established. * * Several Xen services depend on the XenStore, most notably the * XenBus used to discover and manage Xen devices. These services * are implemented as NewBus child attachments to a bus exported * by this XenStore driver. 
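 */

/*
 * Editor's sketch of a typical in-kernel consumer of the API implemented
 * below, using xs_read() and XST_NIL (no transaction).  This is only a
 * fragment: it can run solely inside a Xen guest kernel, the header path
 * is the one this file's environment provides, and the node name is
 * illustrative.  Relative paths resolve under the domain's home
 * directory, so "name" yields the domain's own name:
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <xen/xenstore/xenstorevar.h>

static void
print_domain_name(void)
{
	void *val;
	u_int len;

	if (xs_read(XST_NIL, "name", "", &len, &val) == 0) {
		printf("domain name: %s\n", (char *)val);
		/* Results arrive in malloc'ed storage owned by the caller. */
		free(val, M_XENSTORE);
	}
}

/*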
*/ static struct xs_watch *find_watch(const char *token); MALLOC_DEFINE(M_XENSTORE, "xenstore", "XenStore data and results"); /** * Pointer to shared memory communication structures allowing us * to communicate with the XenStore service. * * When operating in full PV mode, this pointer is set early in kernel * startup from within xen_machdep.c. In HVM mode, we use hypercalls * to get the guest frame number for the shared page and then map it * into kva. See xs_init() for details. */ static struct xenstore_domain_interface *xen_store; /*-------------------------- Private Data Structures ------------------------*/ /** * Structure capturing messages received from the XenStore service. */ struct xs_stored_msg { TAILQ_ENTRY(xs_stored_msg) list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xs_watch *handle; const char **vec; u_int vec_size; } watch; } u; }; TAILQ_HEAD(xs_stored_msg_list, xs_stored_msg); /** * Container for all XenStore related state. */ struct xs_softc { /** Newbus device for the XenStore. */ device_t xs_dev; /** * Lock serializing access to ring producer/consumer * indexes. Use of this lock guarantees that wakeups * of blocking readers/writers are not missed due to * races with the XenStore service. */ struct mtx ring_lock; /* * Mutex used to insure exclusive access to the outgoing * communication ring. We use a lock type that can be * held while sleeping so that xs_write() can block waiting * for space in the ring to free up, without allowing another * writer to come in and corrupt a partial message write. */ struct sx request_mutex; /** * A list of replies to our requests. * * The reply list is filled by xs_rcv_thread(). It * is consumed by the context that issued the request * to which a reply is made. The requester blocks in * xs_read_reply(). * * \note Only one requesting context can be active at a time. * This is guaranteed by the request_mutex and insures * that the requester sees replies matching the order * of its requests. */ struct xs_stored_msg_list reply_list; /** Lock protecting the reply list. */ struct mtx reply_lock; /** * List of registered watches. */ struct xs_watch_list registered_watches; /** Lock protecting the registered watches list. */ struct mtx registered_watches_lock; /** * List of pending watch callback events. */ struct xs_stored_msg_list watch_events; /** Lock protecting the watch callback list. */ struct mtx watch_events_lock; /** * The process ID of the xenwatch thread. */ pid_t xenwatch_pid; /** * Sleepable mutex used to gate the execution of XenStore * watch event callbacks. * * xenwatch_thread holds an exclusive lock on this mutex * while delivering event callbacks, and xenstore_unregister_watch() * uses an exclusive lock of this mutex to guarantee that no * callbacks of the just unregistered watch are pending * before returning to its caller. */ struct sx xenwatch_mutex; /** * The HVM guest pseudo-physical frame number. This is Xen's mapping * of the true machine frame number into our "physical address space". */ unsigned long gpfn; /** * The event channel for communicating with the * XenStore service. */ int evtchn; /** Handle for XenStore interrupts. */ xen_intr_handle_t xen_intr_handle; /** * Interrupt driven config hook allowing us to defer * attaching children until interrupts (and thus communication * with the XenStore service) are available.
*/ struct intr_config_hook xs_attachcb; /** * Xenstore is a user-space process that usually runs in Dom0, * so if this domain is booting as Dom0, xenstore won't be accessible, * and we have to defer the initialization of xenstore related * devices to later (when xenstore is started). */ bool initialized; /** * Task to run when xenstore is initialized (Dom0 only), will * take care of attaching xenstore related devices. */ struct task xs_late_init; }; /*-------------------------------- Global Data ------------------------------*/ static struct xs_softc xs; /*------------------------- Private Utility Functions -----------------------*/ /** * Count and optionally record pointers to a number of NUL terminated * strings in a buffer. * * \param strings A pointer to a contiguous buffer of NUL terminated strings. * \param dest An array to store pointers to each string found in strings. * \param len The length of the buffer pointed to by strings. * * \return A count of the number of strings found. */ static u_int extract_strings(const char *strings, const char **dest, u_int len) { u_int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) { if (dest != NULL) *dest++ = p; num++; } return (num); } /** * Convert a contiguous buffer containing a series of NUL terminated * strings into an array of pointers to strings. * * The returned pointer references the array of string pointers which * is followed by the storage for the string data. It is the client's * responsibility to free this storage. * * The storage addressed by strings is free'd prior to split returning. * * \param strings A pointer to a contiguous buffer of NUL terminated strings. * \param len The length of the buffer pointed to by strings. * \param num The number of strings found and returned in the strings * array. * * \return An array of pointers to the strings found in the input buffer. */ static const char ** split(char *strings, u_int len, u_int *num) { const char **ret; /* Protect against unterminated buffers. */ if (len > 0) strings[len - 1] = '\0'; /* Count the strings. */ *num = extract_strings(strings, /*dest*/NULL, len); /* Transfer to one big alloc for easy freeing by the caller. */ ret = malloc(*num * sizeof(char *) + len, M_XENSTORE, M_WAITOK); memcpy(&ret[*num], strings, len); free(strings, M_XENSTORE); /* Extract pointers to newly allocated array. */ strings = (char *)&ret[*num]; (void)extract_strings(strings, /*dest*/ret, len); return (ret); } /*------------------------- Public Utility Functions -------------------------*/ /*------- API comments for these methods can be found in xenstorevar.h -------*/ struct sbuf * xs_join(const char *dir, const char *name) { struct sbuf *sb; sb = sbuf_new_auto(); sbuf_cat(sb, dir); if (name[0] != '\0') { sbuf_putc(sb, '/'); sbuf_cat(sb, name); } sbuf_finish(sb); return (sb); } /*-------------------- Low Level Communication Management --------------------*/ /** * Interrupt handler for the XenStore event channel. * * XenStore reads and writes block on "xen_store" for buffer * space. Wake up any blocking operations when the XenStore * service has modified the queues. */ static void xs_intr(void * arg __unused /*__attribute__((unused))*/) { /* If xenstore has not been initialized, initialize it now */ if (!xs.initialized) { xs.initialized = true; /* * Since this task is probing and attaching devices we * have to hold the Giant lock.
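 */

/*
 * Editor's note: split() above packs everything into one allocation, an
 * array of pointers followed by a copy of the string bytes, so a caller
 * frees the vector and the strings together.  A stand-alone model of the
 * same layout using plain malloc instead of the kernel allocator:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **
split_demo(const char *strings, unsigned len, unsigned *num)
{
	char **ret;
	char *copy, *p;
	const char *q;
	unsigned i;

	*num = 0;
	for (q = strings; q < strings + len; q += strlen(q) + 1)
		(*num)++;

	/* One allocation: *num pointers, then the string data itself. */
	ret = malloc(*num * sizeof(char *) + len);
	copy = (char *)&ret[*num];
	memcpy(copy, strings, len);
	for (i = 0, p = copy; i < *num; i++, p += strlen(p) + 1)
		ret[i] = p;
	return (ret);
}

int
main(void)
{
	const char buf[] = "vif\0vbd\0console\0";	/* three entries */
	unsigned i, n;
	char **vec = split_demo(buf, sizeof(buf) - 1, &n);

	for (i = 0; i < n; i++)
		printf("%u: %s\n", i, vec[i]);
	free(vec);	/* releases the pointers and the strings at once */
	return (0);
}

/*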
*/ taskqueue_enqueue(taskqueue_swi_giant, &xs.xs_late_init); } /* * Hold ring lock across wakeup so that clients * cannot miss a wakeup. */ mtx_lock(&xs.ring_lock); wakeup(xen_store); mtx_unlock(&xs.ring_lock); } /** * Verify that the indexes for a ring are valid. * * The difference between the producer and consumer cannot * exceed the size of the ring. * * \param cons The consumer index for the ring to test. * \param prod The producer index for the ring to test. * * \retval 1 If indexes are in range. * \retval 0 If the indexes are out of range. */ static int xs_check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } /** * Return a pointer to, and the length of, the contiguous * free region available for output in a ring buffer. * * \param cons The consumer index for the ring. * \param prod The producer index for the ring. * \param buf The base address of the ring's storage. * \param len The amount of contiguous storage available. * * \return A pointer to the start location of the free region. */ static void * xs_get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return (buf + MASK_XENSTORE_IDX(prod)); } /** * Return a pointer to, and the length of, the contiguous * data available to read from a ring buffer. * * \param cons The consumer index for the ring. * \param prod The producer index for the ring. * \param buf The base address of the ring's storage. * \param len The amount of contiguous data available to read. * * \return A pointer to the start location of the available data. */ static const void * xs_get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return (buf + MASK_XENSTORE_IDX(cons)); } /** * Transmit data to the XenStore service. * * \param tdata A pointer to the contiguous data to send. * \param len The amount of data to send. * * \return On success 0, otherwise an errno value indicating the * cause of failure. * * \invariant Called from thread context. * \invariant The buffer pointed to by tdata is at least len bytes * in length. * \invariant xs.request_mutex exclusively locked. */ static int xs_write_store(const void *tdata, unsigned len) { XENSTORE_RING_IDX cons, prod; const char *data = (const char *)tdata; int error; sx_assert(&xs.request_mutex, SX_XLOCKED); while (len != 0) { void *dst; u_int avail; /* Hold lock so we can't miss wakeups should we block. */ mtx_lock(&xs.ring_lock); cons = xen_store->req_cons; prod = xen_store->req_prod; if ((prod - cons) == XENSTORE_RING_SIZE) { /* * Output ring is full. Wait for a ring event. * * Note that the events from both queues * are combined, so being woken does not * guarantee that data exist in the read * ring. * * To simplify error recovery and the retry, * we specify PDROP so our lock is *not* held * when msleep returns. */ error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP, "xbwrite", /*timeout*/0); if (error && error != EWOULDBLOCK) return (error); /* Try again. */ continue; } mtx_unlock(&xs.ring_lock); /* Verify queue sanity. 
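 */

/*
 * Editor's note on the arithmetic being checked here: cons and prod are
 * free-running 32-bit counters that are only masked down to a ring
 * offset on access, so prod - cons is the number of occupied bytes even
 * after the counters wrap.  A stand-alone check of the contiguous-chunk
 * computation (ring size a power of two, as XENSTORE_RING_SIZE is; the
 * names below are hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u
#define MASK(idx) ((idx) & (RING_SIZE - 1))

/* Contiguous space available to a writer, as in xs_get_output_chunk(). */
static uint32_t
output_chunk(uint32_t cons, uint32_t prod)
{
	uint32_t len = RING_SIZE - MASK(prod);	  /* bytes until the wrap */

	if (RING_SIZE - (prod - cons) < len)	  /* bytes actually free */
		len = RING_SIZE - (prod - cons);
	return (len);
}

int
main(void)
{
	/* Limited by the wrap point: producer offset is 1000 of 1024. */
	printf("%u\n", output_chunk(200, 1000));	  /* 24 */
	/* Limited by free space, with indexes wrapped past 2^32. */
	printf("%u\n", output_chunk(4294967000u, 104u));  /* 624 */
	return (0);
}

/*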
*/ if (!xs_check_indexes(cons, prod)) { xen_store->req_cons = xen_store->req_prod = 0; return (EIO); } dst = xs_get_output_chunk(cons, prod, xen_store->req, &avail); if (avail > len) avail = len; memcpy(dst, data, avail); data += avail; len -= avail; /* * The store to the producer index, which indicates * to the other side that new data has arrived, must * be visible only after our copy of the data into the * ring has completed. */ wmb(); xen_store->req_prod += avail; /* * xen_intr_signal() implies mb(). The other side will see * the change to req_prod at the time of the interrupt. */ xen_intr_signal(xs.xen_intr_handle); } return (0); } /** * Receive data from the XenStore service. * * \param tdata A pointer to the contiguous buffer to receive the data. * \param len The amount of data to receive. * * \return On success 0, otherwise an errno value indicating the * cause of failure. * * \invariant Called from thread context. * \invariant The buffer pointed to by tdata is at least len bytes * in length. * * \note xs_read does not perform any internal locking to guarantee * serial access to the incoming ring buffer. However, there * is only one context processing reads: xs_rcv_thread(). */ static int xs_read_store(void *tdata, unsigned len) { XENSTORE_RING_IDX cons, prod; char *data = (char *)tdata; int error; while (len != 0) { u_int avail; const char *src; /* Hold lock so we can't miss wakeups should we block. */ mtx_lock(&xs.ring_lock); cons = xen_store->rsp_cons; prod = xen_store->rsp_prod; if (cons == prod) { /* * Nothing to read. Wait for a ring event. * * Note that the events from both queues * are combined, so being woken does not * guarantee that data exist in the read * ring. * * To simplify error recovery and the retry, * we specify PDROP so our lock is *not* held * when msleep returns. */ error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP, "xbread", /*timeout*/0); if (error && error != EWOULDBLOCK) return (error); continue; } mtx_unlock(&xs.ring_lock); /* Verify queue sanity. */ if (!xs_check_indexes(cons, prod)) { xen_store->rsp_cons = xen_store->rsp_prod = 0; return (EIO); } src = xs_get_input_chunk(cons, prod, xen_store->rsp, &avail); if (avail > len) avail = len; /* * Insure the data we read is related to the indexes * we read above. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* * Insure that the producer of this ring does not see * the ring space as free until after we have copied it * out. */ mb(); xen_store->rsp_cons += avail; /* * xen_intr_signal() implies mb(). The producer will see * the updated consumer index when the event is delivered. */ xen_intr_signal(xs.xen_intr_handle); } return (0); } /*----------------------- Received Message Processing ------------------------*/ /** * Block reading the next message from the XenStore service and * process the result. * * \param type The returned type of the XenStore message received. * * \return 0 on success. Otherwise an errno value indicating the * type of failure encountered. 
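 */

/*
 * Editor's note: every XenStore message on the wire is a fixed header
 * followed by hdr.len body bytes, which is why the function below does
 * exactly two reads.  A stand-alone model of that framing over a flat
 * buffer (struct layout paraphrased from Xen's xs_wire.h; names here
 * are hypothetical):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_sockmsg {
	uint32_t type;
	uint32_t req_id;
	uint32_t tx_id;
	uint32_t len;		/* body bytes that follow the header */
};

int
main(void)
{
	const char body[] = "OK";
	uint8_t wire[sizeof(struct demo_sockmsg) + sizeof(body)];
	struct demo_sockmsg hdr = { 1, 0, 0, sizeof(body) }, in;
	char *in_body;

	/* Sender: header first, then the body. */
	memcpy(wire, &hdr, sizeof(hdr));
	memcpy(wire + sizeof(hdr), body, sizeof(body));

	/* Receiver: read the header to learn how long the body is. */
	memcpy(&in, wire, sizeof(in));
	in_body = malloc(in.len + 1);
	memcpy(in_body, wire + sizeof(in), in.len);
	in_body[in.len] = '\0';	/* NUL terminate, as xs_process_msg() does */
	printf("type %u, body \"%s\"\n", in.type, in_body);
	free(in_body);
	return (0);
}

/*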
*/ static int xs_process_msg(enum xsd_sockmsg_type *type) { struct xs_stored_msg *msg; char *body; int error; msg = malloc(sizeof(*msg), M_XENSTORE, M_WAITOK); error = xs_read_store(&msg->hdr, sizeof(msg->hdr)); if (error) { free(msg, M_XENSTORE); return (error); } body = malloc(msg->hdr.len + 1, M_XENSTORE, M_WAITOK); error = xs_read_store(body, msg->hdr.len); if (error) { free(body, M_XENSTORE); free(msg, M_XENSTORE); return (error); } body[msg->hdr.len] = '\0'; *type = msg->hdr.type; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); mtx_lock(&xs.registered_watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); mtx_lock(&xs.watch_events_lock); if (msg->u.watch.handle != NULL && (!msg->u.watch.handle->max_pending || msg->u.watch.handle->pending < msg->u.watch.handle->max_pending)) { msg->u.watch.handle->pending++; TAILQ_INSERT_TAIL(&xs.watch_events, msg, list); wakeup(&xs.watch_events); mtx_unlock(&xs.watch_events_lock); } else { mtx_unlock(&xs.watch_events_lock); free(msg->u.watch.vec, M_XENSTORE); free(msg, M_XENSTORE); } mtx_unlock(&xs.registered_watches_lock); } else { msg->u.reply.body = body; mtx_lock(&xs.reply_lock); TAILQ_INSERT_TAIL(&xs.reply_list, msg, list); wakeup(&xs.reply_list); mtx_unlock(&xs.reply_lock); } return (0); } /** * Thread body of the XenStore receive thread. * * This thread blocks waiting for data from the XenStore service * and processes any received messages. */ static void xs_rcv_thread(void *arg __unused) { int error; enum xsd_sockmsg_type type; for (;;) { error = xs_process_msg(&type); if (error) printf("XENSTORE error %d while reading message\n", error); } } /*---------------- XenStore Message Request/Reply Processing -----------------*/ #define xsd_error_count (sizeof(xsd_errors) / sizeof(xsd_errors[0])) /** * Convert a XenStore error string into an errno number. * * \param errorstring The error string to convert. * * \return The errno best matching the input string. * * \note Unknown error strings are converted to EINVAL. */ static int xs_get_error(const char *errorstring) { u_int i; for (i = 0; i < xsd_error_count; i++) { if (!strcmp(errorstring, xsd_errors[i].errstring)) return (xsd_errors[i].errnum); } log(LOG_WARNING, "XENSTORE xen store gave: unknown error %s", errorstring); return (EINVAL); } /** * Block waiting for a reply to a message request. * * \param type The returned type of the reply. * \param len The returned body length of the reply. * \param result The returned body of the reply. * * \return 0 on success. Otherwise an errno indicating the * cause of failure. */ static int xs_read_reply(enum xsd_sockmsg_type *type, u_int *len, void **result) { struct xs_stored_msg *msg; char *body; int error; mtx_lock(&xs.reply_lock); while (TAILQ_EMPTY(&xs.reply_list)) { error = mtx_sleep(&xs.reply_list, &xs.reply_lock, 0, "xswait", hz/10); if (error && error != EWOULDBLOCK) { mtx_unlock(&xs.reply_lock); return (error); } } msg = TAILQ_FIRST(&xs.reply_list); TAILQ_REMOVE(&xs.reply_list, msg, list); mtx_unlock(&xs.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; free(msg, M_XENSTORE); *result = body; return (0); } /** * Pass-thru interface for XenStore access by userland processes * via the XenStore device. * * Reply type and length data are returned by overwriting these * fields in the passed in request message. * * \param msg A properly formatted message to transmit to * the XenStore service.
* \param result The returned body of the reply. * * \return 0 on success. Otherwise an errno indicating the cause * of failure. * * \note The returned result is provided in malloced storage and thus * must be free'd by the caller with 'free(result, M_XENSTORE)'. */ int xs_dev_request_and_reply(struct xsd_sockmsg *msg, void **result) { int error; sx_xlock(&xs.request_mutex); if ((error = xs_write_store(msg, sizeof(*msg) + msg->len)) == 0) error = xs_read_reply(&msg->type, &msg->len, result); sx_xunlock(&xs.request_mutex); return (error); } /** * Send a message with an optionally multi-part body to the XenStore service. * * \param t The transaction to use for this request. * \param request_type The type of message to send. * \param iovec Pointers to the body sections of the request. * \param num_vecs The number of body sections in the request. * \param len The returned length of the reply. * \param result The returned body of the reply. * * \return 0 on success. Otherwise an errno indicating * the cause of failure. * * \note The returned result is provided in malloced storage and thus * must be free'd by the caller with 'free(*result, M_XENSTORE)'. */ static int xs_talkv(struct xs_transaction t, enum xsd_sockmsg_type request_type, const struct iovec *iovec, u_int num_vecs, u_int *len, void **result) { struct xsd_sockmsg msg; void *ret = NULL; u_int i; int error; msg.tx_id = t.id; msg.req_id = 0; msg.type = request_type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; sx_xlock(&xs.request_mutex); error = xs_write_store(&msg, sizeof(msg)); if (error) { printf("xs_talkv failed %d\n", error); goto error_lock_held; } for (i = 0; i < num_vecs; i++) { error = xs_write_store(iovec[i].iov_base, iovec[i].iov_len); if (error) { printf("xs_talkv failed %d\n", error); goto error_lock_held; } } error = xs_read_reply(&msg.type, len, &ret); error_lock_held: sx_xunlock(&xs.request_mutex); if (error) return (error); if (msg.type == XS_ERROR) { error = xs_get_error(ret); free(ret, M_XENSTORE); return (error); } /* Reply is either error or an echo of our request message type. */ KASSERT(msg.type == request_type, ("bad xenstore message type")); if (result) *result = ret; else free(ret, M_XENSTORE); return (0); } /** * Wrapper for xs_talkv allowing easy transmission of a message with * a single, contiguous, message body. * * \param t The transaction to use for this request. * \param request_type The type of message to send. * \param body The body of the request. * \param len The returned length of the reply. * \param result The returned body of the reply. * * \return 0 on success. Otherwise an errno indicating * the cause of failure. * * \note The returned result is provided in malloced storage and thus * must be free'd by the caller with 'free(*result, M_XENSTORE)'. */ static int xs_single(struct xs_transaction t, enum xsd_sockmsg_type request_type, const char *body, u_int *len, void **result) { struct iovec iovec; iovec.iov_base = (void *)(uintptr_t)body; iovec.iov_len = strlen(body) + 1; return (xs_talkv(t, request_type, &iovec, 1, len, result)); } /*------------------------- XenStore Watch Support ---------------------------*/ /** * Transmit a watch request to the XenStore service. * * \param path The path in the XenStore to watch. * \param token A unique identifier for this watch. * * \return 0 on success. Otherwise an errno indicating the * cause of failure.
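 */

/*
 * Editor's note: the watch "token" sent here is simply the address of
 * the kernel's struct xs_watch printed as hex (see find_watch() and
 * xs_resume() below), so mapping a token back to its watch is a
 * strtoul() plus a pointer comparison.  Stand-alone round trip of that
 * encoding (demo names hypothetical):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_watch {
	const char *node;
};

int
main(void)
{
	struct demo_watch w = { "backend/vbd" };
	struct demo_watch *found;
	char token[sizeof(void *) * 2 + 1];	/* hex digits plus NUL */

	snprintf(token, sizeof(token), "%lX", (unsigned long)(uintptr_t)&w);
	/* The receiving side recovers the pointer and compares identity. */
	found = (struct demo_watch *)(uintptr_t)strtoul(token, NULL, 16);
	printf("%s\n", found == &w ? "matched" : "no match");
	return (0);
}

/*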
*/ static int xs_watch(const char *path, const char *token) { struct iovec iov[2]; iov[0].iov_base = (void *)(uintptr_t) path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)(uintptr_t) token; iov[1].iov_len = strlen(token) + 1; return (xs_talkv(XST_NIL, XS_WATCH, iov, 2, NULL, NULL)); } /** * Transmit an unwatch request to the XenStore service. * * \param path The path in the XenStore to watch. * \param token A unique identifier for this watch. * * \return 0 on success. Otherwise an errno indicating the * cause of failure. */ static int xs_unwatch(const char *path, const char *token) { struct iovec iov[2]; iov[0].iov_base = (void *)(uintptr_t) path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)(uintptr_t) token; iov[1].iov_len = strlen(token) + 1; return (xs_talkv(XST_NIL, XS_UNWATCH, iov, 2, NULL, NULL)); } /** * Convert from watch token (unique identifier) to the associated * internal tracking structure for this watch. * * \param token The unique identifier for the watch to find. * * \return A pointer to the found watch structure or NULL. */ static struct xs_watch * find_watch(const char *token) { struct xs_watch *i, *cmp; cmp = (void *)strtoul(token, NULL, 16); LIST_FOREACH(i, &xs.registered_watches, list) if (i == cmp) return (i); return (NULL); } /** * Thread body of the XenStore watch event dispatch thread. */ static void xenwatch_thread(void *unused) { struct xs_stored_msg *msg; for (;;) { mtx_lock(&xs.watch_events_lock); while (TAILQ_EMPTY(&xs.watch_events)) mtx_sleep(&xs.watch_events, &xs.watch_events_lock, PWAIT | PCATCH, "waitev", hz/10); mtx_unlock(&xs.watch_events_lock); sx_xlock(&xs.xenwatch_mutex); mtx_lock(&xs.watch_events_lock); msg = TAILQ_FIRST(&xs.watch_events); if (msg) { TAILQ_REMOVE(&xs.watch_events, msg, list); msg->u.watch.handle->pending--; } mtx_unlock(&xs.watch_events_lock); if (msg != NULL) { /* * XXX There are messages coming in with a NULL * XXX callback. This deserves further investigation; * XXX the workaround here simply prevents the kernel * XXX from panic'ing on startup. */ if (msg->u.watch.handle->callback != NULL) msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); free(msg->u.watch.vec, M_XENSTORE); free(msg, M_XENSTORE); } sx_xunlock(&xs.xenwatch_mutex); } } /*----------- XenStore Configuration, Initialization, and Control ------------*/ /** * Set up communication channels with the XenStore service. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xs_init_comms(void) { int error; if (xen_store->rsp_prod != xen_store->rsp_cons) { log(LOG_WARNING, "XENSTORE response ring is not quiescent " "(%08x:%08x): fixing up\n", xen_store->rsp_cons, xen_store->rsp_prod); xen_store->rsp_cons = xen_store->rsp_prod; } xen_intr_unbind(&xs.xen_intr_handle); error = xen_intr_bind_local_port(xs.xs_dev, xs.evtchn, /*filter*/NULL, xs_intr, /*arg*/NULL, INTR_TYPE_NET|INTR_MPSAFE, &xs.xen_intr_handle); if (error) { log(LOG_WARNING, "XENSTORE request irq failed %i\n", error); return (error); } return (0); } /*------------------ Private Device Attachment Functions --------------------*/ static void xs_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "xenstore", 0); } /** * Probe for the existence of the XenStore. * * \param dev */ static int xs_probe(device_t dev) { /* * We are either operating within a PV kernel or being probed * as the child of the successfully attached xenpci device.
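 */

/*
 * Editor's note: xenwatch_thread() above pops one event under the queue
 * mutex but runs the callback while holding a separate sleepable lock,
 * so an unregistering thread can take that same lock to wait out any
 * callback still in flight.  A pthread model of that dequeue discipline
 * (hypothetical and much simplified):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;  /* queue */
static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER; /* callbacks */
static int queue[8], q_len;

static void
dispatch_one(void)
{
	int ev = -1;

	pthread_mutex_lock(&cb_lock);	/* unregister serializes on this */
	pthread_mutex_lock(&q_lock);
	if (q_len > 0)
		ev = queue[--q_len];
	pthread_mutex_unlock(&q_lock);	/* callback runs without q_lock */
	if (ev != -1)
		printf("callback for event %d\n", ev);
	pthread_mutex_unlock(&cb_lock);
}

int
main(void)
{
	pthread_mutex_lock(&q_lock);
	queue[q_len++] = 42;
	pthread_mutex_unlock(&q_lock);
	dispatch_one();
	return (0);
}

/*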
* Thus we are in a Xen environment and there will be a XenStore. * Unconditionally return success. */ device_set_desc(dev, "XenStore"); return (BUS_PROBE_NOWILDCARD); } static void xs_attach_deferred(void *arg) { - bus_generic_probe(xs.xs_dev); + bus_identify_children(xs.xs_dev); bus_generic_attach(xs.xs_dev); config_intrhook_disestablish(&xs.xs_attachcb); } static void xs_attach_late(void *arg, int pending) { KASSERT((pending == 1), ("xs late attach queued several times")); - bus_generic_probe(xs.xs_dev); + bus_identify_children(xs.xs_dev); bus_generic_attach(xs.xs_dev); } /** * Attach to the XenStore. * * This routine also prepares for the probe/attach of drivers that rely * on the XenStore. */ static int xs_attach(device_t dev) { int error; /* Allow us to get device_t from softc and vice-versa. */ xs.xs_dev = dev; device_set_softc(dev, &xs); /* Initialize the interface to xenstore. */ struct proc *p; xs.initialized = false; xs.evtchn = xen_get_xenstore_evtchn(); if (xs.evtchn == 0) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate a local event channel for xenstore */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; error = HYPERVISOR_event_channel_op( EVTCHNOP_alloc_unbound, &alloc_unbound); if (error != 0) panic( "unable to alloc event channel for Dom0: %d", error); xs.evtchn = alloc_unbound.port; /* Allocate memory for the xs shared ring */ xen_store = malloc(PAGE_SIZE, M_XENSTORE, M_WAITOK | M_ZERO); xs.gpfn = atop(pmap_kextract((vm_offset_t)xen_store)); } else { xs.gpfn = xen_get_xenstore_mfn(); xen_store = pmap_mapdev_attr(ptoa(xs.gpfn), PAGE_SIZE, VM_MEMATTR_XEN); xs.initialized = true; } TAILQ_INIT(&xs.reply_list); TAILQ_INIT(&xs.watch_events); mtx_init(&xs.ring_lock, "ring lock", NULL, MTX_DEF); mtx_init(&xs.reply_lock, "reply lock", NULL, MTX_DEF); sx_init(&xs.xenwatch_mutex, "xenwatch"); sx_init(&xs.request_mutex, "xenstore request"); mtx_init(&xs.registered_watches_lock, "watches", NULL, MTX_DEF); mtx_init(&xs.watch_events_lock, "watch events", NULL, MTX_DEF); /* Initialize the shared memory rings to talk to xenstored */ error = xs_init_comms(); if (error) return (error); error = kproc_create(xenwatch_thread, NULL, &p, RFHIGHPID, 0, "xenwatch"); if (error) return (error); xs.xenwatch_pid = p->p_pid; error = kproc_create(xs_rcv_thread, NULL, NULL, RFHIGHPID, 0, "xenstore_rcv"); xs.xs_attachcb.ich_func = xs_attach_deferred; xs.xs_attachcb.ich_arg = NULL; if (xs.initialized) { config_intrhook_establish(&xs.xs_attachcb); } else { TASK_INIT(&xs.xs_late_init, 0, xs_attach_late, NULL); } return (error); } /** * Prepare for suspension of this VM by halting XenStore access after * all transactions and individual requests have completed. */ static int xs_suspend(device_t dev) { int error; /* Suspend child Xen devices. */ error = bus_generic_suspend(dev); if (error != 0) return (error); sx_xlock(&xs.request_mutex); return (0); } /** * Resume XenStore operations after this VM is resumed. */ static int xs_resume(device_t dev __unused) { struct xs_watch *watch; char token[sizeof(watch) * 2 + 1]; xs_init_comms(); sx_xunlock(&xs.request_mutex); /* * NB: since xenstore children have not been resumed yet, there's * no need to hold any watch mutex. Having clients try to add or * remove watches at this point (before xenstore is resumed) is * clearly a violation of the resume order. */ LIST_FOREACH(watch, &xs.registered_watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } /* Resume child Xen devices.
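* Watches were re-registered just above using the same pointer-derived tokens (see xs_register_watch()), so previously installed callbacks keep firing across the suspend/resume cycle.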
*/ bus_generic_resume(dev); return (0); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t xenstore_methods[] = { /* Device interface */ DEVMETHOD(device_identify, xs_identify), DEVMETHOD(device_probe, xs_probe), DEVMETHOD(device_attach, xs_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, xs_suspend), DEVMETHOD(device_resume, xs_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD_END }; DEFINE_CLASS_0(xenstore, xenstore_driver, xenstore_methods, 0); DRIVER_MODULE(xenstore, xenpv, xenstore_driver, 0, 0); /*------------------------------- Sysctl Data --------------------------------*/ /* XXX Shouldn't the node be somewhere else? */ SYSCTL_NODE(_dev, OID_AUTO, xen, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xen"); SYSCTL_INT(_dev_xen, OID_AUTO, xsd_port, CTLFLAG_RD, &xs.evtchn, 0, ""); SYSCTL_ULONG(_dev_xen, OID_AUTO, xsd_kva, CTLFLAG_RD, (u_long *) &xen_store, 0, ""); /*-------------------------------- Public API --------------------------------*/ /*------- API comments for these methods can be found in xenstorevar.h -------*/ bool xs_initialized(void) { return (xs.initialized); } evtchn_port_t xs_evtchn(void) { return (xs.evtchn); } vm_paddr_t xs_address(void) { return (ptoa(xs.gpfn)); } int xs_directory(struct xs_transaction t, const char *dir, const char *node, u_int *num, const char ***result) { struct sbuf *path; char *strings; u_int len = 0; int error; path = xs_join(dir, node); error = xs_single(t, XS_DIRECTORY, sbuf_data(path), &len, (void **)&strings); sbuf_delete(path); if (error) return (error); *result = split(strings, len, num); return (0); } int xs_exists(struct xs_transaction t, const char *dir, const char *node) { const char **d; int error, dir_n; error = xs_directory(t, dir, node, &dir_n, &d); if (error) return (0); free(d, M_XENSTORE); return (1); } int xs_read(struct xs_transaction t, const char *dir, const char *node, u_int *len, void **result) { struct sbuf *path; void *ret; int error; path = xs_join(dir, node); error = xs_single(t, XS_READ, sbuf_data(path), len, &ret); sbuf_delete(path); if (error) return (error); *result = ret; return (0); } int xs_write(struct xs_transaction t, const char *dir, const char *node, const char *string) { struct sbuf *path; struct iovec iovec[2]; int error; path = xs_join(dir, node); iovec[0].iov_base = (void *)(uintptr_t) sbuf_data(path); iovec[0].iov_len = sbuf_len(path) + 1; iovec[1].iov_base = (void *)(uintptr_t) string; iovec[1].iov_len = strlen(string); error = xs_talkv(t, XS_WRITE, iovec, 2, NULL, NULL); sbuf_delete(path); return (error); } int xs_mkdir(struct xs_transaction t, const char *dir, const char *node) { struct sbuf *path; int ret; path = xs_join(dir, node); ret = xs_single(t, XS_MKDIR, sbuf_data(path), NULL, NULL); sbuf_delete(path); return (ret); } int xs_rm(struct xs_transaction t, const char *dir, const char *node) { struct sbuf *path; int ret; path = xs_join(dir, node); ret = xs_single(t, XS_RM, sbuf_data(path), NULL, NULL); sbuf_delete(path); return (ret); } int xs_rm_tree(struct xs_transaction xbt, const char *base, const char *node) { struct xs_transaction local_xbt; struct sbuf *root_path_sbuf; struct 
sbuf *cur_path_sbuf; char *root_path; char *cur_path; const char **dir; int error; retry: root_path_sbuf = xs_join(base, node); cur_path_sbuf = xs_join(base, node); root_path = sbuf_data(root_path_sbuf); cur_path = sbuf_data(cur_path_sbuf); dir = NULL; local_xbt.id = 0; if (xbt.id == 0) { error = xs_transaction_start(&local_xbt); if (error != 0) goto out; xbt = local_xbt; } while (1) { u_int count; u_int i; error = xs_directory(xbt, cur_path, "", &count, &dir); if (error) goto out; for (i = 0; i < count; i++) { error = xs_rm(xbt, cur_path, dir[i]); if (error == ENOTEMPTY) { struct sbuf *push_dir; /* * Descend to clear out this subdirectory. * We'll return to cur_path once push_dir * is empty. */ push_dir = xs_join(cur_path, dir[i]); sbuf_delete(cur_path_sbuf); cur_path_sbuf = push_dir; cur_path = sbuf_data(cur_path_sbuf); break; } else if (error != 0) { goto out; } } free(dir, M_XENSTORE); dir = NULL; if (i == count) { char *last_slash; /* Directory is empty. It is now safe to remove. */ error = xs_rm(xbt, cur_path, ""); if (error != 0) goto out; if (!strcmp(cur_path, root_path)) break; /* Return to processing the parent directory. */ last_slash = strrchr(cur_path, '/'); KASSERT(last_slash != NULL, ("xs_rm_tree: mangled path %s", cur_path)); *last_slash = '\0'; } } out: sbuf_delete(cur_path_sbuf); sbuf_delete(root_path_sbuf); if (dir != NULL) free(dir, M_XENSTORE); if (local_xbt.id != 0) { int terror; terror = xs_transaction_end(local_xbt, /*abort*/error != 0); xbt.id = 0; if (terror == EAGAIN && error == 0) goto retry; } return (error); } int xs_transaction_start(struct xs_transaction *t) { char *id_str; int error; error = xs_single(XST_NIL, XS_TRANSACTION_START, "", NULL, (void **)&id_str); if (error == 0) { t->id = strtoul(id_str, NULL, 0); free(id_str, M_XENSTORE); } return (error); } int xs_transaction_end(struct xs_transaction t, int abort) { char abortstr[2]; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); return (xs_single(t, XS_TRANSACTION_END, abortstr, NULL, NULL)); } int xs_scanf(struct xs_transaction t, const char *dir, const char *node, int *scancountp, const char *fmt, ...) { va_list ap; int error, ns; char *val; error = xs_read(t, dir, node, NULL, (void **) &val); if (error) return (error); va_start(ap, fmt); ns = vsscanf(val, fmt, ap); va_end(ap); free(val, M_XENSTORE); /* Distinctive errno. */ if (ns == 0) return (ERANGE); if (scancountp) *scancountp = ns; return (0); } int xs_vprintf(struct xs_transaction t, const char *dir, const char *node, const char *fmt, va_list ap) { struct sbuf *sb; int error; sb = sbuf_new_auto(); sbuf_vprintf(sb, fmt, ap); sbuf_finish(sb); error = xs_write(t, dir, node, sbuf_data(sb)); sbuf_delete(sb); return (error); } int xs_printf(struct xs_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int error; va_start(ap, fmt); error = xs_vprintf(t, dir, node, fmt, ap); va_end(ap); return (error); } int xs_gather(struct xs_transaction t, const char *dir, ...) { va_list ap; const char *name; int error; va_start(ap, dir); error = 0; while (error == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; error = xs_read(t, dir, name, NULL, (void **) &p); if (error) break; if (fmt) { if (sscanf(p, fmt, result) == 0) error = EINVAL; free(p, M_XENSTORE); } else *(char **)result = p; } va_end(ap); return (error); } int xs_register_watch(struct xs_watch *watch) { /* Pointer in ASCII is the token.
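* For example (hypothetical address), a struct xs_watch at 0xfffff80003a4b200 is registered under the token "FFFFF80003A4B200"; find_watch() reverses the encoding via strtoul(token, NULL, 16).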
*/ char token[sizeof(watch) * 2 + 1]; int error; watch->pending = 0; sprintf(token, "%lX", (long)watch); mtx_lock(&xs.registered_watches_lock); KASSERT(find_watch(token) == NULL, ("watch already registered")); LIST_INSERT_HEAD(&xs.registered_watches, watch, list); mtx_unlock(&xs.registered_watches_lock); error = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if (error == EEXIST) error = 0; if (error != 0) { mtx_lock(&xs.registered_watches_lock); LIST_REMOVE(watch, list); mtx_unlock(&xs.registered_watches_lock); } return (error); } void xs_unregister_watch(struct xs_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int error; sprintf(token, "%lX", (long)watch); mtx_lock(&xs.registered_watches_lock); if (find_watch(token) == NULL) { mtx_unlock(&xs.registered_watches_lock); return; } LIST_REMOVE(watch, list); mtx_unlock(&xs.registered_watches_lock); error = xs_unwatch(watch->node, token); if (error) log(LOG_WARNING, "XENSTORE Failed to release watch %s: %i\n", watch->node, error); /* Cancel pending watch events. */ mtx_lock(&xs.watch_events_lock); TAILQ_FOREACH_SAFE(msg, &xs.watch_events, list, tmp) { if (msg->u.watch.handle != watch) continue; TAILQ_REMOVE(&xs.watch_events, msg, list); free(msg->u.watch.vec, M_XENSTORE); free(msg, M_XENSTORE); } mtx_unlock(&xs.watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (curproc->p_pid != xs.xenwatch_pid) { sx_xlock(&xs.xenwatch_mutex); sx_xunlock(&xs.xenwatch_mutex); } } void xs_lock(void) { sx_xlock(&xs.request_mutex); return; } void xs_unlock(void) { sx_xunlock(&xs.request_mutex); return; } diff --git a/sys/i386/acpica/acpi_machdep.c b/sys/i386/acpica/acpi_machdep.c index 4aef3b58fee3..f36ee90e33a5 100644 --- a/sys/i386/acpica/acpi_machdep.c +++ b/sys/i386/acpica/acpi_machdep.c @@ -1,305 +1,305 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2001 Mitsuru IWASAKI * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include uint32_t acpi_resume_beep; SYSCTL_UINT(_debug_acpi, OID_AUTO, resume_beep, CTLFLAG_RWTUN, &acpi_resume_beep, 0, "Beep the PC speaker when resuming"); uint32_t acpi_reset_video; TUNABLE_INT("hw.acpi.reset_video", &acpi_reset_video); static int intr_model = ACPI_INTR_PIC; int acpi_machdep_init(device_t dev) { struct acpi_softc *sc; sc = device_get_softc(dev); acpi_apm_init(sc); acpi_install_wakeup_handler(sc); if (intr_model == ACPI_INTR_PIC) BUS_CONFIG_INTR(dev, AcpiGbl_FADT.SciInterrupt, INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW); else acpi_SetIntrModel(intr_model); SYSCTL_ADD_UINT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "reset_video", CTLFLAG_RW, &acpi_reset_video, 0, "Call the VESA reset BIOS vector on the resume path"); return (0); } void acpi_SetDefaultIntrModel(int model) { intr_model = model; } /* Check BIOS date. If 1998 or older, disable ACPI. */ int acpi_machdep_quirks(int *quirks) { char *va; int year; /* BIOS address 0xffff5 contains the date in the format mm/dd/yy. */ va = pmap_mapbios(0xffff0, 16); sscanf(va + 11, "%2d", &year); pmap_unmapbios(va, 16); /* * Date must be >= 1/1/1999 or we don't trust ACPI. Note that this * check must be changed by my 114th birthday. */ if (year > 90 && year < 99) *quirks = ACPI_Q_BROKEN; return (0); } /* * Map a table. First map the header to determine the table length and then map * the entire table. */ static void * map_table(vm_paddr_t pa, const char *sig) { ACPI_TABLE_HEADER *header; vm_size_t length; void *table; header = pmap_mapbios(pa, sizeof(ACPI_TABLE_HEADER)); if (strncmp(header->Signature, sig, ACPI_NAMESEG_SIZE) != 0) { pmap_unmapbios(header, sizeof(ACPI_TABLE_HEADER)); return (NULL); } length = header->Length; pmap_unmapbios(header, sizeof(ACPI_TABLE_HEADER)); table = pmap_mapbios(pa, length); if (ACPI_FAILURE(AcpiUtChecksum(table, length))) { if (bootverbose) printf("ACPI: Failed checksum for table %s\n", sig); #if (ACPI_CHECKSUM_ABORT) pmap_unmapbios(table, length); return (NULL); #endif } return (table); } /* * See if a given ACPI table is the requested table. Returns the * length of the table if it matches or zero on failure. */ static int probe_table(vm_paddr_t address, const char *sig) { ACPI_TABLE_HEADER *table; int ret; table = pmap_mapbios(address, sizeof(ACPI_TABLE_HEADER)); ret = strncmp(table->Signature, sig, ACPI_NAMESEG_SIZE) == 0; pmap_unmapbios(table, sizeof(ACPI_TABLE_HEADER)); return (ret); } /* * Try to map a table at a given physical address previously returned * by acpi_find_table(). */ void * acpi_map_table(vm_paddr_t pa, const char *sig) { return (map_table(pa, sig)); } /* Unmap a table previously mapped via acpi_map_table(). */ void acpi_unmap_table(void *table) { ACPI_TABLE_HEADER *header; header = (ACPI_TABLE_HEADER *)table; pmap_unmapbios(table, header->Length); } /* * Return the physical address of the requested table or zero if one * is not found. */ vm_paddr_t acpi_find_table(const char *sig) { ACPI_PHYSICAL_ADDRESS rsdp_ptr; ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_RSDT *rsdt; ACPI_TABLE_XSDT *xsdt; ACPI_TABLE_HEADER *table; vm_paddr_t addr; int i, count; if (resource_disabled("acpi", 0)) return (0); /* * Map in the RSDP. Since ACPI uses AcpiOsMapMemory() which in turn * calls pmap_mapbios() to find the RSDP, we assume that we can use * pmap_mapbios() to map the RSDP. 
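* (Only sizeof(ACPI_TABLE_RSDP) bytes are mapped here: the RSDP is a small fixed-size structure, unlike the variable-length tables mapped by map_table() below.)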
*/ if ((rsdp_ptr = AcpiOsGetRootPointer()) == 0) return (0); rsdp = pmap_mapbios(rsdp_ptr, sizeof(ACPI_TABLE_RSDP)); if (rsdp == NULL) { if (bootverbose) printf("ACPI: Failed to map RSDP\n"); return (0); } /* * For ACPI >= 2.0, use the XSDT if it is available. * Otherwise, use the RSDT. */ addr = 0; if (rsdp->Revision >= 2 && rsdp->XsdtPhysicalAddress != 0) { /* * AcpiOsGetRootPointer only verifies the checksum for * the version 1.0 portion of the RSDP. Version 2.0 has * an additional checksum that we verify first. */ if (AcpiUtChecksum((UINT8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH)) { if (bootverbose) printf("ACPI: RSDP failed extended checksum\n"); return (0); } xsdt = map_table(rsdp->XsdtPhysicalAddress, ACPI_SIG_XSDT); if (xsdt == NULL) { if (bootverbose) printf("ACPI: Failed to map XSDT\n"); return (0); } count = (xsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(UINT64); for (i = 0; i < count; i++) if (probe_table(xsdt->TableOffsetEntry[i], sig)) { addr = xsdt->TableOffsetEntry[i]; break; } acpi_unmap_table(xsdt); } else { rsdt = map_table(rsdp->RsdtPhysicalAddress, ACPI_SIG_RSDT); if (rsdt == NULL) { if (bootverbose) printf("ACPI: Failed to map RSDT\n"); return (0); } count = (rsdt->Header.Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(UINT32); for (i = 0; i < count; i++) if (probe_table(rsdt->TableOffsetEntry[i], sig)) { addr = rsdt->TableOffsetEntry[i]; break; } acpi_unmap_table(rsdt); } pmap_unmapbios(rsdp, sizeof(ACPI_TABLE_RSDP)); if (addr == 0) return (0); /* * Verify that we can map the full table and that its checksum is * correct, etc. */ table = map_table(addr, sig); if (table == NULL) return (0); acpi_unmap_table(table); return (addr); } /* * ACPI nexus(4) driver. */ static int nexus_acpi_probe(device_t dev) { int error; error = acpi_identify(); if (error) return (error); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_acpi_attach(device_t dev) { nexus_init_resources(); - bus_generic_probe(dev); + bus_identify_children(dev); if (BUS_ADD_CHILD(dev, 10, "acpi", 0) == NULL) panic("failed to add acpi0 device"); return (bus_generic_attach(dev)); } static device_method_t nexus_acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_acpi_probe), DEVMETHOD(device_attach, nexus_acpi_attach), { 0, 0 } }; DEFINE_CLASS_1(nexus, nexus_acpi_driver, nexus_acpi_methods, 1, nexus_driver); DRIVER_MODULE(nexus_acpi, root, nexus_acpi_driver, 0, 0); diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c index 519f9353b794..c4b3d8d6a0b7 100644 --- a/sys/isa/isa_common.c +++ b/sys/isa/isa_common.c @@ -1,1141 +1,1141 @@ /*- * SPDX-License-Identifier: BSD-2-Clause AND MIT * * Copyright (c) 1999 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Modifications for Intel architecture by Garrett A. Wollman. * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Parts of the ISA bus implementation common to all architectures. */ #include #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int isa_print_child(device_t bus, device_t dev); static MALLOC_DEFINE(M_ISADEV, "isadev", "ISA device"); static int isa_running; /* * At 'probe' time, we add all the devices which we know about to the * bus. The generic attach routine will probe and attach them if they * are alive. */ static int isa_probe(device_t dev) { device_set_desc(dev, "ISA bus"); isa_init(dev); /* Allow machdep code to initialise */ return (0); } extern device_t isa_bus_device; static int isa_attach(device_t dev) { /* * Arrange for isa_probe_children(dev) to be called later. XXX */ isa_bus_device = dev; return (0); } /* * Find a working set of memory regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_memory(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NMEM]; /* * First clear out any existing resource definitions. 
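* Then, for each requested range, walk the candidate base addresses in steps of the alignment, probing each with a transient allocation; e.g. (hypothetical numbers) a 0x4000-byte window constrained to 0xc8000-0xcffff with 0x2000 alignment is tried at 0xc8000, 0xca000, and so on until bus_alloc_resource_any() succeeds.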
*/ for (i = 0; i < ISA_NMEM; i++) { bus_delete_resource(child, SYS_RES_MEMORY, i); res[i] = NULL; } success = 1; result->ic_nmem = config->ic_nmem; for (i = 0; i < config->ic_nmem; i++) { uint32_t start, end, size, align; size = config->ic_mem[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_mem[i].ir_start = 0; result->ic_mem[i].ir_end = 0; result->ic_mem[i].ir_size = 0; result->ic_mem[i].ir_align = 0; continue; } for (start = config->ic_mem[i].ir_start, end = config->ic_mem[i].ir_end, align = config->ic_mem[i].ir_align; start + size - 1 <= end && start + size > start; start += MAX(align, 1)) { bus_set_resource(child, SYS_RES_MEMORY, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_MEMORY, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_mem[i].ir_start = start; result->ic_mem[i].ir_end = start + size - 1; result->ic_mem[i].ir_size = size; result->ic_mem[i].ir_align = align; break; } } /* * If we didn't find a place for memory range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NMEM; i++) { if (res[i]) bus_release_resource(child, SYS_RES_MEMORY, i, res[i]); } return (success); } /* * Find a working set of port regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_port(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NPORT]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NPORT; i++) { bus_delete_resource(child, SYS_RES_IOPORT, i); res[i] = NULL; } success = 1; result->ic_nport = config->ic_nport; for (i = 0; i < config->ic_nport; i++) { uint32_t start, end, size, align; size = config->ic_port[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_port[i].ir_start = 0; result->ic_port[i].ir_end = 0; result->ic_port[i].ir_size = 0; result->ic_port[i].ir_align = 0; continue; } for (start = config->ic_port[i].ir_start, end = config->ic_port[i].ir_end, align = config->ic_port[i].ir_align; start + size - 1 <= end; start += align) { bus_set_resource(child, SYS_RES_IOPORT, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_IOPORT, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_port[i].ir_start = start; result->ic_port[i].ir_end = start + size - 1; result->ic_port[i].ir_size = size; result->ic_port[i].ir_align = align; break; } } /* * If we didn't find a place for port range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NPORT; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IOPORT, i, res[i]); } return (success); } /* * Return the index of the first bit in the mask (or -1 if mask is empty). */ static int find_first_bit(uint32_t mask) { return (ffs(mask) - 1); } /* * Return the index of the next bit in the mask, or -1 if there are no more. */ static int find_next_bit(uint32_t mask, int bit) { return (find_first_bit(mask & (-2 << bit))); } /* * Find a working set of irqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * irqs was found. */ static int isa_find_irq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NIRQ]; /* * First clear out any existing resource definitions.
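* Then scan each IRQ mask bit by bit with find_first_bit() and find_next_bit(); e.g. (hypothetical mask) a PnP mask of 0x0820 offers IRQ 5 and then IRQ 11, tried in turn until one can be allocated.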
*/ for (i = 0; i < ISA_NIRQ; i++) { bus_delete_resource(child, SYS_RES_IRQ, i); res[i] = NULL; } success = 1; result->ic_nirq = config->ic_nirq; for (i = 0; i < config->ic_nirq; i++) { uint32_t mask = config->ic_irqmask[i]; int irq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_irqmask[i] = 0; continue; } for (irq = find_first_bit(mask); irq != -1; irq = find_next_bit(mask, irq)) { bus_set_resource(child, SYS_RES_IRQ, i, irq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_IRQ, &i, 0 /* !RF_ACTIVE */ ); if (res[i]) { result->ic_irqmask[i] = (1 << irq); break; } } /* * If we didn't find a place for irq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NIRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IRQ, i, res[i]); } return (success); } /* * Find a working set of drqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * drqs was found. */ static int isa_find_drq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NDRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NDRQ; i++) { bus_delete_resource(child, SYS_RES_DRQ, i); res[i] = NULL; } success = 1; result->ic_ndrq = config->ic_ndrq; for (i = 0; i < config->ic_ndrq; i++) { uint32_t mask = config->ic_drqmask[i]; int drq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_drqmask[i] = 0; continue; } for (drq = find_first_bit(mask); drq != -1; drq = find_next_bit(mask, drq)) { bus_set_resource(child, SYS_RES_DRQ, i, drq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_DRQ, &i, 0 /* !RF_ACTIVE */); if (res[i]) { result->ic_drqmask[i] = (1 << drq); break; } } /* * If we didn't find a place for drq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NDRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_DRQ, i, res[i]); } return (success); } /* * Attempt to find a working set of resources for a device. Return * non-zero if a working configuration is found. */ static int isa_assign_resources(device_t child) { struct isa_device *idev = DEVTOISA(child); struct isa_config_entry *ice; struct isa_config *cfg; const char *reason; reason = "Empty ISA id_configs"; cfg = malloc(sizeof(struct isa_config), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) return (0); TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { reason = "memory"; if (!isa_find_memory(child, &ice->ice_config, cfg)) continue; reason = "port"; if (!isa_find_port(child, &ice->ice_config, cfg)) continue; reason = "irq"; if (!isa_find_irq(child, &ice->ice_config, cfg)) continue; reason = "drq"; if (!isa_find_drq(child, &ice->ice_config, cfg)) continue; /* * A working configuration was found; enable the device * with this configuration. */ reason = "no callback"; if (idev->id_config_cb) { idev->id_config_cb(idev->id_config_arg, cfg, 1); free(cfg, M_TEMP); return (1); } } /* * Disable the device. */ bus_print_child_header(device_get_parent(child), child); printf(" can't assign resources (%s)\n", reason); if (bootverbose) isa_print_child(device_get_parent(child), child); bzero(cfg, sizeof (*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); device_disable(child); free(cfg, M_TEMP); return (0); } /* * Claim any unallocated resources to keep other devices from using * them.
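* E.g. a hinted port range that the matched driver never allocated is held here through resource_list_alloc() so that a later probe cannot claim the same addresses.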
*/ static void isa_claim_resources(device_t dev, device_t child) { struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; int rid; STAILQ_FOREACH(rle, rl, link) { if (!rle->res) { rid = rle->rid; resource_list_alloc(rl, dev, child, rle->type, &rid, 0, ~0, 1, 0); } } } /* * Called after other devices have initialised to probe for ISA devices. */ void isa_probe_children(device_t dev) { struct isa_device *idev; device_t *children, child; struct isa_config *cfg; int nchildren, i, err; /* * Create all the non-hinted children by calling drivers' * identify methods. */ - bus_generic_probe(dev); + bus_identify_children(dev); if (device_get_children(dev, &children, &nchildren)) return; /* * First disable all PnP devices so that they don't get * matched by legacy probes. */ if (bootverbose) printf("isa_probe_children: disabling PnP devices\n"); cfg = malloc(sizeof(*cfg), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) { free(children, M_TEMP); return; } for (i = 0; i < nchildren; i++) { idev = DEVTOISA(children[i]); bzero(cfg, sizeof(*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); } free(cfg, M_TEMP); /* * Next, probe all the PnP BIOS devices so they can subsume any * hints. */ for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (idev->id_order > ISA_ORDER_PNPBIOS) continue; if (!TAILQ_EMPTY(&idev->id_configs) && !isa_assign_resources(child)) continue; if (device_probe_and_attach(child) == 0) isa_claim_resources(dev, child); } free(children, M_TEMP); /* * Next, enumerate hinted devices and probe all non-PnP devices so * that they claim their resources first. */ bus_enumerate_hinted_children(dev); if (device_get_children(dev, &children, &nchildren)) return; if (bootverbose) printf("isa_probe_children: probing non-PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || !TAILQ_EMPTY(&idev->id_configs)) continue; err = device_probe_and_attach(child); if (err == 0 && idev->id_vendorid == 0 && strcmp(kern_ident, "GENERIC") == 0 && device_is_attached(child)) device_printf(child, "non-PNP ISA device will be removed from GENERIC in FreeBSD 15.\n"); } /* * Finally, assign resources to PnP devices and probe them. */ if (bootverbose) printf("isa_probe_children: probing PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || TAILQ_EMPTY(&idev->id_configs)) continue; if (isa_assign_resources(child)) { device_probe_and_attach(child); isa_claim_resources(dev, child); } } free(children, M_TEMP); isa_running = 1; } /* * Add a new child with default ivars.
*/ static device_t isa_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct isa_device *idev; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); idev = malloc(sizeof(struct isa_device), M_ISADEV, M_NOWAIT | M_ZERO); if (!idev) return (0); resource_list_init(&idev->id_resources); TAILQ_INIT(&idev->id_configs); idev->id_order = order; device_set_ivars(child, idev); return (child); } static void isa_child_deleted(device_t dev, device_t child) { free(device_get_ivars(child), M_ISADEV); } static int isa_print_all_resources(device_t dev) { struct isa_device *idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; int retval = 0; if (STAILQ_FIRST(rl) || device_get_flags(dev)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); if (idev->id_vendorid) retval += printf(" pnpid %s", pnp_eisaformat(idev->id_vendorid)); return (retval); } static int isa_print_child(device_t bus, device_t dev) { int retval = 0; retval += bus_print_child_header(bus, dev); retval += isa_print_all_resources(dev); retval += bus_print_child_footer(bus, dev); return (retval); } static void isa_probe_nomatch(device_t dev, device_t child) { if (bootverbose) { bus_print_child_header(dev, child); printf(" failed to probe"); isa_print_all_resources(child); bus_print_child_footer(dev, child); } return; } static int isa_read_ivar(device_t bus, device_t dev, int index, uintptr_t * result) { struct isa_device* idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; switch (index) { case ISA_IVAR_PORT_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORT_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORTSIZE_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_PORTSIZE_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MADDR_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MADDR_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MEMSIZE_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MEMSIZE_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_IRQ_0: rle = resource_list_find(rl, SYS_RES_IRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_IRQ_1: rle = resource_list_find(rl, SYS_RES_IRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_0: rle = resource_list_find(rl, SYS_RES_DRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_1: rle = resource_list_find(rl, SYS_RES_DRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_VENDORID: *result = idev->id_vendorid; break; case ISA_IVAR_SERIAL: 
*result = idev->id_serial; break; case ISA_IVAR_LOGICALID: *result = idev->id_logicalid; break; case ISA_IVAR_COMPATID: *result = idev->id_compatid; break; case ISA_IVAR_CONFIGATTR: *result = idev->id_config_attr; break; case ISA_IVAR_PNP_CSN: *result = idev->id_pnp_csn; break; case ISA_IVAR_PNP_LDN: *result = idev->id_pnp_ldn; break; case ISA_IVAR_PNPBIOS_HANDLE: *result = idev->id_pnpbios_handle; break; default: return (ENOENT); } return (0); } static int isa_write_ivar(device_t bus, device_t dev, int index, uintptr_t value) { struct isa_device* idev = DEVTOISA(dev); switch (index) { case ISA_IVAR_PORT_0: case ISA_IVAR_PORT_1: case ISA_IVAR_PORTSIZE_0: case ISA_IVAR_PORTSIZE_1: case ISA_IVAR_MADDR_0: case ISA_IVAR_MADDR_1: case ISA_IVAR_MEMSIZE_0: case ISA_IVAR_MEMSIZE_1: case ISA_IVAR_IRQ_0: case ISA_IVAR_IRQ_1: case ISA_IVAR_DRQ_0: case ISA_IVAR_DRQ_1: return (EINVAL); case ISA_IVAR_VENDORID: idev->id_vendorid = value; break; case ISA_IVAR_SERIAL: idev->id_serial = value; break; case ISA_IVAR_LOGICALID: idev->id_logicalid = value; break; case ISA_IVAR_COMPATID: idev->id_compatid = value; break; case ISA_IVAR_CONFIGATTR: idev->id_config_attr = value; break; default: return (ENOENT); } return (0); } /* * Free any resources which the driver missed or which we were holding for * it (see isa_probe_children). */ static void isa_child_detached(device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } static void isa_driver_added(device_t dev, driver_t *driver) { device_t *children; int nchildren, i; /* * Don't do anything if drivers are dynamically * added during autoconfiguration (cf. ymf724), * since that would end up calling identify * twice. */ if (!isa_running) return; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { device_t child = children[i]; struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; if (device_get_state(child) != DS_NOTPRESENT) continue; if (!device_is_enabled(child)) continue; /* * Free resources which we were holding on behalf of * the device.
*/ STAILQ_FOREACH(rle, &idev->id_resources, link) { if (rle->res) resource_list_release(rl, dev, child, rle->res); } if (TAILQ_FIRST(&idev->id_configs)) if (!isa_assign_resources(child)) continue; device_probe_and_attach(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } free(children, M_TEMP); } static int isa_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= ISA_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= ISA_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= ISA_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= ISA_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); return (0); } static struct resource_list * isa_get_resource_list (device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (!rl) return (NULL); return (rl); } static int isa_add_config(device_t dev, device_t child, int priority, struct isa_config *config) { struct isa_device* idev = DEVTOISA(child); struct isa_config_entry *newice, *ice; newice = malloc(sizeof *ice, M_DEVBUF, M_NOWAIT); if (!newice) return (ENOMEM); newice->ice_priority = priority; newice->ice_config = *config; TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { if (ice->ice_priority > priority) break; } if (ice) TAILQ_INSERT_BEFORE(ice, newice, ice_link); else TAILQ_INSERT_TAIL(&idev->id_configs, newice, ice_link); return (0); } static void isa_set_config_callback(device_t dev, device_t child, isa_config_cb *fn, void *arg) { struct isa_device* idev = DEVTOISA(child); idev->id_config_cb = fn; idev->id_config_arg = arg; } static int isa_pnp_probe(device_t dev, device_t child, struct isa_pnp_id *ids) { struct isa_device* idev = DEVTOISA(child); if (!idev->id_vendorid) return (ENOENT); while (ids && ids->ip_id) { /* * Really ought to support >1 compat id per device. 
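* For now a child matches when its logical id or its single compat id equals a table entry; e.g. the vga_ids table in vga_isa.c below matches devices reporting PNP0900 (0x0009d041).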
*/ if (idev->id_logicalid == ids->ip_id || idev->id_compatid == ids->ip_id) { if (ids->ip_desc) device_set_desc(child, ids->ip_desc); return (0); } ids++; } return (ENXIO); } static int isa_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) sbuf_printf(sb, "pnpid=%s", pnp_eisaformat(idev->id_vendorid)); return (0); } static int isa_child_location(device_t bus, device_t child, struct sbuf *sb) { #if 0 /* id_pnphandle isn't there yet */ struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) sbuf_printf(sbuf, "pnphandle=%d", idev->id_pnphandle); #endif return (0); } static device_method_t isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isa_probe), DEVMETHOD(device_attach, isa_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, isa_add_child), DEVMETHOD(bus_child_deleted, isa_child_deleted), DEVMETHOD(bus_print_child, isa_print_child), DEVMETHOD(bus_probe_nomatch, isa_probe_nomatch), DEVMETHOD(bus_read_ivar, isa_read_ivar), DEVMETHOD(bus_write_ivar, isa_write_ivar), DEVMETHOD(bus_child_detached, isa_child_detached), DEVMETHOD(bus_driver_added, isa_driver_added), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_resource_list,isa_get_resource_list), DEVMETHOD(bus_alloc_resource, isa_alloc_resource), DEVMETHOD(bus_release_resource, isa_release_resource), DEVMETHOD(bus_set_resource, isa_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_child_pnpinfo, isa_child_pnpinfo), DEVMETHOD(bus_child_location, isa_child_location), DEVMETHOD(bus_hinted_child, isa_hinted_child), DEVMETHOD(bus_hint_device_unit, isa_hint_device_unit), /* ISA interface */ DEVMETHOD(isa_add_config, isa_add_config), DEVMETHOD(isa_set_config_callback, isa_set_config_callback), DEVMETHOD(isa_pnp_probe, isa_pnp_probe), { 0, 0 } }; DEFINE_CLASS_0(isa, isa_driver, isa_methods, 0); /* * ISA can be attached to a PCI-ISA bridge, or other locations on some * platforms. */ DRIVER_MODULE(isa, isab, isa_driver, 0, 0); DRIVER_MODULE(isa, eisab, isa_driver, 0, 0); MODULE_VERSION(isa, 1); /* * Code common to ISA bridges. */ int isab_attach(device_t dev) { device_t child; child = device_add_child(dev, "isa", 0); if (child != NULL) return (bus_generic_attach(dev)); return (ENXIO); } char * pnp_eisaformat(uint32_t id) { uint8_t *data; static char idbuf[8]; const char hextoascii[] = "0123456789abcdef"; id = htole32(id); data = (uint8_t *)&id; idbuf[0] = '@' + ((data[0] & 0x7c) >> 2); idbuf[1] = '@' + (((data[0] & 0x3) << 3) + ((data[1] & 0xe0) >> 5)); idbuf[2] = '@' + (data[1] & 0x1f); idbuf[3] = hextoascii[(data[2] >> 4)]; idbuf[4] = hextoascii[(data[2] & 0xf)]; idbuf[5] = hextoascii[(data[3] >> 4)]; idbuf[6] = hextoascii[(data[3] & 0xf)]; idbuf[7] = 0; return(idbuf); } diff --git a/sys/isa/vga_isa.c b/sys/isa/vga_isa.c index e80ec2cee81a..f42845745873 100644 --- a/sys/isa/vga_isa.c +++ b/sys/isa/vga_isa.c @@ -1,313 +1,313 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "opt_vga.h" #include "opt_fb.h" #include "opt_syscons.h" /* should be removed in the future, XXX */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #endif #include #include #include #include #define VGA_ID 0x0009d041 /* PNP0900 */ static struct isa_pnp_id vga_ids[] = { { VGA_ID, NULL }, /* PNP0900 */ { 0, NULL }, }; static void vga_suspend(device_t dev) { vga_softc_t *sc; int nbytes; sc = device_get_softc(dev); /* Save the video state across the suspend. */ if (sc->state_buf != NULL) goto save_palette; nbytes = vidd_save_state(sc->adp, NULL, 0); if (nbytes <= 0) goto save_palette; sc->state_buf = malloc(nbytes, M_TEMP, M_NOWAIT); if (sc->state_buf == NULL) goto save_palette; if (bootverbose) device_printf(dev, "saving %d bytes of video state\n", nbytes); if (vidd_save_state(sc->adp, sc->state_buf, nbytes) != 0) { device_printf(dev, "failed to save state (nbytes=%d)\n", nbytes); free(sc->state_buf, M_TEMP); sc->state_buf = NULL; } save_palette: /* Save the color palette across the suspend. 
*/ if (sc->pal_buf != NULL) return; sc->pal_buf = malloc(256 * 3, M_TEMP, M_NOWAIT); if (sc->pal_buf == NULL) return; if (bootverbose) device_printf(dev, "saving color palette\n"); if (vidd_save_palette(sc->adp, sc->pal_buf) != 0) { device_printf(dev, "failed to save palette\n"); free(sc->pal_buf, M_TEMP); sc->pal_buf = NULL; } } static void vga_resume(device_t dev) { vga_softc_t *sc; sc = device_get_softc(dev); if (sc->state_buf != NULL) { if (vidd_load_state(sc->adp, sc->state_buf) != 0) device_printf(dev, "failed to reload state\n"); free(sc->state_buf, M_TEMP); sc->state_buf = NULL; } if (sc->pal_buf != NULL) { if (vidd_load_palette(sc->adp, sc->pal_buf) != 0) device_printf(dev, "failed to reload palette\n"); free(sc->pal_buf, M_TEMP); sc->pal_buf = NULL; } } static void isavga_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, VGA_DRIVER_NAME, 0); } static int isavga_probe(device_t dev) { video_adapter_t adp; int error; /* No pnp support */ if (isa_get_vendorid(dev)) return (ENXIO); error = vga_probe_unit(device_get_unit(dev), &adp, device_get_flags(dev)); if (error == 0) { device_set_desc(dev, "Generic ISA VGA"); bus_set_resource(dev, SYS_RES_IOPORT, 0, adp.va_io_base, adp.va_io_size); bus_set_resource(dev, SYS_RES_MEMORY, 0, adp.va_mem_base, adp.va_mem_size); isa_set_vendorid(dev, VGA_ID); isa_set_logicalid(dev, VGA_ID); #if 0 isa_set_port(dev, adp.va_io_base); isa_set_portsize(dev, adp.va_io_size); isa_set_maddr(dev, adp.va_mem_base); isa_set_msize(dev, adp.va_mem_size); #endif } return (error); } static int isavga_attach(device_t dev) { vga_softc_t *sc; int unit; int rid; int error; unit = device_get_unit(dev); sc = device_get_softc(dev); rid = 0; bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE | RF_SHAREABLE); rid = 0; bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_SHAREABLE); error = vga_attach_unit(unit, sc, device_get_flags(dev)); if (error) return (error); if (0 && bootverbose) vidd_diag(sc->adp, bootverbose); #if 0 /* experimental */ device_add_child(dev, "fb", DEVICE_UNIT_ANY); bus_generic_attach(dev); #endif return (0); } static int isavga_suspend(device_t dev) { int error; error = bus_generic_suspend(dev); if (error != 0) return (error); vga_suspend(dev); return (error); } static int isavga_resume(device_t dev) { vga_resume(dev); return (bus_generic_resume(dev)); } static device_method_t isavga_methods[] = { DEVMETHOD(device_identify, isavga_identify), DEVMETHOD(device_probe, isavga_probe), DEVMETHOD(device_attach, isavga_attach), DEVMETHOD(device_suspend, isavga_suspend), DEVMETHOD(device_resume, isavga_resume), DEVMETHOD_END }; static driver_t isavga_driver = { VGA_DRIVER_NAME, isavga_methods, sizeof(vga_softc_t), }; DRIVER_MODULE(vga, isa, isavga_driver, 0, 0); static void vgapm_identify(driver_t *driver, device_t parent) { if (device_get_flags(parent) != 0) device_add_child(parent, "vgapm", 0); } static int vgapm_probe(device_t dev) { device_set_desc(dev, "VGA suspend/resume"); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int vgapm_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int vgapm_suspend(device_t dev) { device_t vga_dev; int error; error = bus_generic_suspend(dev); if (error != 0) return (error); vga_dev = devclass_get_device(devclass_find(VGA_DRIVER_NAME), 0); if (vga_dev == NULL) return (0); vga_suspend(vga_dev); return (0); } static int vgapm_resume(device_t dev) { device_t vga_dev; vga_dev = 
devclass_get_device(devclass_find(VGA_DRIVER_NAME), 0); if (vga_dev != NULL) vga_resume(vga_dev); return (bus_generic_resume(dev)); } static device_method_t vgapm_methods[] = { DEVMETHOD(device_identify, vgapm_identify), DEVMETHOD(device_probe, vgapm_probe), DEVMETHOD(device_attach, vgapm_attach), DEVMETHOD(device_suspend, vgapm_suspend), DEVMETHOD(device_resume, vgapm_resume), { 0, 0 } }; static driver_t vgapm_driver = { "vgapm", vgapm_methods, 0 }; DRIVER_MODULE(vgapm, vgapci, vgapm_driver, 0, 0); ISA_PNP_INFO(vga_ids); diff --git a/sys/powerpc/mambo/mambo.c b/sys/powerpc/mambo/mambo.c index 99228f81145d..b00375684717 100644 --- a/sys/powerpc/mambo/mambo.c +++ b/sys/powerpc/mambo/mambo.c @@ -1,95 +1,95 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 2008 by Nathan Whitehorn. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include /* * Mambo interface */ static int mambobus_probe(device_t); static int mambobus_attach(device_t); static device_method_t mambobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mambobus_probe), DEVMETHOD(device_attach, mambobus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,bus_generic_activate_resource), DEVMETHOD_END }; static driver_t mambobus_driver = { "mambo", mambobus_methods, 0 }; DRIVER_MODULE(mambo, ofwbus, mambobus_driver, 0, 0); static int mambobus_probe(device_t dev) { const char *name = ofw_bus_get_name(dev); if (name && !strcmp(name, "mambo")) { device_set_desc(dev, "Mambo Simulator"); return (0); } return (ENXIO); } static int mambobus_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); return (bus_generic_attach(dev)); } diff --git a/sys/powerpc/powerpc/nexus.c b/sys/powerpc/powerpc/nexus.c index 2f7b2ba055ff..be94583fd41e 100644 --- a/sys/powerpc/powerpc/nexus.c +++ b/sys/powerpc/powerpc/nexus.c @@ -1,321 +1,321 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * Copyright 2001 by Thomas Moestl . * Copyright 2006 by Marius Strobl . * All rights reserved. * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/i386/nexus.c,v 1.43 2001/02/09 */ /* * This code implements a `root nexus' for Power ISA Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. 
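* (nexus_attach below seeds intr_rman with the full interrupt range and mem_rman with 0..BUS_SPACE_MAXADDR; children then allocate from these pools through the bus_generic_rman_* methods.)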
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct rman intr_rman; static struct rman mem_rman; static device_probe_t nexus_probe; static device_attach_t nexus_attach; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; static ofw_bus_map_intr_t nexus_ofw_map_intr; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rman_alloc_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), /* ofw_bus interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), DEVMETHOD_END }; DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1); EARLY_DRIVER_MODULE(nexus, root, nexus_driver, 0, 0, BUS_PASS_BUS); MODULE_VERSION(nexus, 1); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { intr_rman.rm_type = RMAN_ARRAY; intr_rman.rm_descr = "Interrupts"; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&intr_rman) != 0 || rman_init(&mem_rman) != 0 || rman_manage_region(&intr_rman, 0, ~0) != 0 || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR) != 0) panic("%s: failed to set up rmans.", __func__); /* Add ofwbus0. */ device_add_child(dev, "ofwbus", 0); /* Now, probe children. */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int nexus_setup_intr(device_t bus __unused, device_t child, struct resource *r, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error, domain; if (r == NULL) panic("%s: NULL interrupt resource!", __func__); if (cookiep != NULL) *cookiep = NULL; if ((rman_get_flags(r) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
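* The resource may already have been activated by an earlier bus_activate_resource() call, so activating it again here must be harmless.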
*/ error = rman_activate_resource(r); if (error) return (error); if (bus_get_domain(child, &domain) != 0) { if(bootverbose) device_printf(child, "no domain found\n"); domain = 0; } error = powerpc_setup_intr(device_get_nameunit(child), rman_get_start(r), filt, intr, arg, flags, cookiep, domain); return (error); } static int nexus_teardown_intr(device_t bus __unused, device_t child __unused, struct resource *r, void *ih) { if (r == NULL) return (EINVAL); return (powerpc_teardown_intr(ih)); } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { #if BYTE_ORDER == LITTLE_ENDIAN return(&bs_le_tag); #else return(&bs_be_tag); #endif } #ifdef SMP static int nexus_bind_intr(device_t bus __unused, device_t child __unused, struct resource *r, int cpu) { return (powerpc_bind_intr(rman_get_start(r), cpu)); } #endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (powerpc_config_intr(irq, trig, pol)); } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *irq) { u_int intr = MAP_IRQ(iparent, irq[0]); if (icells > 1) powerpc_fw_config_intr(intr, irq[1]); return (intr); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&intr_rman); case SYS_RES_MEMORY: return (&mem_rman); default: return (NULL); } } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); /* * If this is a memory resource, map it into the kernel. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: panic("%s:%d SYS_RES_IOPORT handling not implemented", __func__, __LINE__); /* XXX: untested map->r_bushandle = start; if ((rman_get_flags(r) & RF_LITTLEENDIAN) != 0) map->r_bustag = &bs_le_tag; else map->r_bustag = nexus_get_bus_tag(NULL, NULL); map->r_size = length; map->r_vaddr = NULL; */ break; case SYS_RES_MEMORY: map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); if ((rman_get_flags(r) & RF_LITTLEENDIAN) != 0) map->r_bustag = &bs_le_tag; else map->r_bustag = nexus_get_bus_tag(NULL, NULL); map->r_size = length; map->r_bushandle = (bus_space_handle_t)map->r_vaddr; break; } return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. 
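 * I/O port resources never received a kernel mapping on this platform
 * (see the SYS_RES_IOPORT case in nexus_map_resource() above), so there
 * is nothing to undo for them and they simply fall through.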
*/ switch (rman_get_type(r)) { case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); /* FALLTHROUGH */ case SYS_RES_IOPORT: break; default: return (EINVAL); } return (0); } diff --git a/sys/riscv/riscv/nexus.c b/sys/riscv/riscv/nexus.c index b8b4eb6604ae..b018a9969bfa 100644 --- a/sys/riscv/riscv/nexus.c +++ b/sys/riscv/riscv/nexus.c @@ -1,431 +1,431 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for RISC-V Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and I/O memory address space. 
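 *
 * Unlike the PowerPC nexus above, this one also carries the FDT interrupt
 * plumbing: nexus_ofw_map_intr() wraps each interrupt specifier in an
 * INTR_MAP_DATA_FDT record and registers it with intr_map_irq(), and the
 * setup/teardown methods defer to the machine-independent interrupt code
 * (intr_setup_irq()/intr_teardown_irq()).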
*/ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #include "ofw_bus_if.h" #endif extern struct bus_space memmap_bus; static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman mem_rman; static struct rman irq_rman; static device_probe_t nexus_fdt_probe; static device_attach_t nexus_attach; static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_activate_resource_t nexus_activate_resource; static bus_alloc_resource_t nexus_alloc_resource; static bus_deactivate_resource_t nexus_deactivate_resource; static bus_get_resource_list_t nexus_get_reslist; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_bus_tag_t nexus_get_bus_tag; static ofw_bus_map_intr_t nexus_ofw_map_intr; static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_attach), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), DEVMETHOD_END }; static driver_t nexus_fdt_driver = { "nexus", nexus_methods, 1 /* no softc */ }; EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); static int nexus_fdt_probe(device_t dev) { device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int nexus_attach(device_t dev) { mem_rman.rm_start = 0; mem_rman.rm_end = BUS_SPACE_MAXADDR; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR)) panic("nexus_attach mem_rman"); irq_rman.rm_start = 0; irq_rman.rm_end = ~0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) panic("nexus_attach irq_rman"); /* * Add direct children of nexus. Devices will be probed and attached * through ofwbus0. 
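 *
 * bus_identify_children() is the current name for what this file used to
 * call via bus_generic_probe(): it runs the DEVICE_IDENTIFY routine of
 * every driver in the bus's devclass so those drivers can add the
 * children they detect; bus_generic_attach() then probes and attaches
 * whatever is there. A minimal sketch of the pattern (the "mybus" name
 * is hypothetical and the block is compiled out):
 */

#if 0	/* illustrative sketch only */
static int
mybus_attach(device_t dev)
{
	/* Let identify routines add any detectable children first, ... */
	bus_identify_children(dev);
	/* ... then probe and attach every child that was added. */
	bus_generic_attach(dev);
	return (0);
}
#endif

/*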
*/ nexus_add_child(dev, 0, "timer", 0); nexus_add_child(dev, 1, "rcons", 0); nexus_add_child(dev, 2, "ofwbus", 0); - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_MEMORY: case SYS_RES_IOPORT: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource_list_entry *rle; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return (NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (EOPNOTSUPP); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error; if ((rman_get_flags(res) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* We depend here on rman_activate_resource() being idempotent. 
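 * (See the note on RF_ACTIVE allocations in the powerpc nexus above; the
 * same reasoning applies here before intr_setup_irq() installs the
 * handler.)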
*/ error = rman_activate_resource(res); if (error != 0) return (error); error = intr_setup_irq(child, res, filt, intr, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_teardown_irq(child, r, ih)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe_irq(child, irq, cookie, descr)); } static bus_space_tag_t nexus_get_bus_tag(device_t bus __unused, device_t child __unused) { return (&memmap_bus); } static int nexus_activate_resource(device_t bus, device_t child, struct resource *r) { int error; switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = bus_generic_rman_activate_resource(bus, child, r); break; case SYS_RES_IRQ: error = rman_activate_resource(r); if (error != 0) return (error); error = intr_activate_irq(child, r); if (error != 0) { rman_deactivate_resource(r); return (error); } break; default: error = EINVAL; break; } return (error); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_deactivate_resource(device_t bus, device_t child, struct resource *r) { int error; switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = bus_generic_rman_deactivate_resource(bus, child, r); break; case SYS_RES_IRQ: error = rman_deactivate_resource(r); if (error != 0) return (error); intr_deactivate_irq(child, r); break; default: error = EINVAL; break; } return (error); } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error; /* Resources must be active to be mapped. */ if ((rman_get_flags(r) & RF_ACTIVE) == 0) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ switch (rman_get_type(r)) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); map->r_vaddr = pmap_mapdev(start, length); map->r_bustag = &memmap_bus; map->r_size = length; /* * The handle is the virtual address. 
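 * memmap_bus performs register accesses through plain loads and stores,
 * so the KVA returned by pmap_mapdev() doubles as the bus handle.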
*/ map->r_bushandle = (bus_space_handle_t)map->r_vaddr; return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { switch (rman_get_type(r)) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: pmap_unmapdev(map->r_vaddr, map->r_size); return (0); default: return (EINVAL); } } static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { struct intr_map_data_fdt *fdt_data; size_t len; u_int irq; len = sizeof(*fdt_data) + icells * sizeof(pcell_t); fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data( INTR_MAP_DATA_FDT, len, M_WAITOK | M_ZERO); fdt_data->iparent = iparent; fdt_data->ncells = icells; memcpy(fdt_data->cells, intr, icells * sizeof(pcell_t)); irq = intr_map_irq(NULL, iparent, (struct intr_map_data *)fdt_data); return (irq); } diff --git a/sys/x86/x86/legacy.c b/sys/x86/x86/legacy.c index 23439a4b549f..f8f3517238f0 100644 --- a/sys/x86/x86/legacy.c +++ b/sys/x86/x86/legacy.c @@ -1,403 +1,403 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * This code implements a system driver for legacy systems that do not * support ACPI or when ACPI support is not present in the kernel. 
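 *
 * legacy(4) attaches directly under nexus0 and in turn hosts the buses
 * that ACPI would otherwise enumerate, most notably isa(4) and the
 * per-processor cpu(4) devices defined later in this file.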
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_LEGACYDEV, "legacydrv", "legacy system device"); struct legacy_device { int lg_pcibus; int lg_pcislot; int lg_pcifunc; }; #define DEVTOAT(dev) ((struct legacy_device *)device_get_ivars(dev)) static int legacy_probe(device_t); static int legacy_attach(device_t); static int legacy_print_child(device_t, device_t); static device_t legacy_add_child(device_t bus, u_int order, const char *name, int unit); static int legacy_read_ivar(device_t, device_t, int, uintptr_t *); static int legacy_write_ivar(device_t, device_t, int, uintptr_t); static device_method_t legacy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, legacy_probe), DEVMETHOD(device_attach, legacy_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, legacy_print_child), DEVMETHOD(bus_add_child, legacy_add_child), DEVMETHOD(bus_read_ivar, legacy_read_ivar), DEVMETHOD(bus_write_ivar, legacy_write_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t legacy_driver = { "legacy", legacy_methods, 1, /* no softc */ }; DRIVER_MODULE(legacy, nexus, legacy_driver, 0, 0); static int legacy_probe(device_t dev) { device_set_desc(dev, "legacy system"); device_quiet(dev); return (0); } /* * Grope around in the PCI config space to see if this is a chipset * that is capable of doing memory-mapped config cycles. This also * implies that it can do PCIe extended config cycles. */ static void legacy_pci_cfgregopen(device_t dev) { uint64_t pciebar; u_int16_t did, vid; if (cfgmech == CFGMECH_NONE || cfgmech == CFGMECH_PCIE) return; /* Check for supported chipsets */ vid = pci_cfgregread(0, 0, 0, 0, PCIR_VENDOR, 2); did = pci_cfgregread(0, 0, 0, 0, PCIR_DEVICE, 2); switch (vid) { case 0x8086: switch (did) { case 0x3590: case 0x3592: /* Intel 7520 or 7320 */ pciebar = pci_cfgregread(0, 0, 0, 0, 0xce, 2) << 16; pcie_cfgregopen(pciebar, 0, 0, 255); break; case 0x2580: case 0x2584: case 0x2590: /* Intel 915, 925, or 915GM */ pciebar = pci_cfgregread(0, 0, 0, 0, 0x48, 4); pcie_cfgregopen(pciebar, 0, 0, 255); break; } } if (bootverbose && cfgmech == CFGMECH_PCIE) device_printf(dev, "Enabled ECAM PCIe accesses\n"); } static int legacy_attach(device_t dev) { device_t child; legacy_pci_cfgregopen(dev); /* * Let our child drivers identify any child devices that they * can find. Once that is done attach any devices that we * found. */ - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); /* * If we didn't see ISA on a PCI bridge, add a top-level bus. 
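 * Normally an isab(4) PCI-ISA bridge creates isa0 during the
 * bus_generic_attach() call above; the devclass lookup below only adds
 * a stand-alone isa0 when no such bridge was found.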
*/ if (!devclass_get_device(devclass_find("isa"), 0)) { child = BUS_ADD_CHILD(dev, 0, "isa", 0); if (child == NULL) panic("legacy_attach isa"); device_probe_and_attach(child); } return 0; } static int legacy_print_child(device_t bus, device_t child) { struct legacy_device *atdev = DEVTOAT(child); int retval = 0; retval += bus_print_child_header(bus, child); if (atdev->lg_pcibus != -1) retval += printf(" pcibus %d", atdev->lg_pcibus); retval += printf("\n"); return (retval); } static device_t legacy_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct legacy_device *atdev; atdev = malloc(sizeof(struct legacy_device), M_LEGACYDEV, M_NOWAIT | M_ZERO); if (atdev == NULL) return(NULL); atdev->lg_pcibus = -1; atdev->lg_pcislot = -1; atdev->lg_pcifunc = -1; child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) free(atdev, M_LEGACYDEV); else /* should we free this in legacy_child_detached? */ device_set_ivars(child, atdev); return (child); } static int legacy_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct legacy_device *atdev = DEVTOAT(child); switch (which) { case LEGACY_IVAR_PCIDOMAIN: *result = 0; break; case LEGACY_IVAR_PCIBUS: *result = atdev->lg_pcibus; break; case LEGACY_IVAR_PCISLOT: *result = atdev->lg_pcislot; break; case LEGACY_IVAR_PCIFUNC: *result = atdev->lg_pcifunc; break; default: return ENOENT; } return 0; } static int legacy_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct legacy_device *atdev = DEVTOAT(child); switch (which) { case LEGACY_IVAR_PCIDOMAIN: return EINVAL; case LEGACY_IVAR_PCIBUS: atdev->lg_pcibus = value; break; case LEGACY_IVAR_PCISLOT: atdev->lg_pcislot = value; break; case LEGACY_IVAR_PCIFUNC: atdev->lg_pcifunc = value; break; default: return ENOENT; } return 0; } /* * Legacy CPU attachment when ACPI is not available. Drivers like * cpufreq(4) hang off this. 
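 * These cpu(4) devices are created by an identify routine, invoked via
 * the bus_identify_children() call in legacy_attach() above, rather than
 * discovered through firmware enumeration.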
*/ static void cpu_identify(driver_t *driver, device_t parent); static device_probe_t cpu_probe; static device_attach_t cpu_attach; static int cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static device_t cpu_add_child(device_t bus, u_int order, const char *name, int unit); static struct resource_list *cpu_get_rlist(device_t dev, device_t child); struct cpu_device { struct resource_list cd_rl; struct pcpu *cd_pcpu; }; static device_method_t cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, cpu_identify), DEVMETHOD(device_probe, cpu_probe), DEVMETHOD(device_attach, cpu_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, cpu_add_child), DEVMETHOD(bus_read_ivar, cpu_read_ivar), DEVMETHOD(bus_get_resource_list, cpu_get_rlist), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t cpu_driver = { "cpu", cpu_methods, 1, /* no softc */ }; DRIVER_MODULE(cpu, legacy, cpu_driver, 0, 0); static void cpu_identify(driver_t *driver, device_t parent) { device_t child; int i; /* * Attach a cpuX device for each CPU. We use an order of 150 * so that these devices are attached after the Host-PCI * bridges (which are added at order 100). 
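 * device_add_child_ordered() keeps the child list sorted by order, so
 * the lower-ordered Host-PCI bridges probe and attach before these CPUs.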
*/ CPU_FOREACH(i) { child = BUS_ADD_CHILD(parent, 150, "cpu", i); if (child == NULL) panic("legacy_attach cpu"); } } static int cpu_probe(device_t dev) { device_set_desc(dev, "legacy CPU"); return (BUS_PROBE_DEFAULT); } static int cpu_attach(device_t dev) { - bus_generic_probe(dev); + bus_identify_children(dev); bus_generic_attach(dev); return (0); } static device_t cpu_add_child(device_t bus, u_int order, const char *name, int unit) { struct cpu_device *cd; device_t child; struct pcpu *pc; if ((cd = malloc(sizeof(*cd), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&cd->cd_rl); pc = pcpu_find(device_get_unit(bus)); cd->cd_pcpu = pc; child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) { pc->pc_device = child; device_set_ivars(child, cd); } else free(cd, M_DEVBUF); return (child); } static struct resource_list * cpu_get_rlist(device_t dev, device_t child) { struct cpu_device *cpdev; cpdev = device_get_ivars(child); return (&cpdev->cd_rl); } static int cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct cpu_device *cpdev; switch (index) { case CPU_IVAR_PCPU: cpdev = device_get_ivars(child); *result = (uintptr_t)cpdev->cd_pcpu; break; case CPU_IVAR_NOMINAL_MHZ: if (tsc_is_invariant) { *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000); break; } /* FALLTHROUGH */ default: return (ENOENT); } return (0); } diff --git a/sys/x86/x86/nexus.c b/sys/x86/x86/nexus.c index d7bb0292157e..0fe8e80147ca 100644 --- a/sys/x86/x86/nexus.c +++ b/sys/x86/x86/nexus.c @@ -1,776 +1,776 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * This code implements a `root nexus' for Intel Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. 
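 *
 * Four rmans back those resource types here: irq_rman, drq_rman,
 * port_rman and mem_rman, all populated in nexus_init_resources().
 * From a child driver's point of view the usual way to reach them is
 * bus_alloc_resource_any(); a minimal sketch (the "mydev" names are
 * hypothetical and the block is compiled out):
 */

#if 0	/* illustrative sketch only */
static int
mydev_attach(device_t dev)
{
	struct resource *port;
	int rid = 0;

	/* The request walks up the tree; nexus carves it from port_rman. */
	port = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (port == NULL)
		return (ENXIO);

	/* ... program the device via bus_read_1()/bus_write_1() ... */

	bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
	return (0);
}
#endif

/*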
*/ #ifdef __amd64__ #define DEV_APIC #else #include "opt_apic.h" #endif #include "opt_isa.h" #include "opt_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #endif #ifdef DEV_ISA #include #include #endif #define ELF_KERN_STR ("elf"__XSTRING(__ELF_WORD_SIZE)" kernel") static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) struct rman irq_rman, drq_rman, port_rman, mem_rman; static int nexus_print_all_resources(device_t dev); static device_probe_t nexus_probe; static device_attach_t nexus_attach; static bus_add_child_t nexus_add_child; static bus_print_child_t nexus_print_child; static bus_alloc_resource_t nexus_alloc_resource; static bus_get_resource_list_t nexus_get_reslist; static bus_get_rman_t nexus_get_rman; static bus_map_resource_t nexus_map_resource; static bus_unmap_resource_t nexus_unmap_resource; #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif static bus_config_intr_t nexus_config_intr; static bus_describe_intr_t nexus_describe_intr; static bus_resume_intr_t nexus_resume_intr; static bus_setup_intr_t nexus_setup_intr; static bus_suspend_intr_t nexus_suspend_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_get_cpus_t nexus_get_cpus; #if defined(DEV_APIC) && defined(DEV_PCI) static pcib_alloc_msi_t nexus_alloc_msi; static pcib_release_msi_t nexus_release_msi; static pcib_alloc_msix_t nexus_alloc_msix; static pcib_release_msix_t nexus_release_msix; static pcib_map_msi_t nexus_map_msi; #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_rman_adjust_resource), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_get_rman, nexus_get_rman), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_map_resource, nexus_map_resource), DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_unmap_resource, nexus_unmap_resource), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_resume_intr, nexus_resume_intr), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_suspend_intr, nexus_suspend_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_get_cpus, nexus_get_cpus), /* pcib interface */ #if defined(DEV_APIC) && defined(DEV_PCI) DEVMETHOD(pcib_alloc_msi, nexus_alloc_msi), DEVMETHOD(pcib_release_msi, nexus_release_msi), DEVMETHOD(pcib_alloc_msix, nexus_alloc_msix), DEVMETHOD(pcib_release_msix, nexus_release_msix), DEVMETHOD(pcib_map_msi, nexus_map_msi), 
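	/* (MSI/MSI-X requests from pcib(4) drivers are passed up the
	 * device tree and terminate here, in the msi(9) backends below.) */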
#endif DEVMETHOD_END }; DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1); DRIVER_MODULE(nexus, root, nexus_driver, 0, 0); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_GENERIC); } void nexus_init_resources(void) { int irq; /* * XXX working notes: * * - IRQ resource creation should be moved to the PIC/APIC driver. * - DRQ resource creation should be moved to the DMAC driver. * - The above should be sorted to probe earlier than any child buses. * * - Leave I/O and memory creation here, as child probes may need them. * (especially e.g. ACPI) */ /* * IRQs are on the mainboard on old systems, but on the ISA part * of PCI->ISA bridges. There would be multiple sets of IRQs on * multi-ISA-bus systems. PCI interrupts are routed to the ISA * component, so in a way, PCI can be a partial child of an ISA bus(!). * APIC interrupts are global though. */ irq_rman.rm_start = 0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupt request lines"; irq_rman.rm_end = num_io_irqs - 1; if (rman_init(&irq_rman)) panic("nexus_init_resources irq_rman"); /* * We search for regions of existing IRQs and add those to the IRQ * resource manager. */ for (irq = 0; irq < num_io_irqs; irq++) if (intr_lookup_source(irq) != NULL) if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("nexus_init_resources irq_rman add"); /* * ISA DMA on PCI systems is implemented in the ISA part of each * PCI->ISA bridge and the channels can be duplicated if there are * multiple bridges. (e.g. laptops with docking stations) */ drq_rman.rm_start = 0; drq_rman.rm_end = 7; drq_rman.rm_type = RMAN_ARRAY; drq_rman.rm_descr = "DMA request lines"; /* XXX drq 0 not available on some machines */ if (rman_init(&drq_rman) || rman_manage_region(&drq_rman, drq_rman.rm_start, drq_rman.rm_end)) panic("nexus_init_resources drq_rman"); /* * However, I/O ports and memory truly are global at this level, * as are APIC interrupts (however many I/O APICs there turn out * to be on large systems). */ port_rman.rm_start = 0; port_rman.rm_end = 0xffff; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0, 0xffff)) panic("nexus_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = cpu_getmaxphyaddr(); mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, mem_rman.rm_end)) panic("nexus_init_resources mem_rman"); } static int nexus_attach(device_t dev) { nexus_init_resources(); - bus_generic_probe(dev); + bus_identify_children(dev); /* * Explicitly add the legacy0 device here. Other platform * types (such as ACPI) use their own nexus(4) subclass * driver to override this routine and add their own root bus.
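 * Such a subclass typically reuses this class through DEFINE_CLASS_1(),
 * overriding only the handful of methods it needs. A hypothetical
 * minimal sketch (the "nexus_foo" names are invented and the block is
 * compiled out):
 */

#if 0	/* illustrative sketch only */
static device_method_t nexus_foo_methods[] = {
	/* Override attach; every other method is inherited from nexus. */
	DEVMETHOD(device_attach, nexus_foo_attach),
	DEVMETHOD_END
};
DEFINE_CLASS_1(nexus, nexus_foo_driver, nexus_foo_methods, 1,
    nexus_driver);
#endif

/*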
*/ if (BUS_ADD_CHILD(dev, 10, "legacy", 0) == NULL) panic("legacy: could not attach"); bus_generic_attach(dev); return (0); } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); return (retval); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf("\n"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } static struct rman * nexus_get_rman(device_t bus, int type, u_int flags) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_DRQ: return (&drq_rman); case SYS_RES_IOPORT: return (&port_rman); case SYS_RES_MEMORY: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource_list_entry *rle; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return (NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); start = rle->start; end = rle->end; count = rle->count; } return (bus_generic_rman_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int nexus_map_resource(device_t bus, device_t child, struct resource *r, struct resource_map_request *argsp, struct resource_map *map) { struct resource_map_request args; rman_res_t length, start; int error, type; /* Resources must be active to be mapped. */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (ENXIO); /* Mappings are only supported on I/O and memory resources. */ type = rman_get_type(r); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: break; default: return (EINVAL); } resource_init_map_request(&args); error = resource_validate_map_request(r, argsp, &args, &start, &length); if (error) return (error); /* * If this is a memory resource, map it into the kernel. */ switch (type) { case SYS_RES_IOPORT: map->r_bushandle = start; map->r_bustag = X86_BUS_SPACE_IO; map->r_size = length; map->r_vaddr = NULL; break; case SYS_RES_MEMORY: map->r_vaddr = pmap_mapdev_attr(start, length, args.memattr); map->r_bustag = X86_BUS_SPACE_MEM; map->r_size = length; /* * The handle is the virtual address. 
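 * For SYS_RES_IOPORT (handled above) no mapping exists at all: the tag
 * X86_BUS_SPACE_IO selects in/out instructions and the handle is simply
 * the port base.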
*/ map->r_bushandle = (bus_space_handle_t)map->r_vaddr; break; } return (0); } static int nexus_unmap_resource(device_t bus, device_t child, struct resource *r, struct resource_map *map) { /* * If this is a memory resource, unmap it. */ switch (rman_get_type(r)) { case SYS_RES_MEMORY: pmap_unmapdev(map->r_vaddr, map->r_size); /* FALLTHROUGH */ case SYS_RES_IOPORT: break; default: return (EINVAL); } return (0); } /* * Currently this uses the really grody interface from kern/kern_intr.c * (which really doesn't belong in kern/anything.c). Eventually, all of * the code in kern_intr.c and machdep_intr.c should get moved here, since * this is going to be the official interface. */ static int nexus_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t filter, void (*ihand)(void *), void *arg, void **cookiep) { int error, domain; struct intsrc *isrc; /* somebody tried to setup an irq that failed to allocate! */ if (irq == NULL) panic("nexus_setup_intr: NULL irq resource!"); *cookiep = NULL; if ((rman_get_flags(irq) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* * We depend here on rman_activate_resource() being idempotent. */ error = rman_activate_resource(irq); if (error != 0) return (error); if (bus_get_domain(child, &domain) != 0) domain = 0; isrc = intr_lookup_source(rman_get_start(irq)); if (isrc == NULL) return (EINVAL); error = intr_add_handler(isrc, device_get_nameunit(child), filter, ihand, arg, flags, cookiep, domain); if (error == 0) rman_set_irq_cookie(irq, *cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { int error; error = intr_remove_handler(ih); if (error == 0) rman_set_irq_cookie(r, NULL); return (error); } static int nexus_suspend_intr(device_t dev, device_t child, struct resource *irq) { return (intr_event_suspend_handler(rman_get_irq_cookie(irq))); } static int nexus_resume_intr(device_t dev, device_t child, struct resource *irq) { return (intr_event_resume_handler(rman_get_irq_cookie(irq))); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { struct intsrc *isrc; isrc = intr_lookup_source(rman_get_start(irq)); if (isrc == NULL) return (EINVAL); return (intr_event_bind(isrc->is_event, cpu)); } #endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(irq); if (isrc == NULL) return (EINVAL); return (intr_config_intr(isrc, trig, pol)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { struct intsrc *isrc; isrc = intr_lookup_source(rman_get_start(irq)); if (isrc == NULL) return (EINVAL); return (intr_describe(isrc, cookie, descr)); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { #ifdef SMP case INTR_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = intr_cpus; return (0); #endif default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* Called from the MSI code to add new IRQs to the IRQ rman. 
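 * MSI and MSI-X vectors receive IRQ numbers above the I/O APIC range,
 * so they are appended to irq_rman lazily as the MSI code creates them
 * instead of being reserved up front in nexus_init_resources().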
*/ void nexus_add_irq(u_long irq) { if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("%s: failed", __func__); } #if defined(DEV_APIC) && defined(DEV_PCI) static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq) { return (msix_alloc(dev, irq)); } static int nexus_release_msix(device_t pcib, device_t dev, int irq) { return (msix_release(irq)); } static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (msi_alloc(dev, count, maxcount, irqs)); } static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (msi_release(irqs, count)); } static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (msi_map(irq, addr, data)); } #endif /* DEV_APIC && DEV_PCI */ /* Placeholder for system RAM. */ static void ram_identify(driver_t *driver, device_t parent) { if (resource_disabled("ram", 0)) return; if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL) panic("ram_identify"); } static int ram_probe(device_t dev) { device_quiet(dev); device_set_desc(dev, "System RAM"); return (0); } static int ram_attach(device_t dev) { struct bios_smap *smapbase, *smap, *smapend; struct resource *res; rman_res_t length; vm_paddr_t *p; caddr_t kmdp; uint32_t smapsize; int error, rid; /* Retrieve the system memory map from the loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type(ELF_KERN_STR); smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); if (smapbase != NULL) { smapsize = *((u_int32_t *)smapbase - 1); smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); rid = 0; for (smap = smapbase; smap < smapend; smap++) { if (smap->type != SMAP_TYPE_MEMORY || smap->length == 0) continue; if (smap->base > mem_rman.rm_end) continue; length = smap->base + smap->length > mem_rman.rm_end ? mem_rman.rm_end - smap->base : smap->length; error = bus_set_resource(dev, SYS_RES_MEMORY, rid, smap->base, length); if (error) panic( "ram_attach: resource %d failed set with %d", rid, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", rid); rid++; } return (0); } /* * If the system map is not available, fall back to using * dump_avail[]. We use the dump_avail[] array rather than * phys_avail[] for the memory map as phys_avail[] contains * holes for kernel memory, page 0, the message buffer, and * the dcons buffer. We test the end address in the loop * instead of the start since the start address for the first * segment is 0. */ for (rid = 0, p = dump_avail; p[1] != 0; rid++, p += 2) { if (p[0] > mem_rman.rm_end) break; length = (p[1] > mem_rman.rm_end ? mem_rman.rm_end : p[1]) - p[0]; error = bus_set_resource(dev, SYS_RES_MEMORY, rid, p[0], length); if (error) panic("ram_attach: resource %d failed set with %d", rid, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", rid); } return (0); } static device_method_t ram_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ram_identify), DEVMETHOD(device_probe, ram_probe), DEVMETHOD(device_attach, ram_attach), DEVMETHOD_END }; static driver_t ram_driver = { "ram", ram_methods, 1, /* no softc */ }; DRIVER_MODULE(ram, nexus, ram_driver, 0, 0); #ifdef DEV_ISA /* * Placeholder which claims PnP 'devices' which describe system * resources. 
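 * Claiming PNP0C01/PNP0C02 keeps these resource-description nodes from
 * being reported as unknown ISA devices; the attach routine deliberately
 * does nothing with them.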
*/ static struct isa_pnp_id sysresource_ids[] = { { 0x010cd041 /* PNP0c01 */, "System Memory" }, { 0x020cd041 /* PNP0c02 */, "System Resource" }, { 0 } }; static int sysresource_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, sysresource_ids)) <= 0) { device_quiet(dev); } return (result); } static int sysresource_attach(device_t dev) { return (0); } static device_method_t sysresource_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sysresource_probe), DEVMETHOD(device_attach, sysresource_attach), DEVMETHOD_END }; static driver_t sysresource_driver = { "sysresource", sysresource_methods, 1, /* no softc */ }; DRIVER_MODULE(sysresource, isa, sysresource_driver, 0, 0); ISA_PNP_INFO(sysresource_ids); #endif /* DEV_ISA */