Index: stable/12/sys/arm64/arm64/efirt_machdep.c
===================================================================
--- stable/12/sys/arm64/arm64/efirt_machdep.c	(revision 362239)
+++ stable/12/sys/arm64/arm64/efirt_machdep.c	(revision 362240)
@@ -1,289 +1,283 @@
/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * Copyright (c) 2017 Andrew Turner
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static vm_object_t obj_1t1_pt;
static vm_page_t efi_l0_page;
static pd_entry_t *efi_l0;
static vm_pindex_t efi_1t1_idx;

void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->wire_count = 0;
		vm_wire_sub(obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_l0 = NULL;
	efi_l0_page = NULL;
}

static vm_page_t
efi_1t1_page(void)
{

	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}

static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = mphys | L0_TABLE;
	} else {
		mphys = *l0 & ~ATTR_MASK;
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = mphys | L1_TABLE;
	} else {
		mphys = *l1 & ~ATTR_MASK;
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = mphys | L2_TABLE;
	} else {
		mphys = *l2 & ~ATTR_MASK;
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}

/*
 * Map a physical address from EFI runtime space into KVA space. Returns 0 to
 * indicate a failed mapping so that the caller may handle error.
 */
vm_offset_t
efi_phys_to_kva(vm_paddr_t paddr)
{

	if (!PHYS_IN_DMAP(paddr))
		return (0);
	return (PHYS_TO_DMAP(paddr));
}

/*
 * Create the 1:1 virtual to physical map for EFI
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3, l3_attr;
	vm_offset_t va;
	uint64_t idx;
	int i, mode;

	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_1t1_idx = 0;
	efi_l0_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	bzero(efi_l0, L0_ENTRIES * sizeof(*efi_l0));

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not in mappable for RT:"
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
-		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
+		else
			mode = VM_MEMATTR_DEVICE;
-		else {
-			if (bootverbose)
-				printf("EFI Runtime entry %d mapping "
-				    "attributes unsupported\n", i);
-			mode = VM_MEMATTR_UNCACHEABLE;
-		}

		printf("MAP %lx mode %x pages %lu\n", p->md_phys, mode,
		    p->md_pages);

		l3_attr = ATTR_DEFAULT | ATTR_IDX(mode) | ATTR_AP(ATTR_AP_RW) |
		    L3_PAGE;
-		if (mode == VM_MEMATTR_DEVICE)
+		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
			l3_attr |= ATTR_UXN | ATTR_PXN;

		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | l3_attr;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);

fail:
	efi_destroy_1t1_map();
	return (false);
}

int
efi_arch_enter(void)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb \n"
	    "dsb ishst \n"
	    "tlbi vmalle1is \n"
	    "dsb ish \n"
	    "isb \n"
	    : : "r"(VM_PAGE_TO_PHYS(efi_l0_page)));

	return (0);
}

void
efi_arch_leave(void)
{
	struct thread *td;

	/*
	 * Restore the pcpu pointer. Some UEFI implementations trash it and
	 * we don't store it before calling into them. To fix this we need
	 * to restore it after returning to the kernel context. As reading
	 * curthread will access x18 we need to restore it before loading
	 * the thread pointer.
	 */
	__asm __volatile(
	    "mrs x18, tpidr_el1 \n"
	);
	td = curthread;
	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb \n"
	    "dsb ishst \n"
	    "tlbi vmalle1is \n"
	    "dsb ish \n"
	    "isb \n"
	    : : "r"(td->td_proc->p_md.md_l0addr));
}

int
efi_rt_arch_call(struct efirt_callinfo *ec)
{

	panic("not implemented");
}
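
[Note on the efirt_machdep.c hunks] The change collapses the EFI memory-attribute selection so that any runtime descriptor without a WB/WT/WC hint falls back to device memory, and it marks both device mappings and ranges the firmware flags as execute-protected (EFI_MD_ATTR_XP) with the UXN/PXN execute-never bits. The fragment below is a minimal standalone sketch of that decision chain, not kernel code: the constant values and the pick_memattr()/main() names are illustrative stand-ins for the definitions in sys/efi.h and the arm64 pmap headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in values so the sketch compiles on its own (real ones live in sys/efi.h). */
#define EFI_MD_ATTR_UC	0x0001	/* uncacheable */
#define EFI_MD_ATTR_WC	0x0002	/* write-combining */
#define EFI_MD_ATTR_WT	0x0004	/* write-through */
#define EFI_MD_ATTR_WB	0x0008	/* write-back */
#define EFI_MD_ATTR_XP	0x4000	/* execute-protected */

enum memattr {
	MEMATTR_WRITE_BACK,
	MEMATTR_WRITE_THROUGH,
	MEMATTR_WRITE_COMBINING,
	MEMATTR_DEVICE
};

/* Pick a mapping attribute the same way the patched loop does. */
static enum memattr
pick_memattr(uint64_t md_attr, bool *exec_never)
{
	enum memattr mode;

	if (md_attr & EFI_MD_ATTR_WB)
		mode = MEMATTR_WRITE_BACK;
	else if (md_attr & EFI_MD_ATTR_WT)
		mode = MEMATTR_WRITE_THROUGH;
	else if (md_attr & EFI_MD_ATTR_WC)
		mode = MEMATTR_WRITE_COMBINING;
	else
		mode = MEMATTR_DEVICE;	/* no cacheable hint: treat as device memory */

	/* Device memory and XP ranges are mapped non-executable (UXN | PXN). */
	*exec_never = (mode == MEMATTR_DEVICE) || (md_attr & EFI_MD_ATTR_XP) != 0;
	return (mode);
}

int
main(void)
{
	bool xn;
	enum memattr m = pick_memattr(EFI_MD_ATTR_WB | EFI_MD_ATTR_XP, &xn);

	printf("mode %d, execute-never %d\n", m, xn);
	return (0);
}

Falling back to device memory for unrecognized attributes is the conservative choice for runtime-services regions on arm64: it avoids speculative or cached accesses to ranges whose behaviour the firmware did not describe.
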
Index: stable/12/sys/dev/ahci/ahci_generic.c
===================================================================
--- stable/12/sys/dev/ahci/ahci_generic.c	(revision 362239)
+++ stable/12/sys/dev/ahci/ahci_generic.c	(revision 362240)
@@ -1,196 +1,203 @@
/*-
 * Copyright (c) 2009-2012 Alexander Motin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_platform.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef DEV_ACPI
#include
#include
#include
#include
#endif

#ifdef FDT
#include
#include

static struct ofw_compat_data compat_data[] = {
	{"generic-ahci",		1},
	{"snps,dwc-ahci",		1},
	{"marvell,armada-3700-ahci",	1},
	{NULL,				0}
};

static int
ahci_fdt_probe(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	phandle_t node;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc_copy(dev, "AHCI SATA controller");
	node = ofw_bus_get_node(dev);
	ctlr->dma_coherent = OF_hasprop(node, "dma-coherent");
	return (BUS_PROBE_DEFAULT);
}
#endif

#ifdef DEV_ACPI
static int
ahci_acpi_probe(device_t dev)
{
+	struct ahci_controller *ctlr = device_get_softc(dev);
	ACPI_HANDLE h;

	if ((h = acpi_get_handle(dev)) == NULL)
		return (ENXIO);

	if (pci_get_class(dev) == PCIC_STORAGE &&
	    pci_get_subclass(dev) == PCIS_STORAGE_SATA &&
	    pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) {
		device_set_desc_copy(dev, "AHCI SATA controller");
+		if (ACPI_FAILURE(acpi_GetInteger(h, "_CCA",
+		    &ctlr->dma_coherent)))
+			ctlr->dma_coherent = 0;
+		if (bootverbose)
+			device_printf(dev, "Bus is%s cache-coherent\n",
+			    ctlr->dma_coherent ? "" : " not");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
#endif

static int
ahci_gen_ctlr_reset(device_t dev)
{

	return ahci_ctlr_reset(dev);
}

static int
ahci_gen_attach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int error;

	ctlr->r_rid = 0;
	ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid,
	    RF_ACTIVE);
	if (ctlr->r_mem == NULL)
		return (ENXIO);

	/* Setup controller defaults. */
	ctlr->numirqs = 1;

	/* Reset controller */
	if ((error = ahci_gen_ctlr_reset(dev)) == 0)
		error = ahci_attach(dev);

	if (error != 0) {
		if (ctlr->r_mem != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    ctlr->r_rid, ctlr->r_mem);
	}
	return error;
}

static int
ahci_gen_detach(device_t dev)
{

	ahci_detach(dev);
	return (0);
}

#ifdef FDT
static devclass_t ahci_gen_fdt_devclass;
static device_method_t ahci_fdt_methods[] = {
	DEVMETHOD(device_probe,		ahci_fdt_probe),
	DEVMETHOD(device_attach,	ahci_gen_attach),
	DEVMETHOD(device_detach,	ahci_gen_detach),
	DEVMETHOD(bus_print_child,	ahci_print_child),
	DEVMETHOD(bus_alloc_resource,	ahci_alloc_resource),
	DEVMETHOD(bus_release_resource,	ahci_release_resource),
	DEVMETHOD(bus_setup_intr,	ahci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	ahci_teardown_intr),
	DEVMETHOD(bus_child_location_str, ahci_child_location_str),
	DEVMETHOD(bus_get_dma_tag,	ahci_get_dma_tag),
	DEVMETHOD_END
};
static driver_t ahci_fdt_driver = {
	"ahci",
	ahci_fdt_methods,
	sizeof(struct ahci_controller)
};
DRIVER_MODULE(ahci_fdt, simplebus, ahci_fdt_driver, ahci_gen_fdt_devclass,
    NULL, NULL);
#endif

#ifdef DEV_ACPI
static devclass_t ahci_gen_acpi_devclass;
static device_method_t ahci_acpi_methods[] = {
	DEVMETHOD(device_probe,		ahci_acpi_probe),
	DEVMETHOD(device_attach,	ahci_gen_attach),
	DEVMETHOD(device_detach,	ahci_gen_detach),
	DEVMETHOD(bus_print_child,	ahci_print_child),
	DEVMETHOD(bus_alloc_resource,	ahci_alloc_resource),
	DEVMETHOD(bus_release_resource,	ahci_release_resource),
	DEVMETHOD(bus_setup_intr,	ahci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	ahci_teardown_intr),
	DEVMETHOD(bus_child_location_str, ahci_child_location_str),
	DEVMETHOD(bus_get_dma_tag,	ahci_get_dma_tag),
	DEVMETHOD_END
};
static driver_t ahci_acpi_driver = {
	"ahci",
	ahci_acpi_methods,
	sizeof(struct ahci_controller)
};
DRIVER_MODULE(ahci_acpi, acpi, ahci_acpi_driver, ahci_gen_acpi_devclass,
    NULL, NULL);
#endif
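
[Note on the ahci_generic.c hunks] The ahci_acpi_probe() addition evaluates the ACPI _CCA object to learn whether the controller's DMA is cache-coherent, defaulting to non-coherent when _CCA is absent, the same way the FDT path reads the "dma-coherent" property. Below is a hedged kernel-style sketch of that pattern as a reusable helper; the helper name acpi_bus_is_coherent() is hypothetical and the fragment assumes the usual newbus and <dev/acpica/acpivar.h> declarations, but acpi_get_handle(), acpi_GetInteger(), ACPI_FAILURE() and device_printf() are the same interfaces the diff itself uses.

/*
 * Hypothetical helper, not part of the patch: query _CCA on a device's
 * ACPI handle and fall back to "not coherent" when the object is missing.
 */
static int
acpi_bus_is_coherent(device_t dev)
{
	ACPI_HANDLE h;
	int coherent;

	if ((h = acpi_get_handle(dev)) == NULL)
		return (0);
	if (ACPI_FAILURE(acpi_GetInteger(h, "_CCA", &coherent)))
		coherent = 0;		/* no _CCA: assume not coherent */
	if (bootverbose)
		device_printf(dev, "Bus is%s cache-coherent\n",
		    coherent ? "" : " not");
	return (coherent);
}

The resulting flag is what a driver later turns into BUS_DMA_COHERENT when it creates its DMA tag, as pci_host_generic_core_attach() below does with sc->coherent.
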
Index: stable/12/sys/dev/pci/pci_host_generic.c
===================================================================
--- stable/12/sys/dev/pci/pci_host_generic.c	(revision 362239)
+++ stable/12/sys/dev/pci/pci_host_generic.c	(revision 362240)
@@ -1,471 +1,472 @@
/*-
 * Copyright (c) 2015 Ruslan Bukin
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "pcib_if.h"

/* Assembling ECAM Configuration Address */
#define	PCIE_BUS_SHIFT		20
#define	PCIE_SLOT_SHIFT		15
#define	PCIE_FUNC_SHIFT		12
#define	PCIE_BUS_MASK		0xFF
#define	PCIE_SLOT_MASK		0x1F
#define	PCIE_FUNC_MASK		0x07
#define	PCIE_REG_MASK		0xFFF

#define	PCIE_ADDR_OFFSET(bus, slot, func, reg)			\
	((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT)	|	\
	(((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT)	|	\
	(((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT)	|	\
	((reg) & PCIE_REG_MASK))

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

int
pci_host_generic_core_attach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	return (0);
}

static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		data = bus_space_read_1(t, h, offset);
		break;
	case 2:
		data = le16toh(bus_space_read_2(t, h, offset));
		break;
	case 4:
		data = le32toh(bus_space_read_4(t, h, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		bus_space_write_1(t, h, offset, val);
		break;
	case 2:
		bus_space_write_2(t, h, offset, htole16(val));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus acc. to standard */
}

static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		*result = sc->bus_start;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

static struct rman *
generic_pcie_rman(struct generic_pcie_core_softc *sc, int type)
{

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

int
pci_host_generic_core_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
	struct generic_pcie_core_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS) {
		return (pci_domain_release_bus(sc->ecam, child, rid, res));
	}
#endif

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL) {
		KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
		rman_release_resource(res);
	}

	return (bus_generic_release_resource(dev, child, type, rid, res));
}

struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;
	struct rman *rm;

	sc = device_get_softc(dev);

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS) {
		return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags));
	}
#endif

	rm = generic_pcie_rman(sc, type);
	if (rm == NULL)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	if (bootverbose) {
		device_printf(dev,
		    "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
		    start, end, count);
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		goto fail;

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			goto fail;
		}

	return (res);

fail:
	device_printf(dev, "%s FAIL: type=%d, rid=%d, "
	    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
	    __func__, type, *rid, start, end, count, flags);

	return (NULL);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	struct generic_pcie_core_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int found;
	int res;
	int i;

	sc = device_get_softc(dev);

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
+	case SYS_RES_MEMORY:
		found = 0;
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;
-			if ((rid > pci_base) && (rid < (pci_base + size))) {
+			if ((rman_get_start(r) >= pci_base) && (rman_get_start(r) < (pci_base + size))) {
				found = 1;
				break;
			}
		}
		if (found) {
-			rman_set_start(r, rman_get_start(r) + phys_base);
-			rman_set_end(r, rman_get_end(r) + phys_base);
+			rman_set_start(r, rman_get_start(r) - pci_base + phys_base);
+			rman_set_end(r, rman_get_end(r) - pci_base + phys_base);
			res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev),
			    child, type, rid, r);
		} else {
			device_printf(dev,
-			    "Failed to activate IOPORT resource\n");
+			    "Failed to activate %s resource\n",
+			    type == SYS_RES_IOPORT ? "IOPORT" : "MEMORY");
			res = 0;
		}
		break;
-	case SYS_RES_MEMORY:
	case SYS_RES_IRQ:
		res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	int res;

	if ((res = rman_deactivate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
	case SYS_RES_IRQ:
		res = BUS_DEACTIVATE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));
	return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver, generic_pcie_methods,
    sizeof(struct generic_pcie_core_softc));
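
[Note on the pci_host_generic.c hunks] The generic_pcie_activate_resource() fix is the core of this merge: the old code compared the resource ID against the bridge window (which is meaningless) and simply added phys_base to the resource bounds; the new code looks up the window that actually contains rman_get_start(r) and rebases the resource by (phys_base - pci_base), and SYS_RES_MEMORY now goes through the same translation as SYS_RES_IOPORT. The sketch below is a standalone illustration of that window lookup; the struct layout, function name, and the sample addresses in main() are illustrative, not the driver's own definitions.

#include <stdint.h>
#include <stdio.h>

struct pcie_range {
	uint64_t pci_base;	/* start of window in PCI bus address space */
	uint64_t phys_base;	/* corresponding CPU physical address */
	uint64_t size;		/* window length in bytes */
};

/* Return the CPU physical address for 'pci_addr', or 0 if no window matches. */
static uint64_t
pci_to_phys(const struct pcie_range *ranges, int nranges, uint64_t pci_addr)
{
	for (int i = 0; i < nranges; i++) {
		if (pci_addr >= ranges[i].pci_base &&
		    pci_addr < ranges[i].pci_base + ranges[i].size)
			return (pci_addr - ranges[i].pci_base +
			    ranges[i].phys_base);
	}
	return (0);
}

int
main(void)
{
	/* Hypothetical window: PCI 0x10000000 maps to CPU 0x4010000000. */
	struct pcie_range win = { 0x10000000, 0x4010000000, 0x2fff0000 };

	printf("0x%jx\n", (uintmax_t)pci_to_phys(&win, 1, 0x10001000));
	return (0);
}

This matters on hosts where the bridge windows are not identity-mapped: adding phys_base to an already-CPU-relative value, as the old code did, produced addresses outside the window whenever pci_base was non-zero.
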
Index: stable/12/sys/dev/pci/pci_host_generic_acpi.c
===================================================================
--- stable/12/sys/dev/pci/pci_host_generic_acpi.c	(revision 362239)
+++ stable/12/sys/dev/pci/pci_host_generic_acpi.c	(revision 362240)
@@ -1,486 +1,480 @@
/*-
 * Copyright (C) 2018 Cavium Inc.
 * Copyright (c) 2015 Ruslan Bukin
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "pcib_if.h"
#include "acpi_bus_if.h"

/* Assembling ECAM Configuration Address */
#define	PCIE_BUS_SHIFT		20
#define	PCIE_SLOT_SHIFT		15
#define	PCIE_FUNC_SHIFT		12
#define	PCIE_BUS_MASK		0xFF
#define	PCIE_SLOT_MASK		0x1F
#define	PCIE_FUNC_MASK		0x07
#define	PCIE_REG_MASK		0xFFF

#define	PCIE_ADDR_OFFSET(bus, slot, func, reg)			\
	((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT)	|	\
	(((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT)	|	\
	(((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT)	|	\
	((reg) & PCIE_REG_MASK))

#define	PCI_IO_WINDOW_OFFSET	0x1000

#define	SPACE_CODE_SHIFT	24
#define	SPACE_CODE_MASK		0x3
#define	SPACE_CODE_IO_SPACE	0x1
#define	PROPS_CELL_SIZE		1
#define	PCI_ADDR_CELL_SIZE	2

struct generic_pcie_acpi_softc {
	struct generic_pcie_core_softc base;
	ACPI_BUFFER		ap_prt;		/* interrupt routing table */
};

/* Forward prototypes */

static int generic_pcie_acpi_probe(device_t dev);
static ACPI_STATUS pci_host_generic_acpi_parse_resource(ACPI_RESOURCE *,
    void *);
static int generic_pcie_acpi_read_ivar(device_t, device_t, int, uintptr_t *);

/*
 * generic_pcie_acpi_probe - look for root bridge flag
 */
static int
generic_pcie_acpi_probe(device_t dev)
{
	ACPI_DEVICE_INFO *devinfo;
	ACPI_HANDLE h;
	int root;

	if (acpi_disabled("pcib") || (h = acpi_get_handle(dev)) == NULL ||
	    ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
		return (ENXIO);
	root = (devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0;
	AcpiOsFree(devinfo);
	if (!root)
		return (ENXIO);

	device_set_desc(dev, "Generic PCI host controller");
	return (BUS_PROBE_GENERIC);
}

/*
 * pci_host_generic_acpi_parse_resource - parse PCI memory, IO and bus spaces
 * 'produced' by this bridge
 */
static ACPI_STATUS
pci_host_generic_acpi_parse_resource(ACPI_RESOURCE *res, void *arg)
{
	device_t dev = (device_t)arg;
	struct generic_pcie_acpi_softc *sc;
	struct rman *rm;
	rman_res_t min, max, off;
	int r;

	rm = NULL;
	sc = device_get_softc(dev);
	r = sc->base.nranges;
	switch (res->Type) {
	case ACPI_RESOURCE_TYPE_ADDRESS16:
		min = res->Data.Address16.Address.Minimum;
		max = res->Data.Address16.Address.Maximum;
		break;
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		min = res->Data.Address32.Address.Minimum;
		max = res->Data.Address32.Address.Maximum;
		off = res->Data.Address32.Address.TranslationOffset;
		break;
	case ACPI_RESOURCE_TYPE_ADDRESS64:
-		if (res->Data.Address.ResourceType != ACPI_MEMORY_RANGE)
-			break;
		min = res->Data.Address64.Address.Minimum;
		max = res->Data.Address64.Address.Maximum;
		off = res->Data.Address64.Address.TranslationOffset;
		break;
	default:
		return (AE_OK);
	}

	/* Save detected ranges */
	if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE ||
	    res->Data.Address.ResourceType == ACPI_IO_RANGE) {
		sc->base.ranges[r].pci_base = min;
		sc->base.ranges[r].phys_base = min + off;
		sc->base.ranges[r].size = max - min + 1;
		if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE)
			sc->base.ranges[r].flags |= FLAG_MEM;
		else if (res->Data.Address.ResourceType == ACPI_IO_RANGE)
			sc->base.ranges[r].flags |= FLAG_IO;
		sc->base.nranges++;
	} else if (res->Data.Address.ResourceType == ACPI_BUS_NUMBER_RANGE) {
		sc->base.bus_start = min;
		sc->base.bus_end = max;
	}
	return (AE_OK);
}

static int
pci_host_acpi_get_ecam_resource(device_t dev)
{
	struct generic_pcie_acpi_softc *sc;
	struct acpi_device *ad;
	struct resource_list *rl;
	ACPI_TABLE_HEADER *hdr;
	ACPI_MCFG_ALLOCATION *mcfg_entry, *mcfg_end;
	ACPI_HANDLE handle;
	ACPI_STATUS status;
	rman_res_t base, start, end;
	int found, val;

	sc = device_get_softc(dev);
	handle = acpi_get_handle(dev);

	/* Try MCFG first */
	status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
	if (ACPI_SUCCESS(status)) {
		found = FALSE;
		mcfg_end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
		mcfg_entry = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
		while (mcfg_entry < mcfg_end && !found) {
			if (mcfg_entry->PciSegment == sc->base.ecam &&
			    mcfg_entry->StartBusNumber <= sc->base.bus_start &&
			    mcfg_entry->EndBusNumber >= sc->base.bus_start)
				found = TRUE;
			else
				mcfg_entry++;
		}
		if (found) {
-			if (mcfg_entry->EndBusNumber < sc->base.bus_end) {
-				device_printf(dev, "bus end mismatch! expected %d found %d.\n",
-				    sc->base.bus_end, (int)mcfg_entry->EndBusNumber);
-				sc->base.bus_end = mcfg_entry->EndBusNumber;
-			}
+			sc->base.bus_end = mcfg_entry->EndBusNumber;
			base = mcfg_entry->Address;
		} else {
			device_printf(dev, "MCFG exists, but does not have bus %d-%d\n",
			    sc->base.bus_start, sc->base.bus_end);
			return (ENXIO);
		}
	} else {
		status = acpi_GetInteger(handle, "_CBA", &val);
-		if (ACPI_SUCCESS(status))
+		if (ACPI_SUCCESS(status)) {
			base = val;
-		else
+			sc->base.bus_end = 255;
+		} else
			return (ENXIO);
	}

	/* add as MEM rid 0 */
	ad = device_get_ivars(dev);
	rl = &ad->ad_rl;
	start = base + (sc->base.bus_start << PCIE_BUS_SHIFT);
	end = base + ((sc->base.bus_end + 1) << PCIE_BUS_SHIFT) - 1;
	resource_list_add(rl, SYS_RES_MEMORY, 0, start, end, end - start + 1);
	if (bootverbose)
		device_printf(dev, "ECAM for bus %d-%d at mem %jx-%jx\n",
		    sc->base.bus_start, sc->base.bus_end, start, end);
	return (0);
}

static int
pci_host_generic_acpi_attach(device_t dev)
{
	struct generic_pcie_acpi_softc *sc;
	ACPI_HANDLE handle;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	ACPI_STATUS status;
	int error;
	int tuple;

	sc = device_get_softc(dev);
	handle = acpi_get_handle(dev);

	/* Get Start bus number for the PCI host bus is from _BBN method */
	status = acpi_GetInteger(handle, "_BBN", &sc->base.bus_start);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "No _BBN, using start bus 0\n");
		sc->base.bus_start = 0;
	}
-	sc->base.bus_end = 255;

	/* Get PCI Segment (domain) needed for MCFG lookup */
	status = acpi_GetInteger(handle, "_SEG", &sc->base.ecam);
	if (ACPI_FAILURE(status)) {
		device_printf(dev, "No _SEG for PCI Bus, using segment 0\n");
		sc->base.ecam = 0;
	}

	/* Bus decode ranges */
	status = AcpiWalkResources(handle, "_CRS",
	    pci_host_generic_acpi_parse_resource, (void *)dev);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	/* Coherency attribute */
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_CCA", &sc->base.coherent)))
		sc->base.coherent = 0;
	if (bootverbose)
		device_printf(dev, "Bus is%s cache-coherent\n",
		    sc->base.coherent ? "" : " not");

	/* add config space resource */
	pci_host_acpi_get_ecam_resource(dev);
	acpi_pcib_fetch_prt(dev, &sc->ap_prt);

	error = pci_host_generic_core_attach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->base.ranges[tuple].phys_base;
		pci_base = sc->base.ranges[tuple].pci_base;
		size = sc->base.ranges[tuple].size;
		if (phys_base == 0 || size == 0)
			continue; /* empty range element */
		if (sc->base.ranges[tuple].flags & FLAG_MEM) {
			error = rman_manage_region(&sc->base.mem_rman,
-			    phys_base, phys_base + size - 1);
+			    pci_base, pci_base + size - 1);
		} else if (sc->base.ranges[tuple].flags & FLAG_IO) {
			error = rman_manage_region(&sc->base.io_rman,
			    pci_base + PCI_IO_WINDOW_OFFSET,
			    pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
		} else
			continue;
		if (error) {
			device_printf(dev, "rman_manage_region() failed."
			    "error = %d\n", error);
			rman_fini(&sc->base.mem_rman);
			return (error);
		}
	}

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static int
generic_pcie_acpi_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_acpi_softc *sc;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		*result = sc->base.bus_start;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->base.ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_acpi_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct generic_pcie_acpi_softc *sc;

	sc = device_get_softc(bus);
	return (acpi_pcib_route_interrupt(bus, dev, pin, &sc->ap_prt));
}

static u_int
generic_pcie_get_xref(device_t pci, device_t child)
{
	struct generic_pcie_acpi_softc *sc;
	uintptr_t rid;
	u_int xref, devid;
	int err;

	sc = device_get_softc(pci);
	err = pcib_get_id(pci, child, PCI_ID_RID, &rid);
	if (err != 0)
		return (ACPI_MSI_XREF);
	err = acpi_iort_map_pci_msi(sc->base.ecam, rid, &xref, &devid);
	if (err != 0)
		return (ACPI_MSI_XREF);
	return (xref);
}

static u_int
generic_pcie_map_id(device_t pci, device_t child, uintptr_t *id)
{
	struct generic_pcie_acpi_softc *sc;
	uintptr_t rid;
	u_int xref, devid;
	int err;

	sc = device_get_softc(pci);
	err = pcib_get_id(pci, child, PCI_ID_RID, &rid);
	if (err != 0)
		return (err);
	err = acpi_iort_map_pci_msi(sc->base.ecam, rid, &xref, &devid);
	if (err == 0)
		*id = devid;
	else
		*id = rid;	/* RID not in IORT, likely FW bug, ignore */
	return (0);
}

static int
generic_pcie_acpi_alloc_msi(device_t pci, device_t child, int count,
    int maxcount, int *irqs)
{

#if defined(INTRNG)
	return (intr_alloc_msi(pci, child, generic_pcie_get_xref(pci, child),
	    count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_acpi_release_msi(device_t pci, device_t child, int count,
    int *irqs)
{

#if defined(INTRNG)
	return (intr_release_msi(pci, child, generic_pcie_get_xref(pci, child),
	    count, irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_acpi_map_msi(device_t pci, device_t child, int irq,
    uint64_t *addr, uint32_t *data)
{

#if defined(INTRNG)
	return (intr_map_msi(pci, child, generic_pcie_get_xref(pci, child),
	    irq, addr, data));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_acpi_alloc_msix(device_t pci, device_t child, int *irq)
{

#if defined(INTRNG)
	return (intr_alloc_msix(pci, child, generic_pcie_get_xref(pci, child),
	    irq));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_acpi_release_msix(device_t pci, device_t child, int irq)
{

#if defined(INTRNG)
	return (intr_release_msix(pci, child, generic_pcie_get_xref(pci,
	    child), irq));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_acpi_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{

	if (type == PCI_ID_MSI)
		return (generic_pcie_map_id(pci, child, id));
	else
		return (pcib_get_id(pci, child, type, id));
}

static device_method_t generic_pcie_acpi_methods[] = {
	DEVMETHOD(device_probe,		generic_pcie_acpi_probe),
	DEVMETHOD(device_attach,	pci_host_generic_acpi_attach),
	DEVMETHOD(bus_read_ivar,	generic_pcie_acpi_read_ivar),

	/* pcib interface */
	DEVMETHOD(pcib_route_interrupt,	generic_pcie_acpi_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	generic_pcie_acpi_alloc_msi),
	DEVMETHOD(pcib_release_msi,	generic_pcie_acpi_release_msi),
	DEVMETHOD(pcib_alloc_msix,	generic_pcie_acpi_alloc_msix),
	DEVMETHOD(pcib_release_msix,	generic_pcie_acpi_release_msix),
	DEVMETHOD(pcib_map_msi,		generic_pcie_acpi_map_msi),
	DEVMETHOD(pcib_get_id,		generic_pcie_acpi_get_id),

	DEVMETHOD_END
};

DEFINE_CLASS_1(pcib, generic_pcie_acpi_driver, generic_pcie_acpi_methods,
    sizeof(struct generic_pcie_acpi_softc), generic_pcie_core_driver);

static devclass_t generic_pcie_acpi_devclass;

DRIVER_MODULE(pcib, acpi, generic_pcie_acpi_driver, generic_pcie_acpi_devclass,
    0, 0);
Index: stable/12
===================================================================
--- stable/12	(revision 362239)
+++ stable/12	(revision 362240)

Property changes on: stable/12
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r347440,347929-347930,349588
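
[Note on the pci_host_generic_acpi.c hunks] The ACPI host driver now sizes its ECAM window from the matching MCFG entry (or, when only _CBA is present, assumes the full bus range 0-255) and addresses configuration space with the standard ECAM layout: 1 MiB per bus, 32 KiB per slot, 4 KiB per function. The standalone sketch below mirrors the PCIE_*_SHIFT macros and the window math in pci_host_acpi_get_ecam_resource(); the helper names and the sample base address are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define BUS_SHIFT	20	/* 1 MiB of config space per bus */
#define SLOT_SHIFT	15	/* 32 KiB per slot */
#define FUNC_SHIFT	12	/* 4 KiB per function */

/* Offset of a config register inside the ECAM window. */
static uint64_t
ecam_offset(unsigned bus, unsigned slot, unsigned func, unsigned reg)
{
	return (((uint64_t)(bus & 0xff) << BUS_SHIFT) |
	    ((slot & 0x1f) << SLOT_SHIFT) |
	    ((func & 0x07) << FUNC_SHIFT) |
	    (reg & 0xfff));
}

int
main(void)
{
	uint64_t base = 0x40000000;	/* hypothetical MCFG base address */
	unsigned bus_start = 0, bus_end = 255;

	/* Same window math as pci_host_acpi_get_ecam_resource(). */
	uint64_t start = base + ((uint64_t)bus_start << BUS_SHIFT);
	uint64_t end = base + (((uint64_t)bus_end + 1) << BUS_SHIFT) - 1;

	printf("ECAM window 0x%jx-0x%jx\n", (uintmax_t)start, (uintmax_t)end);
	printf("bus 1 slot 2 func 0 reg 0x10 -> offset 0x%jx\n",
	    (uintmax_t)ecam_offset(1, 2, 0, 0x10));
	return (0);
}

Trusting the MCFG entry's EndBusNumber (rather than clamping a hard-coded 255) keeps the registered memory resource consistent with what the firmware actually decodes for that segment.
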