Index: head/sys/arm/versatile/versatile_pci.c =================================================================== --- head/sys/arm/versatile/versatile_pci.c (revision 287881) +++ head/sys/arm/versatile/versatile_pci.c (revision 287882) @@ -1,521 +1,521 @@ /* * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #include #include #include #define MEM_SYS 0 #define MEM_CORE 1 #define MEM_BASE 2 #define MEM_CONF_BASE 3 #define MEM_REGIONS 4 #define SYS_PCICTL 0x00 #define PCI_CORE_IMAP0 0x00 #define PCI_CORE_IMAP1 0x04 #define PCI_CORE_IMAP2 0x08 #define PCI_CORE_SELFID 0x0C #define PCI_CORE_SMAP0 0x10 #define PCI_CORE_SMAP1 0x14 #define PCI_CORE_SMAP2 0x18 #define VERSATILE_PCI_DEV 0x030010ee #define VERSATILE_PCI_CLASS 0x0b400000 #define PCI_IO_WINDOW 0x44000000 #define PCI_IO_SIZE 0x0c000000 #define PCI_NPREFETCH_WINDOW 0x50000000 #define PCI_NPREFETCH_SIZE 0x10000000 #define PCI_PREFETCH_WINDOW 0x60000000 #define PCI_PREFETCH_SIZE 0x10000000 #define VERSATILE_PCI_IRQ_START 27 #define VERSATILE_PCI_IRQ_END 30 #ifdef DEBUG #define dprintf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define dprintf(fmt, args...) 
#endif #define versatile_pci_sys_read_4(reg) \ bus_read_4(sc->mem_res[MEM_SYS], (reg)) #define versatile_pci_sys_write_4(reg, val) \ bus_write_4(sc->mem_res[MEM_SYS], (reg), (val)) #define versatile_pci_core_read_4(reg) \ bus_read_4(sc->mem_res[MEM_CORE], (reg)) #define versatile_pci_core_write_4(reg, val) \ bus_write_4(sc->mem_res[MEM_CORE], (reg), (val)) #define versatile_pci_read_4(reg) \ bus_read_4(sc->mem_res[MEM_BASE], (reg)) #define versatile_pci_write_4(reg, val) \ bus_write_4(sc->mem_res[MEM_BASE], (reg), (val)) #define versatile_pci_conf_read_4(reg) \ bus_read_4(sc->mem_res[MEM_CONF_BASE], (reg)) #define versatile_pci_conf_write_4(reg, val) \ bus_write_4(sc->mem_res[MEM_CONF_BASE], (reg), (val)) #define versatile_pci_conf_write_2(reg, val) \ bus_write_2(sc->mem_res[MEM_CONF_BASE], (reg), (val)) #define versatile_pci_conf_write_1(reg, val) \ bus_write_1(sc->mem_res[MEM_CONF_BASE], (reg), (val)) struct versatile_pci_softc { struct resource* mem_res[MEM_REGIONS]; struct resource* irq_res; void* intr_hl; int pcib_slot; /* Bus part */ int busno; struct rman io_rman; struct rman irq_rman; struct rman mem_rman; struct mtx mtx; }; static struct resource_spec versatile_pci_mem_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE }, { SYS_RES_MEMORY, 3, RF_ACTIVE }, { -1, 0, 0 } }; static int versatile_pci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "versatile,pci")) { device_set_desc(dev, "Versatile PCI controller"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int versatile_pci_attach(device_t dev) { struct versatile_pci_softc *sc = device_get_softc(dev); int err; int slot; uint32_t vendordev_id, class_id; uint32_t val; /* Request memory resources */ err = bus_alloc_resources(dev, versatile_pci_mem_spec, sc->mem_res); if (err) { device_printf(dev, "Error: could not allocate memory resources\n"); return (ENXIO); } /* * Setup memory windows */ versatile_pci_core_write_4(PCI_CORE_IMAP0, (PCI_IO_WINDOW >> 28)); versatile_pci_core_write_4(PCI_CORE_IMAP1, (PCI_NPREFETCH_WINDOW >> 28)); versatile_pci_core_write_4(PCI_CORE_IMAP2, (PCI_PREFETCH_WINDOW >> 28)); /* * XXX: this is SDRAM offset >> 28 * Unused as of QEMU 1.5 */ versatile_pci_core_write_4(PCI_CORE_SMAP0, (PCI_IO_WINDOW >> 28)); versatile_pci_core_write_4(PCI_CORE_SMAP1, (PCI_NPREFETCH_WINDOW >> 28)); versatile_pci_core_write_4(PCI_CORE_SMAP2, (PCI_NPREFETCH_WINDOW >> 28)); versatile_pci_sys_write_4(SYS_PCICTL, 1); for (slot = 0; slot <= PCI_SLOTMAX; slot++) { vendordev_id = versatile_pci_read_4((slot << 11) + PCIR_DEVVENDOR); class_id = versatile_pci_read_4((slot << 11) + PCIR_REVID); if ((vendordev_id == VERSATILE_PCI_DEV) && (class_id == VERSATILE_PCI_CLASS)) break; } if (slot == (PCI_SLOTMAX + 1)) { bus_release_resources(dev, versatile_pci_mem_spec, sc->mem_res); device_printf(dev, "Versatile PCI core not found\n"); return (ENXIO); } sc->pcib_slot = slot; device_printf(dev, "PCI core at slot #%d\n", slot); versatile_pci_core_write_4(PCI_CORE_SELFID, slot); val = versatile_pci_conf_read_4((slot << 11) + PCIR_COMMAND); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_MWRICEN); versatile_pci_conf_write_4((slot << 11) + PCIR_COMMAND, val); /* Again SDRAM start >> 28 */ versatile_pci_write_4((slot << 11) + PCIR_BAR(0), 0); versatile_pci_write_4((slot << 11) + PCIR_BAR(1), 0); versatile_pci_write_4((slot << 11) + PCIR_BAR(2), 0); /* Prepare resource managers */ sc->mem_rman.rm_type = RMAN_ARRAY; 
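	/*
	 * The child PCI bus is added at the end of this routine with
	 * device_add_child(dev, "pci", -1); a unit of -1 lets newbus pick
	 * the next free unit, and the child learns its bus number through
	 * PCIB_IVAR_BUS (see versatile_pci_read_ivar() below) rather than
	 * from its unit number.  A minimal sketch of that pattern, with
	 * foo_pcib_attach() purely hypothetical:
	 *
	 *	static int
	 *	foo_pcib_attach(device_t dev)
	 *	{
	 *		if (device_add_child(dev, "pci", -1) == NULL)
	 *			return (ENXIO);
	 *		return (bus_generic_attach(dev));
	 *	}
	 */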
sc->mem_rman.rm_descr = "versatile PCI memory window"; if (rman_init(&sc->mem_rman) != 0 || rman_manage_region(&sc->mem_rman, PCI_NPREFETCH_WINDOW, PCI_NPREFETCH_WINDOW + PCI_NPREFETCH_SIZE - 1) != 0) { panic("versatile_pci_attach: failed to set up memory rman"); } bootverbose = 1; sc->io_rman.rm_type = RMAN_ARRAY; sc->io_rman.rm_descr = "versatile PCI IO window"; if (rman_init(&sc->io_rman) != 0 || rman_manage_region(&sc->io_rman, PCI_IO_WINDOW, PCI_IO_WINDOW + PCI_IO_SIZE - 1) != 0) { panic("versatile_pci_attach: failed to set up I/O rman"); } sc->irq_rman.rm_type = RMAN_ARRAY; sc->irq_rman.rm_descr = "versatile PCI IRQs"; if (rman_init(&sc->irq_rman) != 0 || rman_manage_region(&sc->irq_rman, VERSATILE_PCI_IRQ_START, VERSATILE_PCI_IRQ_END) != 0) { panic("versatile_pci_attach: failed to set up IRQ rman"); } mtx_init(&sc->mtx, device_get_nameunit(dev), "versatilepci", MTX_SPIN); val = versatile_pci_conf_read_4((12 << 11) + PCIR_COMMAND); for (slot = 0; slot <= PCI_SLOTMAX; slot++) { vendordev_id = versatile_pci_read_4((slot << 11) + PCIR_DEVVENDOR); class_id = versatile_pci_read_4((slot << 11) + PCIR_REVID); if (slot == sc->pcib_slot) continue; if ((vendordev_id == 0xffffffff) && (class_id == 0xffffffff)) continue; val = versatile_pci_conf_read_4((slot << 11) + PCIR_COMMAND); val |= PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; versatile_pci_conf_write_4((slot << 11) + PCIR_COMMAND, val); } - device_add_child(dev, "pci", 0); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int versatile_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct versatile_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->busno; return (0); } return (ENOENT); } static int versatile_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct versatile_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->busno = result; return (0); } return (ENOENT); } static struct resource * versatile_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct versatile_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; dprintf("Alloc resources %d, %08lx..%08lx, %ld\n", type, start, end, count); switch (type) { case SYS_RES_IOPORT: rm = &sc->io_rman; break; case SYS_RES_IRQ: rm = &sc->irq_rman; break; case SYS_RES_MEMORY: rm = &sc->mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int versatile_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { vm_offset_t vaddr; int res; switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: vaddr = (vm_offset_t)pmap_mapdev(rman_get_start(r), rman_get_size(r)); rman_set_bushandle(r, vaddr); rman_set_bustag(r, arm_base_bs_tag); res = rman_activate_resource(r); break; case SYS_RES_IRQ: res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); break; default: res = ENXIO; break; } return (res); } static int versatile_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { return 
BUS_SETUP_INTR(device_get_parent(bus), bus, ires, flags, filt, handler, arg, cookiep); } static int versatile_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { return BUS_TEARDOWN_INTR(device_get_parent(dev), dev, ires, cookie); } static int versatile_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int versatile_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (27 + ((pci_get_slot(device) + pin - 1) & 3)); } static uint32_t versatile_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct versatile_pci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t shift, mask; uint32_t addr; if (sc->pcib_slot == slot) { switch (bytes) { case 4: return (0xffffffff); break; case 2: return (0xffff); break; case 1: return (0xff); break; } } addr = (bus << 16) | (slot << 11) | (func << 8) | (reg & ~3); /* register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); mtx_lock_spin(&sc->mtx); data = versatile_pci_conf_read_4(addr); mtx_unlock_spin(&sc->mtx); /* get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void versatile_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct versatile_pci_softc *sc = device_get_softc(dev); uint32_t addr; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if (sc->pcib_slot == slot) return; addr = (bus << 16) | (slot << 11) | (func << 8) | reg; mtx_lock_spin(&sc->mtx); switch (bytes) { case 4: versatile_pci_conf_write_4(addr, data); break; case 2: versatile_pci_conf_write_2(addr, data); break; case 1: versatile_pci_conf_write_1(addr, data); break; } mtx_unlock_spin(&sc->mtx); } static device_method_t versatile_pci_methods[] = { DEVMETHOD(device_probe, versatile_pci_probe), DEVMETHOD(device_attach, versatile_pci_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, versatile_pci_read_ivar), DEVMETHOD(bus_write_ivar, versatile_pci_write_ivar), DEVMETHOD(bus_alloc_resource, versatile_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, versatile_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, versatile_pci_setup_intr), DEVMETHOD(bus_teardown_intr, versatile_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, versatile_pci_maxslots), DEVMETHOD(pcib_read_config, versatile_pci_read_config), DEVMETHOD(pcib_write_config, versatile_pci_write_config), DEVMETHOD(pcib_route_interrupt, versatile_pci_route_interrupt), DEVMETHOD_END }; static driver_t versatile_pci_driver = { "pcib", versatile_pci_methods, sizeof(struct versatile_pci_softc), }; static devclass_t versatile_pci_devclass; DRIVER_MODULE(versatile_pci, simplebus, versatile_pci_driver, versatile_pci_devclass, 0, 0); Index: head/sys/arm/xscale/i80321/i80321_pci.c =================================================================== --- head/sys/arm/xscale/i80321/i80321_pci.c (revision 287881) +++ head/sys/arm/xscale/i80321/i80321_pci.c (revision 287882) @@ -1,401 +1,401 @@ /* $NetBSD: i80321_pci.c,v 1.4 2003/07/15 00:24:54 lukem Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi 
Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PCI configuration support for i80321 I/O Processor chip. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include extern struct i80321_softc *i80321_softc; static int i80321_pci_probe(device_t dev) { device_set_desc(dev, "i80321 PCI bus"); return (0); } static int i80321_pci_attach(device_t dev) { uint32_t busno; struct i80321_pci_softc *sc = device_get_softc(dev); sc->sc_st = i80321_softc->sc_st; sc->sc_atu_sh = i80321_softc->sc_atu_sh; busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR); busno = PCIXSR_BUSNO(busno); if (busno == 0xff) busno = 0; sc->sc_dev = dev; sc->sc_busno = busno; sc->sc_pciio = &i80321_softc->sc_pci_iot; sc->sc_pcimem = &i80321_softc->sc_pci_memt; sc->sc_mem = i80321_softc->sc_owin[0].owin_xlate_lo + VERDE_OUT_XLATE_MEM_WIN_SIZE; sc->sc_io = i80321_softc->sc_iow_vaddr; /* Initialize memory and i/o rmans. 
 */
	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "I80321 PCI I/O Ports";
	if (rman_init(&sc->sc_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_io_rman, sc->sc_io,
		sc->sc_io + VERDE_OUT_XLATE_IO_WIN_SIZE) != 0) {
		panic("i80321_pci_probe: failed to set up I/O rman");
	}
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "I80321 PCI Memory";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0,
	    VERDE_OUT_XLATE_MEM_WIN_SIZE) != 0) {
		panic("i80321_pci_probe: failed to set up memory rman");
	}
	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "i80321 PCI IRQs";
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, 26, 32) != 0)
		panic("i80321_pci_probe: failed to set up IRQ rman");

-	device_add_child(dev, "pci",busno);
+	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static int
i80321_pci_maxslots(device_t dev)
{

	return (PCI_SLOTMAX);
}

static int
i80321_pci_conf_setup(struct i80321_pci_softc *sc, int bus, int slot, int func,
    int reg, uint32_t *addr)
{
	uint32_t busno;

	busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR);
	busno = PCIXSR_BUSNO(busno);
	if (busno == 0xff)
		busno = 0;

	/*
	 * If the bus # is the same as our own, then use Type 0 cycles,
	 * else use Type 1.
	 *
	 * XXX We should filter out all non-private devices here!
	 * XXX How does private space interact with PCI-PCI bridges?
	 */
	if (bus == busno) {
		if (slot > (31 - 16))
			return (1);
		/*
		 * NOTE: PCI-X requires that that devices updated their
		 * PCIXSR on every config write with the device number
		 * specified in AD[15:11].  If we don't set this field,
		 * each device could end of thinking it is at device 0,
		 * which can cause a number of problems.  Doing this
		 * unconditionally should be OK when only PCI devices
		 * are present.
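		 *
		 * For illustration only (hypothetical values): a Type 0 read
		 * of register 0x10 on slot 3, function 0 of our own bus is
		 * encoded as (1U << (3 + 16)) | (3 << 11) | (0 << 8) | 0x10 =
		 * 0x00081810, i.e. a single IDSEL select bit in AD[31:16]
		 * plus device/function/register, while the same target behind
		 * bus 1 would use the Type 1 form (1 << 16) | (3 << 11) |
		 * (0 << 8) | 0x10 | 1 = 0x00011811.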
*/ bus &= 0xff; slot &= 0x1f; func &= 0x07; *addr = (1U << (slot + 16)) | (slot << 11) | (func << 8) | reg; } else { *addr = (bus << 16) | (slot << 11) | (func << 8) | reg | 1; } return (0); } static u_int32_t i80321_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct i80321_pci_softc *sc = device_get_softc(dev); uint32_t isr; uint32_t addr; u_int32_t ret = 0; vm_offset_t va; int err = 0; if (i80321_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return (-1); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OCCAR, addr); va = sc->sc_atu_sh; switch (bytes) { case 1: err = badaddr_read((void*)(va + ATU_OCCDR + (reg & 3)), 1, &ret); break; case 2: err = badaddr_read((void*)(va + ATU_OCCDR + (reg & 3)), 2, &ret); break; case 4: err = badaddr_read((void *)(va + ATU_OCCDR), 4, &ret); break; default: printf("i80321_read_config: invalid size %d\n", bytes); ret = -1; } if (err) { isr = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ATUISR); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ATUISR, isr & (ATUISR_P_SERR_DET|ATUISR_PMA|ATUISR_PTAM| ATUISR_PTAT|ATUISR_PMPE)); return (-1); } return (ret); } static void i80321_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, u_int32_t data, int bytes) { struct i80321_pci_softc *sc = device_get_softc(dev); uint32_t addr; if (i80321_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return; bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OCCAR, addr); switch (bytes) { case 1: bus_space_write_1(sc->sc_st, sc->sc_atu_sh, ATU_OCCDR + (reg & 3), data); break; case 2: bus_space_write_2(sc->sc_st, sc->sc_atu_sh, ATU_OCCDR + (reg & 3), data); break; case 4: bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OCCDR, data); break; default: printf("i80321_pci_write_config: Invalid size : %d\n", bytes); } } static int i80321_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct i80321_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int i80321_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct i80321_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * i80321_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct i80321_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; bus_space_tag_t bt = NULL; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bt = sc->sc_pcimem; bh = (start >= 0x80000000 && start < 0x84000000) ? 
0x80000000 : sc->sc_mem; start &= (0x1000000 - 1); end &= (0x1000000 - 1); break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bt = sc->sc_pciio; bh = sc->sc_io; if (start < sc->sc_io) { start = start - 0x90000000 + sc->sc_io; end = end - 0x90000000 + sc->sc_io; } break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { if (type == SYS_RES_MEMORY) bh += (rman_get_start(rv)); rman_set_bustag(rv, bt); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); } static int i80321_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { u_long p; int error; if (type == SYS_RES_MEMORY) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int i80321_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { return (BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, filt, intr, arg, cookiep)); } static int i80321_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, res, cookie)); } static device_method_t i80321_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, i80321_pci_probe), DEVMETHOD(device_attach, i80321_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, i80321_read_ivar), DEVMETHOD(bus_write_ivar, i80321_write_ivar), DEVMETHOD(bus_alloc_resource, i80321_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, i80321_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, i80321_pci_setup_intr), DEVMETHOD(bus_teardown_intr, i80321_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, i80321_pci_maxslots), DEVMETHOD(pcib_read_config, i80321_pci_read_config), DEVMETHOD(pcib_write_config, i80321_pci_write_config), DEVMETHOD(pcib_route_interrupt, machdep_pci_route_interrupt), DEVMETHOD_END }; static driver_t i80321_pci_driver = { "pcib", i80321_pci_methods, sizeof(struct i80321_pci_softc), }; static devclass_t i80321_pci_devclass; DRIVER_MODULE(ipci, iq, i80321_pci_driver, i80321_pci_devclass, 0, 0); Index: head/sys/arm/xscale/i8134x/i81342_pci.c =================================================================== --- head/sys/arm/xscale/i8134x/i81342_pci.c (revision 287881) +++ head/sys/arm/xscale/i8134x/i81342_pci.c (revision 287882) @@ -1,544 +1,544 @@ /*- * Copyright (c) 2006 Olivier Houchard * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include static pcib_read_config_t i81342_pci_read_config; static pcib_write_config_t i81342_pci_write_config; static int i81342_pci_probe(device_t dev) { struct i81342_pci_softc *sc; sc = device_get_softc(dev); if (device_get_unit(dev) == 0) { device_set_desc(dev, "i81342 PCI-X bus"); sc->sc_is_atux = 1; } else { device_set_desc(dev, "i81342 PCIe bus"); sc->sc_is_atux = 0; } return (0); } #define PCI_MAPREG_MEM_PREFETCHABLE_MASK 0x00000008 #define PCI_MAPREG_MEM_TYPE_64BIT 0x00000004 static int i81342_pci_attach(device_t dev) { struct i81342_softc *parent_sc; struct i81342_pci_softc *sc; uint32_t memsize, memstart; uint32_t reg; int func; uint32_t busno; sc = device_get_softc(dev); parent_sc = device_get_softc(device_get_parent(dev)); sc->sc_atu_sh = sc->sc_is_atux ? parent_sc->sc_atux_sh : parent_sc->sc_atue_sh; sc->sc_st = parent_sc->sc_st; if (bus_space_read_4(sc->sc_st, parent_sc->sc_sh, IOP34X_ESSTSR0) & IOP34X_INT_SEL_PCIX) { if (sc->sc_is_atux) func = 5; else func = 0; } else { if (sc->sc_is_atux) func = 0; else func = 5; } i81342_io_bs_init(&sc->sc_pciio, sc); i81342_mem_bs_init(&sc->sc_pcimem, sc); i81342_sdram_bounds(sc->sc_st, IOP34X_VADDR, &memstart, &memsize); if (sc->sc_is_atux) { reg = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR); if (reg & ATUX_P_RSTOUT) { bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR, reg &~ ATUX_P_RSTOUT); DELAY(200); } } /* Setup the Inbound windows. 
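	 * For illustration only, with made-up numbers: 128MB of SDRAM at
	 * physical 0x00000000 would be exposed through inbound window 1 as
	 * IABAR1 = 0x00000000 | prefetchable | 64-bit, IALR1 =
	 * (~(0x08000000 - 1)) & ~0xfff = 0xf8000000 (the size mask) and
	 * IATVR1 = 0x00000000 (the local address PCI accesses translate to).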
*/ bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR0, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR0, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR0, 0); /* Set the mapping Physical address <=> PCI address */ bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR1, memstart | PCI_MAPREG_MEM_PREFETCHABLE_MASK | PCI_MAPREG_MEM_TYPE_64BIT); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR1, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR1, ~(memsize - 1) &~(0xfff)); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IATVR1, memstart); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUTVR1, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IABAR2, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR2, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR2, 0); /* Setup the Outbound IO Bar */ if (sc->sc_is_atux) bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OIOBAR, (IOP34X_PCIX_OIOBAR >> 4) | func); else bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OIOBAR, (IOP34X_PCIE_OIOBAR >> 4) | func); /* Setup the Outbound windows */ bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR0, 0); if (sc->sc_is_atux) bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR1, (IOP34X_PCIX_OMBAR >> 32) | (func << ATU_OUMBAR_FUNC) | ATU_OUMBAR_EN); else bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR1, (IOP34X_PCIE_OMBAR >> 32) | (func << ATU_OUMBAR_FUNC) | ATU_OUMBAR_EN); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMWTVR1, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR2, 0); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_OUMBAR3, 0); /* Enable the outbound windows. */ reg = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_CR); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_CR, reg | ATU_CR_OUT_EN); bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR, bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR) & ATUX_ISR_ERRMSK); /* * Enable bus mastering, memory access, SERR, and parity * checking on the ATU. */ if (sc->sc_is_atux) { busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR); busno = PCIXSR_BUSNO(busno); } else { busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR); busno = PCIE_BUSNO(busno); } reg = bus_space_read_2(sc->sc_st, sc->sc_atu_sh, ATU_CMD); reg |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN; bus_space_write_2(sc->sc_st, sc->sc_atu_sh, ATU_CMD, reg); sc->sc_busno = busno; /* Initialize memory and i/o rmans. */ sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "I81342 PCI I/O Ports"; if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, sc->sc_is_atux ? IOP34X_PCIX_OIOBAR_VADDR : IOP34X_PCIE_OIOBAR_VADDR, (sc->sc_is_atux ? 
	    IOP34X_PCIX_OIOBAR_VADDR : IOP34X_PCIE_OIOBAR_VADDR) +
	    IOP34X_OIOBAR_SIZE) != 0) {
		panic("i81342_pci_probe: failed to set up I/O rman");
	}
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "I81342 PCI Memory";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0, 0xffffffff) != 0) {
		panic("i81342_pci_attach: failed to set up memory rman");
	}
	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "i81342 PCI IRQs";
	if (sc->sc_is_atux) {
		if (rman_init(&sc->sc_irq_rman) != 0 ||
		    rman_manage_region(&sc->sc_irq_rman, ICU_INT_XINT0,
		    ICU_INT_XINT3) != 0)
			panic("i83142_pci_attach: failed to set up IRQ rman");
	} else {
		if (rman_init(&sc->sc_irq_rman) != 0 ||
		    rman_manage_region(&sc->sc_irq_rman, ICU_INT_ATUE_MA,
		    ICU_INT_ATUE_MD) != 0)
			panic("i81342_pci_attach: failed to set up IRQ rman");
	}
	bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR,
	    bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR) &
	    ATUX_ISR_ERRMSK);
-	device_add_child(dev, "pci", busno);
+	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static int
i81342_pci_maxslots(device_t dev)
{

	return (PCI_SLOTMAX);
}

static void
i81342_pci_conf_setup(struct i81342_pci_softc *sc, int bus, int slot,
    int func, int reg, uint32_t *addr)
{
	uint32_t busno;

	if (sc->sc_is_atux) {
		busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR);
		busno = PCIXSR_BUSNO(busno);
	} else {
		busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCSR);
		busno = PCIE_BUSNO(busno);
	}
	bus &= 0xff;
	slot &= 0x1f;
	func &= 0x7;
	if (sc->sc_is_atux) {
		if (busno == bus)
			*addr = (1 << (slot + 16)) | (slot << 11) |
			    (func << 8) | reg;
		else
			*addr = (bus << 16) | (slot << 11) | (func << 11) |
			    reg | 1;
	} else {
		*addr = (bus << 24) | (slot << 19) | (func << 16) | reg;
		if (bus != busno)
			*addr |= 1;
	}
}

static u_int32_t
i81342_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	struct i81342_pci_softc *sc = device_get_softc(dev);
	uint32_t addr;
	uint32_t ret = 0;
	uint32_t isr;
	int err = 0;
	vm_offset_t va;

	i81342_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr);
	bus_space_write_4(sc->sc_st, sc->sc_atu_sh, sc->sc_is_atux ?
	    ATUX_OCCAR : ATUE_OCCAR, addr);
	if (sc->sc_is_atux)
		va = sc->sc_atu_sh + ATUX_OCCDR;
	else
		va = sc->sc_atu_sh + ATUE_OCCDR;
	switch (bytes) {
	case 1:
		err = badaddr_read((void*)(va + (reg & 3)), 1, &ret);
		break;
	case 2:
		err = badaddr_read((void*)(va + (reg & 3)), 2, &ret);
		break;
	case 4:
		err = badaddr_read((void *)(va) , 4, &ret);
		break;
	default:
		printf("i81342_read_config: invalid size %d\n", bytes);
		ret = -1;
	}
	if (err) {
		isr = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR);
		if (sc->sc_is_atux)
			isr &= ATUX_ISR_ERRMSK;
		else
			isr &= ATUE_ISR_ERRMSK;
		bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_ISR, isr);
		ret = -1;
	}
	return (ret);
}

static void
i81342_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, u_int32_t data, int bytes)
{
	struct i81342_pci_softc *sc = device_get_softc(dev);
	uint32_t addr;
	vm_offset_t va;

	i81342_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr);
	bus_space_write_4(sc->sc_st, sc->sc_atu_sh, sc->sc_is_atux ?
	    ATUX_OCCAR : ATUE_OCCAR, addr);
	va = sc->sc_is_atux ?
ATUX_OCCDR : ATUE_OCCDR; switch (bytes) { case 1: bus_space_write_1(sc->sc_st, sc->sc_atu_sh, va + (reg & 3) , data); break; case 2: bus_space_write_2(sc->sc_st, sc->sc_atu_sh, va + (reg & 3) , data); break; case 4: bus_space_write_4(sc->sc_st, sc->sc_atu_sh, va, data); break; default: printf("i81342_pci_write_config: Invalid size : %d\n", bytes); } } static struct resource * i81342_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct i81342_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; bus_space_tag_t bt = NULL; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bt = &sc->sc_pcimem; bh = 0; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bt = &sc->sc_pciio; bh = sc->sc_is_atux ? IOP34X_PCIX_OIOBAR_VADDR : IOP34X_PCIE_OIOBAR_VADDR; start += bh; end += bh; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { if (type == SYS_RES_MEMORY) bh += (rman_get_start(rv)); rman_set_bustag(rv, bt); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); return (NULL); } static int i81342_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { u_long p; int error; if (type == SYS_RES_MEMORY) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int i81342_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { return (BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, filt, intr, arg, cookiep)); } static int i81342_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, res, cookie)); } static int i81342_pci_route_interrupt(device_t pcib, device_t dev, int pin) { struct i81342_pci_softc *sc; int device; device = pci_get_slot(dev); sc = device_get_softc(pcib); /* XXX: Is board specific */ if (sc->sc_is_atux) { /* PCI-X */ switch(device) { case 1: switch (pin) { case 1: return (ICU_INT_XINT1); case 2: return (ICU_INT_XINT2); case 3: return (ICU_INT_XINT3); case 4: return (ICU_INT_XINT0); default: break; } case 2: switch (pin) { case 1: return (ICU_INT_XINT2); case 2: return (ICU_INT_XINT3); case 3: return (ICU_INT_XINT2); case 4: return (ICU_INT_XINT3); default: break; } } } else { switch (pin) { case 1: return (ICU_INT_ATUE_MA); case 2: return (ICU_INT_ATUE_MB); case 3: return (ICU_INT_ATUE_MC); case 4: return (ICU_INT_ATUE_MD); default: break; } } printf("Warning: couldn't map %s IRQ for device %d pin %d\n", sc->sc_is_atux ? 
"PCI-X" : "PCIe", device, pin); return (-1); } static int i81342_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct i81342_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int i81342_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct i81342_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static device_method_t i81342_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, i81342_pci_probe), DEVMETHOD(device_attach, i81342_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, i81342_read_ivar), DEVMETHOD(bus_write_ivar, i81342_write_ivar), DEVMETHOD(bus_alloc_resource, i81342_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, i81342_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, i81342_pci_setup_intr), DEVMETHOD(bus_teardown_intr, i81342_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, i81342_pci_maxslots), DEVMETHOD(pcib_read_config, i81342_pci_read_config), DEVMETHOD(pcib_write_config, i81342_pci_write_config), DEVMETHOD(pcib_route_interrupt, i81342_pci_route_interrupt), DEVMETHOD_END }; static driver_t i81342_pci_driver = { "pcib", i81342_pci_methods, sizeof(struct i81342_pci_softc), }; static devclass_t i81342_pci_devclass; DRIVER_MODULE(ipci, iq, i81342_pci_driver, i81342_pci_devclass, 0, 0); Index: head/sys/dev/acpica/acpi_pcib.c =================================================================== --- head/sys/dev/acpica/acpi_pcib.c (revision 287881) +++ head/sys/dev/acpica/acpi_pcib.c (revision 287882) @@ -1,279 +1,279 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include
#include
#include
#include
#include
#include
#include
#include

#include "pcib_if.h"

/* Hooks for the ACPI CA debugging infrastructure. */
#define _COMPONENT	ACPI_BUS
ACPI_MODULE_NAME("PCI")

ACPI_SERIAL_DECL(pcib, "ACPI PCI bus methods");

/*
 * For locking, we assume the caller is not concurrent since this is
 * triggered by newbus methods.
 */

struct prt_lookup_request {
	ACPI_PCI_ROUTING_TABLE *pr_entry;
	u_int	pr_pin;
	u_int	pr_slot;
};

typedef	void prt_entry_handler(ACPI_PCI_ROUTING_TABLE *entry, void *arg);

static void	prt_attach_devices(ACPI_PCI_ROUTING_TABLE *entry, void *arg);
static void	prt_lookup_device(ACPI_PCI_ROUTING_TABLE *entry, void *arg);
static void	prt_walk_table(ACPI_BUFFER *prt, prt_entry_handler *handler,
		    void *arg);

static void
prt_walk_table(ACPI_BUFFER *prt, prt_entry_handler *handler, void *arg)
{
	ACPI_PCI_ROUTING_TABLE *entry;
	char *prtptr;

	/* First check to see if there is a table to walk. */
	if (prt == NULL || prt->Pointer == NULL)
		return;

	/* Walk the table executing the handler function for each entry. */
	prtptr = prt->Pointer;
	entry = (ACPI_PCI_ROUTING_TABLE *)prtptr;
	while (entry->Length != 0) {
		handler(entry, arg);
		prtptr += entry->Length;
		entry = (ACPI_PCI_ROUTING_TABLE *)prtptr;
	}
}

static void
prt_attach_devices(ACPI_PCI_ROUTING_TABLE *entry, void *arg)
{
	ACPI_HANDLE handle;
	device_t child, pcib;
	int error;

	/* We only care about entries that reference a link device. */
	if (entry->Source[0] == '\0')
		return;

	/*
	 * In practice, we only see SourceIndex's of 0 out in the wild.
	 * When indices != 0 have been found, they've been bugs in the ASL.
	 */
	if (entry->SourceIndex != 0)
		return;

	/* Lookup the associated handle and device. */
	pcib = (device_t)arg;
	if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, entry->Source,
	    &handle)))
		return;
	child = acpi_get_device(handle);
	if (child == NULL)
		return;

	/* If the device hasn't been probed yet, force it to do so. */
	error = device_probe_and_attach(child);
	if (error != 0) {
		device_printf(pcib, "failed to force attach of %s\n",
		    acpi_name(handle));
		return;
	}

	/* Add a reference for a specific bus/device/pin tuple. */
	acpi_pci_link_add_reference(child, entry->SourceIndex, pcib,
	    ACPI_ADR_PCI_SLOT(entry->Address), entry->Pin);
}

int
acpi_pcib_attach(device_t dev, ACPI_BUFFER *prt, int busno)
{
	ACPI_STATUS status;
	int error;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/*
	 * Get the PCI interrupt routing table for this bus.  If we can't
	 * get it, this is not an error but may reduce functionality.  There
	 * are several valid bridges in the field that do not have a _PRT, so
	 * only warn about missing tables if bootverbose is set.
	 */
	prt->Length = ACPI_ALLOCATE_BUFFER;
	status = AcpiGetIrqRoutingTable(acpi_get_handle(dev), prt);
	if (ACPI_FAILURE(status) && (bootverbose || status != AE_NOT_FOUND))
		device_printf(dev,
		    "could not get PCI interrupt routing table for %s - %s\n",
		    acpi_name(acpi_get_handle(dev)),
		    AcpiFormatException(status));

	/*
	 * Attach the PCI bus proper.
	 */
-	if (device_add_child(dev, "pci", busno) == NULL) {
+	if (device_add_child(dev, "pci", -1) == NULL) {
		device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
		return_VALUE(ENXIO);
	}

	/*
	 * Now go scan the bus.
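	 *
	 * For illustration only: the _PRT returned by ACPI is a packed array
	 * of variable-length ACPI_PCI_ROUTING_TABLE entries, so
	 * prt_walk_table() above just advances by entry->Length until it
	 * reaches the terminating entry with Length == 0.  A hypothetical
	 * entry with Address = 0x0003ffff and Pin = 0 describes INTA of
	 * slot 3 (the low word of Address is the 0xffff function wildcard).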
*/ prt_walk_table(prt, prt_attach_devices, dev); error = bus_generic_attach(dev); return_VALUE(error); } static void prt_lookup_device(ACPI_PCI_ROUTING_TABLE *entry, void *arg) { struct prt_lookup_request *pr; pr = (struct prt_lookup_request *)arg; if (pr->pr_entry != NULL) return; /* * Compare the slot number (high word of Address) and pin number * (note that ACPI uses 0 for INTA) to check for a match. * * Note that the low word of the Address field (function number) * is required by the specification to be 0xffff. We don't risk * checking it here. */ if (ACPI_ADR_PCI_SLOT(entry->Address) == pr->pr_slot && entry->Pin == pr->pr_pin) pr->pr_entry = entry; } /* * Route an interrupt for a child of the bridge. */ int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin, ACPI_BUFFER *prtbuf) { ACPI_PCI_ROUTING_TABLE *prt; struct prt_lookup_request pr; ACPI_HANDLE lnkdev; int interrupt; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); interrupt = PCI_INVALID_IRQ; /* ACPI numbers pins 0-3, not 1-4 like the BIOS. */ pin--; ACPI_SERIAL_BEGIN(pcib); /* Search for a matching entry in the routing table. */ pr.pr_entry = NULL; pr.pr_pin = pin; pr.pr_slot = pci_get_slot(dev); prt_walk_table(prtbuf, prt_lookup_device, &pr); if (pr.pr_entry == NULL) { device_printf(pcib, "no PRT entry for %d.%d.INT%c\n", pci_get_bus(dev), pci_get_slot(dev), 'A' + pin); goto out; } prt = pr.pr_entry; if (bootverbose) { device_printf(pcib, "matched entry for %d.%d.INT%c", pci_get_bus(dev), pci_get_slot(dev), 'A' + pin); if (prt->Source[0] != '\0') printf(" (src %s:%u)", prt->Source, prt->SourceIndex); printf("\n"); } /* * If source is empty/NULL, the source index is a global IRQ number * and it's hard-wired so we're done. * * XXX: If the source index is non-zero, ignore the source device and * assume that this is a hard-wired entry. */ if (prt->Source[0] == '\0' || prt->SourceIndex != 0) { if (bootverbose) device_printf(pcib, "slot %d INT%c hardwired to IRQ %d\n", pci_get_slot(dev), 'A' + pin, prt->SourceIndex); if (prt->SourceIndex) { interrupt = prt->SourceIndex; BUS_CONFIG_INTR(dev, interrupt, INTR_TRIGGER_LEVEL, INTR_POLARITY_LOW); } else device_printf(pcib, "error: invalid hard-wired IRQ of 0\n"); goto out; } /* * We have to find the source device (PCI interrupt link device). */ if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, prt->Source, &lnkdev))) { device_printf(pcib, "couldn't find PCI interrupt link device %s\n", prt->Source); goto out; } interrupt = acpi_pci_link_route_interrupt(acpi_get_device(lnkdev), prt->SourceIndex); if (bootverbose && PCI_INTERRUPT_VALID(interrupt)) device_printf(pcib, "slot %d INT%c routed to irq %d via %s\n", pci_get_slot(dev), 'A' + pin, interrupt, acpi_name(lnkdev)); out: ACPI_SERIAL_END(pcib); return_VALUE(interrupt); } int acpi_pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t acpi_dev; acpi_dev = devclass_get_device(devclass_find("acpi"), 0); acpi_device_pwr_for_sleep(acpi_dev, dev, pstate); return (0); } Index: head/sys/dev/pci/pci_pci.c =================================================================== --- head/sys/dev/pci/pci_pci.c (revision 287881) +++ head/sys/dev/pci/pci_pci.c (revision 287882) @@ -1,2119 +1,2119 @@ /*- * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * PCI:PCI bridge support. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" static int pcib_probe(device_t dev); static int pcib_suspend(device_t dev); static int pcib_resume(device_t dev); static int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate); static uint16_t pcib_ari_get_rid(device_t pcib, device_t dev); static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width); static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width); static int pcib_ari_maxslots(device_t dev); static int pcib_ari_maxfuncs(device_t dev); static int pcib_try_enable_ari(device_t pcib, device_t dev); static int pcib_ari_enabled(device_t pcib); static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func); static device_method_t pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcib_probe), DEVMETHOD(device_attach, pcib_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pcib_suspend), DEVMETHOD(device_resume, pcib_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, pcib_read_ivar), DEVMETHOD(bus_write_ivar, pcib_write_ivar), DEVMETHOD(bus_alloc_resource, pcib_alloc_resource), #ifdef NEW_PCIB DEVMETHOD(bus_adjust_resource, pcib_adjust_resource), DEVMETHOD(bus_release_resource, pcib_release_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), #endif DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_ari_maxslots), DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs), DEVMETHOD(pcib_read_config, pcib_read_config), DEVMETHOD(pcib_write_config, pcib_write_config), DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, 
pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, pcib_map_msi), DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep), DEVMETHOD(pcib_get_rid, pcib_ari_get_rid), DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari), DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled), DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid), DEVMETHOD_END }; static devclass_t pcib_devclass; DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc)); DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL); #ifdef NEW_PCIB SYSCTL_DECL(_hw_pci); static int pci_clear_pcib; SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0, "Clear firmware-assigned resources for PCI-PCI bridge I/O windows."); /* * Is a resource from a child device sub-allocated from one of our * resource managers? */ static int pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r) { switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (rman_is_region_manager(r, &sc->bus.rman)); #endif case SYS_RES_IOPORT: return (rman_is_region_manager(r, &sc->io.rman)); case SYS_RES_MEMORY: /* Prefetchable resources may live in either memory rman. */ if (rman_get_flags(r) & RF_PREFETCHABLE && rman_is_region_manager(r, &sc->pmem.rman)) return (1); return (rman_is_region_manager(r, &sc->mem.rman)); } return (0); } static int pcib_is_window_open(struct pcib_window *pw) { return (pw->valid && pw->base < pw->limit); } /* * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and * handle for the resource, we could pass RF_ACTIVE up to the PCI bus * when allocating the resource windows and rely on the PCI bus driver * to do this for us. */ static void pcib_activate_window(struct pcib_softc *sc, int type) { PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type); } static void pcib_write_windows(struct pcib_softc *sc, int mask) { device_t dev; uint32_t val; dev = sc->dev; if (sc->io.valid && mask & WIN_IO) { val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { pci_write_config(dev, PCIR_IOBASEH_1, sc->io.base >> 16, 2); pci_write_config(dev, PCIR_IOLIMITH_1, sc->io.limit >> 16, 2); } pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1); pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1); } if (mask & WIN_MEM) { pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2); } if (sc->pmem.valid && mask & WIN_PMEM) { val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { pci_write_config(dev, PCIR_PMBASEH_1, sc->pmem.base >> 32, 4); pci_write_config(dev, PCIR_PMLIMITH_1, sc->pmem.limit >> 32, 4); } pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2); } } /* * This is used to reject I/O port allocations that conflict with an * ISA alias range. */ static int pcib_is_isa_range(struct pcib_softc *sc, u_long start, u_long end, u_long count) { u_long next_alias; if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE)) return (0); /* Only check fixed ranges for overlap. */ if (start + count - 1 != end) return (0); /* ISA aliases are only in the lower 64KB of I/O space. */ if (start >= 65536) return (0); /* Check for overlap with 0x000 - 0x0ff as a special case. */ if (start < 0x100) goto alias; /* * If the start address is an alias, the range is an alias. 
* Otherwise, compute the start of the next alias range and * check if it is before the end of the candidate range. */ if ((start & 0x300) != 0) goto alias; next_alias = (start & ~0x3fful) | 0x100; if (next_alias <= end) goto alias; return (0); alias: if (bootverbose) device_printf(sc->dev, "I/O range %#lx-%#lx overlaps with an ISA alias\n", start, end); return (1); } static void pcib_add_window_resources(struct pcib_window *w, struct resource **res, int count) { struct resource **newarray; int error, i; newarray = malloc(sizeof(struct resource *) * (w->count + count), M_DEVBUF, M_WAITOK); if (w->res != NULL) bcopy(w->res, newarray, sizeof(struct resource *) * w->count); bcopy(res, newarray + w->count, sizeof(struct resource *) * count); free(w->res, M_DEVBUF); w->res = newarray; w->count += count; for (i = 0; i < count; i++) { error = rman_manage_region(&w->rman, rman_get_start(res[i]), rman_get_end(res[i])); if (error) panic("Failed to add resource to rman"); } } typedef void (nonisa_callback)(u_long start, u_long end, void *arg); static void pcib_walk_nonisa_ranges(u_long start, u_long end, nonisa_callback *cb, void *arg) { u_long next_end; /* * If start is within an ISA alias range, move up to the start * of the next non-alias range. As a special case, addresses * in the range 0x000 - 0x0ff should also be skipped since * those are used for various system I/O devices in ISA * systems. */ if (start <= 65535) { if (start < 0x100 || (start & 0x300) != 0) { start &= ~0x3ff; start += 0x400; } } /* ISA aliases are only in the lower 64KB of I/O space. */ while (start <= MIN(end, 65535)) { next_end = MIN(start | 0xff, end); cb(start, next_end, arg); start += 0x400; } if (start <= end) cb(start, end, arg); } static void count_ranges(u_long start, u_long end, void *arg) { int *countp; countp = arg; (*countp)++; } struct alloc_state { struct resource **res; struct pcib_softc *sc; int count, error; }; static void alloc_ranges(u_long start, u_long end, void *arg) { struct alloc_state *as; struct pcib_window *w; int rid; as = arg; if (as->error != 0) return; w = &as->sc->io; rid = w->reg; if (bootverbose) device_printf(as->sc->dev, "allocating non-ISA range %#lx-%#lx\n", start, end); as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT, &rid, start, end, end - start + 1, 0); if (as->res[as->count] == NULL) as->error = ENXIO; else as->count++; } static int pcib_alloc_nonisa_ranges(struct pcib_softc *sc, u_long start, u_long end) { struct alloc_state as; int i, new_count; /* First, see how many ranges we need. */ new_count = 0; pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count); /* Second, allocate the ranges. */ as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF, M_WAITOK); as.sc = sc; as.count = 0; as.error = 0; pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as); if (as.error != 0) { for (i = 0; i < as.count; i++) bus_release_resource(sc->dev, SYS_RES_IOPORT, sc->io.reg, as.res[i]); free(as.res, M_DEVBUF); return (as.error); } KASSERT(as.count == new_count, ("%s: count mismatch", __func__)); /* Third, add the ranges to the window. 
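	 *
	 * For illustration only: pcib_walk_nonisa_ranges() above hands the
	 * callback just the non-alias windows, which repeat every 0x400
	 * ports in the low 64KB.  A hypothetical request for 0x0000-0x0fff
	 * would be carved into 0x400-0x4ff, 0x800-0x8ff and 0xc00-0xcff,
	 * skipping 0x000-0x0ff and every (addr & 0x300) != 0 alias range.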
*/ pcib_add_window_resources(&sc->io, as.res, as.count); free(as.res, M_DEVBUF); return (0); } static void pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type, int flags, pci_addr_t max_address) { struct resource *res; char buf[64]; int error, rid; if (max_address != (u_long)max_address) max_address = ~0ul; w->rman.rm_start = 0; w->rman.rm_end = max_address; w->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s %s window", device_get_nameunit(sc->dev), w->name); w->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&w->rman); if (error) panic("Failed to initialize %s %s rman", device_get_nameunit(sc->dev), w->name); if (!pcib_is_window_open(w)) return; if (w->base > max_address || w->limit > max_address) { device_printf(sc->dev, "initial %s window has too many bits, ignoring\n", w->name); return; } if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE) (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit); else { rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit, w->limit - w->base + 1, flags); if (res != NULL) pcib_add_window_resources(w, &res, 1); } if (w->res == NULL) { device_printf(sc->dev, "failed to allocate initial %s window: %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); w->base = max_address; w->limit = 0; pcib_write_windows(sc, w->mask); return; } pcib_activate_window(sc, type); } /* * Initialize I/O windows. */ static void pcib_probe_windows(struct pcib_softc *sc) { pci_addr_t max; device_t dev; uint32_t val; dev = sc->dev; if (pci_clear_pcib) { pcib_bridge_init(dev); } /* Determine if the I/O port window is implemented. */ val = pci_read_config(dev, PCIR_IOBASEL_1, 1); if (val == 0) { /* * If 'val' is zero, then only 16-bits of I/O space * are supported. */ pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) { sc->io.valid = 1; pci_write_config(dev, PCIR_IOBASEL_1, 0, 1); } } else sc->io.valid = 1; /* Read the existing I/O port window. */ if (sc->io.valid) { sc->io.reg = PCIR_IOBASEL_1; sc->io.step = 12; sc->io.mask = WIN_IO; sc->io.name = "I/O port"; if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) { sc->io.base = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), val); sc->io.limit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffffffff; } else { sc->io.base = PCI_PPBIOBASE(0, val); sc->io.limit = PCI_PPBIOLIMIT(0, pci_read_config(dev, PCIR_IOLIMITL_1, 1)); max = 0xffff; } pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max); } /* Read the existing memory window. */ sc->mem.valid = 1; sc->mem.reg = PCIR_MEMBASE_1; sc->mem.step = 20; sc->mem.mask = WIN_MEM; sc->mem.name = "memory"; sc->mem.base = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->mem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff); /* Determine if the prefetchable memory window is implemented. */ val = pci_read_config(dev, PCIR_PMBASEL_1, 2); if (val == 0) { /* * If 'val' is zero, then only 32-bits of memory space * are supported. */ pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) { sc->pmem.valid = 1; pci_write_config(dev, PCIR_PMBASEL_1, 0, 2); } } else sc->pmem.valid = 1; /* Read the existing prefetchable memory window. 
*/ if (sc->pmem.valid) { sc->pmem.reg = PCIR_PMBASEL_1; sc->pmem.step = 20; sc->pmem.mask = WIN_PMEM; sc->pmem.name = "prefetch"; if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) { sc->pmem.base = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), val); sc->pmem.limit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffffffffffff; } else { sc->pmem.base = PCI_PPBMEMBASE(0, val); sc->pmem.limit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_PMLIMITL_1, 2)); max = 0xffffffff; } pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY, RF_PREFETCHABLE, max); } } #ifdef PCI_RES_BUS /* * Allocate a suitable secondary bus for this bridge if needed and * initialize the resource manager for the secondary bus range. Note * that the minimum count is a desired value and this may allocate a * smaller range. */ void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count) { char buf[64]; int error, rid, sec_reg; switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; bus->sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; bus->sub_reg = PCIR_SUBBUS_2; break; default: panic("not a PCI bridge"); } bus->sec = pci_read_config(dev, sec_reg, 1); bus->sub = pci_read_config(dev, bus->sub_reg, 1); bus->dev = dev; bus->rman.rm_start = 0; bus->rman.rm_end = PCI_BUSMAX; bus->rman.rm_type = RMAN_ARRAY; snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev)); bus->rman.rm_descr = strdup(buf, M_DEVBUF); error = rman_init(&bus->rman); if (error) panic("Failed to initialize %s bus number rman", device_get_nameunit(dev)); /* * Allocate a bus range. This will return an existing bus range * if one exists, or a new bus range if one does not. */ rid = 0; bus->res = bus_alloc_resource(dev, PCI_RES_BUS, &rid, 0ul, ~0ul, min_count, 0); if (bus->res == NULL) { /* * Fall back to just allocating a range of a single bus * number. */ bus->res = bus_alloc_resource(dev, PCI_RES_BUS, &rid, 0ul, ~0ul, 1, 0); } else if (rman_get_size(bus->res) < min_count) /* * Attempt to grow the existing range to satisfy the * minimum desired count. */ (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), rman_get_start(bus->res) + min_count - 1); /* * Add the initial resource to the rman. */ if (bus->res != NULL) { error = rman_manage_region(&bus->rman, rman_get_start(bus->res), rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sec = rman_get_start(bus->res); bus->sub = rman_get_end(bus->res); } } static struct resource * pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct resource *res; res = rman_reserve_resource(&bus->rman, start, end, count, flags, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(bus->dev, "allocated bus range (%lu-%lu) for rid %d of %s\n", rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); return (res); } /* * Attempt to grow the secondary bus range. This is much simpler than * for I/O windows as the range can only be grown by increasing * subbus. 
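 *
 * Illustrative aside, not part of the original source: the bridge
 * forwards configuration cycles for bus numbers in [secbus, subbus],
 * so a larger range is obtained purely by raising the subordinate bus
 * register.  A hypothetical helper computing the new subordinate bus
 * needed for a request of 'want' consecutive bus numbers:
 */

static int
subbus_for_request(unsigned int sec, unsigned int sub, unsigned int want,
    unsigned int *new_sub)
{
	if (want <= sub - sec + 1)
		return (0);		/* current range already suffices */
	if (sec + want - 1 > 0xff)
		return (-1);		/* bus numbers are only 8 bits wide */
	*new_sub = sec + want - 1;	/* caller reprograms the subbus reg */
	return (1);
}

/* End of aside.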
*/ static int pcib_grow_subbus(struct pcib_secbus *bus, u_long new_end) { u_long old_end; int error; old_end = rman_get_end(bus->res); KASSERT(new_end > old_end, ("attempt to shrink subbus")); error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res, rman_get_start(bus->res), new_end); if (error) return (error); if (bootverbose) device_printf(bus->dev, "grew bus range to %lu-%lu\n", rman_get_start(bus->res), rman_get_end(bus->res)); error = rman_manage_region(&bus->rman, old_end + 1, rman_get_end(bus->res)); if (error) panic("Failed to add resource to rman"); bus->sub = rman_get_end(bus->res); pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1); return (0); } struct resource * pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct resource *res; u_long start_free, end_free, new_end; /* * First, see if the request can be satisified by the existing * bus range. */ res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags); if (res != NULL) return (res); /* * Figure out a range to grow the bus range. First, find the * first bus number after the last allocated bus in the rman and * enforce that as a minimum starting point for the range. */ if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 || end_free != bus->sub) start_free = bus->sub + 1; if (start_free < start) start_free = start; new_end = start_free + count - 1; /* * See if this new range would satisfy the request if it * succeeds. */ if (new_end > end) return (NULL); /* Finally, attempt to grow the existing resource. */ if (bootverbose) { device_printf(bus->dev, "attempting to grow bus range for %lu buses\n", count); printf("\tback candidate range: %lu-%lu\n", start_free, new_end); } if (pcib_grow_subbus(bus, new_end) == 0) return (pcib_suballoc_bus(bus, child, rid, start, end, count, flags)); return (NULL); } #endif #else /* * Is the prefetch window open (eg, can we allocate memory in it?) */ static int pcib_is_prefetch_open(struct pcib_softc *sc) { return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit); } /* * Is the nonprefetch window open (eg, can we allocate memory in it?) */ static int pcib_is_nonprefetch_open(struct pcib_softc *sc) { return (sc->membase > 0 && sc->membase < sc->memlimit); } /* * Is the io window open (eg, can we allocate ports in it?) */ static int pcib_is_io_open(struct pcib_softc *sc) { return (sc->iobase > 0 && sc->iobase < sc->iolimit); } /* * Get current I/O decode. */ static void pcib_get_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iolow; dev = sc->dev; iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iobase = PCI_PPBIOBASE( pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow); else sc->iobase = PCI_PPBIOBASE(0, iolow); iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1); if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32) sc->iolimit = PCI_PPBIOLIMIT( pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow); else sc->iolimit = PCI_PPBIOLIMIT(0, iolow); } /* * Get current memory decode. 
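 *
 * Illustrative aside, not part of the original source: the bridge
 * memory window decoded below has 1 MB granularity; bits 15:4 of each
 * 16-bit base/limit register carry address bits 31:20.  A hypothetical
 * helper with the equivalent arithmetic:
 */

static void
decode_bridge_mem_window(unsigned int base_reg, unsigned int limit_reg,
    unsigned long long *base, unsigned long long *limit)
{
	*base = (unsigned long long)(base_reg & 0xfff0) << 16;
	*limit = ((unsigned long long)(limit_reg & 0xfff0) << 16) | 0xfffff;
}

/*
 * For example, base_reg 0xc000 and limit_reg 0xdff0 describe the window
 * 0xc0000000-0xdfffffff.  End of aside.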
*/ static void pcib_get_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemlow; dev = sc->dev; sc->membase = PCI_PPBMEMBASE(0, pci_read_config(dev, PCIR_MEMBASE_1, 2)); sc->memlimit = PCI_PPBMEMLIMIT(0, pci_read_config(dev, PCIR_MEMLIMIT_1, 2)); pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmembase = PCI_PPBMEMBASE( pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow); else sc->pmembase = PCI_PPBMEMBASE(0, pmemlow); pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2); if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64) sc->pmemlimit = PCI_PPBMEMLIMIT( pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow); else sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow); } /* * Restore previous I/O decode. */ static void pcib_set_io_decode(struct pcib_softc *sc) { device_t dev; uint32_t iohi; dev = sc->dev; iohi = sc->iobase >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2); pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1); iohi = sc->iolimit >> 16; if (iohi > 0) pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2); pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1); } /* * Restore previous memory decode. */ static void pcib_set_mem_decode(struct pcib_softc *sc) { device_t dev; pci_addr_t pmemhi; dev = sc->dev; pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2); pmemhi = sc->pmembase >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2); pmemhi = sc->pmemlimit >> 32; if (pmemhi > 0) pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4); pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2); } #endif /* * Get current bridge configuration. */ static void pcib_cfg_save(struct pcib_softc *sc) { #ifndef NEW_PCIB device_t dev; uint16_t command; dev = sc->dev; command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_get_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_get_mem_decode(sc); #endif } /* * Restore previous bridge configuration. */ static void pcib_cfg_restore(struct pcib_softc *sc) { device_t dev; #ifndef NEW_PCIB uint16_t command; #endif dev = sc->dev; #ifdef NEW_PCIB pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); #else command = pci_read_config(dev, PCIR_COMMAND, 2); if (command & PCIM_CMD_PORTEN) pcib_set_io_decode(sc); if (command & PCIM_CMD_MEMEN) pcib_set_mem_decode(sc); #endif } /* * Generic device interface */ static int pcib_probe(device_t dev) { if ((pci_get_class(dev) == PCIC_BRIDGE) && (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { device_set_desc(dev, "PCI-PCI bridge"); return(-10000); } return(ENXIO); } void pcib_attach_common(device_t dev) { struct pcib_softc *sc; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int comma; sc = device_get_softc(dev); sc->dev = dev; /* * Get current bridge configuration. */ sc->domain = pci_get_domain(dev); #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); #endif sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); pcib_cfg_save(sc); /* * The primary bus register should always be the bus of the * parent. 
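 *
 * Illustrative aside, not part of the original source: for the
 * prefetchable window saved and restored above, a bridge that reports
 * 64-bit support (PCIM_BRPM_64) keeps address bits 63:32 in the
 * separate high dword registers.  A hypothetical helper composing the
 * full base address:
 */

static unsigned long long
compose_prefetch_base(unsigned int pmbasel, unsigned int pmbaseh,
    int is_64bit)
{
	/* Bits 15:4 of the low word carry address bits 31:20. */
	unsigned long long base = (unsigned long long)(pmbasel & 0xfff0) << 16;

	if (is_64bit)
		base |= (unsigned long long)pmbaseh << 32;
	return (base);
}

/*
 * For example, pmbaseh 0x1 with pmbasel 0x8001 yields a base of
 * 0x180000000.  End of aside.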
*/ sc->pribus = pci_get_bus(dev); pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); /* * Setup sysctl reporting nodes */ sctx = device_get_sysctl_ctx(dev); soid = device_get_sysctl_tree(dev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); /* * Quirk handling. */ switch (pci_get_devid(dev)) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) case 0x12258086: /* Intel 82454KX/GX (Orion) */ { uint8_t supbus; supbus = pci_read_config(dev, 0x41, 1); if (supbus != 0xff) { sc->bus.sec = supbus + 1; sc->bus.sub = supbus + 1; } break; } #endif /* * The i82380FB mobile docking controller is a PCI-PCI bridge, * and it is a subtractive bridge. However, the ProgIf is wrong * so the normal setting of PCIB_SUBTRACTIVE bit doesn't * happen. There are also Toshiba and Cavium ThunderX bridges * that behave this way. */ case 0xa002177d: /* Cavium ThunderX */ case 0x124b8086: /* Intel 82380FB Mobile */ case 0x060513d7: /* Toshiba ???? */ sc->flags |= PCIB_SUBTRACTIVE; break; #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* Compaq R3000 BIOS sets wrong subordinate bus number. */ case 0x00dd10de: { char *cp; if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sc->bus.sub < 0xa) { pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); } break; } #endif } if (pci_msi_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSI; if (pci_msix_device_blacklisted(dev)) sc->flags |= PCIB_DISABLE_MSIX; /* * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. * This means they act as if they were subtractively decoding * bridges and pass all transactions. Mark them and real ProgIf 1 * parts as subtractive. 
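 *
 * Illustrative aside, not part of the original source: device
 * identifiers in this file pack the 16-bit device ID in the upper half
 * and the vendor ID in the lower half (e.g. 0x12258086 above), so the
 * 0xff00ffff mask in the test just below keeps the vendor and the top
 * byte of the device ID.  A hypothetical stand-alone version of that
 * check:
 */

static int
matches_intel_24xx(unsigned int devid)
{
	/* devid = (device << 16) | vendor; any Intel 0x24xx ID matches. */
	return ((devid & 0xff00ffff) == 0x24008086);
}

/* End of aside.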
*/ if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) sc->flags |= PCIB_SUBTRACTIVE; #ifdef NEW_PCIB #ifdef PCI_RES_BUS pcib_setup_secbus(dev, &sc->bus, 1); #endif pcib_probe_windows(sc); #endif if (bootverbose) { device_printf(dev, " domain %d\n", sc->domain); device_printf(dev, " secondary bus %d\n", sc->bus.sec); device_printf(dev, " subordinate bus %d\n", sc->bus.sub); #ifdef NEW_PCIB if (pcib_is_window_open(&sc->io)) device_printf(dev, " I/O decode 0x%jx-0x%jx\n", (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); if (pcib_is_window_open(&sc->mem)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); if (pcib_is_window_open(&sc->pmem)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); #else if (pcib_is_io_open(sc)) device_printf(dev, " I/O decode 0x%x-0x%x\n", sc->iobase, sc->iolimit); if (pcib_is_nonprefetch_open(sc)) device_printf(dev, " memory decode 0x%jx-0x%jx\n", (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); if (pcib_is_prefetch_open(sc)) device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); #endif if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || sc->flags & PCIB_SUBTRACTIVE) { device_printf(dev, " special decode "); comma = 0; if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { printf("ISA"); comma = 1; } if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { printf("%sVGA", comma ? ", " : ""); comma = 1; } if (sc->flags & PCIB_SUBTRACTIVE) printf("%ssubtractive", comma ? ", " : ""); printf("\n"); } } /* * Always enable busmastering on bridges so that transactions * initiated on the secondary bus are passed through to the * primary bus. */ pci_enable_busmaster(dev); } int pcib_attach(device_t dev) { struct pcib_softc *sc; device_t child; pcib_attach_common(dev); sc = device_get_softc(dev); if (sc->bus.sec != 0) { - child = device_add_child(dev, "pci", sc->bus.sec); + child = device_add_child(dev, "pci", -1); if (child != NULL) return(bus_generic_attach(dev)); } /* no secondary bus; we should have fixed this */ return(0); } int pcib_suspend(device_t dev) { pcib_cfg_save(device_get_softc(dev)); return (bus_generic_suspend(dev)); } int pcib_resume(device_t dev) { pcib_cfg_restore(device_get_softc(dev)); return (bus_generic_resume(dev)); } void pcib_bridge_init(device_t dev) { pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); } int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pcib_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->domain; return(0); case PCIB_IVAR_BUS: *result = sc->bus.sec; return(0); } return(ENOENT); } int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return(EINVAL); case PCIB_IVAR_BUS: return(EINVAL); } return(ENOENT); } #ifdef NEW_PCIB /* * Attempt to allocate a resource from the existing resources assigned * to a window. 
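 *
 * Illustrative aside, not part of the original source:
 * pcib_bridge_init() above parks every window closed by programming a
 * base above its limit, so nothing is decoded until a window is
 * explicitly opened.  A hypothetical helper expressing that rule:
 */

static int
window_forwards(unsigned long long base, unsigned long long limit,
    unsigned long long addr)
{
	/* A window whose base exceeds its limit decodes nothing. */
	return (base <= limit && addr >= base && addr <= limit);
}

/* End of aside.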
*/ static struct resource * pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct resource *res; if (!pcib_is_window_open(w)) return (NULL); res = rman_reserve_resource(&w->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); if (bootverbose) device_printf(sc->dev, "allocated %s range (%#lx-%#lx) for rid %x of %s\n", w->name, rman_get_start(res), rman_get_end(res), *rid, pcib_child_name(child)); rman_set_rid(res, *rid); /* * If the resource should be active, pass that request up the * tree. This assumes the parent drivers can handle * activating sub-allocated resources. */ if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } /* Allocate a fresh resource range for an unconfigured window. */ static int pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, u_long start, u_long end, u_long count, u_int flags) { struct resource *res; u_long base, limit, wmask; int rid; /* * If this is an I/O window on a bridge with ISA enable set * and the start address is below 64k, then try to allocate an * initial window of 0x1000 bytes long starting at address * 0xf000 and walking down. Note that if the original request * was larger than the non-aliased range size of 0x100 our * caller would have raised the start address up to 64k * already. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && start < 65536) { for (base = 0xf000; (long)base >= 0; base -= 0x1000) { limit = base + 0xfff; /* * Skip ranges that wouldn't work for the * original request. Note that the actual * window that overlaps are the non-alias * ranges within [base, limit], so this isn't * quite a simple comparison. */ if (start + count > limit - 0x400) continue; if (base == 0) { /* * The first open region for the window at * 0 is 0x400-0x4ff. */ if (end - count + 1 < 0x400) continue; } else { if (end - count + 1 < base) continue; } if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { w->base = base; w->limit = limit; return (0); } } return (ENOSPC); } wmask = (1ul << w->step) - 1; if (RF_ALIGNMENT(flags) < w->step) { flags &= ~RF_ALIGNMENT_MASK; flags |= RF_ALIGNMENT_LOG2(w->step); } start &= ~wmask; end |= wmask; count = roundup2(count, 1ul << w->step); rid = w->reg; res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) return (ENOSPC); pcib_add_window_resources(w, &res, 1); pcib_activate_window(sc, type); w->base = rman_get_start(res); w->limit = rman_get_end(res); return (0); } /* Try to expand an existing window to the requested base and limit. */ static int pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, u_long base, u_long limit) { struct resource *res; int error, i, force_64k_base; KASSERT(base <= w->base && limit >= w->limit, ("attempting to shrink window")); /* * XXX: pcib_grow_window() doesn't try to do this anyway and * the error handling for all the edge cases would be tedious. */ KASSERT(limit == w->limit || base == w->base, ("attempting to grow both ends of a window")); /* * Yet more special handling for requests to expand an I/O * window behind an ISA-enabled bridge. 
Since I/O windows * have to grow in 0x1000 increments and the end of the 0xffff * range is an alias, growing a window below 64k will always * result in allocating new resources and never adjusting an * existing resource. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && (limit <= 65535 || (base <= 65535 && base != w->base))) { KASSERT(limit == w->limit || limit <= 65535, ("attempting to grow both ends across 64k ISA alias")); if (base != w->base) error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1); else error = pcib_alloc_nonisa_ranges(sc, w->limit + 1, limit); if (error == 0) { w->base = base; w->limit = limit; } return (error); } /* * Find the existing resource to adjust. Usually there is only one, * but for an ISA-enabled bridge we might be growing the I/O window * above 64k and need to find the existing resource that maps all * of the area above 64k. */ for (i = 0; i < w->count; i++) { if (rman_get_end(w->res[i]) == w->limit) break; } KASSERT(i != w->count, ("did not find existing resource")); res = w->res[i]; /* * Usually the resource we found should match the window's * existing range. The one exception is the ISA-enabled case * mentioned above in which case the resource should start at * 64k. */ if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && w->base <= 65535) { KASSERT(rman_get_start(res) == 65536, ("existing resource mismatch")); force_64k_base = 1; } else { KASSERT(w->base == rman_get_start(res), ("existing resource mismatch")); force_64k_base = 0; } error = bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : base, limit); if (error) return (error); /* Add the newly allocated region to the resource manager. */ if (w->base != base) { error = rman_manage_region(&w->rman, base, w->base - 1); w->base = base; } else { error = rman_manage_region(&w->rman, w->limit + 1, limit); w->limit = limit; } if (error) { if (bootverbose) device_printf(sc->dev, "failed to expand %s resource manager\n", w->name); (void)bus_adjust_resource(sc->dev, type, res, force_64k_base ? rman_get_start(res) : w->base, w->limit); } return (error); } /* * Attempt to grow a window to make room for a given resource request. */ static int pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type, u_long start, u_long end, u_long count, u_int flags) { u_long align, start_free, end_free, front, back, wmask; int error; /* * Clamp the desired resource range to the maximum address * this window supports. Reject impossible requests. * * For I/O port requests behind a bridge with the ISA enable * bit set, force large allocations to start above 64k. */ if (!w->valid) return (EINVAL); if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 && start < 65536) start = 65536; if (end > w->rman.rm_end) end = w->rman.rm_end; if (start + count - 1 > end || start + count < start) return (EINVAL); wmask = (1ul << w->step) - 1; /* * If there is no resource at all, just try to allocate enough * aligned space for this resource. */ if (w->res == NULL) { error = pcib_alloc_new_window(sc, w, type, start, end, count, flags); if (error) { if (bootverbose) device_printf(sc->dev, "failed to allocate initial %s window (%#lx-%#lx,%#lx)\n", w->name, start, end, count); return (error); } if (bootverbose) device_printf(sc->dev, "allocated initial %s window of %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); goto updatewin; } /* * See if growing the window would help. 
Compute the minimum * amount of address space needed on both the front and back * ends of the existing window to satisfy the allocation. * * For each end, build a candidate region adjusting for the * required alignment, etc. If there is a free region at the * edge of the window, grow from the inner edge of the free * region. Otherwise grow from the window boundary. * * Growing an I/O window below 64k for a bridge with the ISA * enable bit doesn't require any special magic as the step * size of an I/O window (1k) always includes multiple * non-alias ranges when it is grown in either direction. * * XXX: Special case: if w->res is completely empty and the * request size is larger than w->res, we should find the * optimal aligned buffer containing w->res and allocate that. */ if (bootverbose) device_printf(sc->dev, "attempting to grow %s window for (%#lx-%#lx,%#lx)\n", w->name, start, end, count); align = 1ul << RF_ALIGNMENT(flags); if (start < w->base) { if (rman_first_free_region(&w->rman, &start_free, &end_free) != 0 || start_free != w->base) end_free = w->base; if (end_free > end) end_free = end + 1; /* Move end_free down until it is properly aligned. */ end_free &= ~(align - 1); end_free--; front = end_free - (count - 1); /* * The resource would now be allocated at (front, * end_free). Ensure that fits in the (start, end) * bounds. end_free is checked above. If 'front' is * ok, ensure it is properly aligned for this window. * Also check for underflow. */ if (front >= start && front <= end_free) { if (bootverbose) printf("\tfront candidate range: %#lx-%#lx\n", front, end_free); front &= ~wmask; front = w->base - front; } else front = 0; } else front = 0; if (end > w->limit) { if (rman_last_free_region(&w->rman, &start_free, &end_free) != 0 || end_free != w->limit) start_free = w->limit + 1; if (start_free < start) start_free = start; /* Move start_free up until it is properly aligned. */ start_free = roundup2(start_free, align); back = start_free + count - 1; /* * The resource would now be allocated at (start_free, * back). Ensure that fits in the (start, end) * bounds. start_free is checked above. If 'back' is * ok, ensure it is properly aligned for this window. * Also check for overflow. */ if (back <= end && start_free <= back) { if (bootverbose) printf("\tback candidate range: %#lx-%#lx\n", start_free, back); back |= wmask; back -= w->limit; } else back = 0; } else back = 0; /* * Try to allocate the smallest needed region first. * If that fails, fall back to the other region. */ error = ENOSPC; while (front != 0 || back != 0) { if (front != 0 && (front <= back || back == 0)) { error = pcib_expand_window(sc, w, type, w->base - front, w->limit); if (error == 0) break; front = 0; } else { error = pcib_expand_window(sc, w, type, w->base, w->limit + back); if (error == 0) break; back = 0; } } if (error) return (error); if (bootverbose) device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", w->name, (uintmax_t)w->base, (uintmax_t)w->limit); updatewin: /* Write the new window. */ KASSERT((w->base & wmask) == 0, ("start address is not aligned")); KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); pcib_write_windows(sc, w->mask); return (0); } /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. 
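 *
 * Illustrative aside, not part of the original source: the "back"
 * candidate computed in pcib_grow_window() above rounds the first free
 * address up to the request's alignment, adds the request size, and
 * then rounds the new limit up to the window's step granularity.  A
 * hypothetical helper returning how far the limit must move:
 */

static unsigned long
grow_back_amount(unsigned long limit, unsigned long first_free,
    unsigned long count, unsigned long align, unsigned int step)
{
	unsigned long wmask = (1ul << step) - 1;
	unsigned long start, back;

	/* roundup2(); 'align' must be a power of two. */
	start = (first_free + align - 1) & ~(align - 1);
	back = start + count - 1;
	back |= wmask;		/* window limits end on a step boundary - 1 */
	return (back - limit);
}

/*
 * For example, a 0x2000-byte request with 0x2000 alignment behind a
 * memory window (step 20) ending at 0x5fffff, with the first free
 * address at 0x600000, needs the limit to grow by 0x100000 (one 1 MB
 * step).  End of aside.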
*/ struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct pcib_softc *sc; struct resource *r; sc = device_get_softc(dev); /* * VGA resources are decoded iff the VGA enable bit is set in * the bridge control register. VGA resources do not fall into * the resource windows and are passed up to the parent. */ if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); else return (NULL); } switch (type) { #ifdef PCI_RES_BUS case PCI_RES_BUS: return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); #endif case SYS_RES_IOPORT: if (pcib_is_isa_range(sc, start, end, count)) return (NULL); r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (pcib_grow_window(sc, &sc->io, type, start, end, count, flags) == 0) r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, end, count, flags); break; case SYS_RES_MEMORY: /* * For prefetchable resources, prefer the prefetchable * memory window, but fall back to the regular memory * window if that fails. Try both windows before * attempting to grow a window in case the firmware * has used a range in the regular memory window to * map a prefetchable BAR. */ if (flags & RF_PREFETCHABLE) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) break; if (flags & RF_PREFETCHABLE) { if (pcib_grow_window(sc, &sc->pmem, type, start, end, count, flags) == 0) { r = pcib_suballoc_resource(sc, &sc->pmem, child, type, rid, start, end, count, flags); if (r != NULL) break; } } if (pcib_grow_window(sc, &sc->mem, type, start, end, count, flags & ~RF_PREFETCHABLE) == 0) r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, start, end, count, flags); break; default: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } /* * If attempts to suballocate from the window fail but this is a * subtractive bridge, pass the request up the tree. */ if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); return (r); } int pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r, u_long start, u_long end) { struct pcib_softc *sc; sc = device_get_softc(bus); if (pcib_is_resource_managed(sc, type, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, type, r, start, end)); } int pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pcib_softc *sc; int error; sc = device_get_softc(dev); if (pcib_is_resource_managed(sc, type, r)) { if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (bus_generic_release_resource(dev, child, type, rid, r)); } #else /* * We have to trap resource allocation requests and ensure that the bridge * is set up to, or capable of handling them. 
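 *
 * Illustrative aside, not part of the original source: both allocators
 * in this file forward legacy VGA ranges only when PCIB_BCR_VGA_ENABLE
 * is set.  A rough, hypothetical approximation of that test (ignoring
 * any aliasing subtleties the real pci_is_vga_ioport_range() may
 * handle):
 */

static int
vga_range_forwarded(unsigned long start, unsigned long end, int is_io,
    int vga_enabled)
{
	if (!vga_enabled)
		return (0);
	if (is_io)
		/* Legacy VGA I/O: 0x3b0-0x3bb and 0x3c0-0x3df. */
		return ((start >= 0x3b0 && end <= 0x3bb) ||
		    (start >= 0x3c0 && end <= 0x3df));
	/* Legacy VGA frame buffer aperture. */
	return (start >= 0xa0000 && end <= 0xbffff);
}

/* End of aside.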
*/ struct resource * pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct pcib_softc *sc = device_get_softc(dev); const char *name, *suffix; int ok; /* * Fail the allocation for this range if it's not supported. */ name = device_get_nameunit(child); if (name == NULL) { name = ""; suffix = ""; } else suffix = " "; switch (type) { case SYS_RES_IOPORT: ok = 0; if (!pcib_is_io_open(sc)) break; ok = (start >= sc->iobase && end <= sc->iolimit); /* * Make sure we allow access to VGA I/O addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_ioport_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { if (start < sc->iobase) start = sc->iobase; if (end > sc->iolimit) end = sc->iolimit; if (start < end) ok = 1; } } else { ok = 1; #if 0 /* * If we overlap with the subtractive range, then * pick the upper range to use. */ if (start < sc->iolimit && end > sc->iobase) start = sc->iolimit + 1; #endif } if (end < start) { device_printf(dev, "ioport: end (%lx) < start (%lx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok) { device_printf(dev, "%s%srequested unsupported I/O " "range 0x%lx-0x%lx (decoding 0x%x-0x%x)\n", name, suffix, start, end, sc->iobase, sc->iolimit); return (NULL); } if (bootverbose) device_printf(dev, "%s%srequested I/O range 0x%lx-0x%lx: in range\n", name, suffix, start, end); break; case SYS_RES_MEMORY: ok = 0; if (pcib_is_nonprefetch_open(sc)) ok = ok || (start >= sc->membase && end <= sc->memlimit); if (pcib_is_prefetch_open(sc)) ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); /* * Make sure we allow access to VGA memory addresses when the * bridge has the "VGA Enable" bit set. */ if (!ok && pci_is_vga_memory_range(start, end)) ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { if (!ok) { ok = 1; if (flags & RF_PREFETCHABLE) { if (pcib_is_prefetch_open(sc)) { if (start < sc->pmembase) start = sc->pmembase; if (end > sc->pmemlimit) end = sc->pmemlimit; } else { ok = 0; } } else { /* non-prefetchable */ if (pcib_is_nonprefetch_open(sc)) { if (start < sc->membase) start = sc->membase; if (end > sc->memlimit) end = sc->memlimit; } else { ok = 0; } } } } else if (!ok) { ok = 1; /* subtractive bridge: always ok */ #if 0 if (pcib_is_nonprefetch_open(sc)) { if (start < sc->memlimit && end > sc->membase) start = sc->memlimit + 1; } if (pcib_is_prefetch_open(sc)) { if (start < sc->pmemlimit && end > sc->pmembase) start = sc->pmemlimit + 1; } #endif } if (end < start) { device_printf(dev, "memory: end (%lx) < start (%lx)\n", end, start); start = 0; end = 0; ok = 0; } if (!ok && bootverbose) device_printf(dev, "%s%srequested unsupported memory range %#lx-%#lx " "(decoding %#jx-%#jx, %#jx-%#jx)\n", name, suffix, start, end, (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); if (!ok) return (NULL); if (bootverbose) device_printf(dev,"%s%srequested memory range " "0x%lx-0x%lx: good\n", name, suffix, start, end); break; default: break; } /* * Bridge is OK decoding this resource, so pass it up. */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif /* * If ARI is enabled on this downstream port, translate the function number * to the non-ARI slot/function. The downstream port will convert it back in * hardware. If ARI is not enabled slot and func are not modified. 
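 *
 * Illustrative aside, not part of the original source: ARI repurposes
 * the 5-bit device (slot) field so that a downstream port exposes a
 * single device with up to 256 functions.  A hypothetical stand-alone
 * version of the translation performed below:
 */

static void
ari_to_legacy(unsigned int ari_func, unsigned int *slot, unsigned int *func)
{
	*slot = (ari_func >> 3) & 0x1f;	/* top five bits of the function */
	*func = ari_func & 0x7;		/* low three bits */
}

/*
 * For example, ARI function 42 is addressed on the wire as slot 5,
 * function 2.  End of aside.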
*/ static __inline void pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) { struct pcib_softc *sc; int ari_func; sc = device_get_softc(pcib); ari_func = *func; if (sc->flags & PCIB_ENABLE_ARI) { KASSERT(*slot == 0, ("Non-zero slot number with ARI enabled!")); *slot = PCIE_ARI_SLOT(ari_func); *func = PCIE_ARI_FUNC(ari_func); } } static void pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) { uint32_t ctl2; ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); ctl2 |= PCIEM_CTL2_ARI; pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); sc->flags |= PCIB_ENABLE_ARI; } /* * PCIB interface. */ int pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int pcib_ari_maxslots(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_SLOTMAX); else return (PCI_SLOTMAX); } static int pcib_ari_maxfuncs(device_t dev) { struct pcib_softc *sc; sc = device_get_softc(dev); if (sc->flags & PCIB_ENABLE_ARI) return (PCIE_ARI_FUNCMAX); else return (PCI_FUNCMAX); } static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, int *func) { struct pcib_softc *sc; sc = device_get_softc(pcib); *bus = PCI_RID2BUS(rid); if (sc->flags & PCIB_ENABLE_ARI) { *slot = PCIE_ARI_RID2SLOT(rid); *func = PCIE_ARI_RID2FUNC(rid); } else { *slot = PCI_RID2SLOT(rid); *func = PCI_RID2FUNC(rid); } } /* * Since we are a child of a PCI bus, its parent must support the pcib interface. */ static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { pcib_xlate_ari(dev, b, &s, &f); return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width)); } static void pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { pcib_xlate_ari(dev, b, &s, &f); PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width); } /* * Route an interrupt across a PCI bridge. */ int pcib_route_interrupt(device_t pcib, device_t dev, int pin) { device_t bus; int parent_intpin; int intnum; /* * * The PCI standard defines a swizzle of the child-side device/intpin to * the parent-side intpin as follows. * * device = device on child bus * child_intpin = intpin on child bus slot (0-3) * parent_intpin = intpin on parent bus slot (0-3) * * parent_intpin = (device + child_intpin) % 4 */ parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; /* * Our parent is a PCI bus. Its parent must export the pcib interface * which includes the ability to route interrupts. */ bus = device_get_parent(pcib); intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { device_printf(pcib, "slot %d INT%c is routed to irq %d\n", pci_get_slot(dev), 'A' + pin - 1, intnum); } return(intnum); } /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ int pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSI) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } /* Pass request to release MSI/MSI-X messages up to the parent bridge. 
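 *
 * Illustrative aside, not part of the original source: the swizzle used
 * by pcib_route_interrupt() above can be exercised on its own.  The
 * hypothetical helper below takes the child's slot and its 1-based
 * interrupt pin (1 = INTA) and returns the 1-based pin seen on the
 * parent bus:
 */

static int
swizzle_intpin(int slot, int pin)
{
	/* parent_intpin = (device + child_intpin) % 4, as documented above. */
	return ((slot + pin - 1) % 4 + 1);
}

/*
 * For example, a device in slot 3 asserting INTB (pin 2) appears as
 * INTA on the parent bus: swizzle_intpin(3, 2) == 1.  End of aside.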
*/ int pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } /* Pass request to alloc an MSI-X message up to the parent bridge. */ int pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { struct pcib_softc *sc = device_get_softc(pcib); device_t bus; if (sc->flags & PCIB_DISABLE_MSIX) return (ENXIO); bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to release an MSI-X message up to the parent bridge. */ int pcib_release_msix(device_t pcib, device_t dev, int irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } /* Pass request to map MSI/MSI-X message up to parent bridge. */ int pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; int error; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); pci_ht_map_msi(pcib, *addr); return (0); } /* Pass request for device power state up to parent bridge. */ int pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) { device_t bus; bus = device_get_parent(pcib); return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); } static int pcib_ari_enabled(device_t pcib) { struct pcib_softc *sc; sc = device_get_softc(pcib); return ((sc->flags & PCIB_ENABLE_ARI) != 0); } static uint16_t pcib_ari_get_rid(device_t pcib, device_t dev) { struct pcib_softc *sc; uint8_t bus, slot, func; sc = device_get_softc(pcib); if (sc->flags & PCIB_ENABLE_ARI) { bus = pci_get_bus(dev); func = pci_get_function(dev); return (PCI_ARI_RID(bus, func)); } else { bus = pci_get_bus(dev); slot = pci_get_slot(dev); func = pci_get_function(dev); return (PCI_RID(bus, slot, func)); } } /* * Check that the downstream port (pcib) and the endpoint device (dev) both * support ARI. If so, enable it and return 0, otherwise return an error. */ static int pcib_try_enable_ari(device_t pcib, device_t dev) { struct pcib_softc *sc; int error; uint32_t cap2; int ari_cap_off; uint32_t ari_ver; uint32_t pcie_pos; sc = device_get_softc(pcib); /* * ARI is controlled in a register in the PCIe capability structure. * If the downstream port does not have the PCIe capability structure * then it does not support ARI. */ error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); if (error != 0) return (ENODEV); /* Check that the PCIe port advertises ARI support. */ cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); if (!(cap2 & PCIEM_CAP2_ARI)) return (ENODEV); /* * Check that the endpoint device advertises ARI support via the ARI * extended capability structure. */ error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); if (error != 0) return (ENODEV); /* * Finally, check that the endpoint device supports the same version * of ARI that we do. */ ari_ver = pci_read_config(dev, ari_cap_off, 4); if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { if (bootverbose) device_printf(pcib, "Unsupported version of ARI (%d) detected\n", PCI_EXTCAP_VER(ari_ver)); return (ENXIO); } pcib_enable_ari(sc, pcie_pos); return (0); } Index: head/sys/dev/xen/pcifront/pcifront.c =================================================================== --- head/sys/dev/xen/pcifront/pcifront.c (revision 287881) +++ head/sys/dev/xen/pcifront/pcifront.c (revision 287882) @@ -1,688 +1,688 @@ /* * Copyright (c) 2006, Cisco Systems, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Cisco Systems, Inc. nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #ifdef XEN_PCIDEV_FE_DEBUG #define DPRINTF(fmt, args...) \ printf("pcifront (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) ((void)0) #endif #define WPRINTF(fmt, args...) 
\ printf("pcifront (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #define INVALID_GRANT_REF (0) #define INVALID_EVTCHN (-1) #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) struct pcifront_device { STAILQ_ENTRY(pcifront_device) next; struct xenbus_device *xdev; int unit; int evtchn; int gnt_ref; /* Lock this when doing any operations in sh_info */ struct mtx sh_info_lock; struct xen_pci_sharedinfo *sh_info; device_t ndev; int ref_cnt; }; static STAILQ_HEAD(pcifront_dlist, pcifront_device) pdev_list = STAILQ_HEAD_INITIALIZER(pdev_list); struct xpcib_softc { int domain; int bus; struct pcifront_device *pdev; }; /* Allocate a PCI device structure */ static struct pcifront_device * alloc_pdev(struct xenbus_device *xdev) { struct pcifront_device *pdev = NULL; int err, unit; err = sscanf(xdev->nodename, "device/pci/%d", &unit); if (err != 1) { if (err == 0) err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Error scanning pci device instance number"); goto out; } pdev = (struct pcifront_device *)malloc(sizeof(struct pcifront_device), M_DEVBUF, M_NOWAIT); if (pdev == NULL) { err = -ENOMEM; xenbus_dev_fatal(xdev, err, "Error allocating pcifront_device struct"); goto out; } pdev->unit = unit; pdev->xdev = xdev; pdev->ref_cnt = 1; pdev->sh_info = (struct xen_pci_sharedinfo *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT); if (pdev->sh_info == NULL) { free(pdev, M_DEVBUF); pdev = NULL; err = -ENOMEM; xenbus_dev_fatal(xdev, err, "Error allocating sh_info struct"); goto out; } pdev->sh_info->flags = 0; xdev->data = pdev; mtx_init(&pdev->sh_info_lock, "info_lock", "pci shared dev info lock", MTX_DEF); pdev->evtchn = INVALID_EVTCHN; pdev->gnt_ref = INVALID_GRANT_REF; STAILQ_INSERT_TAIL(&pdev_list, pdev, next); DPRINTF("Allocated pdev @ 0x%p (unit=%d)\n", pdev, unit); out: return pdev; } /* Hold a reference to a pcifront device */ static void get_pdev(struct pcifront_device *pdev) { pdev->ref_cnt++; } /* Release a reference to a pcifront device */ static void put_pdev(struct pcifront_device *pdev) { if (--pdev->ref_cnt > 0) return; DPRINTF("freeing pdev @ 0x%p (ref_cnt=%d)\n", pdev, pdev->ref_cnt); if (pdev->evtchn != INVALID_EVTCHN) xenbus_free_evtchn(pdev->xdev, pdev->evtchn); if (pdev->gnt_ref != INVALID_GRANT_REF) gnttab_end_foreign_access(pdev->gnt_ref, 0, (void *)pdev->sh_info); pdev->xdev->data = NULL; free(pdev, M_DEVBUF); } /* Write to the xenbus info needed by backend */ static int pcifront_publish_info(struct pcifront_device *pdev) { int err = 0; struct xenbus_transaction *trans; err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info)); if (err < 0) { WPRINTF("error granting access to ring page\n"); goto out; } pdev->gnt_ref = err; err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn); if (err) goto out; do_publish: trans = xenbus_transaction_start(); if (IS_ERR(trans)) { xenbus_dev_fatal(pdev->xdev, err, "Error writing configuration for backend " "(start transaction)"); goto out; } err = xenbus_printf(trans, pdev->xdev->nodename, "pci-op-ref", "%u", pdev->gnt_ref); if (!err) err = xenbus_printf(trans, pdev->xdev->nodename, "event-channel", "%u", pdev->evtchn); if (!err) err = xenbus_printf(trans, pdev->xdev->nodename, "magic", XEN_PCI_MAGIC); if (!err) err = xenbus_switch_state(pdev->xdev, trans, XenbusStateInitialised); if (err) { xenbus_transaction_end(trans, 1); xenbus_dev_fatal(pdev->xdev, err, "Error writing configuration for backend"); goto out; } else { err = xenbus_transaction_end(trans, 0); if (err == -EAGAIN) goto do_publish; else if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error 
completing transaction for backend"); goto out; } } out: return err; } /* The backend is now connected so complete the connection process on our side */ static int pcifront_connect(struct pcifront_device *pdev) { device_t nexus; devclass_t nexus_devclass; /* We will add our device as a child of the nexus0 device */ if (!(nexus_devclass = devclass_find("nexus")) || !(nexus = devclass_get_device(nexus_devclass, 0))) { WPRINTF("could not find nexus0!\n"); return -1; } /* Create a newbus device representing this frontend instance */ pdev->ndev = BUS_ADD_CHILD(nexus, 0, "xpcife", pdev->unit); if (!pdev->ndev) { WPRINTF("could not create xpcife%d!\n", pdev->unit); return -EFAULT; } get_pdev(pdev); device_set_ivars(pdev->ndev, pdev); /* Good to go connected now */ xenbus_switch_state(pdev->xdev, NULL, XenbusStateConnected); printf("pcifront: connected to %s\n", pdev->xdev->nodename); mtx_lock(&Giant); device_probe_and_attach(pdev->ndev); mtx_unlock(&Giant); return 0; } /* The backend is closing so process a disconnect */ static int pcifront_disconnect(struct pcifront_device *pdev) { int err = 0; XenbusState prev_state; prev_state = xenbus_read_driver_state(pdev->xdev->nodename); if (prev_state < XenbusStateClosing) { err = xenbus_switch_state(pdev->xdev, NULL, XenbusStateClosing); if (!err && prev_state == XenbusStateConnected) { /* TODO - need to detach the newbus devices */ } } return err; } /* Process a probe from the xenbus */ static int pcifront_probe(struct xenbus_device *xdev, const struct xenbus_device_id *id) { int err = 0; struct pcifront_device *pdev; DPRINTF("xenbus probing\n"); if ((pdev = alloc_pdev(xdev)) == NULL) goto out; err = pcifront_publish_info(pdev); out: if (err) put_pdev(pdev); return err; } /* Remove the xenbus PCI device */ static int pcifront_remove(struct xenbus_device *xdev) { DPRINTF("removing xenbus device node (%s)\n", xdev->nodename); if (xdev->data) put_pdev(xdev->data); return 0; } /* Called by xenbus when our backend node changes state */ static void pcifront_backend_changed(struct xenbus_device *xdev, XenbusState be_state) { struct pcifront_device *pdev = xdev->data; switch (be_state) { case XenbusStateClosing: DPRINTF("backend closing (%s)\n", xdev->nodename); pcifront_disconnect(pdev); break; case XenbusStateClosed: DPRINTF("backend closed (%s)\n", xdev->nodename); pcifront_disconnect(pdev); break; case XenbusStateConnected: DPRINTF("backend connected (%s)\n", xdev->nodename); pcifront_connect(pdev); break; default: break; } } /* Process PCI operation */ static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op) { int err = 0; struct xen_pci_op *active_op = &pdev->sh_info->op; evtchn_port_t port = pdev->evtchn; time_t timeout; mtx_lock(&pdev->sh_info_lock); memcpy(active_op, op, sizeof(struct xen_pci_op)); /* Go */ wmb(); set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); notify_remote_via_evtchn(port); timeout = time_uptime + 2; clear_evtchn(port); /* Spin while waiting for the answer */ while (test_bit (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) { int err = HYPERVISOR_poll(&port, 1, 3 * hz); if (err) panic("Failed HYPERVISOR_poll: err=%d", err); clear_evtchn(port); if (time_uptime > timeout) { WPRINTF("pciback not responding!!!\n"); clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); err = XEN_PCI_ERR_dev_not_found; goto out; } } memcpy(op, active_op, sizeof(struct xen_pci_op)); err = op->err; out: mtx_unlock(&pdev->sh_info_lock); return err; } /* ** XenBus Driver registration ** */ static 
struct xenbus_device_id pcifront_ids[] = { { "pci" }, { "" } }; static struct xenbus_driver pcifront = { .name = "pcifront", .ids = pcifront_ids, .probe = pcifront_probe, .remove = pcifront_remove, .otherend_changed = pcifront_backend_changed, }; /* Register the driver with xenbus during sys init */ static void pcifront_init(void *unused) { if ((xen_start_info->flags & SIF_INITDOMAIN)) return; DPRINTF("xenbus registering\n"); xenbus_register_frontend(&pcifront); } SYSINIT(pciif, SI_SUB_PSEUDO, SI_ORDER_ANY, pcifront_init, NULL) /* Newbus xpcife device driver probe */ static int xpcife_probe(device_t dev) { #ifdef XEN_PCIDEV_FE_DEBUG struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(dev); DPRINTF("xpcife probe (unit=%d)\n", pdev->unit); #endif return (BUS_PROBE_NOWILDCARD); } /* Newbus xpcife device driver attach */ static int xpcife_attach(device_t dev) { struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(dev); int i, num_roots, len, err; char str[64]; unsigned int domain, bus; DPRINTF("xpcife attach (unit=%d)\n", pdev->unit); err = xenbus_scanf(NULL, pdev->xdev->otherend, "root_num", "%d", &num_roots); if (err != 1) { if (err == 0) err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Error reading number of PCI roots"); goto out; } /* Add a pcib device for each root */ for (i = 0; i < num_roots; i++) { device_t child; len = snprintf(str, sizeof(str), "root-%d", i); if (unlikely(len >= (sizeof(str) - 1))) { err = -ENOMEM; goto out; } err = xenbus_scanf(NULL, pdev->xdev->otherend, str, "%x:%x", &domain, &bus); if (err != 2) { if (err >= 0) err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Error reading PCI root %d", i); goto out; } err = 0; if (domain != pdev->xdev->otherend_id) { err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Domain mismatch %d != %d", domain, pdev->xdev->otherend_id); goto out; } child = device_add_child(dev, "pcib", bus); if (!child) { err = -ENOMEM; xenbus_dev_fatal(pdev->xdev, err, "Unable to create pcib%d", bus); goto out; } } out: return bus_generic_attach(dev); } static devclass_t xpcife_devclass; static device_method_t xpcife_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xpcife_probe), DEVMETHOD(device_attach, xpcife_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t xpcife_driver = { "xpcife", xpcife_methods, 0, }; DRIVER_MODULE(xpcife, nexus, xpcife_driver, xpcife_devclass, 0, 0); /* Newbus xen pcib device driver probe */ static int xpcib_probe(device_t dev) { struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev); struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(device_get_parent(dev)); DPRINTF("xpcib probe (bus=%d)\n", device_get_unit(dev)); sc->domain = pdev->xdev->otherend_id; sc->bus = device_get_unit(dev); sc->pdev = pdev; return 0; } /* Newbus xen pcib device driver attach */ static int xpcib_attach(device_t dev) { struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev); DPRINTF("xpcib 
attach (bus=%d)\n", sc->bus); - device_add_child(dev, "pci", sc->bus); + device_add_child(dev, "pci", -1); return bus_generic_attach(dev); } static int xpcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->bus; return 0; } return ENOENT; } /* Return the number of slots supported */ static int xpcib_maxslots(device_t dev) { return 31; } #define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) /* Read configuration space register */ static u_int32_t xpcib_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev); struct xen_pci_op op = { .cmd = XEN_PCI_OP_conf_read, .domain = sc->domain, .bus = sc->bus, .devfn = PCI_DEVFN(slot, func), .offset = reg, .size = bytes, }; int err; err = do_pci_op(sc->pdev, &op); DPRINTF("read config (b=%d, s=%d, f=%d, reg=%d, len=%d, val=%x, err=%d)\n", bus, slot, func, reg, bytes, op.value, err); if (err) op.value = ~0; return op.value; } /* Write configuration space register */ static void xpcib_write_config(device_t dev, int bus, int slot, int func, int reg, u_int32_t data, int bytes) { struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev); struct xen_pci_op op = { .cmd = XEN_PCI_OP_conf_write, .domain = sc->domain, .bus = sc->bus, .devfn = PCI_DEVFN(slot, func), .offset = reg, .size = bytes, .value = data, }; int err; err = do_pci_op(sc->pdev, &op); DPRINTF("write config (b=%d, s=%d, f=%d, reg=%d, len=%d, val=%x, err=%d)\n", bus, slot, func, reg, bytes, data, err); } static int xpcib_route_interrupt(device_t pcib, device_t dev, int pin) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; DPRINTF("route intr (pin=%d, line=%d)\n", pin, cfg->intline); return cfg->intline; } static device_method_t xpcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xpcib_probe), DEVMETHOD(device_attach, xpcib_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, xpcib_read_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, xpcib_maxslots), DEVMETHOD(pcib_read_config, xpcib_read_config), DEVMETHOD(pcib_write_config, xpcib_write_config), DEVMETHOD(pcib_route_interrupt, xpcib_route_interrupt), DEVMETHOD_END }; static devclass_t xpcib_devclass; DEFINE_CLASS_0(pcib, xpcib_driver, xpcib_methods, sizeof(struct xpcib_softc)); DRIVER_MODULE(pcib, xpcife, xpcib_driver, xpcib_devclass, 0, 0); /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: t * End: */ Index: head/sys/mips/adm5120/admpci.c =================================================================== --- head/sys/mips/adm5120/admpci.c (revision 287881) +++ head/sys/mips/adm5120/admpci.c (revision 287882) @@ -1,502 +1,502 @@ /* $NetBSD: admpci.c,v 1.1 2007/03/20 08:52:02 dyoung Exp $ */ /*- * Copyright (c) 2007 David Young. 
All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /*- * Copyright (c) 2006 Itronix Inc. * All rights reserved. * * Written by Garrett D'Amore for Itronix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of Itronix Inc. may not be used to endorse * or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #ifdef ADMPCI_DEBUG int admpci_debug = 1; #define ADMPCI_DPRINTF(__fmt, ...) \ do { \ if (admpci_debug) \ printf((__fmt), __VA_ARGS__); \ } while (/*CONSTCOND*/0) #else /* !ADMPCI_DEBUG */ #define ADMPCI_DPRINTF(__fmt, ...) do { } while (/*CONSTCOND*/0) #endif /* ADMPCI_DEBUG */ #define ADMPCI_TAG_BUS_MASK __BITS(23, 16) /* Bit 11 is reserved. It selects the AHB-PCI bridge. Let device 0 * be the bridge. 
For all other device numbers, let bit[11] == 0. */ #define ADMPCI_TAG_DEVICE_MASK __BITS(15, 11) #define ADMPCI_TAG_DEVICE_SUBMASK __BITS(15, 12) #define ADMPCI_TAG_DEVICE_BRIDGE __BIT(11) #define ADMPCI_TAG_FUNCTION_MASK __BITS(10, 8) #define ADMPCI_TAG_REGISTER_MASK __BITS(7, 0) #define ADMPCI_MAX_DEVICE struct admpci_softc { device_t sc_dev; bus_space_tag_t sc_st; /* Access to PCI config registers */ bus_space_handle_t sc_addrh; bus_space_handle_t sc_datah; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; uint32_t sc_mem; uint32_t sc_io; }; static int admpci_probe(device_t dev) { return (0); } static int admpci_attach(device_t dev) { int busno = 0; struct admpci_softc *sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_busno = busno; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MIPS_PHYS_TO_KSEG1(ADM5120_BASE_PCI_IO); sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "ADMPCI I/O Ports"; if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0, 0xffff) != 0) { panic("admpci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MIPS_PHYS_TO_KSEG1(ADM5120_BASE_PCI_MEM); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ADMPCI PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + 0x100000) != 0) { panic("admpci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ADMPCI PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("admpci_attach: failed to set up IRQ rman"); if (bus_space_map(sc->sc_st, ADM5120_BASE_PCI_CONFADDR, 4, 0, &sc->sc_addrh) != 0) { device_printf(sc->sc_dev, "unable to address space\n"); panic("bus_space_map failed"); } if (bus_space_map(sc->sc_st, ADM5120_BASE_PCI_CONFDATA, 4, 0, &sc->sc_datah) != 0) { device_printf(sc->sc_dev, "unable to address space\n"); panic("bus_space_map failed"); } - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int admpci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t admpci_make_addr(int bus, int slot, int func, int reg) { return (0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | reg); } static uint32_t admpci_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { struct admpci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t shift, mask; bus_addr_t addr; ADMPCI_DPRINTF("%s: sc %p tag (%x, %x, %x) reg %d\n", __func__, (void *)sc, bus, slot, func, reg); addr = admpci_make_addr(bus, slot, func, reg); ADMPCI_DPRINTF("%s: sc_addrh %p sc_datah %p addr %p\n", __func__, (void *)sc->sc_addrh, (void *)sc->sc_datah, (void *)addr); bus_space_write_4(sc->sc_io, sc->sc_addrh, 0, addr); data = bus_space_read_4(sc->sc_io, sc->sc_datah, 0); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if (reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } ADMPCI_DPRINTF("%s: read 0x%x\n", __func__, data); return (data); } static void admpci_write_config(device_t dev, int bus, int slot, int func, int reg, uint32_t data, int bytes) { struct admpci_softc 
*sc = device_get_softc(dev); bus_addr_t addr; uint32_t reg_data; uint32_t shift, mask; ADMPCI_DPRINTF("%s: sc %p tag (%x, %x, %x) reg %d\n", __func__, (void *)sc, bus, slot, func, reg); if (bytes != 4) { reg_data = admpci_read_config(dev, bus, slot, func, reg, 4); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if (reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } } addr = admpci_make_addr(bus, slot, func, reg); ADMPCI_DPRINTF("%s: sc_addrh %p sc_datah %p addr %p\n", __func__, (void *)sc->sc_addrh, (void *)sc->sc_datah, (void *)addr); bus_space_write_4(sc->sc_io, sc->sc_addrh, 0, addr); bus_space_write_4(sc->sc_io, sc->sc_datah, 0, data); } static int admpci_route_interrupt(device_t pcib, device_t dev, int pin) { /* TODO: implement */ return (0); } static int admpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct admpci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int admpci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct admpci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * admpci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { return (NULL); #if 0 struct admpci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, sc->sc_st); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); #endif } static int admpci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int admpci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { #if 0 struct admpci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, (void (*)(void *))NULL, "admpci intr%d:", irq); if (error) return 0; sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, 
intr_priority(flags), flags, cookiep); /* Enable it, set trigger mode. */ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); admpci_set_icus(sc); #endif return (0); } static int admpci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (intr_event_remove_handler(cookie)); } static device_method_t admpci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, admpci_probe), DEVMETHOD(device_attach, admpci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, admpci_read_ivar), DEVMETHOD(bus_write_ivar, admpci_write_ivar), DEVMETHOD(bus_alloc_resource, admpci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, admpci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, admpci_setup_intr), DEVMETHOD(bus_teardown_intr, admpci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, admpci_maxslots), DEVMETHOD(pcib_read_config, admpci_read_config), DEVMETHOD(pcib_write_config, admpci_write_config), DEVMETHOD(pcib_route_interrupt, admpci_route_interrupt), DEVMETHOD_END }; static driver_t admpci_driver = { "pcib", admpci_methods, sizeof(struct admpci_softc), }; static devclass_t admpci_devclass; DRIVER_MODULE(admpci, obio, admpci_driver, admpci_devclass, 0, 0); Index: head/sys/mips/atheros/ar71xx_pci.c =================================================================== --- head/sys/mips/atheros/ar71xx_pci.c (revision 287881) +++ head/sys/mips/atheros/ar71xx_pci.c (revision 287882) @@ -1,706 +1,706 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR71XX_PCI_DEBUG #ifdef AR71XX_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif struct mtx ar71xx_pci_mtx; MTX_SYSINIT(ar71xx_pci_mtx, &ar71xx_pci_mtx, "ar71xx PCI space mutex", MTX_SPIN); struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; int sc_baseslot; struct rman sc_mem_rman; struct rman sc_irq_rman; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int ar71xx_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int ar71xx_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int ar71xx_pci_intr(void *); static void ar71xx_pci_mask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX is the PCI lock required here? */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); /* flush */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, reg & ~(1 << irq)); } static void ar71xx_pci_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX is the PCI lock required here? */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, reg | (1 << irq)); /* flush */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); } /* * get bitmask for bytes of interest: * 0 - we want this byte, 1 - ignore it. e.g: we read 1 byte * from register 7. Bitmask would be: 0111 */ static uint32_t ar71xx_get_bytes_to_read(int reg, int bytes) { uint32_t bytes_to_read = 0; if ((bytes % 4) == 0) bytes_to_read = 0; else if ((bytes % 4) == 1) bytes_to_read = (~(1 << (reg % 4))) & 0xf; else if ((bytes % 4) == 2) bytes_to_read = (~(3 << (reg % 4))) & 0xf; else panic("%s: wrong combination", __func__); return (bytes_to_read); } static int ar71xx_pci_check_bus_error(void) { uint32_t error, addr, has_errors = 0; mtx_assert(&ar71xx_pci_mtx, MA_OWNED); error = ATH_READ_REG(AR71XX_PCI_ERROR) & 0x3; dprintf("%s: PCI error = %02x\n", __func__, error); if (error) { addr = ATH_READ_REG(AR71XX_PCI_ERROR_ADDR); /* Do not report it yet */ #if 0 printf("PCI bus error %d at addr 0x%08x\n", error, addr); #endif ATH_WRITE_REG(AR71XX_PCI_ERROR, error); has_errors = 1; } error = ATH_READ_REG(AR71XX_PCI_AHB_ERROR) & 0x1; dprintf("%s: AHB error = %02x\n", __func__, error); if (error) { addr = ATH_READ_REG(AR71XX_PCI_AHB_ERROR_ADDR); /* Do not report it yet */ #if 0 printf("AHB bus error %d at addr 0x%08x\n", error, addr); #endif ATH_WRITE_REG(AR71XX_PCI_AHB_ERROR, error); has_errors = 1; } return (has_errors); } static uint32_t ar71xx_pci_make_addr(int bus, int slot, int func, int reg) { if (bus == 0) { return ((1 << slot) | (func << 8) | (reg & ~3)); } else { return ((bus << 16) | (slot << 11) | (func << 8) | (reg & ~3) | 1); } } static int ar71xx_pci_conf_setup(int bus, int slot, int func, int reg, int bytes, uint32_t cmd) { uint32_t addr = ar71xx_pci_make_addr(bus, slot, func, (reg & ~3)); mtx_assert(&ar71xx_pci_mtx, MA_OWNED); cmd |= (ar71xx_get_bytes_to_read(reg, bytes) << 4); ATH_WRITE_REG(AR71XX_PCI_CONF_ADDR, addr); ATH_WRITE_REG(AR71XX_PCI_CONF_CMD, cmd); dprintf("%s: tag (%x, %x, %x) %d/%d addr=%08x, cmd=%08x\n", __func__, bus, slot, func, reg, bytes, addr, cmd); return ar71xx_pci_check_bus_error(); } static uint32_t ar71xx_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data; uint32_t shift, mask; /* register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 
0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); mtx_lock_spin(&ar71xx_pci_mtx); if (ar71xx_pci_conf_setup(bus, slot, func, reg, bytes, PCI_CONF_CMD_READ) == 0) data = ATH_READ_REG(AR71XX_PCI_CONF_READ_DATA); else data = -1; mtx_unlock_spin(&ar71xx_pci_mtx); /* get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void ar71xx_pci_local_write(device_t dev, uint32_t reg, uint32_t data, int bytes) { uint32_t cmd; dprintf("%s: local write reg %d(%d)\n", __func__, reg, bytes); data = data << (8*(reg % 4)); cmd = PCI_LCONF_CMD_WRITE | (reg & ~3); cmd |= (ar71xx_get_bytes_to_read(reg, bytes) << 20); mtx_lock_spin(&ar71xx_pci_mtx); ATH_WRITE_REG(AR71XX_PCI_LCONF_CMD, cmd); ATH_WRITE_REG(AR71XX_PCI_LCONF_WRITE_DATA, data); mtx_unlock_spin(&ar71xx_pci_mtx); } static void ar71xx_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); data = data << (8*(reg % 4)); mtx_lock_spin(&ar71xx_pci_mtx); if (ar71xx_pci_conf_setup(bus, slot, func, reg, bytes, PCI_CONF_CMD_WRITE) == 0) ATH_WRITE_REG(AR71XX_PCI_CONF_WRITE_DATA, data); mtx_unlock_spin(&ar71xx_pci_mtx); } #ifdef AR71XX_ATH_EEPROM /* * Some embedded boards (eg AP94) have the MAC attached via PCI but they * don't have the MAC-attached EEPROM. The register initialisation * values and calibration data are stored in the on-board flash. * This routine initialises the NIC via the EEPROM register contents * before the probe/attach routines get a go at things. */ static void ar71xx_pci_fixup(device_t dev, u_int bus, u_int slot, u_int func, long flash_addr, int len) { uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); uint32_t reg, val, bar0; if (bootverbose) device_printf(dev, "%s: flash_addr=%lx, cal_data=%p\n", __func__, flash_addr, cal_data); /* XXX check 0xa55a */ /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? */ bar0 = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_BAR(0), 4); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" reg: %x, val=%x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), bar0, 4); } static void ar71xx_pci_slot_fixup(device_t dev, u_int bus, u_int slot, u_int func) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. 
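 * As an editor's illustration only (the flash address and size here are hypothetical), the hints consumed by the snprintf()/resource_*_value() pairs below take the form hint.pcib.0.bus.0.17.0.ath_fixup_addr="0x1f661000" and hint.pcib.0.bus.0.17.0.ath_fixup_size="4096", where "pcib" and "0" are this bridge's name and unit as returned by device_get_name() and device_get_unit().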
*/ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), bus, slot, func); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", bus, slot, func); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", bus, slot, func); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, bus, slot, func); ar71xx_pci_fixup(dev, bus, slot, func, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, bus, slot, func, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar71xx_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar71xx_pci_attach(device_t dev) { int busno = 0; int rid = 0; struct ar71xx_pci_softc *sc = device_get_softc(dev); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar71xx PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar71xx_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar71xx PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar71xx_pci_attach: failed to set up IRQ rman"); /* * Check if there is a base slot hint. Otherwise use default value. */ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "baseslot", &sc->sc_baseslot) != 0) { device_printf(dev, "%s: missing hint '%s', default to AR71XX_PCI_BASE_SLOT\n", __func__, "baseslot"); sc->sc_baseslot = AR71XX_PCI_BASE_SLOT; } ATH_WRITE_REG(AR71XX_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar71xx_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* reset PCI core and PCI bus */ ar71xx_device_stop(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); ar71xx_device_start(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); /* Init PCI windows */ ATH_WRITE_REG(AR71XX_PCI_WINDOW0, PCI_WINDOW0_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW1, PCI_WINDOW1_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW2, PCI_WINDOW2_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW3, PCI_WINDOW3_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW4, PCI_WINDOW4_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW5, PCI_WINDOW5_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW6, PCI_WINDOW6_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW7, PCI_WINDOW7_CONF_ADDR); DELAY(100000); mtx_lock_spin(&ar71xx_pci_mtx); ar71xx_pci_check_bus_error(); mtx_unlock_spin(&ar71xx_pci_mtx); /* Fixup internal PCI bridge */ ar71xx_pci_local_write(dev, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 4); #ifdef AR71XX_ATH_EEPROM /* * Hard-code a check for slot 17 and 18 - these are * the two PCI slots which may have a PCI device that * requires "fixing". 
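 * (These are the slots where boards such as the AP94 mentioned above attach their PCI ath(4) radios; whether anything is actually poked is still gated by the per-slot hints checked in ar71xx_pci_slot_fixup().)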
*/ ar71xx_pci_slot_fixup(dev, 0, 17, 0); ar71xx_pci_slot_fixup(dev, 0, 18, 0); #endif /* AR71XX_ATH_EEPROM */ - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar71xx_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar71xx_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar71xx_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar71xx_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar71xx_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, ar71xx_pci_mask_irq, ar71xx_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return (error); } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); ar71xx_pci_unmask_irq((void*)irq); return (0); } static int ar71xx_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); ar71xx_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int ar71xx_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; reg = ATH_READ_REG(AR71XX_PCI_INTR_STATUS); mask = ATH_READ_REG(AR71XX_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; for (irq = AR71XX_PCI_IRQ_START; irq <= AR71XX_PCI_IRQ_END; irq++) { if (reg & (1 << irq)) { event = sc->sc_eventstab[irq]; if (!event || 
TAILQ_EMPTY(&event->ie_handlers)) { /* Ignore timer interrupts */ if (irq != 0) printf("Stray IRQ %d\n", irq); continue; } /* Flush DDR FIFO for PCI/PCIe */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } } return (FILTER_HANDLED); } static int ar71xx_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ar71xx_pci_route_interrupt(device_t pcib, device_t device, int pin) { struct ar71xx_pci_softc *sc = device_get_softc(pcib); if (pci_get_slot(device) < sc->sc_baseslot) panic("%s: PCI slot %d is less then AR71XX_PCI_BASE_SLOT", __func__, pci_get_slot(device)); return (pci_get_slot(device) - sc->sc_baseslot); } static device_method_t ar71xx_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar71xx_pci_probe), DEVMETHOD(device_attach, ar71xx_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ar71xx_pci_read_ivar), DEVMETHOD(bus_write_ivar, ar71xx_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ar71xx_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, ar71xx_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ar71xx_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ar71xx_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, ar71xx_pci_maxslots), DEVMETHOD(pcib_read_config, ar71xx_pci_read_config), DEVMETHOD(pcib_write_config, ar71xx_pci_write_config), DEVMETHOD(pcib_route_interrupt, ar71xx_pci_route_interrupt), DEVMETHOD_END }; static driver_t ar71xx_pci_driver = { "pcib", ar71xx_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t ar71xx_pci_devclass; DRIVER_MODULE(ar71xx_pci, nexus, ar71xx_pci_driver, ar71xx_pci_devclass, 0, 0); Index: head/sys/mips/atheros/ar724x_pci.c =================================================================== --- head/sys/mips/atheros/ar724x_pci.c (revision 287881) +++ head/sys/mips/atheros/ar724x_pci.c (revision 287882) @@ -1,666 +1,666 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #include #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman; struct rman sc_irq_rman; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int ar724x_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int ar724x_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int ar724x_pci_intr(void *); static void ar724x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes) { uint32_t val, mask, shift; /* Register access is 32-bit aligned */ shift = (offset & 3) * 8; if (bytes % 4) mask = (1 << (bytes * 8)) - 1; else mask = 0xffffffff; val = ATH_READ_REG(reg + (offset & ~3)); val &= ~(mask << shift); val |= ((data & mask) << shift); ATH_WRITE_REG(reg + (offset & ~3), val); dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", __func__, reg, reg + (offset & ~3), offset, data, val, bytes); } static uint32_t ar724x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data, shift, mask; /* Register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if ((bus == 0) && (slot == 0) && (func == 0)) data = ATH_READ_REG(AR724X_PCI_CFG_BASE + (reg & ~3)); else data = -1; /* Get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void ar724x_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { dprintf("%s: tag (%x, %x, %x) reg %d(%d): %x\n", __func__, bus, slot, func, reg, bytes, data); if ((bus != 0) || (slot != 0) || (func != 0)) return; /* * WAR for BAR issue on AR7240 - We are unable to access the PCI * device space if we set the BAR with proper base address. * * However, we _do_ want to allow programming in the probe value * (0xffffffff) so the PCI code can find out how big the memory * map is for this device. Without it, it'll think the memory * map is 32 bits wide, the PCI code will then end up thinking * the register window is '0' and fail to allocate resources. 
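 * (Editor's note on the probe value, standard PCI BAR sizing rather than anything AR7240-specific: after the 0xffffffff write, a function decoding, say, a 64KB memory window returns 0xffff0000 plus the read-only low type bits on read-back, and the size is recovered as ~(readback & ~0xf) + 1 == 0x10000.)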
*/ if (reg == PCIR_BAR(0) && bytes == 4 && ar71xx_soc == AR71XX_SOC_AR7240 && data != 0xffffffff) ar724x_pci_write(AR724X_PCI_CFG_BASE, reg, 0xffff, bytes); else ar724x_pci_write(AR724X_PCI_CFG_BASE, reg, data, bytes); } static void ar724x_pci_mask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX - Only one interrupt ? Only one device ? */ if (irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(AR724X_PCI_INTR_MASK); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, reg & ~AR724X_PCI_INTR_DEV0); /* Clear any pending interrupt */ reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS); ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, reg | AR724X_PCI_INTR_DEV0); } static void ar724x_pci_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX */ if (irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(AR724X_PCI_INTR_MASK); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, reg | AR724X_PCI_INTR_DEV0); } static int ar724x_pci_setup(device_t dev) { uint32_t reg; /* setup COMMAND register */ reg = PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; ar724x_pci_write(AR724X_PCI_CRP_BASE, PCIR_COMMAND, reg, 2); ar724x_pci_write(AR724X_PCI_CRP_BASE, 0x20, 0x1ff01000, 4); ar724x_pci_write(AR724X_PCI_CRP_BASE, 0x24, 0x1ff01000, 4); reg = ATH_READ_REG(AR724X_PCI_RESET); if (reg != 0x7) { DELAY(100000); ATH_WRITE_REG(AR724X_PCI_RESET, 0); DELAY(100); ATH_WRITE_REG(AR724X_PCI_RESET, 4); DELAY(100000); } if (ar71xx_soc == AR71XX_SOC_AR7240) reg = AR724X_PCI_APP_LTSSM_ENABLE; else reg = 0x1ffc1; ATH_WRITE_REG(AR724X_PCI_APP, reg); /* Flush write */ (void) ATH_READ_REG(AR724X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(AR724X_PCI_RESET); if ((reg & AR724X_PCI_RESET_LINK_UP) == 0) { device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } if (ar71xx_soc == AR71XX_SOC_AR7241 || ar71xx_soc == AR71XX_SOC_AR7242) { reg = ATH_READ_REG(AR724X_PCI_APP); reg |= (1 << 16); ATH_WRITE_REG(AR724X_PCI_APP, reg); } return (0); } #ifdef AR71XX_ATH_EEPROM #define AR5416_EEPROM_MAGIC 0xa55a /* * XXX - This should not be here ! And this looks like Atheros (if_ath) only. */ static void ar724x_pci_fixup(device_t dev, long flash_addr, int len) { uint32_t bar0, reg, val; uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); #if 0 if (cal_data[0] != AR5416_EEPROM_MAGIC) { device_printf(dev, "%s: Invalid calibration data from 0x%x\n", __func__, (uintptr_t) flash_addr); return; } #endif /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? 
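 * Editor's note: mapping the NIC at AR71XX_PCI_MEM_BASE makes the ATH_WRITE_REG() calls below land in its BAR(0) window, and the saved value is restored once the fixup is done. The flash image starts with a short header (the 0xa55a magic checked above under #if 0); cal_data += 3 skips it, and each record is three 16-bit words - register offset, then the low and high halves of the 32-bit value - with a register word of 0xffff terminating the list.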
*/ bar0 = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_BAR(0), 4); /* Write temporary BAR0 to map the NIC into a fixed location */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* set pointer to first reg address */ cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" 0x%08x=0x%04x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), bar0, 4); } #undef AR5416_EEPROM_MAGIC /* * XXX This is (mostly) duplicated with ar71xx_pci.c. * It should at some point be fixed. */ static void ar724x_pci_slot_fixup(device_t dev) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. */ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), 0, 0, 0); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", 0, 0, 0); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", 0, 0, 0); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, 0, 0, 0); ar724x_pci_fixup(dev, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, 0, 0, 0, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar724x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar724x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int busno = 0; int rid = 0; sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar724x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar724x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar724x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar724x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. 
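 * The bridge itself takes a single IRQ from its parent bus; per-device PCIe interrupts are then demultiplexed in ar724x_pci_intr() via sc_eventstab[], which is what the sc_irq_rman range handed out to children maps onto.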
*/ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar724x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(AR724X_RESET_PCIE); ar71xx_device_stop(AR724X_RESET_PCIE_PHY); ar71xx_device_stop(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY); ar71xx_device_start(AR724X_RESET_PCIE); if (ar724x_pci_setup(dev)) return (ENXIO); #ifdef AR71XX_ATH_EEPROM ar724x_pci_slot_fixup(dev); #endif /* AR71XX_ATH_EEPROM */ /* Fixup internal PCI bridge */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar724x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar724x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar724x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar724x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar724x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, ar724x_pci_mask_irq, ar724x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); ar724x_pci_unmask_irq((void*)irq); 
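/* Editor's note: the explicit unmask above enables the source as soon as the first handler is registered; afterwards the mask/unmask pair passed to intr_event_create() serves as the pre/post-ithread hooks, keeping the source masked while the ithread runs. */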
return (0); } static int ar724x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); ar724x_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int ar724x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS); mask = ATH_READ_REG(AR724X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; if (reg & AR724X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* Flush pending memory transactions */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int ar724x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ar724x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t ar724x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar724x_pci_probe), DEVMETHOD(device_attach, ar724x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ar724x_pci_read_ivar), DEVMETHOD(bus_write_ivar, ar724x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ar724x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, ar724x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ar724x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ar724x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, ar724x_pci_maxslots), DEVMETHOD(pcib_read_config, ar724x_pci_read_config), DEVMETHOD(pcib_write_config, ar724x_pci_write_config), DEVMETHOD(pcib_route_interrupt, ar724x_pci_route_interrupt), DEVMETHOD_END }; static driver_t ar724x_pci_driver = { "pcib", ar724x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t ar724x_pci_devclass; DRIVER_MODULE(ar724x_pci, nexus, ar724x_pci_driver, ar724x_pci_devclass, 0, 0); Index: head/sys/mips/atheros/qca955x_pci.c =================================================================== --- head/sys/mips/atheros/qca955x_pci.c (revision 287881) +++ head/sys/mips/atheros/qca955x_pci.c (revision 287882) @@ -1,607 +1,607 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * Copyright (c) 2015, Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include /* XXX aim to eliminate this! */ #include #include #include #include #undef AR724X_PCI_DEBUG //#define AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif /* * This is a PCI controller for the QCA955x and later SoCs. * It needs to be aware of >1 PCIe host endpoints. * * XXX TODO; it may be nice to merge this with ar724x_pci.c; * they're very similar. */ struct ar71xx_pci_irq { struct ar71xx_pci_softc *sc; int irq; }; struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman; struct rman sc_irq_rman; uint32_t sc_pci_reg_base; /* XXX until bus stuff is done */ uint32_t sc_pci_crp_base; /* XXX until bus stuff is done */ uint32_t sc_pci_ctrl_base; /* XXX until bus stuff is done */ uint32_t sc_pci_mem_base; /* XXX until bus stuff is done */ uint32_t sc_pci_membase_limit; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct ar71xx_pci_irq sc_pci_irq[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int qca955x_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int qca955x_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int qca955x_pci_intr(void *); static void qca955x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes) { uint32_t val, mask, shift; /* Register access is 32-bit aligned */ shift = (offset & 3) * 8; if (bytes % 4) mask = (1 << (bytes * 8)) - 1; else mask = 0xffffffff; val = ATH_READ_REG(reg + (offset & ~3)); val &= ~(mask << shift); val |= ((data & mask) << shift); ATH_WRITE_REG(reg + (offset & ~3), val); dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", __func__, reg, reg + (offset & ~3), offset, data, val, bytes); } static uint32_t qca955x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t data, shift, mask; /* Register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if ((bus == 0) && (slot == 0) && (func == 0)) data = ATH_READ_REG(sc->sc_pci_reg_base + (reg & ~3)); else data = -1; /* Get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return 
(data); } static void qca955x_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); dprintf("%s: tag (%x, %x, %x) reg %d(%d): %x\n", __func__, bus, slot, func, reg, bytes, data); if ((bus != 0) || (slot != 0) || (func != 0)) return; qca955x_pci_write(sc->sc_pci_reg_base, reg, data, bytes); } static void qca955x_pci_mask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; /* XXX - Only one interrupt ? Only one device ? */ if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg & ~QCA955X_PCI_INTR_DEV0); /* Clear any pending interrupt */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, reg | QCA955X_PCI_INTR_DEV0); } static void qca955x_pci_unmask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg | QCA955X_PCI_INTR_DEV0); } static int qca955x_pci_setup(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t reg; /* setup COMMAND register */ reg = PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; qca955x_pci_write(sc->sc_pci_crp_base, PCIR_COMMAND, reg, 2); /* These are the memory/prefetch base/limit parameters */ qca955x_pci_write(sc->sc_pci_crp_base, 0x20, sc->sc_pci_membase_limit, 4); qca955x_pci_write(sc->sc_pci_crp_base, 0x24, sc->sc_pci_membase_limit, 4); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if (reg != 0x7) { DELAY(100000); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 0); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 4); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100000); } ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP, 0x1ffc1); /* Flush write */ (void) ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if ((reg & QCA955X_PCI_RESET_LINK_UP) == 0) { device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } return (0); } static int qca955x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int qca955x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int unit = device_get_unit(dev); int busno = 0; int rid = 0; /* Dirty; maybe these could all just be hints */ if (unit == 0) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE0; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE0; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE0; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE0; /* XXX verify */ sc->sc_pci_membase_limit = 0x11f01000; } else if (unit == 1) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE1; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE1; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE1; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE1; /* XXX verify */ sc->sc_pci_membase_limit = 0x12f01200; } else { device_printf(dev, "%s: invalid unit (%d)\n", __func__, unit); return (ENXIO); } 
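/* Editor's note: the two units are the two PCIe root complexes on the QCA955x, each with its own config, CRP, control and memory apertures. sc_pci_membase_limit is what qca955x_pci_setup() writes into the bridge's memory and prefetchable base/limit registers (CRP offsets 0x20/0x24); read with the standard type-1 encoding, the low 16 bits carry the window base and the high 16 bits the limit (address bits 31:20 each), so 0x11f01000 would describe a window of 0x10000000-0x11ffffff. */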
sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "qca955x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_pci_mem_base, sc->sc_pci_mem_base + QCA955X_PCI_MEM_SIZE - 1) != 0) { panic("qca955x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "qca955x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("qca955x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, qca955x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(QCA955X_RESET_PCIE); ar71xx_device_stop(QCA955X_RESET_PCIE_PHY); DELAY(100); ar71xx_device_start(QCA955X_RESET_PCIE_PHY); ar71xx_device_start(QCA955X_RESET_PCIE); if (qca955x_pci_setup(dev)) return (ENXIO); /* * Write initial base address. * * I'm not yet sure why this is required and/or why it isn't * initialised like this. The AR71xx PCI code initialises * the PCI windows for each device, but neither it or the * 724x PCI bridge modules explicitly initialise the BAR. * * So before this gets committed, have a chat with jhb@ or * someone else who knows PCI well and figure out whether * the initial BAR is supposed to be determined by /other/ * means. 
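 * (Editor's cross-reference: the AR724x attach path does not initialise the bridge BAR(0) at all, and on the AR7240 its ar724x_pci_write_config() workaround forces a small fixed value into BAR(0) whenever the PCI code tries to program it, which may be part of the difference being puzzled over here.)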
*/ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), sc->sc_pci_mem_base, 4); /* Fixup internal PCI bridge */ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int qca955x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int qca955x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * qca955x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int qca955x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int qca955x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { sc->sc_pci_irq[irq].sc = sc; sc->sc_pci_irq[irq].irq = irq; error = intr_event_create(&event, (void *)&sc->sc_pci_irq[irq], 0, irq, qca955x_pci_mask_irq, qca955x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); qca955x_pci_unmask_irq(&sc->sc_pci_irq[irq]); return (0); } static int qca955x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); qca955x_pci_mask_irq(&sc->sc_pci_irq[irq]); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int qca955x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; 
/* There's only one PCIe DDR flush for both PCIe EPs */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); mask = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; /* * XXX TODO: handle >1 PCIe end point! */ if (reg & QCA955X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int qca955x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int qca955x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t qca955x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qca955x_pci_probe), DEVMETHOD(device_attach, qca955x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, qca955x_pci_read_ivar), DEVMETHOD(bus_write_ivar, qca955x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, qca955x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, qca955x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, qca955x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, qca955x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, qca955x_pci_maxslots), DEVMETHOD(pcib_read_config, qca955x_pci_read_config), DEVMETHOD(pcib_write_config, qca955x_pci_write_config), DEVMETHOD(pcib_route_interrupt, qca955x_pci_route_interrupt), DEVMETHOD_END }; static driver_t qca955x_pci_driver = { "pcib", qca955x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t qca955x_pci_devclass; DRIVER_MODULE(qca955x_pci, nexus, qca955x_pci_driver, qca955x_pci_devclass, 0, 0); DRIVER_MODULE(qca955x_pci, apb, qca955x_pci_driver, qca955x_pci_devclass, 0, 0); Index: head/sys/mips/cavium/octopci.c =================================================================== --- head/sys/mips/cavium/octopci.c (revision 287881) +++ head/sys/mips/cavium/octopci.c (revision 287882) @@ -1,991 +1,991 @@ /*- * Copyright (c) 2010-2011 Juli Mallett * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define NPI_WRITE(addr, value) cvmx_write64_uint32((addr) ^ 4, (value)) #define NPI_READ(addr) cvmx_read64_uint32((addr) ^ 4) struct octopci_softc { device_t sc_dev; unsigned sc_domain; unsigned sc_bus; bus_addr_t sc_io_base; unsigned sc_io_next; struct rman sc_io; bus_addr_t sc_mem1_base; unsigned sc_mem1_next; struct rman sc_mem1; }; static void octopci_identify(driver_t *, device_t); static int octopci_probe(device_t); static int octopci_attach(device_t); static int octopci_read_ivar(device_t, device_t, int, uintptr_t *); static struct resource *octopci_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); static int octopci_activate_resource(device_t, device_t, int, int, struct resource *); static int octopci_maxslots(device_t); static uint32_t octopci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void octopci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int octopci_route_interrupt(device_t, device_t, int); static unsigned octopci_init_bar(device_t, unsigned, unsigned, unsigned, unsigned, uint8_t *); static unsigned octopci_init_device(device_t, unsigned, unsigned, unsigned, unsigned); static unsigned octopci_init_bus(device_t, unsigned); static void octopci_init_pci(device_t); static uint64_t octopci_cs_addr(unsigned, unsigned, unsigned, unsigned); static void octopci_identify(driver_t *drv, device_t parent) { BUS_ADD_CHILD(parent, 0, "pcib", 0); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) BUS_ADD_CHILD(parent, 0, "pcib", 1); } static int octopci_probe(device_t dev) { if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { device_set_desc(dev, "Cavium Octeon PCIe bridge"); return (0); } /* Check whether we are a PCI host. */ if ((cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST) == 0) return (ENXIO); if (device_get_unit(dev) != 0) return (ENXIO); device_set_desc(dev, "Cavium Octeon PCI bridge"); return (0); } static int octopci_attach(device_t dev) { struct octopci_softc *sc; unsigned subbus; int error; sc = device_get_softc(dev); sc->sc_dev = dev; if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { sc->sc_domain = device_get_unit(dev); error = cvmx_pcie_rc_initialize(sc->sc_domain); if (error != 0) { device_printf(dev, "Failed to put PCIe bus in host mode.\n"); return (ENXIO); } /* * In RC mode, the Simple Executive programs the first bus to * be numbered as bus 1, because some IDT bridges used in * Octeon systems object to being attached to bus 0. 
*/ sc->sc_bus = 1; sc->sc_io_base = CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(sc->sc_domain)); sc->sc_io.rm_descr = "Cavium Octeon PCIe I/O Ports"; sc->sc_mem1_base = CVMX_ADD_IO_SEG(cvmx_pcie_get_mem_base_address(sc->sc_domain)); sc->sc_mem1.rm_descr = "Cavium Octeon PCIe Memory"; } else { octopci_init_pci(dev); sc->sc_domain = 0; sc->sc_bus = 0; sc->sc_io_base = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_IO)); sc->sc_io.rm_descr = "Cavium Octeon PCI I/O Ports"; sc->sc_mem1_base = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_MEM1)); sc->sc_mem1.rm_descr = "Cavium Octeon PCI Memory"; } sc->sc_io.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_io); if (error != 0) return (error); error = rman_manage_region(&sc->sc_io, CVMX_OCT_PCI_IO_BASE, CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE); if (error != 0) return (error); sc->sc_mem1.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_mem1); if (error != 0) return (error); error = rman_manage_region(&sc->sc_mem1, CVMX_OCT_PCI_MEM1_BASE, CVMX_OCT_PCI_MEM1_BASE + CVMX_OCT_PCI_MEM1_SIZE); if (error != 0) return (error); /* * Next offsets for resource allocation in octopci_init_bar. */ sc->sc_io_next = 0; sc->sc_mem1_next = 0; /* * Configure devices. */ octopci_write_config(dev, sc->sc_bus, 0, 0, PCIR_SUBBUS_1, 0xff, 1); subbus = octopci_init_bus(dev, sc->sc_bus); octopci_write_config(dev, sc->sc_bus, 0, 0, PCIR_SUBBUS_1, subbus, 1); - device_add_child(dev, "pci", device_get_unit(dev)); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int octopci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct octopci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->sc_domain; return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); } return (ENOENT); } static struct resource * octopci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct octopci_softc *sc; struct resource *res; struct rman *rm; int error; sc = device_get_softc(bus); switch (type) { case SYS_RES_IRQ: res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); if (res != NULL) return (res); return (NULL); case SYS_RES_MEMORY: rm = &sc->sc_mem1; break; case SYS_RES_IOPORT: rm = &sc->sc_io; break; default: return (NULL); } res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_bustag(res, octopci_bus_space); switch (type) { case SYS_RES_MEMORY: rman_set_bushandle(res, sc->sc_mem1_base + rman_get_start(res)); break; case SYS_RES_IOPORT: rman_set_bushandle(res, sc->sc_io_base + rman_get_start(res)); #if __mips_n64 rman_set_virtual(res, (void *)rman_get_bushandle(res)); #else /* * XXX * We can't access ports via a 32-bit pointer. 
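*/

/*
 * The rman calls above follow one lifecycle, split between attach and
 * bus_alloc_resource.  A condensed sketch; EXAMPLE_BASE, EXAMPLE_SIZE
 * and the surrounding error handling are placeholders, not copied from
 * any one driver in this patch:
 *
 *	(attach)
 *	sc->sc_mem.rm_type = RMAN_ARRAY;
 *	sc->sc_mem.rm_descr = "example PCI memory window";
 *	if (rman_init(&sc->sc_mem) != 0 ||
 *	    rman_manage_region(&sc->sc_mem, EXAMPLE_BASE,
 *	    EXAMPLE_BASE + EXAMPLE_SIZE - 1) != 0)
 *		return (ENXIO);
 *
 *	(bus_alloc_resource)
 *	res = rman_reserve_resource(&sc->sc_mem, start, end, count,
 *	    flags, child);
 *	if (res == NULL)
 *		return (NULL);
 *	rman_set_rid(res, *rid);
 *	if ((flags & RF_ACTIVE) != 0 &&
 *	    bus_activate_resource(child, type, *rid, res) != 0) {
 *		rman_release_resource(res);
 *		return (NULL);
 *	}
 *	return (res);
 */

/*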
*/ rman_set_virtual(res, NULL); #endif break; } if ((flags & RF_ACTIVE) != 0) { error = bus_activate_resource(child, type, *rid, res); if (error != 0) { rman_release_resource(res); return (NULL); } } return (res); } static int octopci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { bus_space_handle_t bh; int error; switch (type) { case SYS_RES_IRQ: error = bus_generic_activate_resource(bus, child, type, rid, res); if (error != 0) return (error); return (0); case SYS_RES_MEMORY: case SYS_RES_IOPORT: error = bus_space_map(rman_get_bustag(res), rman_get_bushandle(res), rman_get_size(res), 0, &bh); if (error != 0) return (error); rman_set_bushandle(res, bh); break; default: return (ENXIO); } error = rman_activate_resource(res); if (error != 0) return (error); return (0); } static int octopci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t octopci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct octopci_softc *sc; uint64_t addr; uint32_t data; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { if (bus == 0 && slot == 0 && func == 0) return ((uint32_t)-1); switch (bytes) { case 4: return (cvmx_pcie_config_read32(sc->sc_domain, bus, slot, func, reg)); case 2: return (cvmx_pcie_config_read16(sc->sc_domain, bus, slot, func, reg)); case 1: return (cvmx_pcie_config_read8(sc->sc_domain, bus, slot, func, reg)); default: return ((uint32_t)-1); } } addr = octopci_cs_addr(bus, slot, func, reg); switch (bytes) { case 4: data = le32toh(cvmx_read64_uint32(addr)); return (data); case 2: data = le16toh(cvmx_read64_uint16(addr)); return (data); case 1: data = cvmx_read64_uint8(addr); return (data); default: return ((uint32_t)-1); } } static void octopci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct octopci_softc *sc; uint64_t addr; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { switch (bytes) { case 4: cvmx_pcie_config_write32(sc->sc_domain, bus, slot, func, reg, data); return; case 2: cvmx_pcie_config_write16(sc->sc_domain, bus, slot, func, reg, data); return; case 1: cvmx_pcie_config_write8(sc->sc_domain, bus, slot, func, reg, data); return; default: return; } } addr = octopci_cs_addr(bus, slot, func, reg); switch (bytes) { case 4: cvmx_write64_uint32(addr, htole32(data)); return; case 2: cvmx_write64_uint16(addr, htole16(data)); return; case 1: cvmx_write64_uint8(addr, data); return; default: return; } } static int octopci_route_interrupt(device_t dev, device_t child, int pin) { struct octopci_softc *sc; unsigned bus, slot, func; unsigned irq; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return (OCTEON_IRQ_PCI_INT0 + pin - 1); bus = pci_get_bus(child); slot = pci_get_slot(child); func = pci_get_function(child); /* * Board types we have to know at compile-time. */ #if defined(OCTEON_BOARD_CAPK_0100ND) if (bus == 0 && slot == 12 && func == 0) return (OCTEON_IRQ_PCI_INT2); #endif /* * For board types we can determine at runtime. 
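*/

/*
 * The generic fallback a few lines below rotates the four INTx lines
 * across slots, so neighbouring slots do not all land on the same
 * shared interrupt.  A standalone restatement with illustrative names;
 * pin is 1-based, INTA# == 1 .. INTD# == 4:
 *
 *	static unsigned
 *	example_intx_swizzle(unsigned slot, unsigned pin)
 *	{
 *		return ((slot + pin - 3) & 3);
 *	}
 *
 * The result selects one of INT0..INT3.  For example, slot 12 with
 * INTA# gives (12 + 1 - 3) & 3 = 2, i.e. OCTEON_IRQ_PCI_INT2, which
 * agrees with the hard-wired CAPK-0100ND case above, assuming that
 * device signals INTA#.
 */

/*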
*/ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: return (OCTEON_IRQ_PCI_INT0 + pin - 1); case CVMX_BOARD_TYPE_CUST_LANNER_MR320: if (slot < 32) { if (slot == 3 || slot == 9) irq = pin; else irq = pin - 1; return (OCTEON_IRQ_PCI_INT0 + (irq & 3)); } break; #endif default: break; } irq = slot + pin - 3; return (OCTEON_IRQ_PCI_INT0 + (irq & 3)); } static unsigned octopci_init_bar(device_t dev, unsigned b, unsigned s, unsigned f, unsigned barnum, uint8_t *commandp) { struct octopci_softc *sc; uint64_t bar; unsigned size; int barsize; sc = device_get_softc(dev); octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), 0xffffffff, 4); bar = octopci_read_config(dev, b, s, f, PCIR_BAR(barnum), 4); if (bar == 0) { /* Bar not implemented; got to next bar. */ return (barnum + 1); } if (PCI_BAR_IO(bar)) { size = ~(bar & PCIM_BAR_IO_BASE) + 1; sc->sc_io_next = (sc->sc_io_next + size - 1) & ~(size - 1); if (sc->sc_io_next + size > CVMX_OCT_PCI_IO_SIZE) { device_printf(dev, "%02x.%02x:%02x: no ports for BAR%u.\n", b, s, f, barnum); return (barnum + 1); } octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), CVMX_OCT_PCI_IO_BASE + sc->sc_io_next, 4); sc->sc_io_next += size; /* * Enable I/O ports. */ *commandp |= PCIM_CMD_PORTEN; return (barnum + 1); } else { if (PCIR_BAR(barnum) == PCIR_BIOS) { /* * ROM BAR is always 32-bit. */ barsize = 1; } else { switch (bar & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_64: /* * XXX * High 32 bits are all zeroes for now. */ octopci_write_config(dev, b, s, f, PCIR_BAR(barnum + 1), 0, 4); barsize = 2; break; default: barsize = 1; break; } } size = ~(bar & (uint32_t)PCIM_BAR_MEM_BASE) + 1; sc->sc_mem1_next = (sc->sc_mem1_next + size - 1) & ~(size - 1); if (sc->sc_mem1_next + size > CVMX_OCT_PCI_MEM1_SIZE) { device_printf(dev, "%02x.%02x:%02x: no memory for BAR%u.\n", b, s, f, barnum); return (barnum + barsize); } octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), CVMX_OCT_PCI_MEM1_BASE + sc->sc_mem1_next, 4); sc->sc_mem1_next += size; /* * Enable memory access. */ *commandp |= PCIM_CMD_MEMEN; return (barnum + barsize); } } static unsigned octopci_init_device(device_t dev, unsigned b, unsigned s, unsigned f, unsigned secbus) { unsigned barnum, bars; uint8_t brctl; uint8_t class, subclass; uint8_t command; uint8_t hdrtype; /* Read header type (again.) */ hdrtype = octopci_read_config(dev, b, s, f, PCIR_HDRTYPE, 1); /* * Disable memory and I/O while programming BARs. */ command = octopci_read_config(dev, b, s, f, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); DELAY(10000); /* Program BARs. */ switch (hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: bars = 6; break; case PCIM_HDRTYPE_BRIDGE: bars = 2; break; case PCIM_HDRTYPE_CARDBUS: bars = 0; break; default: device_printf(dev, "%02x.%02x:%02x: invalid header type %#x\n", b, s, f, hdrtype); return (secbus); } barnum = 0; while (barnum < bars) barnum = octopci_init_bar(dev, b, s, f, barnum, &command); /* Enable bus mastering. */ command |= PCIM_CMD_BUSMASTEREN; /* Enable whatever facilities the BARs require. */ octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); DELAY(10000); /* * Set cache line size. On Octeon it should be 128 bytes, * but according to Linux some Intel bridges have trouble * with values over 64 bytes, so use 64 bytes. */ octopci_write_config(dev, b, s, f, PCIR_CACHELNSZ, 16, 1); /* Set latency timer. 
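*/

/*
 * octopci_init_bar() above uses the standard BAR sizing handshake:
 * write all ones, read the value back, and the device hardwires the
 * low address bits of the field to zero, so the read-back encodes the
 * region size.  A sketch of the arithmetic; cfg_read32(),
 * cfg_write32(), window_base and next are placeholders for the
 * driver's own accessors and allocation cursor:
 *
 *	cfg_write32(PCIR_BAR(n), 0xffffffff);
 *	bar = cfg_read32(PCIR_BAR(n));
 *	size = ~(bar & PCIM_BAR_MEM_BASE) + 1;
 *
 *	next = (next + size - 1) & ~(size - 1);
 *	cfg_write32(PCIR_BAR(n), window_base + next);
 *	next += size;
 *
 * PCIM_BAR_MEM_BASE strips the low flag bits, so a read-back that
 * masks down to 0xfffff000 sizes out to ~0xfffff000 + 1 == 0x1000, a
 * 4KB BAR; the "& ~(size - 1)" step keeps the running offset naturally
 * aligned, which power-of-two BARs require.
 */

/*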
*/ octopci_write_config(dev, b, s, f, PCIR_LATTIMER, 48, 1); /* Board-specific or device-specific fixups and workarounds. */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: if (b == 1 && s == 7 && f == 0) { bus_addr_t busaddr, unitbusaddr; uint32_t bar; uint32_t tmp; unsigned unit; /* * Set Tx DMA power. */ bar = octopci_read_config(dev, b, s, f, PCIR_BAR(3), 4); busaddr = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_MEM1)); busaddr += (bar & (uint32_t)PCIM_BAR_MEM_BASE); for (unit = 0; unit < 4; unit++) { unitbusaddr = busaddr + 0x430 + (unit << 8); tmp = le32toh(cvmx_read64_uint32(unitbusaddr)); tmp &= ~0x700; tmp |= 0x300; cvmx_write64_uint32(unitbusaddr, htole32(tmp)); } } break; #endif default: break; } /* Configure PCI-PCI bridges. */ class = octopci_read_config(dev, b, s, f, PCIR_CLASS, 1); if (class != PCIC_BRIDGE) return (secbus); subclass = octopci_read_config(dev, b, s, f, PCIR_SUBCLASS, 1); if (subclass != PCIS_BRIDGE_PCI) return (secbus); /* Enable memory and I/O access. */ command |= PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); /* Enable errors and parity checking. Do a bus reset. */ brctl = octopci_read_config(dev, b, s, f, PCIR_BRIDGECTL_1, 1); brctl |= PCIB_BCR_PERR_ENABLE | PCIB_BCR_SERR_ENABLE; /* Perform a secondary bus reset. */ brctl |= PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); brctl &= ~PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); secbus++; /* Program memory and I/O ranges. */ octopci_write_config(dev, b, s, f, PCIR_MEMBASE_1, CVMX_OCT_PCI_MEM1_BASE >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_MEMLIMIT_1, (CVMX_OCT_PCI_MEM1_BASE + CVMX_OCT_PCI_MEM1_SIZE - 1) >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_IOBASEL_1, CVMX_OCT_PCI_IO_BASE >> 8, 1); octopci_write_config(dev, b, s, f, PCIR_IOBASEH_1, CVMX_OCT_PCI_IO_BASE >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_IOLIMITL_1, (CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE - 1) >> 8, 1); octopci_write_config(dev, b, s, f, PCIR_IOLIMITH_1, (CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE - 1) >> 16, 2); /* Program prefetchable memory decoder. */ /* XXX */ /* Probe secondary/subordinate buses. */ octopci_write_config(dev, b, s, f, PCIR_PRIBUS_1, b, 1); octopci_write_config(dev, b, s, f, PCIR_SECBUS_1, secbus, 1); octopci_write_config(dev, b, s, f, PCIR_SUBBUS_1, 0xff, 1); /* Perform a secondary bus reset. */ brctl |= PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); brctl &= ~PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); /* Give the bus time to settle now before reading configspace. */ DELAY(100000); secbus = octopci_init_bus(dev, secbus); octopci_write_config(dev, b, s, f, PCIR_SUBBUS_1, secbus, 1); return (secbus); } static unsigned octopci_init_bus(device_t dev, unsigned b) { unsigned s, f; uint8_t hdrtype; unsigned secbus; secbus = b; for (s = 0; s <= PCI_SLOTMAX; s++) { for (f = 0; f <= PCI_FUNCMAX; f++) { hdrtype = octopci_read_config(dev, b, s, f, PCIR_HDRTYPE, 1); if (hdrtype == 0xff) { if (f == 0) break; /* Next slot. */ continue; /* Next function. */ } secbus = octopci_init_device(dev, b, s, f, secbus); if (f == 0 && (hdrtype & PCIM_MFDEV) == 0) break; /* Next slot. 
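*/

/*
 * The PRIBUS/SECBUS/SUBBUS writes above implement the usual bridge
 * bus-number bookkeeping.  Condensed, with cfg_write8() and scan_bus()
 * standing in for octopci_write_config() and octopci_init_bus(), and
 * the secondary bus reset in between omitted:
 *
 *	cfg_write8(b, s, f, PCIR_PRIBUS_1, b);
 *	cfg_write8(b, s, f, PCIR_SECBUS_1, secbus);
 *	cfg_write8(b, s, f, PCIR_SUBBUS_1, 0xff);
 *
 *	secbus = scan_bus(secbus);
 *
 *	cfg_write8(b, s, f, PCIR_SUBBUS_1, secbus);
 *
 * The subordinate bus number is opened up to 0xff while the bus behind
 * the bridge is being scanned, so configuration cycles for any bus
 * number can pass through, and is then clamped to the highest bus the
 * scan actually found.
 */

/*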
*/ } } return (secbus); } static uint64_t octopci_cs_addr(unsigned bus, unsigned slot, unsigned func, unsigned reg) { octeon_pci_config_space_address_t pci_addr; pci_addr.u64 = 0; pci_addr.s.upper = 2; pci_addr.s.io = 1; pci_addr.s.did = 3; pci_addr.s.subdid = CVMX_OCT_SUBDID_PCI_CFG; pci_addr.s.endian_swap = 1; pci_addr.s.bus = bus; pci_addr.s.dev = slot; pci_addr.s.func = func; pci_addr.s.reg = reg; return (pci_addr.u64); } static void octopci_init_pci(device_t dev) { cvmx_npi_mem_access_subid_t npi_mem_access_subid; cvmx_npi_pci_int_arb_cfg_t npi_pci_int_arb_cfg; cvmx_npi_ctl_status_t npi_ctl_status; cvmx_pci_ctl_status_2_t pci_ctl_status_2; cvmx_pci_cfg56_t pci_cfg56; cvmx_pci_cfg22_t pci_cfg22; cvmx_pci_cfg16_t pci_cfg16; cvmx_pci_cfg19_t pci_cfg19; cvmx_pci_cfg01_t pci_cfg01; unsigned i; /* * Reset the PCI bus. */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1); cvmx_read_csr(CVMX_CIU_SOFT_PRST); DELAY(2000); npi_ctl_status.u64 = 0; npi_ctl_status.s.max_word = 1; npi_ctl_status.s.timer = 1; cvmx_write_csr(CVMX_NPI_CTL_STATUS, npi_ctl_status.u64); /* * Set host mode. */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR320: case CVMX_BOARD_TYPE_CUST_LANNER_MR955: /* 32-bit PCI-X */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x0); break; #endif default: /* 64-bit PCI-X */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); break; } cvmx_read_csr(CVMX_CIU_SOFT_PRST); DELAY(2000); /* * Enable BARs and configure big BAR mode. */ pci_ctl_status_2.u32 = 0; pci_ctl_status_2.s.bb1_hole = 5; /* 256MB hole in BAR1 */ pci_ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */ pci_ctl_status_2.s.bb_ca = 1; /* Bypass cache for big BAR */ pci_ctl_status_2.s.bb_es = 1; /* Do big BAR byte-swapping */ pci_ctl_status_2.s.bb1 = 1; /* BAR1 is big */ pci_ctl_status_2.s.bb0 = 1; /* BAR0 is big */ pci_ctl_status_2.s.bar2pres = 1; /* BAR2 present */ pci_ctl_status_2.s.pmo_amod = 1; /* Round-robin priority */ pci_ctl_status_2.s.tsr_hwm = 1; pci_ctl_status_2.s.bar2_enb = 1; /* Enable BAR2 */ pci_ctl_status_2.s.bar2_esx = 1; /* Do BAR2 byte-swapping */ pci_ctl_status_2.s.bar2_cax = 1; /* Bypass cache for BAR2 */ NPI_WRITE(CVMX_NPI_PCI_CTL_STATUS_2, pci_ctl_status_2.u32); DELAY(2000); pci_ctl_status_2.u32 = NPI_READ(CVMX_NPI_PCI_CTL_STATUS_2); device_printf(dev, "%u-bit PCI%s bus.\n", pci_ctl_status_2.s.ap_64ad ? 64 : 32, pci_ctl_status_2.s.ap_pcix ? "-X" : ""); /* * Set up transaction splitting, etc., parameters. */ pci_cfg19.u32 = 0; pci_cfg19.s.mrbcm = 1; if (pci_ctl_status_2.s.ap_pcix) { pci_cfg19.s.mdrrmc = 0; pci_cfg19.s.tdomc = 4; } else { pci_cfg19.s.mdrrmc = 2; pci_cfg19.s.tdomc = 1; } NPI_WRITE(CVMX_NPI_PCI_CFG19, pci_cfg19.u32); NPI_READ(CVMX_NPI_PCI_CFG19); /* * Set up PCI error handling and memory access. */ pci_cfg01.u32 = 0; pci_cfg01.s.fbbe = 1; pci_cfg01.s.see = 1; pci_cfg01.s.pee = 1; pci_cfg01.s.me = 1; pci_cfg01.s.msae = 1; if (pci_ctl_status_2.s.ap_pcix) { pci_cfg01.s.fbb = 0; } else { pci_cfg01.s.fbb = 1; } NPI_WRITE(CVMX_NPI_PCI_CFG01, pci_cfg01.u32); NPI_READ(CVMX_NPI_PCI_CFG01); /* * Enable the Octeon bus arbiter. */ npi_pci_int_arb_cfg.u64 = 0; npi_pci_int_arb_cfg.s.en = 1; cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, npi_pci_int_arb_cfg.u64); /* * Disable master latency timer. */ pci_cfg16.u32 = 0; pci_cfg16.s.mltd = 1; NPI_WRITE(CVMX_NPI_PCI_CFG16, pci_cfg16.u32); NPI_READ(CVMX_NPI_PCI_CFG16); /* * Configure master arbiter. 
*/ pci_cfg22.u32 = 0; pci_cfg22.s.flush = 1; pci_cfg22.s.mrv = 255; NPI_WRITE(CVMX_NPI_PCI_CFG22, pci_cfg22.u32); NPI_READ(CVMX_NPI_PCI_CFG22); /* * Set up PCI-X capabilities. */ if (pci_ctl_status_2.s.ap_pcix) { pci_cfg56.u32 = 0; pci_cfg56.s.most = 3; pci_cfg56.s.roe = 1; /* Enable relaxed ordering */ pci_cfg56.s.dpere = 1; pci_cfg56.s.ncp = 0xe8; pci_cfg56.s.pxcid = 7; NPI_WRITE(CVMX_NPI_PCI_CFG56, pci_cfg56.u32); NPI_READ(CVMX_NPI_PCI_CFG56); } NPI_WRITE(CVMX_NPI_PCI_READ_CMD_6, 0x22); NPI_READ(CVMX_NPI_PCI_READ_CMD_6); NPI_WRITE(CVMX_NPI_PCI_READ_CMD_C, 0x33); NPI_READ(CVMX_NPI_PCI_READ_CMD_C); NPI_WRITE(CVMX_NPI_PCI_READ_CMD_E, 0x33); NPI_READ(CVMX_NPI_PCI_READ_CMD_E); /* * Configure MEM1 sub-DID access. */ npi_mem_access_subid.u64 = 0; npi_mem_access_subid.s.esr = 1; /* Byte-swap on read */ npi_mem_access_subid.s.esw = 1; /* Byte-swap on write */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: npi_mem_access_subid.s.shortl = 1; break; #endif default: break; } cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, npi_mem_access_subid.u64); /* * Configure BAR2. Linux says this has to come first. */ NPI_WRITE(CVMX_NPI_PCI_CFG08, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG08); NPI_WRITE(CVMX_NPI_PCI_CFG09, 0x00000080); NPI_READ(CVMX_NPI_PCI_CFG09); /* * Disable BAR1 IndexX. */ for (i = 0; i < 32; i++) { NPI_WRITE(CVMX_NPI_PCI_BAR1_INDEXX(i), 0); NPI_READ(CVMX_NPI_PCI_BAR1_INDEXX(i)); } /* * Configure BAR0 and BAR1. */ NPI_WRITE(CVMX_NPI_PCI_CFG04, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG04); NPI_WRITE(CVMX_NPI_PCI_CFG05, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG05); NPI_WRITE(CVMX_NPI_PCI_CFG06, 0x80000000); NPI_READ(CVMX_NPI_PCI_CFG06); NPI_WRITE(CVMX_NPI_PCI_CFG07, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG07); /* * Clear PCI interrupts. */ cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, 0xffffffffffffffffull); } static device_method_t octopci_methods[] = { /* Device interface */ DEVMETHOD(device_identify, octopci_identify), DEVMETHOD(device_probe, octopci_probe), DEVMETHOD(device_attach, octopci_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, octopci_read_ivar), DEVMETHOD(bus_alloc_resource, octopci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,octopci_activate_resource), DEVMETHOD(bus_deactivate_resource,bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_add_child, bus_generic_add_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, octopci_maxslots), DEVMETHOD(pcib_read_config, octopci_read_config), DEVMETHOD(pcib_write_config, octopci_write_config), DEVMETHOD(pcib_route_interrupt, octopci_route_interrupt), DEVMETHOD_END }; static driver_t octopci_driver = { "pcib", octopci_methods, sizeof(struct octopci_softc), }; static devclass_t octopci_devclass; DRIVER_MODULE(octopci, ciu, octopci_driver, octopci_devclass, 0, 0); Index: head/sys/mips/idt/idtpci.c =================================================================== --- head/sys/mips/idt/idtpci.c (revision 287881) +++ head/sys/mips/idt/idtpci.c (revision 287882) @@ -1,557 +1,557 @@ /* $NetBSD: idtpci.c,v 1.1 2007/03/20 08:52:02 dyoung Exp $ */ /*- * Copyright (c) 2007 David Young. * Copyright (c) 2007 Oleskandr Tymoshenko. All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /*- * Copyright (c) 2006 Itronix Inc. * All rights reserved. * * Written by Garrett D'Amore for Itronix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of Itronix Inc. may not be used to endorse * or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #ifdef IDTPCI_DEBUG int idtpci_debug = 1; #define IDTPCI_DPRINTF(__fmt, ...) \ do { \ if (idtpci_debug) \ printf((__fmt), __VA_ARGS__); \ } while (/*CONSTCOND*/0) #else /* !IDTPCI_DEBUG */ #define IDTPCI_DPRINTF(__fmt, ...) 
do { } while (/*CONSTCOND*/0) #endif /* IDTPCI_DEBUG */ #define IDTPCI_TAG_BUS_MASK 0x007f0000 #define IDTPCI_TAG_DEVICE_MASK 0x00007800 #define IDTPCI_TAG_FUNCTION_MASK 0x00000300 #define IDTPCI_TAG_REGISTER_MASK 0x0000007c #define IDTPCI_MAX_DEVICE #define REG_READ(o) *((volatile uint32_t *)MIPS_PHYS_TO_KSEG1(IDT_BASE_PCI + (o))) #define REG_WRITE(o,v) (REG_READ(o)) = (v) unsigned int korina_fixup[24] = { 0x00000157, 0x00000000, 0x00003c04, 0x00000008, 0x18800001, 0x18000001, 0x48000008, 0x00000000, 0x00000000, 0x00000000, 0x011d0214, 0x00000000, 0x00000000, 0x00000000, 0x38080101, 0x00008080, 0x00000d6e, 0x00000000, 0x00000051, 0x00000000, 0x00000055, 0x18000000, 0x00000000, 0x00000000 }; struct idtpci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman[2]; struct rman sc_io_rman[2]; struct rman sc_irq_rman; }; static uint32_t idtpci_make_addr(int bus, int slot, int func, int reg) { return 0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | reg; } static int idtpci_probe(device_t dev) { return (0); } static int idtpci_attach(device_t dev) { int busno = 0; struct idtpci_softc *sc = device_get_softc(dev); unsigned int pci_data, force_endianess = 0; int i; bus_addr_t addr; sc->sc_dev = dev; sc->sc_busno = busno; /* TODO: Check for host mode */ /* Enabled PCI, IG mode, EAP mode */ REG_WRITE(IDT_PCI_CNTL, IDT_PCI_CNTL_IGM | IDT_PCI_CNTL_EAP | IDT_PCI_CNTL_EN); /* Wait while "Reset in progress bit" set */ while(1) { pci_data = REG_READ(IDT_PCI_STATUS); if((pci_data & IDT_PCI_STATUS_RIP) == 0) break; } /* Reset status register */ REG_WRITE(IDT_PCI_STATUS, 0); /* Mask interrupts related to status register */ REG_WRITE(IDT_PCI_STATUS_MASK, 0xffffffff); /* Disable PCI decoupled access */ REG_WRITE(IDT_PCI_DAC, 0); /* Zero status and mask DA interrupts */ REG_WRITE(IDT_PCI_DAS, 0); REG_WRITE(IDT_PCI_DASM, 0x7f); /* Init PCI messaging unit */ /* Disable messaging interrupts */ REG_WRITE(IDT_PCI_IIC, 0); REG_WRITE(IDT_PCI_IIM, 0xffffffff); REG_WRITE(IDT_PCI_OIC, 0); REG_WRITE(IDT_PCI_OIM, 0); #ifdef __MIPSEB__ force_endianess = IDT_PCI_LBA_FE; #endif /* LBA0 -- memory window */ REG_WRITE(IDT_PCI_LBA0, IDT_PCIMEM0_BASE); REG_WRITE(IDT_PCI_LBA0_MAP, IDT_PCIMEM0_BASE); REG_WRITE(IDT_PCI_LBA0_CNTL, IDT_PCI_LBA_SIZE_16MB | force_endianess); pci_data = REG_READ(IDT_PCI_LBA0_CNTL); /* LBA1 -- memory window */ REG_WRITE(IDT_PCI_LBA1, IDT_PCIMEM1_BASE); REG_WRITE(IDT_PCI_LBA1_MAP, IDT_PCIMEM1_BASE); REG_WRITE(IDT_PCI_LBA1_CNTL, IDT_PCI_LBA_SIZE_256MB | force_endianess); pci_data = REG_READ(IDT_PCI_LBA1_CNTL); /* LBA2 -- IO window */ REG_WRITE(IDT_PCI_LBA2, IDT_PCIMEM2_BASE); REG_WRITE(IDT_PCI_LBA2_MAP, IDT_PCIMEM2_BASE); REG_WRITE(IDT_PCI_LBA2_CNTL, IDT_PCI_LBA_SIZE_4MB | IDT_PCI_LBA_MSI | force_endianess); pci_data = REG_READ(IDT_PCI_LBA2_CNTL); /* LBA3 -- IO window */ REG_WRITE(IDT_PCI_LBA3, IDT_PCIMEM3_BASE); REG_WRITE(IDT_PCI_LBA3_MAP, IDT_PCIMEM3_BASE); REG_WRITE(IDT_PCI_LBA3_CNTL, IDT_PCI_LBA_SIZE_1MB | IDT_PCI_LBA_MSI | force_endianess); pci_data = REG_READ(IDT_PCI_LBA3_CNTL); pci_data = REG_READ(IDT_PCI_CNTL) & ~IDT_PCI_CNTL_TNR; REG_WRITE(IDT_PCI_CNTL, pci_data); pci_data = REG_READ(IDT_PCI_CNTL); /* Rewrite Target Control register with default values */ REG_WRITE(IDT_PCI_TC, (IDT_PCI_TC_DTIMER << 8) | IDT_PCI_TC_RTIMER); /* Perform Korina fixup */ addr = idtpci_make_addr(0, 0, 0, 4); for (i = 0; i < 24; i++) { REG_WRITE(IDT_PCI_CFG_ADDR, addr); REG_WRITE(IDT_PCI_CFG_DATA, korina_fixup[i]); __asm__ volatile ("sync"); REG_WRITE(IDT_PCI_CFG_ADDR, 0); REG_WRITE(IDT_PCI_CFG_DATA, 0); addr 
+= 4; } /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io_rman[0].rm_type = RMAN_ARRAY; sc->sc_io_rman[0].rm_descr = "IDTPCI I/O Ports window 1"; if (rman_init(&sc->sc_io_rman[0]) != 0 || rman_manage_region(&sc->sc_io_rman[0], IDT_PCIMEM2_BASE, IDT_PCIMEM2_BASE + IDT_PCIMEM2_SIZE - 1) != 0) { panic("idtpci_attach: failed to set up I/O rman"); } sc->sc_io_rman[1].rm_type = RMAN_ARRAY; sc->sc_io_rman[1].rm_descr = "IDTPCI I/O Ports window 2"; if (rman_init(&sc->sc_io_rman[1]) != 0 || rman_manage_region(&sc->sc_io_rman[1], IDT_PCIMEM3_BASE, IDT_PCIMEM3_BASE + IDT_PCIMEM3_SIZE - 1) != 0) { panic("idtpci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem_rman[0].rm_type = RMAN_ARRAY; sc->sc_mem_rman[0].rm_descr = "IDTPCI PCI Memory window 1"; if (rman_init(&sc->sc_mem_rman[0]) != 0 || rman_manage_region(&sc->sc_mem_rman[0], IDT_PCIMEM0_BASE, IDT_PCIMEM0_BASE + IDT_PCIMEM0_SIZE) != 0) { panic("idtpci_attach: failed to set up memory rman"); } sc->sc_mem_rman[1].rm_type = RMAN_ARRAY; sc->sc_mem_rman[1].rm_descr = "IDTPCI PCI Memory window 2"; if (rman_init(&sc->sc_mem_rman[1]) != 0 || rman_manage_region(&sc->sc_mem_rman[1], IDT_PCIMEM1_BASE, IDT_PCIMEM1_BASE + IDT_PCIMEM1_SIZE) != 0) { panic("idtpci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "IDTPCI PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, PCI_IRQ_BASE, PCI_IRQ_END) != 0) panic("idtpci_attach: failed to set up IRQ rman"); - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int idtpci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t idtpci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data; uint32_t shift, mask; bus_addr_t addr; IDTPCI_DPRINTF("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); addr = idtpci_make_addr(bus, slot, func, reg); REG_WRITE(IDT_PCI_CFG_ADDR, addr); data = REG_READ(IDT_PCI_CFG_DATA); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if (reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } __asm__ volatile ("sync"); IDTPCI_DPRINTF("%s: read 0x%x\n", __func__, data); return (data); } static void idtpci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { bus_addr_t addr; uint32_t reg_data; uint32_t shift, mask; IDTPCI_DPRINTF("%s: tag (%x, %x, %x) reg %d(%d) data %08x\n", __func__, bus, slot, func, reg, bytes, data); if (bytes != 4) { reg_data = idtpci_read_config(dev, bus, slot, func, reg, 4); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if (reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } } addr = idtpci_make_addr(bus, slot, func, reg); REG_WRITE(IDT_PCI_CFG_ADDR, addr); REG_WRITE(IDT_PCI_CFG_DATA, data); __asm__ volatile ("sync"); 
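/*
 * Two details of the accessors above, restated with the alignment made
 * explicit.  idtpci_make_addr() packs a type-1-style configuration
 * address, and sub-word writes are folded into the containing 32-bit
 * word before being pushed out; cfg_read32() and cfg_write32() are
 * placeholders for the IDT_PCI_CFG_ADDR/IDT_PCI_CFG_DATA register
 * sequence used here:
 *
 *	addr = 0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | reg;
 *
 *	shift = 8 * (reg & 3);
 *	word = cfg_read32(bus, slot, func, reg & ~3);
 *	word = (word & ~(0xffu << shift)) | ((uint32_t)val8 << shift);
 *	cfg_write32(bus, slot, func, reg & ~3, word);
 *
 * The shifts line up with the IDTPCI_TAG_*_MASK defines earlier in
 * this file.
 */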
REG_WRITE(IDT_PCI_CFG_ADDR, 0); REG_WRITE(IDT_PCI_CFG_DATA, 0); } static int idtpci_route_interrupt(device_t pcib, device_t device, int pin) { static int idt_pci_table[2][12] = { { 0, 0, 2, 3, 2, 3, 0, 0, 0, 0, 0, 1 }, { 0, 0, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3 } }; int dev, bus, irq; dev = pci_get_slot(device); bus = pci_get_bus(device); if (bootverbose) device_printf(pcib, "routing pin %d for %s\n", pin, device_get_nameunit(device)); if (bus >= 0 && bus <= 1 && dev >= 0 && dev <= 11) { irq = IP_IRQ(6, idt_pci_table[bus][dev] + 4); if (bootverbose) printf("idtpci: %d/%d/%d -> IRQ%d\n", pci_get_bus(device), dev, pci_get_function(device), irq); return (irq); } else printf("idtpci: no mapping for %d/%d/%d\n", pci_get_bus(device), dev, pci_get_function(device)); return (-1); } static int idtpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct idtpci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int idtpci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct idtpci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * idtpci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct idtpci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm1, *rm2; switch (type) { case SYS_RES_IRQ: rm1 = &sc->sc_irq_rman; rm2 = NULL; break; case SYS_RES_MEMORY: rm1 = &sc->sc_mem_rman[0]; rm2 = &sc->sc_mem_rman[1]; break; case SYS_RES_IOPORT: rm1 = &sc->sc_io_rman[0]; rm2 = &sc->sc_io_rman[1]; break; default: return (NULL); } rv = rman_reserve_resource(rm1, start, end, count, flags, child); /* Try second window if it exists */ if ((rv == NULL) && (rm2 != NULL)) rv = rman_reserve_resource(rm2, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int idtpci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (intr_event_remove_handler(cookie)); } static device_method_t idtpci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, idtpci_probe), DEVMETHOD(device_attach, idtpci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, idtpci_read_ivar), DEVMETHOD(bus_write_ivar, idtpci_write_ivar), DEVMETHOD(bus_alloc_resource, idtpci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, idtpci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, idtpci_maxslots), DEVMETHOD(pcib_read_config, idtpci_read_config), DEVMETHOD(pcib_write_config, idtpci_write_config), DEVMETHOD(pcib_route_interrupt, idtpci_route_interrupt), DEVMETHOD_END }; static driver_t idtpci_driver = { "pcib", idtpci_methods, sizeof(struct idtpci_softc), }; static devclass_t idtpci_devclass; DRIVER_MODULE(idtpci, obio, idtpci_driver, idtpci_devclass, 0, 0); Index: 
head/sys/mips/malta/gt_pci.c =================================================================== --- head/sys/mips/malta/gt_pci.c (revision 287881) +++ head/sys/mips/malta/gt_pci.c (revision 287882) @@ -1,774 +1,774 @@ /* $NetBSD: gt_pci.c,v 1.4 2003/07/15 00:24:54 lukem Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PCI configuration support for gt I/O Processor chip. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #define ICU_LEN 16 /* number of ISA IRQs */ /* * XXX: These defines are from NetBSD's . Respective file * from FreeBSD src tree lacks some definitions. */ #define PIC_OCW1 1 #define PIC_OCW2 0 #define PIC_OCW3 0 #define OCW2_SELECT 0 #define OCW2_ILS(x) ((x) << 0) /* interrupt level select */ #define OCW3_POLL_IRQ(x) ((x) & 0x7f) #define OCW3_POLL_PENDING (1U << 7) /* * Galileo controller's registers are LE so convert to then * to/from native byte order. 
We rely on boot loader or emulator * to set "swap bytes" configuration correctly for us */ #define GT_PCI_DATA(v) htole32((v)) #define GT_HOST_DATA(v) le32toh((v)) struct gt_pci_softc; struct gt_pci_intr_cookie { int irq; struct gt_pci_softc *sc; }; struct gt_pci_softc { device_t sc_dev; bus_space_tag_t sc_st; bus_space_handle_t sc_ioh_icu1; bus_space_handle_t sc_ioh_icu2; bus_space_handle_t sc_ioh_elcr; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; unsigned long sc_mem; bus_space_handle_t sc_io; struct resource *sc_irq; struct intr_event *sc_eventstab[ICU_LEN]; struct gt_pci_intr_cookie sc_intr_cookies[ICU_LEN]; uint16_t sc_imask; uint16_t sc_elcr; uint16_t sc_reserved; void *sc_ih; }; static void gt_pci_set_icus(struct gt_pci_softc *); static int gt_pci_intr(void *v); static int gt_pci_probe(device_t); static int gt_pci_attach(device_t); static int gt_pci_activate_resource(device_t, device_t, int, int, struct resource *); static int gt_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int gt_pci_teardown_intr(device_t, device_t, struct resource *, void*); static int gt_pci_maxslots(device_t ); static int gt_pci_conf_setup(struct gt_pci_softc *, int, int, int, int, uint32_t *); static uint32_t gt_pci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void gt_pci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin); static struct resource * gt_pci_alloc_resource(device_t, device_t, int, int *, u_long, u_long, u_long, u_int); static void gt_pci_mask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; sc->sc_imask |= (1 << irq); sc->sc_elcr |= (1 << irq); gt_pci_set_icus(sc); } static void gt_pci_unmask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; /* Enable it, set trigger mode. */ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); gt_pci_set_icus(sc); } static void gt_pci_set_icus(struct gt_pci_softc *sc) { /* Enable the cascade IRQ (2) if 8-15 is enabled. */ if ((sc->sc_imask & 0xff00) != 0xff00) sc->sc_imask &= ~(1U << 2); else sc->sc_imask |= (1U << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW1, sc->sc_imask & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW1, (sc->sc_imask >> 8) & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); } static int gt_pci_intr(void *v) { struct gt_pci_softc *sc = v; struct intr_event *event; int irq; for (;;) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3); if ((irq & OCW3_POLL_PENDING) == 0) { return FILTER_HANDLED; } irq = OCW3_POLL_IRQ(irq); if (irq == 2) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3); if (irq & OCW3_POLL_PENDING) irq = OCW3_POLL_IRQ(irq) + 8; else irq = 2; } event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) continue; /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); /* XXX: Log stray IRQs */ /* Send a specific EOI to the 8259. 
*/ if (irq > 7) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq & 7)); irq = 2; } bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq)); } return FILTER_HANDLED; } static int gt_pci_probe(device_t dev) { device_set_desc(dev, "GT64120 PCI bridge"); return (0); } static int gt_pci_attach(device_t dev) { uint32_t busno; struct gt_pci_softc *sc = device_get_softc(dev); int rid; busno = 0; sc->sc_dev = dev; sc->sc_busno = busno; sc->sc_st = mips_bus_space_generic; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MIPS_PHYS_TO_KSEG1(MALTA_PCI0_IO_BASE); sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "GT64120 PCI I/O Ports"; /* * First 256 bytes are ISA's registers: e.g. i8259's * So do not use them for general purpose PCI I/O window */ if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0x100, 0xffff) != 0) { panic("gt_pci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MIPS_PHYS_TO_KSEG1(MALTA_PCIMEM1_BASE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "GT64120 PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + MALTA_PCIMEM1_SIZE) != 0) { panic("gt_pci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "GT64120 PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("gt_pci_attach: failed to set up IRQ rman"); /* * Map the PIC/ELCR registers. */ #if 0 if (bus_space_map(sc->sc_st, 0x4d0, 2, 0, &sc->sc_ioh_elcr) != 0) device_printf(dev, "unable to map ELCR registers\n"); if (bus_space_map(sc->sc_st, IO_ICU1, 2, 0, &sc->sc_ioh_icu1) != 0) device_printf(dev, "unable to map ICU1 registers\n"); if (bus_space_map(sc->sc_st, IO_ICU2, 2, 0, &sc->sc_ioh_icu2) != 0) device_printf(dev, "unable to map ICU2 registers\n"); #else sc->sc_ioh_elcr = sc->sc_io + 0x4d0; sc->sc_ioh_icu1 = sc->sc_io + IO_ICU1; sc->sc_ioh_icu2 = sc->sc_io + IO_ICU2; #endif /* All interrupts default to "masked off". */ sc->sc_imask = 0xffff; /* All interrupts default to edge-triggered. */ sc->sc_elcr = 0; /* * Initialize the 8259s. 
*/ /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, ICW1_RESET | ICW1_IC4); /* * XXX: values from NetBSD's */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_RR); /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, ICW1_RESET | ICW1_IC4); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_RR); /* * Default all interrupts to edge-triggered. */ bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); /* * Some ISA interrupts are reserved for devices that * we know are hard-wired to certain IRQs. */ sc->sc_reserved = (1U << 0) | /* timer */ (1U << 1) | /* keyboard controller (keyboard) */ (1U << 2) | /* PIC cascade */ (1U << 3) | /* COM 2 */ (1U << 4) | /* COM 1 */ (1U << 6) | /* floppy */ (1U << 7) | /* centronics */ (1U << 8) | /* RTC */ (1U << 9) | /* I2C */ (1U << 12) | /* keyboard controller (mouse) */ (1U << 14) | /* IDE primary */ (1U << 15); /* IDE secondary */ /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, MALTA_SOUTHBRIDGE_INTR, MALTA_SOUTHBRIDGE_INTR, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, gt_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* Initialize memory and i/o rmans. */ - device_add_child(dev, "pci", busno); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int gt_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int gt_pci_conf_setup(struct gt_pci_softc *sc, int bus, int slot, int func, int reg, uint32_t *addr) { *addr = (bus << 16) | (slot << 11) | (func << 8) | reg; return (0); } static uint32_t gt_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t addr; uint32_t shift, mask; if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return (uint32_t)(-1); /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) data = GT_PCI_DATA(GT_REGVAL(GT_PCI0_CFG_DATA)); else data = GT_REGVAL(GT_PCI0_CFG_DATA); /* Check for master abort. 
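*/

/*
 * The abort check that follows is what turns an empty slot into the
 * conventional all-ones answer.  In outline, with cause_clear(),
 * cfg_cycle() and cause_read() as placeholders for the GT_REGVAL()
 * accesses around it:
 *
 *	cause_clear();
 *	data = cfg_cycle(bus, slot, func, reg);
 *	if (cause_read() & (GTIC_MASABORT0 | GTIC_TARABORT0))
 *		data = 0xffffffff;
 *
 * A configuration read that no device claims terminates in a master
 * abort; reporting 0xffffffff for it lets callers treat the slot as
 * empty and move on.
 */

/*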
*/ if (GT_HOST_DATA(GT_REGVAL(GT_INTR_CAUSE)) & (GTIC_MASABORT0 | GTIC_TARABORT0)) data = (uint32_t) -1; switch(reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch(bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if(reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } #if 0 printf("PCICONF_READ(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif return (data); } static void gt_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t addr; uint32_t reg_data; uint32_t shift, mask; if(bytes != 4) { reg_data = gt_pci_read_config(dev, bus, slot, func, reg, 4); shift = 8 * (reg & 3); switch(bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if(reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } } if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return; /* The galileo has problems accessing device 31. */ if (bus == 0 && slot == 31) return; /* XXX: no support for bus > 0 yet */ if (bus > 0) return; /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) GT_REGVAL(GT_PCI0_CFG_DATA) = GT_PCI_DATA(data); else GT_REGVAL(GT_PCI0_CFG_DATA) = data; #if 0 printf("PCICONF_WRITE(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif } static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin) { int bus; int device; int func; /* struct gt_pci_softc *sc = device_get_softc(pcib); */ bus = pci_get_bus(dev); device = pci_get_slot(dev); func = pci_get_function(dev); /* * XXXMIPS: We need routing logic. This is just a stub . */ switch (device) { case 9: /* * PIIX4 IDE adapter. 
HW IRQ0 */ return 0; case 11: /* Ethernet */ return 10; default: device_printf(pcib, "no IRQ mapping for %d/%d/%d/%d\n", bus, device, func, pin); } return (0); } static int gt_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gt_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int gt_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct gt_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * gt_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct gt_pci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, gt_pci_bus_space); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); } static int gt_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int gt_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct gt_pci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; sc->sc_intr_cookies[irq].irq = irq; sc->sc_intr_cookies[irq].sc = sc; if (event == NULL) { error = intr_event_create(&event, (void *)&sc->sc_intr_cookies[irq], 0, irq, gt_pci_mask_irq, gt_pci_unmask_irq, NULL, NULL, "gt_pci intr%d:", irq); if (error) return 0; sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); gt_pci_unmask_irq((void *)&sc->sc_intr_cookies[irq]); return 0; } static int gt_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct gt_pci_softc *sc = device_get_softc(dev); int irq; irq = rman_get_start(res); gt_pci_mask_irq((void *)&sc->sc_intr_cookies[irq]); return (intr_event_remove_handler(cookie)); } static device_method_t gt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gt_pci_probe), DEVMETHOD(device_attach, gt_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, gt_read_ivar), DEVMETHOD(bus_write_ivar, gt_write_ivar), DEVMETHOD(bus_alloc_resource, gt_pci_alloc_resource), 
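/*
 * How gt_pci_alloc_resource() above turns an rman allocation into a
 * usable handle, condensed from gt_pci_attach() and the allocator: the
 * window base is pre-mapped through uncached KSEG1, so the bus handle
 * is simply that virtual base plus the allocation's start address:
 *
 *	sc->sc_mem = MIPS_PHYS_TO_KSEG1(MALTA_PCIMEM1_BASE);
 *	...
 *	bh = sc->sc_mem + rman_get_start(rv);
 *	rman_set_bustag(rv, gt_pci_bus_space);
 *	rman_set_bushandle(rv, bh);
 */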
DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, gt_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, gt_pci_setup_intr), DEVMETHOD(bus_teardown_intr, gt_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, gt_pci_maxslots), DEVMETHOD(pcib_read_config, gt_pci_read_config), DEVMETHOD(pcib_write_config, gt_pci_write_config), DEVMETHOD(pcib_route_interrupt, gt_pci_route_interrupt), DEVMETHOD_END }; static driver_t gt_pci_driver = { "pcib", gt_pci_methods, sizeof(struct gt_pci_softc), }; static devclass_t gt_pci_devclass; DRIVER_MODULE(gt_pci, gt, gt_pci_driver, gt_pci_devclass, 0, 0); Index: head/sys/mips/nlm/xlp_pci.c =================================================================== --- head/sys/mips/nlm/xlp_pci.c (revision 287881) +++ head/sys/mips/nlm/xlp_pci.c (revision 287882) @@ -1,574 +1,574 @@ /*- * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" static int xlp_pci_attach(device_t dev) { struct pci_devinfo *dinfo; device_t pcib; int maxslots, s, f, pcifunchigh, irq; int busno, node, devoffset; uint16_t devid; uint8_t hdrtype; /* * The on-chip devices are on a bus that is almost, but not * quite, completely like PCI. Add those things by hand. 
*/ pcib = device_get_parent(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { pcifunchigh = 0; f = 0; hdrtype = PCIB_READ_CONFIG(pcib, busno, s, f, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCI_FUNCMAX; node = s / 8; for (f = 0; f <= pcifunchigh; f++) { devoffset = XLP_HDR_OFFSET(node, 0, s % 8, f); if (!nlm_dev_exists(devoffset)) continue; /* Find if there is a desc for the SoC device */ devid = PCIB_READ_CONFIG(pcib, busno, s, f, PCIR_DEVICE, 2); /* Skip devices that don't have a proper PCI header */ switch (devid) { case PCI_DEVICE_ID_NLM_ICI: case PCI_DEVICE_ID_NLM_PIC: case PCI_DEVICE_ID_NLM_FMN: case PCI_DEVICE_ID_NLM_UART: case PCI_DEVICE_ID_NLM_I2C: case PCI_DEVICE_ID_NLM_NOR: case PCI_DEVICE_ID_NLM_MMC: continue; case PCI_DEVICE_ID_NLM_EHCI: irq = PIC_USB_IRQ(f); PCIB_WRITE_CONFIG(pcib, busno, s, f, XLP_PCI_DEVSCRATCH_REG0 << 2, (1 << 8) | irq, 4); } dinfo = pci_read_device(pcib, pcib_get_domain(dev), busno, s, f, sizeof(*dinfo)); pci_add_child(dev, dinfo); } } return (bus_generic_attach(dev)); } static int xlp_pci_probe(device_t dev) { device_t pcib; pcib = device_get_parent(dev); /* * Only the top level bus has SoC devices, leave the rest to * Generic PCI code */ if (strcmp(device_get_nameunit(pcib), "pcib0") != 0) return (ENXIO); device_set_desc(dev, "XLP SoCbus"); return (BUS_PROBE_DEFAULT); } static devclass_t pci_devclass; static device_method_t xlp_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xlp_pci_probe), DEVMETHOD(device_attach, xlp_pci_attach), DEVMETHOD_END }; DEFINE_CLASS_1(pci, xlp_pci_driver, xlp_pci_methods, sizeof(struct pci_softc), pci_driver); DRIVER_MODULE(xlp_pci, pcib, xlp_pci_driver, pci_devclass, 0, 0); static int xlp_pcib_probe(device_t dev) { if (ofw_bus_is_compatible(dev, "netlogic,xlp-pci")) { device_set_desc(dev, "XLP PCI bus"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int xlp_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = 0; return (0); } return (ENOENT); } static int xlp_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: return (EINVAL); } return (ENOENT); } static int xlp_pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } static u_int32_t xlp_pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { uint32_t data = 0; uint64_t cfgaddr; int regindex = reg/sizeof(uint32_t); cfgaddr = nlm_pcicfg_base(XLP_HDR_OFFSET(0, b, s, f)); if ((width == 2) && (reg & 1)) return 0xFFFFFFFF; else if ((width == 4) && (reg & 3)) return 0xFFFFFFFF; /* * The intline and int pin of SoC devices are DOA, except * for bridges (slot %8 == 1). * use the values we stashed in a writable PCI scratch reg. 
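/*
 * Illustrative sketch (not part of this change): the config accessors here
 * only touch naturally aligned 32-bit registers; 1- and 2-byte accesses are
 * carved out of (or merged into) the containing word by shifting on the low
 * two offset bits, and misaligned widths are rejected (reads answer all
 * ones).  A self-contained model of just that arithmetic follows; the
 * toy_* names and the sample register value are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* Extract a 1-, 2- or 4-byte field at byte offset 'reg' from the aligned
 * 32-bit register word that contains it. */
static uint32_t
toy_cfg_extract(uint32_t word, unsigned reg, int width)
{
    unsigned shift = (reg & 3) << 3;

    if ((width == 2 && (reg & 1)) || (width == 4 && (reg & 3)))
        return (0xffffffff);            /* misaligned: fail like a master abort */
    if (width == 1)
        return ((word >> shift) & 0xff);
    if (width == 2)
        return ((word >> shift) & 0xffff);
    return (word);
}

/* Read-modify-write helper: merge a narrow value into the 32-bit word. */
static uint32_t
toy_cfg_merge(uint32_t word, unsigned reg, uint32_t val, int width)
{
    unsigned shift = (reg & 3) << 3;
    uint32_t mask = (width == 1) ? 0xff : 0xffff;

    if (width == 4)
        return (val);
    return ((word & ~(mask << shift)) | ((val & mask) << shift));
}

int
main(void)
{
    uint32_t devvendor = 0x00641011;    /* hypothetical ID word */

    printf("vendor: 0x%04x\n", toy_cfg_extract(devvendor, 0, 2));
    printf("device: 0x%04x\n", toy_cfg_extract(devvendor, 2, 2));
    printf("merged: 0x%08x\n", toy_cfg_merge(devvendor, 2, 0xbeef, 2));
    return (0);
}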
*/ if (b == 0 && regindex == 0xf && s % 8 > 1) regindex = XLP_PCI_DEVSCRATCH_REG0; data = nlm_read_pci_reg(cfgaddr, regindex); if (width == 1) return ((data >> ((reg & 3) << 3)) & 0xff); else if (width == 2) return ((data >> ((reg & 3) << 3)) & 0xffff); else return (data); } static void xlp_pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, u_int32_t val, int width) { uint64_t cfgaddr; uint32_t data = 0; int regindex = reg / sizeof(uint32_t); cfgaddr = nlm_pcicfg_base(XLP_HDR_OFFSET(0, b, s, f)); if ((width == 2) && (reg & 1)) return; else if ((width == 4) && (reg & 3)) return; if (width == 1) { data = nlm_read_pci_reg(cfgaddr, regindex); data = (data & ~(0xff << ((reg & 3) << 3))) | (val << ((reg & 3) << 3)); } else if (width == 2) { data = nlm_read_pci_reg(cfgaddr, regindex); data = (data & ~(0xffff << ((reg & 3) << 3))) | (val << ((reg & 3) << 3)); } else { data = val; } /* * use shadow reg for intpin/intline which are dead */ if (b == 0 && regindex == 0xf && s % 8 > 1) regindex = XLP_PCI_DEVSCRATCH_REG0; nlm_write_pci_reg(cfgaddr, regindex, data); } /* * Enable byte swap in hardware when compiled big-endian. * Programs a link's PCIe SWAP regions from the link's IO and MEM address * ranges. */ static void xlp_pcib_hardware_swap_enable(int node, int link) { #if BYTE_ORDER == BIG_ENDIAN uint64_t bbase, linkpcibase; uint32_t bar; int pcieoffset; pcieoffset = XLP_IO_PCIE_OFFSET(node, link); if (!nlm_dev_exists(pcieoffset)) return; bbase = nlm_get_bridge_regbase(node); linkpcibase = nlm_pcicfg_base(pcieoffset); bar = nlm_read_bridge_reg(bbase, BRIDGE_PCIEMEM_BASE0 + link); nlm_write_pci_reg(linkpcibase, PCIE_BYTE_SWAP_MEM_BASE, bar); bar = nlm_read_bridge_reg(bbase, BRIDGE_PCIEMEM_LIMIT0 + link); nlm_write_pci_reg(linkpcibase, PCIE_BYTE_SWAP_MEM_LIM, bar | 0xFFF); bar = nlm_read_bridge_reg(bbase, BRIDGE_PCIEIO_BASE0 + link); nlm_write_pci_reg(linkpcibase, PCIE_BYTE_SWAP_IO_BASE, bar); bar = nlm_read_bridge_reg(bbase, BRIDGE_PCIEIO_LIMIT0 + link); nlm_write_pci_reg(linkpcibase, PCIE_BYTE_SWAP_IO_LIM, bar | 0xFFF); #endif } static int xlp_pcib_attach(device_t dev) { int node, link; /* enable hardware swap on all nodes/links */ for (node = 0; node < XLP_MAX_NODES; node++) for (link = 0; link < 4; link++) xlp_pcib_hardware_swap_enable(node, link); - device_add_child(dev, "pci", 0); + device_add_child(dev, "pci", -1); bus_generic_attach(dev); return (0); } /* * XLS PCIe can have upto 4 links, and each link has its on IRQ * Find the link on which the device is on */ static int xlp_pcie_link(device_t pcib, device_t dev) { device_t parent, tmp; /* find the lane on which the slot is connected to */ tmp = dev; while (1) { parent = device_get_parent(tmp); if (parent == NULL || parent == pcib) { device_printf(dev, "Cannot find parent bus\n"); return (-1); } if (strcmp(device_get_nameunit(parent), "pci0") == 0) break; tmp = parent; } return (pci_get_function(tmp)); } static int xlp_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { int i, link; /* * Each link has 32 MSIs that can be allocated, but for now * we only support one device per link. * msi_alloc() equivalent is needed when we start supporting * bridges on the PCIe link. 
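/*
 * Illustrative sketch (not part of this change): the MSI allocator below
 * hands out software IRQ numbers 64 + 32 * link + vector so that later code
 * can tell an MSI apart from a wired interrupt and recover the PCIe link;
 * the map/setup paths invert the encoding.  The self-contained round trip
 * below uses the driver's 64/32 constants; everything else (toy_* names,
 * sample values) is illustrative.
 */
#include <stdio.h>

#define TOY_MSI_BASE        64      /* IRQs below this are wired interrupts */
#define TOY_MSI_PER_LINK    32

static int
toy_msi_encode(int link, int vector)
{
    return (TOY_MSI_BASE + link * TOY_MSI_PER_LINK + vector);
}

static int
toy_msi_is_msi(int irq)
{
    return (irq >= TOY_MSI_BASE);
}

static int
toy_msi_link(int irq)
{
    return ((irq - TOY_MSI_BASE) / TOY_MSI_PER_LINK);
}

static int
toy_msi_vector(int irq)
{
    return ((irq - TOY_MSI_BASE) % TOY_MSI_PER_LINK);
}

int
main(void)
{
    int irq = toy_msi_encode(2, 5);     /* link 2, vector 5 -> 133 */

    printf("irq=%d msi=%d link=%d vector=%d\n", irq,
        toy_msi_is_msi(irq), toy_msi_link(irq), toy_msi_vector(irq));
    return (0);
}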
*/ link = xlp_pcie_link(pcib, dev); if (link == -1) return (ENXIO); /* * encode the irq so that we know it is a MSI interrupt when we * setup interrupts */ for (i = 0; i < count; i++) irqs[i] = 64 + link * 32 + i; return (0); } static int xlp_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (0); } static int xlp_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { int link; if (irq < 64) { device_printf(dev, "%s: map_msi for irq %d - ignored", device_get_nameunit(pcib), irq); return (ENXIO); } link = (irq - 64) / 32; *addr = MIPS_MSI_ADDR(0); *data = MIPS_MSI_DATA(PIC_PCIE_IRQ(link)); return (0); } static void bridge_pcie_ack(int irq, void *arg) { uint32_t node,reg; uint64_t base; node = nlm_nodeid(); reg = PCIE_MSI_STATUS; switch (irq) { case PIC_PCIE_0_IRQ: base = nlm_pcicfg_base(XLP_IO_PCIE0_OFFSET(node)); break; case PIC_PCIE_1_IRQ: base = nlm_pcicfg_base(XLP_IO_PCIE1_OFFSET(node)); break; case PIC_PCIE_2_IRQ: base = nlm_pcicfg_base(XLP_IO_PCIE2_OFFSET(node)); break; case PIC_PCIE_3_IRQ: base = nlm_pcicfg_base(XLP_IO_PCIE3_OFFSET(node)); break; default: return; } nlm_write_pci_reg(base, reg, 0xFFFFFFFF); return; } static int mips_platform_pcib_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error = 0; int xlpirq; error = rman_activate_resource(irq); if (error) return error; if (rman_get_start(irq) != rman_get_end(irq)) { device_printf(dev, "Interrupt allocation %lu != %lu\n", rman_get_start(irq), rman_get_end(irq)); return (EINVAL); } xlpirq = rman_get_start(irq); if (xlpirq == 0) return (0); if (strcmp(device_get_name(dev), "pcib") != 0) return (0); /* * temporary hack for MSI, we support just one device per * link, and assign the link interrupt to the device interrupt */ if (xlpirq >= 64) { int node, val, link; uint64_t base; xlpirq -= 64; if (xlpirq % 32 != 0) return (0); node = nlm_nodeid(); link = xlpirq / 32; base = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node,link)); /* MSI Interrupt Vector enable at bridge's configuration */ nlm_write_pci_reg(base, PCIE_MSI_EN, PCIE_MSI_VECTOR_INT_EN); val = nlm_read_pci_reg(base, PCIE_INT_EN0); /* MSI Interrupt enable at bridge's configuration */ nlm_write_pci_reg(base, PCIE_INT_EN0, (val | PCIE_MSI_INT_EN)); /* legacy interrupt disable at bridge */ val = nlm_read_pci_reg(base, PCIE_BRIDGE_CMD); nlm_write_pci_reg(base, PCIE_BRIDGE_CMD, (val | PCIM_CMD_INTxDIS)); /* MSI address update at bridge */ nlm_write_pci_reg(base, PCIE_BRIDGE_MSI_ADDRL, MSI_MIPS_ADDR_BASE); nlm_write_pci_reg(base, PCIE_BRIDGE_MSI_ADDRH, 0); val = nlm_read_pci_reg(base, PCIE_BRIDGE_MSI_CAP); /* MSI capability enable at bridge */ nlm_write_pci_reg(base, PCIE_BRIDGE_MSI_CAP, (val | (PCIM_MSICTRL_MSI_ENABLE << 16) | (PCIM_MSICTRL_MMC_32 << 16))); xlpirq = PIC_PCIE_IRQ(link); } /* if it is for real PCIe, we need to ack at bridge too */ if (xlpirq >= PIC_PCIE_IRQ(0) && xlpirq <= PIC_PCIE_IRQ(3)) xlp_set_bus_ack(xlpirq, bridge_pcie_ack, NULL); cpu_establish_hardintr(device_get_name(child), filt, intr, arg, xlpirq, flags, cookiep); return (0); } static int mips_platform_pcib_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { if (strcmp(device_get_name(child), "pci") == 0) { /* if needed reprogram the pic to clear pcix related entry */ device_printf(dev, "teardown intr\n"); } return (bus_generic_teardown_intr(dev, child, irq, cookie)); } static int mips_pcib_route_interrupt(device_t bus, device_t dev, 
int pin) { int f, d; /* * Validate requested pin number. */ if ((pin < 1) || (pin > 4)) return (255); if (pci_get_bus(dev) == 0 && pci_get_vendor(dev) == PCI_VENDOR_NETLOGIC) { f = pci_get_function(dev); d = pci_get_slot(dev) % 8; /* * For PCIe links, return link IRT, for other SoC devices * get the IRT from its PCIe header */ if (d == 1) return (PIC_PCIE_IRQ(f)); else return (255); /* use intline, don't reroute */ } else { /* Regular PCI devices */ return (PIC_PCIE_IRQ(xlp_pcie_link(bus, dev))); } } static device_method_t xlp_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xlp_pcib_probe), DEVMETHOD(device_attach, xlp_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, xlp_pcib_read_ivar), DEVMETHOD(bus_write_ivar, xlp_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, mips_platform_pcib_setup_intr), DEVMETHOD(bus_teardown_intr, mips_platform_pcib_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, xlp_pcib_maxslots), DEVMETHOD(pcib_read_config, xlp_pcib_read_config), DEVMETHOD(pcib_write_config, xlp_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mips_pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, xlp_alloc_msi), DEVMETHOD(pcib_release_msi, xlp_release_msi), DEVMETHOD(pcib_map_msi, xlp_map_msi), DEVMETHOD_END }; static driver_t xlp_pcib_driver = { "pcib", xlp_pcib_methods, 1, /* no softc */ }; static devclass_t pcib_devclass; DRIVER_MODULE(xlp_pcib, simplebus, xlp_pcib_driver, pcib_devclass, 0, 0); Index: head/sys/mips/rmi/xlr_pci.c =================================================================== --- head/sys/mips/rmi/xlr_pci.c (revision 287881) +++ head/sys/mips/rmi/xlr_pci.c (revision 287882) @@ -1,659 +1,659 @@ /*- * Copyright (c) 2003-2009 RMI Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of RMI Corporation, nor the names of its contributors, * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * RMI_BSD */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define pci_cfg_offset(bus,slot,devfn,where) (((bus)<<16) + ((slot) << 11)+((devfn)<<8)+(where)) #define PCIE_LINK_STATE 0x4000 #define LSU_CFG0_REGID 0 #define LSU_CERRLOG_REGID 9 #define LSU_CERROVF_REGID 10 #define LSU_CERRINT_REGID 11 /* MSI support */ #define MSI_MIPS_ADDR_DEST 0x000ff000 #define MSI_MIPS_ADDR_RH 0x00000008 #define MSI_MIPS_ADDR_RH_OFF 0x00000000 #define MSI_MIPS_ADDR_RH_ON 0x00000008 #define MSI_MIPS_ADDR_DM 0x00000004 #define MSI_MIPS_ADDR_DM_PHYSICAL 0x00000000 #define MSI_MIPS_ADDR_DM_LOGICAL 0x00000004 /* Fields in data for Intel MSI messages. */ #define MSI_MIPS_DATA_TRGRMOD 0x00008000 /* Trigger mode */ #define MSI_MIPS_DATA_TRGREDG 0x00000000 /* edge */ #define MSI_MIPS_DATA_TRGRLVL 0x00008000 /* level */ #define MSI_MIPS_DATA_LEVEL 0x00004000 /* Polarity. */ #define MSI_MIPS_DATA_DEASSERT 0x00000000 #define MSI_MIPS_DATA_ASSERT 0x00004000 #define MSI_MIPS_DATA_DELMOD 0x00000700 /* Delivery Mode */ #define MSI_MIPS_DATA_DELFIXED 0x00000000 /* fixed */ #define MSI_MIPS_DATA_DELLOPRI 0x00000100 /* lowest priority */ #define MSI_MIPS_DATA_INTVEC 0x000000ff /* * Build Intel MSI message and data values from a source. AMD64 systems * seem to be compatible, so we use the same function for both. */ #define MIPS_MSI_ADDR(cpu) \ (MSI_MIPS_ADDR_BASE | (cpu) << 12 | \ MSI_MIPS_ADDR_RH_OFF | MSI_MIPS_ADDR_DM_PHYSICAL) #define MIPS_MSI_DATA(irq) \ (MSI_MIPS_DATA_TRGRLVL | MSI_MIPS_DATA_DELFIXED | \ MSI_MIPS_DATA_ASSERT | (irq)) struct xlr_pcib_softc { bus_dma_tag_t sc_pci_dmat; /* PCI DMA tag pointer */ }; static devclass_t pcib_devclass; static void *xlr_pci_config_base; static struct rman irq_rman, port_rman, mem_rman; static void xlr_pci_init_resources(void) { irq_rman.rm_start = 0; irq_rman.rm_end = 255; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "PCI Mapped Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, 255)) panic("pci_init_resources irq_rman"); port_rman.rm_start = 0; port_rman.rm_end = ~0ul; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0x10000000, 0x1fffffff)) panic("pci_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0ul; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0xd0000000, 0xdfffffff)) panic("pci_init_resources mem_rman"); } static int xlr_pcib_probe(device_t dev) { if (xlr_board_info.is_xls) device_set_desc(dev, "XLS PCIe bus"); else device_set_desc(dev, "XLR PCI bus"); xlr_pci_init_resources(); xlr_pci_config_base = (void *)MIPS_PHYS_TO_KSEG1(DEFAULT_PCI_CONFIG_BASE); return (0); } static int xlr_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = 0; return (0); } return (ENOENT); } static int xlr_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { switch (which) { case PCIB_IVAR_DOMAIN: return (EINVAL); case PCIB_IVAR_BUS: return (EINVAL); } return (ENOENT); } static int xlr_pcib_maxslots(device_t dev) { return (PCI_SLOTMAX); } static __inline__ void disable_and_clear_cache_error(void) { uint64_t lsu_cfg0; lsu_cfg0 = 
read_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CFG0_REGID); lsu_cfg0 = lsu_cfg0 & ~0x2e; write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CFG0_REGID, lsu_cfg0); /* Clear cache error log */ write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CERRLOG_REGID, 0); } static __inline__ void clear_and_enable_cache_error(void) { uint64_t lsu_cfg0 = 0; /* first clear the cache error logging register */ write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CERRLOG_REGID, 0); write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CERROVF_REGID, 0); write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CERRINT_REGID, 0); lsu_cfg0 = read_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CFG0_REGID); lsu_cfg0 = lsu_cfg0 | 0x2e; write_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CFG0_REGID, lsu_cfg0); } static uint32_t pci_cfg_read_32bit(uint32_t addr) { uint32_t temp = 0; uint32_t *p = (uint32_t *)xlr_pci_config_base + addr / sizeof(uint32_t); uint64_t cerr_cpu_log = 0; disable_and_clear_cache_error(); temp = bswap32(*p); /* Read cache err log */ cerr_cpu_log = read_xlr_ctrl_register(CPU_BLOCKID_LSU, LSU_CERRLOG_REGID); if (cerr_cpu_log) { /* Device don't exist. */ temp = ~0x0; } clear_and_enable_cache_error(); return (temp); } static u_int32_t xlr_pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) { uint32_t data = 0; if ((width == 2) && (reg & 1)) return 0xFFFFFFFF; else if ((width == 4) && (reg & 3)) return 0xFFFFFFFF; data = pci_cfg_read_32bit(pci_cfg_offset(b, s, f, reg)); if (width == 1) return ((data >> ((reg & 3) << 3)) & 0xff); else if (width == 2) return ((data >> ((reg & 3) << 3)) & 0xffff); else return (data); } static void xlr_pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, u_int32_t val, int width) { uint32_t cfgaddr = pci_cfg_offset(b, s, f, reg); uint32_t data = 0, *p; if ((width == 2) && (reg & 1)) return; else if ((width == 4) && (reg & 3)) return; if (width == 1) { data = pci_cfg_read_32bit(cfgaddr); data = (data & ~(0xff << ((reg & 3) << 3))) | (val << ((reg & 3) << 3)); } else if (width == 2) { data = pci_cfg_read_32bit(cfgaddr); data = (data & ~(0xffff << ((reg & 3) << 3))) | (val << ((reg & 3) << 3)); } else { data = val; } p = (uint32_t *)xlr_pci_config_base + cfgaddr / sizeof(uint32_t); *p = bswap32(data); return; } static int xlr_pcib_attach(device_t dev) { struct xlr_pcib_softc *sc; sc = device_get_softc(dev); /* * XLR C revision chips cannot do DMA above 2G physical address * create a parent tag with this lowaddr */ if (xlr_is_c_revision()) { if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 0x7fffffff, ~0, NULL, NULL, 0x7fffffff, 0xff, 0x7fffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0) panic("%s: bus_dma_tag_create failed", __func__); } - device_add_child(dev, "pci", 0); + device_add_child(dev, "pci", -1); bus_generic_attach(dev); return (0); } static void xlr_pcib_identify(driver_t * driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "pcib", 0); } /* * XLS PCIe can have upto 4 links, and each link has its on IRQ * Find the link on which the device is on */ static int xls_pcie_link(device_t pcib, device_t dev) { device_t parent, tmp; /* find the lane on which the slot is connected to */ printf("xls_pcie_link : bus %s dev %s\n", device_get_nameunit(pcib), device_get_nameunit(dev)); tmp = dev; while (1) { parent = device_get_parent(tmp); if (parent == NULL || parent == pcib) { device_printf(dev, "Cannot find parent bus\n"); return (-1); } if (strcmp(device_get_nameunit(parent), "pci0") == 0) break; tmp = parent; } return (pci_get_slot(tmp)); } /* * Find the IRQ for the link, each 
link has a different interrupt * at the XLS pic */ static int xls_pcie_link_irq(int link) { switch (link) { case 0: return (PIC_PCIE_LINK0_IRQ); case 1: return (PIC_PCIE_LINK1_IRQ); case 2: if (xlr_is_xls_b0()) return (PIC_PCIE_B0_LINK2_IRQ); else return (PIC_PCIE_LINK2_IRQ); case 3: if (xlr_is_xls_b0()) return (PIC_PCIE_B0_LINK3_IRQ); else return (PIC_PCIE_LINK3_IRQ); } return (-1); } static int xlr_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { int i, link; /* * Each link has 32 MSIs that can be allocated, but for now * we only support one device per link. * msi_alloc() equivalent is needed when we start supporting * bridges on the PCIe link. */ link = xls_pcie_link(pcib, dev); if (link == -1) return (ENXIO); /* * encode the irq so that we know it is a MSI interrupt when we * setup interrupts */ for (i = 0; i < count; i++) irqs[i] = 64 + link * 32 + i; return (0); } static int xlr_release_msi(device_t pcib, device_t dev, int count, int *irqs) { device_printf(dev, "%s: msi release %d\n", device_get_nameunit(pcib), count); return (0); } static int xlr_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { int msi; if (irq >= 64) { msi = irq - 64; *addr = MIPS_MSI_ADDR(0); *data = MIPS_MSI_DATA(msi); return (0); } else { device_printf(dev, "%s: map_msi for irq %d - ignored", device_get_nameunit(pcib), irq); return (ENXIO); } } static void bridge_pcix_ack(int irq) { (void)xlr_read_reg(xlr_io_mmio(XLR_IO_PCIX_OFFSET), 0x140 >> 2); } static void bridge_pcie_ack(int irq) { uint32_t reg; xlr_reg_t *pcie_mmio_le = xlr_io_mmio(XLR_IO_PCIE_1_OFFSET); switch (irq) { case PIC_PCIE_LINK0_IRQ: reg = PCIE_LINK0_MSI_STATUS; break; case PIC_PCIE_LINK1_IRQ: reg = PCIE_LINK1_MSI_STATUS; break; case PIC_PCIE_LINK2_IRQ: case PIC_PCIE_B0_LINK2_IRQ: reg = PCIE_LINK2_MSI_STATUS; break; case PIC_PCIE_LINK3_IRQ: case PIC_PCIE_B0_LINK3_IRQ: reg = PCIE_LINK3_MSI_STATUS; break; default: return; } xlr_write_reg(pcie_mmio_le, reg>>2, 0xffffffff); } static int mips_platform_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int error = 0; int xlrirq; error = rman_activate_resource(irq); if (error) return error; if (rman_get_start(irq) != rman_get_end(irq)) { device_printf(dev, "Interrupt allocation %lu != %lu\n", rman_get_start(irq), rman_get_end(irq)); return (EINVAL); } xlrirq = rman_get_start(irq); if (strcmp(device_get_name(dev), "pcib") != 0) return (0); if (xlr_board_info.is_xls == 0) { xlr_establish_intr(device_get_name(child), filt, intr, arg, PIC_PCIX_IRQ, flags, cookiep, bridge_pcix_ack); pic_setup_intr(PIC_IRT_PCIX_INDEX, PIC_PCIX_IRQ, 0x1, 1); } else { /* * temporary hack for MSI, we support just one device per * link, and assign the link interrupt to the device interrupt */ if (xlrirq >= 64) { xlrirq -= 64; if (xlrirq % 32 != 0) return (0); xlrirq = xls_pcie_link_irq(xlrirq / 32); if (xlrirq == -1) return (EINVAL); } xlr_establish_intr(device_get_name(child), filt, intr, arg, xlrirq, flags, cookiep, bridge_pcie_ack); pic_setup_intr(xlrirq - PIC_IRQ_BASE, xlrirq, 0x1, 1); } return (bus_generic_setup_intr(dev, child, irq, flags, filt, intr, arg, cookiep)); } static int mips_platform_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { if (strcmp(device_get_name(child), "pci") == 0) { /* if needed reprogram the pic to clear pcix related entry */ device_printf(dev, "teardown intr\n"); } return (bus_generic_teardown_intr(dev, 
child, irq, cookie)); } static struct resource * xlr_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct rman *rm; struct resource *rv; vm_offset_t va; int needactivate = flags & RF_ACTIVE; switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_IOPORT: rm = &port_rman; break; case SYS_RES_MEMORY: rm = &mem_rman; break; default: return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) return (0); rman_set_rid(rv, *rid); if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { va = (vm_offset_t)pmap_mapdev(start, count); rman_set_bushandle(rv, va); /* bushandle is same as virtual addr */ rman_set_virtual(rv, (void *)va); rman_set_bustag(rv, rmi_pci_bus_space); } if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int xlr_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { return (rman_release_resource(r)); } static bus_dma_tag_t xlr_pci_get_dma_tag(device_t bus, device_t child) { struct xlr_pcib_softc *sc; sc = device_get_softc(bus); return (sc->sc_pci_dmat); } static int xlr_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { return (rman_activate_resource(r)); } static int xlr_pci_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { return (rman_deactivate_resource(r)); } static int mips_pci_route_interrupt(device_t bus, device_t dev, int pin) { int irq, link; /* * Validate requested pin number. */ if ((pin < 1) || (pin > 4)) return (255); if (xlr_board_info.is_xls) { link = xls_pcie_link(bus, dev); irq = xls_pcie_link_irq(link); if (irq != -1) return (irq); } else { if (pin == 1) return (PIC_PCIX_IRQ); } return (255); } static device_method_t xlr_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_identify, xlr_pcib_identify), DEVMETHOD(device_probe, xlr_pcib_probe), DEVMETHOD(device_attach, xlr_pcib_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, xlr_pcib_read_ivar), DEVMETHOD(bus_write_ivar, xlr_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, xlr_pci_alloc_resource), DEVMETHOD(bus_release_resource, xlr_pci_release_resource), DEVMETHOD(bus_get_dma_tag, xlr_pci_get_dma_tag), DEVMETHOD(bus_activate_resource, xlr_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, xlr_pci_deactivate_resource), DEVMETHOD(bus_setup_intr, mips_platform_pci_setup_intr), DEVMETHOD(bus_teardown_intr, mips_platform_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, xlr_pcib_maxslots), DEVMETHOD(pcib_read_config, xlr_pcib_read_config), DEVMETHOD(pcib_write_config, xlr_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mips_pci_route_interrupt), DEVMETHOD(pcib_alloc_msi, xlr_alloc_msi), DEVMETHOD(pcib_release_msi, xlr_release_msi), DEVMETHOD(pcib_map_msi, xlr_map_msi), DEVMETHOD_END }; static driver_t xlr_pcib_driver = { "pcib", xlr_pcib_methods, sizeof(struct xlr_pcib_softc), }; DRIVER_MODULE(pcib, iodi, xlr_pcib_driver, pcib_devclass, 0, 0); Index: head/sys/powerpc/ofw/ofw_pci.c =================================================================== --- head/sys/powerpc/ofw/ofw_pci.c (revision 287881) +++ head/sys/powerpc/ofw/ofw_pci.c (revision 287882) @@ -1,561 +1,561 @@ /*- * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* * Bus interface. */ static int ofw_pci_read_ivar(device_t, device_t, int, uintptr_t *); static struct resource * ofw_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags); static int ofw_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res); static int ofw_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res); static int ofw_pci_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res); static int ofw_pci_adjust_resource(device_t bus, device_t child, int type, struct resource *res, u_long start, u_long end); /* * pcib interface. */ static int ofw_pci_maxslots(device_t); static int ofw_pci_route_interrupt(device_t, device_t, int); /* * ofw_bus interface */ static phandle_t ofw_pci_get_node(device_t bus, device_t dev); /* * local methods */ static int ofw_pci_nranges(phandle_t node); static int ofw_pci_fill_ranges(phandle_t node, struct ofw_pci_range *ranges); /* * Driver methods. 
*/ static device_method_t ofw_pci_methods[] = { /* Device interface */ DEVMETHOD(device_attach, ofw_pci_attach), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, ofw_pci_read_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, ofw_pci_alloc_resource), DEVMETHOD(bus_release_resource, ofw_pci_release_resource), DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, ofw_pci_deactivate_resource), DEVMETHOD(bus_adjust_resource, ofw_pci_adjust_resource), /* pcib interface */ DEVMETHOD(pcib_maxslots, ofw_pci_maxslots), DEVMETHOD(pcib_route_interrupt, ofw_pci_route_interrupt), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ofw_pci_get_node), DEVMETHOD_END }; DEFINE_CLASS_0(ofw_pci, ofw_pci_driver, ofw_pci_methods, 0); int ofw_pci_init(device_t dev) { struct ofw_pci_softc *sc; phandle_t node; u_int32_t busrange[2]; struct ofw_pci_range *rp; int error; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); sc->sc_initialized = 1; if (OF_getprop(node, "reg", &sc->sc_pcir, sizeof(sc->sc_pcir)) == -1) return (ENXIO); if (OF_getprop(node, "bus-range", busrange, sizeof(busrange)) != 8) busrange[0] = 0; sc->sc_dev = dev; sc->sc_node = node; sc->sc_bus = busrange[0]; if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { phandle_t c; int n, i; sc->sc_nrange = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pci_nranges(c); if (n > 0) sc->sc_nrange += n; } if (sc->sc_nrange == 0) return (ENXIO); sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); i = 0; for (c = OF_child(node); c != 0; c = OF_peer(c)) { n = ofw_pci_fill_ranges(c, &sc->sc_range[i]); if (n > 0) i += n; } KASSERT(i == sc->sc_nrange, ("range count mismatch")); } else { sc->sc_nrange = ofw_pci_nranges(node); if (sc->sc_nrange <= 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), M_DEVBUF, M_WAITOK); ofw_pci_fill_ranges(node, sc->sc_range); } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "PCI I/O Ports"; error = rman_init(&sc->sc_io_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "PCI Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { error = 0; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_CONFIG: break; case OFW_PCI_PHYS_HI_SPACE_IO: error = rman_manage_region(&sc->sc_io_rman, rp->pci, rp->pci + rp->size - 1); break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: error = rman_manage_region(&sc->sc_mem_rman, rp->pci, rp->pci + rp->size - 1); break; } if (error) { device_printf(dev, "rman_manage_region(%x, %#jx, %#jx) failed. 
" "error = %d\n", rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, rp->pci + rp->size - 1, error); return (error); } } ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); return (error); } int ofw_pci_attach(device_t dev) { struct ofw_pci_softc *sc; int error; sc = device_get_softc(dev); if (!sc->sc_initialized) { error = ofw_pci_init(dev); if (error) return (error); } - device_add_child(dev, "pci", device_get_unit(dev)); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ofw_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ofw_pci_route_interrupt(device_t bus, device_t dev, int pin) { struct ofw_pci_softc *sc; struct ofw_pci_register reg; uint32_t pintr, mintr[2]; int intrcells; phandle_t iparent; sc = device_get_softc(bus); pintr = pin; /* Fabricate imap information in case this isn't an OFW device */ bzero(®, sizeof(reg)); reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr), &iparent); if (intrcells) { pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); return (pintr); } /* Maybe it's a real interrupt, not an intpin */ if (pin > 4) return (pin); device_printf(bus, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (PCI_INVALID_IRQ); } static int ofw_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ofw_pci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); } return (ENOENT); } static struct resource * ofw_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct ofw_pci_softc *sc; struct resource *rv; struct rman *rm; int needactivate; needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; sc = device_get_softc(bus); switch (type) { case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_IRQ: return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource for %s\n", device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static int ofw_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, res); if (error) return error; } return (rman_release_resource(res)); } static int ofw_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct ofw_pci_softc *sc; void *p; sc = device_get_softc(bus); if (type == SYS_RES_IRQ) { return (bus_activate_resource(bus, type, rid, res)); } if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { struct ofw_pci_range *rp; vm_offset_t start; int space; start = 
(vm_offset_t)rman_get_start(res); /* * Map this through the ranges list */ for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && rp->pci_hi != 0; rp++) { if (start < rp->pci || start >= rp->pci + rp->size) continue; switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { case OFW_PCI_PHYS_HI_SPACE_IO: space = SYS_RES_IOPORT; break; case OFW_PCI_PHYS_HI_SPACE_MEM32: case OFW_PCI_PHYS_HI_SPACE_MEM64: space = SYS_RES_MEMORY; break; default: space = -1; } if (type == space) { start += (rp->host - rp->pci); break; } } if (bootverbose) printf("ofw_pci mapdev: start %zx, len %ld\n", start, rman_get_size(res)); p = pmap_mapdev(start, (vm_size_t)rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_le_tag); rman_set_bushandle(res, (u_long)p); } return (rman_activate_resource(res)); } static int ofw_pci_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { /* * If this is a memory resource, unmap it. */ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { u_int32_t psize; psize = rman_get_size(res); pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); } return (rman_deactivate_resource(res)); } static int ofw_pci_adjust_resource(device_t bus, device_t child, int type, struct resource *res, u_long start, u_long end) { struct rman *rm = NULL; struct ofw_pci_softc *sc = device_get_softc(bus); KASSERT(!(rman_get_flags(res) & RF_ACTIVE), ("active resources cannot be adjusted")); if (rman_get_flags(res) & RF_ACTIVE) return (EINVAL); switch (type) { case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; default: return (ENXIO); } if (!rman_is_region_manager(res, rm)) return (EINVAL); return (rman_adjust_resource(res, start, end)); } static phandle_t ofw_pci_get_node(device_t bus, device_t dev) { struct ofw_pci_softc *sc; sc = device_get_softc(bus); /* We only have one child, the PCI bus, which needs our own node. 
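/*
 * Illustrative sketch (not part of this change): ofw_pci_activate_resource()
 * above rebases a child's PCI address into a host physical address by
 * walking the parsed "ranges" entries: when the address falls inside a range
 * whose space matches the resource type, it is shifted by (host - pci).
 * The self-contained lookup below models that walk with a toy range table;
 * the toy_* names and all addresses are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_range {
    int        space;      /* 0 = I/O, 1 = memory */
    uint64_t   pci;        /* PCI bus address of the window */
    uint64_t   host;       /* host physical address of the window */
    uint64_t   size;
};

/* Rebase a PCI address into host physical space; returns 0 on success. */
static int
toy_pci_to_host(const struct toy_range *r, int nranges, int space,
    uint64_t pci_addr, uint64_t *host_addr)
{
    for (int i = 0; i < nranges; i++) {
        if (r[i].space != space)
            continue;
        if (pci_addr < r[i].pci || pci_addr >= r[i].pci + r[i].size)
            continue;
        *host_addr = pci_addr + (r[i].host - r[i].pci);
        return (0);
    }
    return (-1);                        /* not covered by any range */
}

int
main(void)
{
    struct toy_range ranges[] = {
        { 1, 0x80000000, 0xc0000000, 0x10000000 },  /* memory window */
        { 0, 0x00001000, 0xf2000000, 0x00010000 },  /* I/O window */
    };
    uint64_t host;

    if (toy_pci_to_host(ranges, 2, 1, 0x80100000, &host) == 0)
        printf("pci 0x80100000 -> host 0x%llx\n",
            (unsigned long long)host);
    return (0);
}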
*/ return (sc->sc_node); } static int ofw_pci_nranges(phandle_t node) { int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; ssize_t nbase_ranges; OF_getprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); OF_getprop(node, "#address-cells", &pci_address_cells, sizeof(pci_address_cells)); OF_getprop(node, "#size-cells", &size_cells, sizeof(size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); return (nbase_ranges / sizeof(cell_t) / (pci_address_cells + host_address_cells + size_cells)); } static int ofw_pci_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) { int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; cell_t *base_ranges; ssize_t nbase_ranges; int nranges; int i, j, k; OF_getprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); OF_getprop(node, "#address-cells", &pci_address_cells, sizeof(pci_address_cells)); OF_getprop(node, "#size-cells", &size_cells, sizeof(size_cells)); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges <= 0) return (-1); nranges = nbase_ranges / sizeof(cell_t) / (pci_address_cells + host_address_cells + size_cells); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < nranges; i++) { ranges[i].pci_hi = base_ranges[j++]; ranges[i].pci = 0; for (k = 0; k < pci_address_cells - 1; k++) { ranges[i].pci <<= 32; ranges[i].pci |= base_ranges[j++]; } ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { ranges[i].host <<= 32; ranges[i].host |= base_ranges[j++]; } ranges[i].size = 0; for (k = 0; k < size_cells; k++) { ranges[i].size <<= 32; ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (nranges); } Index: head/sys/x86/pci/pci_bus.c =================================================================== --- head/sys/x86/pci/pci_bus.c (revision 287881) +++ head/sys/x86/pci/pci_bus.c (revision 287882) @@ -1,764 +1,764 @@ /*- * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
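/*
 * Illustrative sketch (not part of this change): ofw_pci_fill_ranges() above
 * decodes the flat cell array of the OFW "ranges" property.  Each entry is
 * pci_address_cells + host_address_cells + size_cells 32-bit cells, and
 * multi-cell values are assembled high cell first.  The self-contained
 * model below shows the entry count and the cell assembly; the 3/1/2 cell
 * counts are the defaults the driver assumes (a real device tree may
 * override them), and the toy_* names and sample cells are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t cell_t;

/* Assemble an up-to-64-bit value from 'n' cells, most significant first. */
static uint64_t
toy_cells_to_u64(const cell_t *c, int n)
{
    uint64_t v = 0;

    for (int i = 0; i < n; i++) {
        v <<= 32;
        v |= c[i];
    }
    return (v);
}

/* Number of "ranges" entries in a property of 'proplen' bytes. */
static int
toy_nranges(size_t proplen, int pci_cells, int host_cells, int size_cells)
{
    return ((int)(proplen / sizeof(cell_t) /
        (pci_cells + host_cells + size_cells)));
}

int
main(void)
{
    /* One hypothetical entry: 3 PCI cells, 1 host cell, 2 size cells. */
    cell_t entry[] = {
        0x02000000, 0x00000000, 0x80000000,    /* phys.hi + PCI address */
        0xc0000000,                            /* host address */
        0x00000000, 0x10000000,                /* size */
    };

    printf("entries: %d\n", toy_nranges(sizeof(entry), 3, 1, 2));
    printf("pci  0x%llx\n",
        (unsigned long long)toy_cells_to_u64(&entry[1], 2));
    printf("host 0x%llx\n",
        (unsigned long long)toy_cells_to_u64(&entry[3], 1));
    printf("size 0x%llx\n",
        (unsigned long long)toy_cells_to_u64(&entry[4], 2));
    return (0);
}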
*/ #include __FBSDID("$FreeBSD$"); #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPU_ELAN #include #endif #include #include #include #include "pcib_if.h" int legacy_pcib_maxslots(device_t dev) { return 31; } /* read configuration space register */ uint32_t legacy_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { return(pci_cfgregread(bus, slot, func, reg, bytes)); } /* write configuration space register */ void legacy_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { pci_cfgregwrite(bus, slot, func, reg, data, bytes); } /* route interrupt */ static int legacy_pcib_route_interrupt(device_t pcib, device_t dev, int pin) { #ifdef __HAVE_PIR return (pci_pir_route_interrupt(pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pin)); #else /* No routing possible */ return (PCI_INVALID_IRQ); #endif } /* Pass MSI requests up to the nexus. */ int legacy_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } int legacy_pcib_alloc_msix(device_t pcib, device_t dev, int *irq) { device_t bus; bus = device_get_parent(pcib); return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } int legacy_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus, hostb; int error, func, slot; bus = device_get_parent(pcib); error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); if (error) return (error); slot = legacy_get_pcislot(pcib); func = legacy_get_pcifunc(pcib); if (slot == -1 || func == -1) return (0); hostb = pci_find_bsf(0, slot, func); KASSERT(hostb != NULL, ("%s: missing hostb for 0:%d:%d", __func__, slot, func)); pci_ht_map_msi(hostb, *addr); return (0); } static const char * legacy_pcib_is_host_bridge(int bus, int slot, int func, uint32_t id, uint8_t class, uint8_t subclass, uint8_t *busnum) { #ifdef __i386__ const char *s = NULL; static uint8_t pxb[4]; /* hack for 450nx */ *busnum = 0; switch (id) { case 0x12258086: s = "Intel 824?? host to PCI bridge"; /* XXX This is a guess */ /* *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x41, 1); */ *busnum = bus; break; case 0x71208086: s = "Intel 82810 (i810 GMCH) Host To Hub bridge"; break; case 0x71228086: s = "Intel 82810-DC100 (i810-DC100 GMCH) Host To Hub bridge"; break; case 0x71248086: s = "Intel 82810E (i810E GMCH) Host To Hub bridge"; break; case 0x11308086: s = "Intel 82815 (i815 GMCH) Host To Hub bridge"; break; case 0x71808086: s = "Intel 82443LX (440 LX) host to PCI bridge"; break; case 0x71908086: s = "Intel 82443BX (440 BX) host to PCI bridge"; break; case 0x71928086: s = "Intel 82443BX host to PCI bridge (AGP disabled)"; break; case 0x71948086: s = "Intel 82443MX host to PCI bridge"; break; case 0x71a08086: s = "Intel 82443GX host to PCI bridge"; break; case 0x71a18086: s = "Intel 82443GX host to AGP bridge"; break; case 0x71a28086: s = "Intel 82443GX host to PCI bridge (AGP disabled)"; break; case 0x84c48086: s = "Intel 82454KX/GX (Orion) host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x4a, 1); break; case 0x84ca8086: /* * For the 450nx chipset, there is a whole bundle of * things pretending to be host bridges. The MIOC will * be seen first and isn't really a pci bridge (the * actual busses are attached to the PXB's). 
We need to * read the registers of the MIOC to figure out the * bus numbers for the PXB channels. * * Since the MIOC doesn't have a pci bus attached, we * pretend it wasn't there. */ pxb[0] = legacy_pcib_read_config(0, bus, slot, func, 0xd0, 1); /* BUSNO[0] */ pxb[1] = legacy_pcib_read_config(0, bus, slot, func, 0xd1, 1) + 1; /* SUBA[0]+1 */ pxb[2] = legacy_pcib_read_config(0, bus, slot, func, 0xd3, 1); /* BUSNO[1] */ pxb[3] = legacy_pcib_read_config(0, bus, slot, func, 0xd4, 1) + 1; /* SUBA[1]+1 */ return NULL; case 0x84cb8086: switch (slot) { case 0x12: s = "Intel 82454NX PXB#0, Bus#A"; *busnum = pxb[0]; break; case 0x13: s = "Intel 82454NX PXB#0, Bus#B"; *busnum = pxb[1]; break; case 0x14: s = "Intel 82454NX PXB#1, Bus#A"; *busnum = pxb[2]; break; case 0x15: s = "Intel 82454NX PXB#1, Bus#B"; *busnum = pxb[3]; break; } break; case 0x1A308086: s = "Intel 82845 Host to PCI bridge"; break; /* AMD -- vendor 0x1022 */ case 0x30001022: s = "AMD Elan SC520 host to PCI bridge"; #ifdef CPU_ELAN init_AMD_Elan_sc520(); #else printf( "*** WARNING: missing CPU_ELAN -- timekeeping may be wrong\n"); #endif break; case 0x70061022: s = "AMD-751 host to PCI bridge"; break; case 0x700e1022: s = "AMD-761 host to PCI bridge"; break; /* SiS -- vendor 0x1039 */ case 0x04961039: s = "SiS 85c496"; break; case 0x04061039: s = "SiS 85c501"; break; case 0x06011039: s = "SiS 85c601"; break; case 0x55911039: s = "SiS 5591 host to PCI bridge"; break; case 0x00011039: s = "SiS 5591 host to AGP bridge"; break; /* VLSI -- vendor 0x1004 */ case 0x00051004: s = "VLSI 82C592 Host to PCI bridge"; break; /* XXX Here is MVP3, I got the datasheet but NO M/B to test it */ /* totally. Please let me know if anything wrong. -F */ /* XXX need info on the MVP3 -- any takers? */ case 0x05981106: s = "VIA 82C598MVP (Apollo MVP3) host bridge"; break; /* AcerLabs -- vendor 0x10b9 */ /* Funny : The datasheet told me vendor id is "10b8",sub-vendor */ /* id is '10b9" but the register always shows "10b9". 
-Foxfair */ case 0x154110b9: s = "AcerLabs M1541 (Aladdin-V) PCI host bridge"; break; /* OPTi -- vendor 0x1045 */ case 0xc7011045: s = "OPTi 82C700 host to PCI bridge"; break; case 0xc8221045: s = "OPTi 82C822 host to PCI Bridge"; break; /* ServerWorks -- vendor 0x1166 */ case 0x00051166: s = "ServerWorks NB6536 2.0HE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00061166: /* FALLTHROUGH */ case 0x00081166: /* FALLTHROUGH */ case 0x02011166: /* FALLTHROUGH */ case 0x010f1014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00091166: s = "ServerWorks NB6635 3.0LE host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00101166: s = "ServerWorks CIOB30 host to PCI bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; case 0x00111166: /* FALLTHROUGH */ case 0x03021014: /* IBM re-badged ServerWorks chipset */ s = "ServerWorks CMIC-HE host to PCI-X bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* XXX unknown chipset, but working */ case 0x00171166: /* FALLTHROUGH */ case 0x01011166: case 0x01101166: case 0x02251166: s = "ServerWorks host to PCI bridge(unknown chipset)"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0x44, 1); break; /* Compaq/HP -- vendor 0x0e11 */ case 0x60100e11: s = "Compaq/HP Model 6010 HotPlug PCI Bridge"; *busnum = legacy_pcib_read_config(0, bus, slot, func, 0xc8, 1); break; /* Integrated Micro Solutions -- vendor 0x10e0 */ case 0x884910e0: s = "Integrated Micro Solutions VL Bridge"; break; default: if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; break; } return s; #else const char *s = NULL; *busnum = 0; if (class == PCIC_BRIDGE && subclass == PCIS_BRIDGE_HOST) s = "Host to PCI bridge"; return s; #endif } /* * Scan the first pci bus for host-pci bridges and add pcib instances * to the nexus for each bridge. */ static void legacy_pcib_identify(driver_t *driver, device_t parent) { int bus, slot, func; uint8_t hdrtype; int found = 0; int pcifunchigh; int found824xx = 0; int found_orion = 0; device_t child; devclass_t pci_devclass; if (pci_cfgregopen() == 0) return; /* * Check to see if we haven't already had a PCI bus added * via some other means. If we have, bail since otherwise * we're going to end up duplicating it. */ if ((pci_devclass = devclass_find("pci")) && devclass_get_device(pci_devclass, 0)) return; bus = 0; retry: for (slot = 0; slot <= PCI_SLOTMAX; slot++) { func = 0; hdrtype = legacy_pcib_read_config(0, bus, slot, func, PCIR_HDRTYPE, 1); /* * When enumerating bus devices, the standard says that * one should check the header type and ignore the slots whose * header types that the software doesn't know about. We use * this to filter out devices. */ if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if ((hdrtype & PCIM_MFDEV) && (!found_orion || hdrtype != 0xff)) pcifunchigh = PCI_FUNCMAX; else pcifunchigh = 0; for (func = 0; func <= pcifunchigh; func++) { /* * Read the IDs and class from the device. 
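/*
 * Illustrative sketch (not part of this change): inside the identify scan, a
 * single 32-bit read of PCIR_DEVVENDOR both detects an empty slot (the bus
 * returns all ones) and yields the vendor/device pair the host-bridge table
 * above keys on, with the vendor in the low 16 bits.  A self-contained
 * decode follows; the toy_* names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

/* Split a DEVVENDOR dword.  An empty slot reads back as 0xffffffff
 * (0xffff is not a valid vendor ID). */
static int
toy_decode_devvendor(uint32_t id, uint16_t *vendor, uint16_t *device)
{
    if (id == 0xffffffff)
        return (-1);                    /* nothing answered the read */
    *vendor = (uint16_t)(id & 0xffff);
    *device = (uint16_t)(id >> 16);
    return (0);
}

int
main(void)
{
    uint16_t ven, dev;

    /* 0x71908086: the Intel 82443BX entry from the table above. */
    if (toy_decode_devvendor(0x71908086, &ven, &dev) == 0)
        printf("vendor 0x%04x device 0x%04x\n", ven, dev);
    if (toy_decode_devvendor(0xffffffff, &ven, &dev) != 0)
        printf("empty slot\n");
    return (0);
}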
*/ uint32_t id; uint8_t class, subclass, busnum; const char *s; device_t *devs; int ndevs, i; id = legacy_pcib_read_config(0, bus, slot, func, PCIR_DEVVENDOR, 4); if (id == -1) continue; class = legacy_pcib_read_config(0, bus, slot, func, PCIR_CLASS, 1); subclass = legacy_pcib_read_config(0, bus, slot, func, PCIR_SUBCLASS, 1); s = legacy_pcib_is_host_bridge(bus, slot, func, id, class, subclass, &busnum); if (s == NULL) continue; /* * Check to see if the physical bus has already * been seen. Eg: hybrid 32 and 64 bit host * bridges to the same logical bus. */ if (device_get_children(parent, &devs, &ndevs) == 0) { for (i = 0; s != NULL && i < ndevs; i++) { if (strcmp(device_get_name(devs[i]), "pcib") != 0) continue; if (legacy_get_pcibus(devs[i]) == busnum) s = NULL; } free(devs, M_TEMP); } if (s == NULL) continue; /* * Add at priority 100 to make sure we * go after any motherboard resources */ child = BUS_ADD_CHILD(parent, 100, "pcib", busnum); device_set_desc(child, s); legacy_set_pcibus(child, busnum); legacy_set_pcislot(child, slot); legacy_set_pcifunc(child, func); found = 1; if (id == 0x12258086) found824xx = 1; if (id == 0x84c48086) found_orion = 1; } } if (found824xx && bus == 0) { bus++; goto retry; } /* * Make sure we add at least one bridge since some old * hardware doesn't actually have a host-pci bridge device. * Note that pci_cfgregopen() thinks we have PCI devices.. */ if (!found) { if (bootverbose) printf( "legacy_pcib_identify: no bridge found, adding pcib0 anyway\n"); child = BUS_ADD_CHILD(parent, 100, "pcib", 0); legacy_set_pcibus(child, 0); } } static int legacy_pcib_probe(device_t dev) { if (pci_cfgregopen() == 0) return ENXIO; return -100; } static int legacy_pcib_attach(device_t dev) { #ifdef __HAVE_PIR device_t pir; #endif int bus; bus = pcib_get_bus(dev); #ifdef __HAVE_PIR /* * Look for a PCI BIOS interrupt routing table as that will be * our method of routing interrupts if we have one. */ if (pci_pir_probe(bus, 0)) { pir = BUS_ADD_CHILD(device_get_parent(dev), 0, "pir", 0); if (pir != NULL) device_probe_and_attach(pir); } #endif - device_add_child(dev, "pci", bus); + device_add_child(dev, "pci", -1); return bus_generic_attach(dev); } int legacy_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return 0; case PCIB_IVAR_BUS: *result = legacy_get_pcibus(dev); return 0; } return ENOENT; } int legacy_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case PCIB_IVAR_DOMAIN: return EINVAL; case PCIB_IVAR_BUS: legacy_set_pcibus(dev, value); return 0; } return ENOENT; } /* * Helper routine for x86 Host-PCI bridge driver resource allocation. * This is used to adjust the start address of wildcard allocation * requests to avoid low addresses that are known to be problematic. * * If no memory preference is given, use upper 32MB slot most BIOSes * use for their memory window. This is typically only used on older * laptops that don't have PCI busses behind a PCI bridge, so assuming * > 32MB is likely OK. * * However, this can cause problems for other chipsets, so we make * this tunable by hw.pci.host_mem_start. 
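/*
 * Illustrative sketch (not part of this change): hostb_alloc_start() below
 * implements exactly the policy described above: only wildcard requests
 * (where start + count - 1 != end) get their start bumped, memory to the
 * hw.pci.host_mem_start floor and I/O ports above the legacy 0x1000 region.
 * The self-contained model below copies the default floor from the driver;
 * the toy_* names and type tags are illustrative.
 */
#include <stdio.h>

#define TOY_SYS_RES_MEMORY  3       /* illustrative type tags only */
#define TOY_SYS_RES_IOPORT  4

static unsigned long toy_host_mem_start = 0x80000000;  /* the tunable */

static unsigned long
toy_alloc_start(int type, unsigned long start, unsigned long end,
    unsigned long count)
{
    /* A fully specified request (start..end exactly fits count) is left
     * alone; only wildcard requests are steered upwards. */
    if (start + count - 1 != end) {
        if (type == TOY_SYS_RES_MEMORY && start < toy_host_mem_start)
            start = toy_host_mem_start;
        if (type == TOY_SYS_RES_IOPORT && start < 0x1000)
            start = 0x1000;
    }
    return (start);
}

int
main(void)
{
    /* A wildcard memory request anywhere in 0..~0UL gets bumped. */
    printf("wildcard mem start -> 0x%lx\n",
        toy_alloc_start(TOY_SYS_RES_MEMORY, 0, ~0UL, 0x1000));
    /* An exact request is honoured as-is. */
    printf("exact mem start    -> 0x%lx\n",
        toy_alloc_start(TOY_SYS_RES_MEMORY, 0xa0000, 0xa0fff, 0x1000));
    return (0);
}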
*/ SYSCTL_DECL(_hw_pci); static unsigned long host_mem_start = 0x80000000; SYSCTL_ULONG(_hw_pci, OID_AUTO, host_mem_start, CTLFLAG_RDTUN, &host_mem_start, 0, "Limit the host bridge memory to being above this address."); u_long hostb_alloc_start(int type, u_long start, u_long end, u_long count) { if (start + count - 1 != end) { if (type == SYS_RES_MEMORY && start < host_mem_start) start = host_mem_start; if (type == SYS_RES_IOPORT && start < 0x1000) start = 0x1000; } return (start); } struct resource * legacy_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(0, child, rid, start, end, count, flags)); #endif start = hostb_alloc_start(type, start, end, count); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) int legacy_pcib_adjust_resource(device_t dev, device_t child, int type, struct resource *r, u_long start, u_long end) { if (type == PCI_RES_BUS) return (pci_domain_adjust_bus(0, child, r, start, end)); return (bus_generic_adjust_resource(dev, child, type, r, start, end)); } int legacy_pcib_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (type == PCI_RES_BUS) return (pci_domain_release_bus(0, child, rid, r)); return (bus_generic_release_resource(dev, child, type, rid, r)); } #endif static device_method_t legacy_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_identify, legacy_pcib_identify), DEVMETHOD(device_probe, legacy_pcib_probe), DEVMETHOD(device_attach, legacy_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, legacy_pcib_read_ivar), DEVMETHOD(bus_write_ivar, legacy_pcib_write_ivar), DEVMETHOD(bus_alloc_resource, legacy_pcib_alloc_resource), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_adjust_resource, legacy_pcib_adjust_resource), DEVMETHOD(bus_release_resource, legacy_pcib_release_resource), #else DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), #endif DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, legacy_pcib_maxslots), DEVMETHOD(pcib_read_config, legacy_pcib_read_config), DEVMETHOD(pcib_write_config, legacy_pcib_write_config), DEVMETHOD(pcib_route_interrupt, legacy_pcib_route_interrupt), DEVMETHOD(pcib_alloc_msi, legacy_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, legacy_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, legacy_pcib_map_msi), DEVMETHOD_END }; static devclass_t hostb_devclass; DEFINE_CLASS_0(pcib, legacy_pcib_driver, legacy_pcib_methods, 1); DRIVER_MODULE(pcib, legacy, legacy_pcib_driver, hostb_devclass, 0, 0); /* * Install placeholder to claim the resources owned by the * PCI bus interface. This could be used to extract the * config space registers in the extreme case where the PnP * ID is available and the PCI BIOS isn't, but for now we just * eat the PnP ID and do nothing else. 
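/*
 * Illustrative sketch (not part of this change): the pcibus_pnp_ids[] table
 * below stores compressed EISA/PnP IDs, three 5-bit letters ('A' encoded as
 * 1) plus a 16-bit product number, kept little-endian, so 0x030ad041 is
 * "PNP0A03" and 0x080ad041 is "PNP0A08".  The self-contained expander below
 * turns such an ID back into its seven-character form; the toy_* name is
 * illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static void
toy_pnp_expand(uint32_t id, char out[8])
{
    uint8_t b0 = id & 0xff;             /* first stored byte */
    uint8_t b1 = (id >> 8) & 0xff;
    uint16_t prod = ((id >> 16) & 0xff) << 8 | (id >> 24);

    out[0] = '@' + ((b0 >> 2) & 0x1f);
    out[1] = '@' + (((b0 & 0x3) << 3) | (b1 >> 5));
    out[2] = '@' + (b1 & 0x1f);
    snprintf(out + 3, 5, "%04X", prod);
}

int
main(void)
{
    char name[8];

    toy_pnp_expand(0x030ad041, name);   /* expect PNP0A03 */
    printf("%s\n", name);
    toy_pnp_expand(0x080ad041, name);   /* expect PNP0A08 */
    printf("%s\n", name);
    return (0);
}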
* * XXX we should silence this probe, as it will generally confuse * people. */ static struct isa_pnp_id pcibus_pnp_ids[] = { { 0x030ad041 /* PNP0A03 */, "PCI Bus" }, { 0x080ad041 /* PNP0A08 */, "PCIe Bus" }, { 0 } }; static int pcibus_pnp_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, pcibus_pnp_ids)) <= 0) device_quiet(dev); return(result); } static int pcibus_pnp_attach(device_t dev) { return(0); } static device_method_t pcibus_pnp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcibus_pnp_probe), DEVMETHOD(device_attach, pcibus_pnp_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static devclass_t pcibus_pnp_devclass; DEFINE_CLASS_0(pcibus_pnp, pcibus_pnp_driver, pcibus_pnp_methods, 1); DRIVER_MODULE(pcibus_pnp, isa, pcibus_pnp_driver, pcibus_pnp_devclass, 0, 0); #ifdef __HAVE_PIR /* * Provide a PCI-PCI bridge driver for PCI busses behind PCI-PCI bridges * that appear in the PCIBIOS Interrupt Routing Table to use the routing * table for interrupt routing when possible. */ static int pcibios_pcib_probe(device_t bus); static device_method_t pcibios_pcib_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcibios_pcib_probe), /* pcib interface */ DEVMETHOD(pcib_route_interrupt, legacy_pcib_route_interrupt), {0, 0} }; static devclass_t pcib_devclass; DEFINE_CLASS_1(pcib, pcibios_pcib_driver, pcibios_pcib_pci_methods, sizeof(struct pcib_softc), pcib_driver); DRIVER_MODULE(pcibios_pcib, pci, pcibios_pcib_driver, pcib_devclass, 0, 0); static int pcibios_pcib_probe(device_t dev) { int bus; if ((pci_get_class(dev) != PCIC_BRIDGE) || (pci_get_subclass(dev) != PCIS_BRIDGE_PCI)) return (ENXIO); bus = pci_read_config(dev, PCIR_SECBUS_1, 1); if (bus == 0) return (ENXIO); if (!pci_pir_probe(bus, 1)) return (ENXIO); device_set_desc(dev, "PCIBIOS PCI-PCI bridge"); return (-2000); } #endif Index: head/sys/x86/pci/qpi.c =================================================================== --- head/sys/x86/pci/qpi.c (revision 287881) +++ head/sys/x86/pci/qpi.c (revision 287882) @@ -1,304 +1,304 @@ /*- * Copyright (c) 2010 Hudson River Trading LLC * Written by: John H. Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This driver provides a pseudo-bus to enumerate the PCI buses * present on a system using a QPI chipset. It creates a qpi0 bus that * is a child of nexus0 and then creates two Host-PCI bridges as a * child of that. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" struct qpi_device { int qd_pcibus; }; static MALLOC_DEFINE(M_QPI, "qpidrv", "qpi system device"); static void qpi_identify(driver_t *driver, device_t parent) { /* Check CPUID to ensure this is an i7 CPU of some sort. */ if (!(cpu_vendor_id == CPU_VENDOR_INTEL && CPUID_TO_FAMILY(cpu_id) == 0x6 && (CPUID_TO_MODEL(cpu_id) == 0x1a || CPUID_TO_MODEL(cpu_id) == 0x2c))) return; /* PCI config register access is required. */ if (pci_cfgregopen() == 0) return; /* Add a qpi bus device. */ if (BUS_ADD_CHILD(parent, 20, "qpi", -1) == NULL) panic("Failed to add qpi bus"); } static int qpi_probe(device_t dev) { device_set_desc(dev, "QPI system bus"); return (BUS_PROBE_SPECIFIC); } /* * Look for a PCI bus with the specified bus address. If one is found, * add a pcib device and return 0. Otherwise, return an error code. */ static int qpi_probe_pcib(device_t dev, int bus) { struct qpi_device *qdev; device_t child; uint32_t devid; /* * If a PCI bus already exists for this bus number, then * fail. */ if (pci_find_bsf(bus, 0, 0) != NULL) return (EEXIST); /* * Attempt to read the device id for device 0, function 0 on * the bus. A value of 0xffffffff means that the bus is not * present. */ devid = pci_cfgregread(bus, 0, 0, PCIR_DEVVENDOR, 4); if (devid == 0xffffffff) return (ENOENT); if ((devid & 0xffff) != 0x8086) { device_printf(dev, "Device at pci%d.0.0 has non-Intel vendor 0x%x\n", bus, devid & 0xffff); return (ENXIO); } child = BUS_ADD_CHILD(dev, 0, "pcib", -1); if (child == NULL) panic("%s: failed to add pci bus %d", device_get_nameunit(dev), bus); qdev = malloc(sizeof(struct qpi_device), M_QPI, M_WAITOK); qdev->qd_pcibus = bus; device_set_ivars(child, qdev); return (0); } static int qpi_attach(device_t dev) { int bus; /* * Each processor socket has a dedicated PCI bus counting down from * 255. We keep probing buses until one fails.
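 *
 * As an illustration (not part of this file): on a two-socket Nehalem or
 * Westmere system the uncore configuration devices typically answer on
 * buses 0xff and 0xfe, so the loop below adds a pcib child for each of
 * those and stops at the first bus whose device 0, function 0 reads back
 * as 0xffffffff.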
*/ for (bus = 255;; bus--) if (qpi_probe_pcib(dev, bus) != 0) break; return (bus_generic_attach(dev)); } static int qpi_print_child(device_t bus, device_t child) { struct qpi_device *qdev; int retval = 0; qdev = device_get_ivars(child); retval += bus_print_child_header(bus, child); if (qdev->qd_pcibus != -1) retval += printf(" pcibus %d", qdev->qd_pcibus); retval += bus_print_child_footer(bus, child); return (retval); } static int qpi_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct qpi_device *qdev; qdev = device_get_ivars(child); switch (which) { case PCIB_IVAR_BUS: *result = qdev->qd_pcibus; break; default: return (ENOENT); } return (0); } static device_method_t qpi_methods[] = { /* Device interface */ DEVMETHOD(device_identify, qpi_identify), DEVMETHOD(device_probe, qpi_probe), DEVMETHOD(device_attach, qpi_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, qpi_print_child), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, qpi_read_ivar), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static devclass_t qpi_devclass; DEFINE_CLASS_0(qpi, qpi_driver, qpi_methods, 0); DRIVER_MODULE(qpi, nexus, qpi_driver, qpi_devclass, 0, 0); static int qpi_pcib_probe(device_t dev) { device_set_desc(dev, "QPI Host-PCI bridge"); return (BUS_PROBE_SPECIFIC); } static int qpi_pcib_attach(device_t dev) { - device_add_child(dev, "pci", pcib_get_bus(dev)); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int qpi_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = pcib_get_bus(dev); return (0); default: return (ENOENT); } } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static struct resource * qpi_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(0, child, rid, start, end, count, flags)); return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } #endif static int qpi_pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { device_t bus; bus = device_get_parent(pcib); return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data)); } static device_method_t qpi_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qpi_pcib_probe), DEVMETHOD(device_attach, qpi_pcib_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, qpi_pcib_read_ivar), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_alloc_resource, qpi_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, legacy_pcib_adjust_resource), DEVMETHOD(bus_release_resource, legacy_pcib_release_resource), #else DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), #endif 
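	/*
	 * With NEW_PCIB and PCI_RES_BUS defined, the resource methods above
	 * send PCI_RES_BUS requests to the per-domain bus number rman
	 * (pci_domain_alloc_bus() and friends); all other resource types
	 * fall through to the generic bus methods.
	 */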
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, legacy_pcib_read_config), DEVMETHOD(pcib_write_config, legacy_pcib_write_config), DEVMETHOD(pcib_alloc_msi, legacy_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, legacy_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, qpi_pcib_map_msi), DEVMETHOD_END }; static devclass_t qpi_pcib_devclass; DEFINE_CLASS_0(pcib, qpi_pcib_driver, qpi_pcib_methods, 0); DRIVER_MODULE(pcib, qpi, qpi_pcib_driver, qpi_pcib_devclass, 0, 0); Index: head/sys/x86/x86/mptable_pci.c =================================================================== --- head/sys/x86/x86/mptable_pci.c (revision 287881) +++ head/sys/x86/x86/mptable_pci.c (revision 287882) @@ -1,240 +1,240 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Host to PCI and PCI to PCI bridge drivers that use the MP Table to route * interrupts from PCI devices to I/O APICs. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" /* Host to PCI bridge driver. 
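 * Probes only when PCI config space access is available and the MP Table
 * actually describes this bridge's bus; see mptable_hostb_probe() below.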
*/ static int mptable_hostb_probe(device_t dev) { if (pci_cfgregopen() == 0) return (ENXIO); if (mptable_pci_probe_table(pcib_get_bus(dev)) != 0) return (ENXIO); device_set_desc(dev, "MPTable Host-PCI bridge"); return (0); } static int mptable_hostb_attach(device_t dev) { #ifdef NEW_PCIB mptable_pci_host_res_init(dev); #endif - device_add_child(dev, "pci", pcib_get_bus(dev)); + device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } #ifdef NEW_PCIB static int mptable_is_isa_range(u_long start, u_long end) { if (end >= 0x10000) return (0); if ((start & 0xfc00) != (end & 0xfc00)) return (0); start &= ~0xfc00; end &= ~0xfc00; return (start >= 0x100 && end <= 0x3ff); } static int mptable_is_vga_range(u_long start, u_long end) { if (end >= 0x10000) return (0); if ((start & 0xfc00) != (end & 0xfc00)) return (0); start &= ~0xfc00; end &= ~0xfc00; return (pci_is_vga_ioport_range(start, end)); } static struct resource * mptable_hostb_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct mptable_hostb_softc *sc; #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (pci_domain_alloc_bus(0, child, rid, start, end, count, flags)); #endif sc = device_get_softc(dev); if (type == SYS_RES_IOPORT && start + count - 1 == end) { if (mptable_is_isa_range(start, end)) { switch (sc->sc_decodes_isa_io) { case -1: return (NULL); case 1: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); default: break; } } if (mptable_is_vga_range(start, end)) { switch (sc->sc_decodes_vga_io) { case -1: return (NULL); case 1: return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); default: break; } } } start = hostb_alloc_start(type, start, end, count); return (pcib_host_res_alloc(&sc->sc_host_res, child, type, rid, start, end, count, flags)); } static int mptable_hostb_adjust_resource(device_t dev, device_t child, int type, struct resource *r, u_long start, u_long end) { struct mptable_hostb_softc *sc; #ifdef PCI_RES_BUS if (type == PCI_RES_BUS) return (pci_domain_adjust_bus(0, child, r, start, end)); #endif sc = device_get_softc(dev); return (pcib_host_res_adjust(&sc->sc_host_res, child, type, r, start, end)); } #endif static device_method_t mptable_hostb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mptable_hostb_probe), DEVMETHOD(device_attach, mptable_hostb_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, legacy_pcib_read_ivar), DEVMETHOD(bus_write_ivar, legacy_pcib_write_ivar), #ifdef NEW_PCIB DEVMETHOD(bus_alloc_resource, mptable_hostb_alloc_resource), DEVMETHOD(bus_adjust_resource, mptable_hostb_adjust_resource), #else DEVMETHOD(bus_alloc_resource, legacy_pcib_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), #endif #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_release_resource, legacy_pcib_release_resource), #else DEVMETHOD(bus_release_resource, bus_generic_release_resource), #endif DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, legacy_pcib_maxslots), DEVMETHOD(pcib_read_config, legacy_pcib_read_config), DEVMETHOD(pcib_write_config, 
legacy_pcib_write_config), DEVMETHOD(pcib_route_interrupt, mptable_pci_route_interrupt), DEVMETHOD(pcib_alloc_msi, legacy_pcib_alloc_msi), DEVMETHOD(pcib_release_msi, pcib_release_msi), DEVMETHOD(pcib_alloc_msix, legacy_pcib_alloc_msix), DEVMETHOD(pcib_release_msix, pcib_release_msix), DEVMETHOD(pcib_map_msi, legacy_pcib_map_msi), DEVMETHOD_END }; static devclass_t hostb_devclass; DEFINE_CLASS_0(pcib, mptable_hostb_driver, mptable_hostb_methods, sizeof(struct mptable_hostb_softc)); DRIVER_MODULE(mptable_pcib, legacy, mptable_hostb_driver, hostb_devclass, 0, 0); /* PCI to PCI bridge driver. */ static int mptable_pcib_probe(device_t dev) { int bus; if ((pci_get_class(dev) != PCIC_BRIDGE) || (pci_get_subclass(dev) != PCIS_BRIDGE_PCI)) return (ENXIO); bus = pci_read_config(dev, PCIR_SECBUS_1, 1); if (bus == 0) return (ENXIO); if (mptable_pci_probe_table(bus) != 0) return (ENXIO); device_set_desc(dev, "MPTable PCI-PCI bridge"); return (-1000); } static device_method_t mptable_pcib_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mptable_pcib_probe), /* pcib interface */ DEVMETHOD(pcib_route_interrupt, mptable_pci_route_interrupt), {0, 0} }; static devclass_t pcib_devclass; DEFINE_CLASS_1(pcib, mptable_pcib_driver, mptable_pcib_pci_methods, sizeof(struct pcib_softc), pcib_driver); DRIVER_MODULE(mptable_pcib, pci, mptable_pcib_driver, pcib_devclass, 0, 0);
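
/*
 * Illustrative sketch (not part of this commit): the attach pattern shared by
 * the legacy, QPI and MPTable host-PCI bridge drivers after this change.  The
 * my_pcib_* names and the softc layout are made up for the example; the bus
 * interface calls are the stock newbus/pci(4) ones used above.  The "pci"
 * child is added with a wildcard unit (-1), so the pciN unit number is picked
 * by newbus rather than forced to equal the PCI bus number, and pci(4) still
 * learns its bus number through the PCIB_IVAR_BUS ivar.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/errno.h>

#include <dev/pci/pcivar.h>

struct my_pcib_softc {
	int	sc_bus;		/* PCI bus number behind this bridge */
};

static int
my_pcib_attach(device_t dev)
{

	/* Wildcard unit: pciN no longer has to match sc_bus. */
	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static int
my_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct my_pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		*result = 0;
		return (0);
	case PCIB_IVAR_BUS:
		/* This ivar, not the unit number, tells pci(4) its bus. */
		*result = sc->sc_bus;
		return (0);
	}
	return (ENOENT);
}
/*
 * In a real driver these two would be wired into a device_method_t table via
 * DEVMETHOD(device_attach, ...) and DEVMETHOD(bus_read_ivar, ...), exactly as
 * in the method tables above.
 */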