Index: stable/4/sys/dev/aic7xxx/ahc_eisa.c =================================================================== --- stable/4/sys/dev/aic7xxx/ahc_eisa.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/ahc_eisa.c (revision 125849) @@ -1,214 +1,215 @@ /* * FreeBSD, EISA product support functions * * * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahc_eisa.c#11 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahc_eisa.c#13 $ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <dev/aic7xxx/aic7xxx_osm.h> #include <dev/aic7xxx/aic7xxx_inline.h> static int aic7770_probe(device_t dev) { struct aic7770_identity *entry; struct resource *regs; uint32_t iobase; bus_space_handle_t bsh; bus_space_tag_t tag; u_int irq; u_int intdef; u_int hcntrl; int shared; int rid; int error; entry = aic7770_find_device(eisa_get_id(dev)); if (entry == NULL) return (ENXIO); device_set_desc(dev, entry->name); iobase = (eisa_get_slot(dev) * EISA_SLOT_SIZE) + AHC_EISA_SLOT_OFFSET; eisa_add_iospace(dev, iobase, AHC_EISA_IOSIZE, RESVADDR_NONE); rid = 0; regs = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); if (regs == NULL) { device_printf(dev, "Unable to map I/O space?!\n"); return ENOMEM; } tag = rman_get_bustag(regs); bsh = rman_get_bushandle(regs); error = 0; /* Pause the card preserving the IRQ type */ hcntrl = bus_space_read_1(tag, bsh, HCNTRL) & IRQMS; bus_space_write_1(tag, bsh, HCNTRL, hcntrl | PAUSE); while ((bus_space_read_1(tag, bsh, HCNTRL) & PAUSE) == 0) ; /* Make sure we have a valid interrupt vector */ intdef = bus_space_read_1(tag, bsh, INTDEF); shared = (intdef & EDGE_TRIG) ?
EISA_TRIGGER_EDGE : EISA_TRIGGER_LEVEL; irq = intdef & VECTOR; switch (irq) { case 9: case 10: case 11: case 12: case 14: case 15: break; default: printf("aic7770 at slot %d: illegal irq setting %d\n", eisa_get_slot(dev), intdef); error = ENXIO; } if (error == 0) eisa_add_intr(dev, irq, shared); bus_release_resource(dev, SYS_RES_IOPORT, rid, regs); return (error); } static int aic7770_attach(device_t dev) { struct aic7770_identity *entry; struct ahc_softc *ahc; char *name; int error; entry = aic7770_find_device(eisa_get_id(dev)); if (entry == NULL) return (ENXIO); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ name = malloc(strlen(device_get_nameunit(dev)) + 1, M_DEVBUF, M_NOWAIT); if (name == NULL) return (ENOMEM); strcpy(name, device_get_nameunit(dev)); ahc = ahc_alloc(dev, name); if (ahc == NULL) return (ENOMEM); ahc_set_unit(ahc, device_get_unit(dev)); /* Allocate a dmatag for our SCB DMA maps */ /* XXX Should be a child of the PCI bus dma tag */ - error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, + error = aic_dma_tag_create(ahc, /*parent*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/AHC_NSEG, /*maxsegsz*/AHC_MAXTRANSFER_SIZE, /*flags*/0, &ahc->parent_dmat); if (error != 0) { printf("ahc_eisa_attach: Could not allocate DMA tag " "- error %d\n", error); ahc_free(ahc); return (ENOMEM); } ahc->dev_softc = dev; error = aic7770_config(ahc, entry, /*unused ioport arg*/0); if (error != 0) { ahc_free(ahc); return (error); } ahc_attach(ahc); return (0); } int aic7770_map_registers(struct ahc_softc *ahc, u_int unused_ioport_arg) { struct resource *regs; int rid; rid = 0; regs = bus_alloc_resource(ahc->dev_softc, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); if (regs == NULL) { device_printf(ahc->dev_softc, "Unable to map I/O space?!\n"); return ENOMEM; } ahc->platform_data->regs_res_type = SYS_RES_IOPORT; ahc->platform_data->regs_res_id = rid, ahc->platform_data->regs = regs; ahc->tag = rman_get_bustag(regs); ahc->bsh = rman_get_bushandle(regs); return (0); } int aic7770_map_int(struct ahc_softc *ahc, int irq) { int zero; zero = 0; ahc->platform_data->irq = bus_alloc_resource(ahc->dev_softc, SYS_RES_IRQ, &zero, 0, ~0, 1, RF_ACTIVE); if (ahc->platform_data->irq == NULL) return (ENOMEM); ahc->platform_data->irq_res_type = SYS_RES_IRQ; return (ahc_map_int(ahc)); } static device_method_t ahc_eisa_device_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aic7770_probe), DEVMETHOD(device_attach, aic7770_attach), DEVMETHOD(device_detach, ahc_detach), { 0, 0 } }; static driver_t ahc_eisa_driver = { "ahc", ahc_eisa_device_methods, sizeof(struct ahc_softc) }; DRIVER_MODULE(ahc_eisa, eisa, ahc_eisa_driver, ahc_devclass, 0, 0); MODULE_DEPEND(ahc_eisa, ahc, 1, 1, 1); MODULE_VERSION(ahc_eisa, 1); Index: stable/4/sys/dev/aic7xxx/ahc_pci.c =================================================================== --- stable/4/sys/dev/aic7xxx/ahc_pci.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/ahc_pci.c (revision 125849) @@ -1,281 +1,258 @@ /* * FreeBSD, PCI product support functions * * Copyright (c) 1995-2001 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahc_pci.c#13 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahc_pci.c#19 $ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <dev/aic7xxx/aic7xxx_osm.h> -#define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */ -#define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ - static int ahc_pci_probe(device_t dev); static int ahc_pci_attach(device_t dev); static device_method_t ahc_pci_device_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ahc_pci_probe), DEVMETHOD(device_attach, ahc_pci_attach), DEVMETHOD(device_detach, ahc_detach), { 0, 0 } }; static driver_t ahc_pci_driver = { "ahc", ahc_pci_device_methods, sizeof(struct ahc_softc) }; DRIVER_MODULE(ahc_pci, pci, ahc_pci_driver, ahc_devclass, 0, 0); DRIVER_MODULE(ahc_pci, cardbus, ahc_pci_driver, ahc_devclass, 0, 0); MODULE_DEPEND(ahc_pci, ahc, 1, 1, 1); MODULE_VERSION(ahc_pci, 1); static int ahc_pci_probe(device_t dev) { struct ahc_pci_identity *entry; entry = ahc_find_pci_device(dev); if (entry != NULL) { device_set_desc(dev, entry->name); return (0); } return (ENXIO); } static int ahc_pci_attach(device_t dev) { struct ahc_pci_identity *entry; struct ahc_softc *ahc; char *name; int error; entry = ahc_find_pci_device(dev); if (entry == NULL) return (ENXIO); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ name = malloc(strlen(device_get_nameunit(dev)) + 1, M_DEVBUF, M_NOWAIT); if (name == NULL) return (ENOMEM); strcpy(name, device_get_nameunit(dev)); ahc = ahc_alloc(dev, name); if (ahc == NULL) return (ENOMEM); ahc_set_unit(ahc, device_get_unit(dev)); /* * Should we bother disabling 39Bit addressing * based on installed memory? */ if (sizeof(bus_addr_t) > 4) ahc->flags |= AHC_39BIT_ADDRESSING; /* Allocate a dmatag for our SCB DMA maps */ /* XXX Should be a child of the PCI bus dma tag */ - error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, + error = aic_dma_tag_create(ahc, /*parent*/NULL, /*alignment*/1, /*boundary*/0, (ahc->flags & AHC_39BIT_ADDRESSING) - ?
0x7FFFFFFFFFLL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/AHC_NSEG, /*maxsegsz*/AHC_MAXTRANSFER_SIZE, /*flags*/0, &ahc->parent_dmat); if (error != 0) { printf("ahc_pci_attach: Could not allocate DMA tag " "- error %d\n", error); ahc_free(ahc); return (ENOMEM); } ahc->dev_softc = dev; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); return (error); } ahc_attach(ahc); return (0); } int ahc_pci_map_registers(struct ahc_softc *ahc) { struct resource *regs; u_int command; int regs_type; int regs_id; int allow_memio; - command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); + command = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); regs = NULL; regs_type = 0; regs_id = 0; /* Retrieve the per-device 'allow_memio' hint */ if (resource_int_value(device_get_name(ahc->dev_softc), device_get_unit(ahc->dev_softc), "allow_memio", &allow_memio) != 0) { if (bootverbose) device_printf(ahc->dev_softc, "Defaulting to MEMIO "); #ifdef AHC_ALLOW_MEMIO if (bootverbose) printf("on\n"); allow_memio = 1; #else if (bootverbose) printf("off\n"); allow_memio = 0; #endif } if ((allow_memio != 0) && (command & PCIM_CMD_MEMEN) != 0) { regs_type = SYS_RES_MEMORY; regs_id = AHC_PCI_MEMADDR; regs = bus_alloc_resource(ahc->dev_softc, regs_type, ®s_id, 0, ~0, 1, RF_ACTIVE); if (regs != NULL) { ahc->tag = rman_get_bustag(regs); ahc->bsh = rman_get_bushandle(regs); /* * Do a quick test to see if memory mapped * I/O is functioning correctly. */ if (ahc_pci_test_register_access(ahc) != 0) { device_printf(ahc->dev_softc, "PCI Device %d:%d:%d failed memory " "mapped test. Using PIO.\n", - ahc_get_pci_bus(ahc->dev_softc), - ahc_get_pci_slot(ahc->dev_softc), - ahc_get_pci_function(ahc->dev_softc)); + aic_get_pci_bus(ahc->dev_softc), + aic_get_pci_slot(ahc->dev_softc), + aic_get_pci_function(ahc->dev_softc)); bus_release_resource(ahc->dev_softc, regs_type, regs_id, regs); regs = NULL; } else { command &= ~PCIM_CMD_PORTEN; - ahc_pci_write_config(ahc->dev_softc, + aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/1); } } } if (regs == NULL && (command & PCIM_CMD_PORTEN) != 0) { regs_type = SYS_RES_IOPORT; regs_id = AHC_PCI_IOADDR; regs = bus_alloc_resource(ahc->dev_softc, regs_type, ®s_id, 0, ~0, 1, RF_ACTIVE); if (regs != NULL) { ahc->tag = rman_get_bustag(regs); ahc->bsh = rman_get_bushandle(regs); - command &= ~PCIM_CMD_MEMEN; - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, - command, /*bytes*/1); + if (ahc_pci_test_register_access(ahc) != 0) { + device_printf(ahc->dev_softc, + "PCI Device %d:%d:%d failed I/O " + "mapped test.\n", + aic_get_pci_bus(ahc->dev_softc), + aic_get_pci_slot(ahc->dev_softc), + aic_get_pci_function(ahc->dev_softc)); + bus_release_resource(ahc->dev_softc, regs_type, + regs_id, regs); + regs = NULL; + } else { + command &= ~PCIM_CMD_MEMEN; + aic_pci_write_config(ahc->dev_softc, + PCIR_COMMAND, + command, /*bytes*/1); + } } } if (regs == NULL) { device_printf(ahc->dev_softc, "can't allocate register resources\n"); return (ENOMEM); } ahc->platform_data->regs_res_type = regs_type; ahc->platform_data->regs_res_id = regs_id; ahc->platform_data->regs = regs; return (0); } int ahc_pci_map_int(struct ahc_softc *ahc) { int zero; zero = 0; ahc->platform_data->irq = bus_alloc_resource(ahc->dev_softc, SYS_RES_IRQ, &zero, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (ahc->platform_data->irq == NULL) { device_printf(ahc->dev_softc, "bus_alloc_resource() 
failed to allocate IRQ\n"); return (ENOMEM); } ahc->platform_data->irq_res_type = SYS_RES_IRQ; return (ahc_map_int(ahc)); } -void -ahc_power_state_change(struct ahc_softc *ahc, ahc_power_state new_state) -{ - uint32_t cap; - u_int cap_offset; - - /* - * Traverse the capability list looking for - * the power management capability. - */ - cap = 0; - cap_offset = ahc_pci_read_config(ahc->dev_softc, - PCIR_CAP_PTR, /*bytes*/1); - while (cap_offset != 0) { - - cap = ahc_pci_read_config(ahc->dev_softc, - cap_offset, /*bytes*/4); - if ((cap & 0xFF) == 1 - && ((cap >> 16) & 0x3) > 0) { - uint32_t pm_control; - - pm_control = ahc_pci_read_config(ahc->dev_softc, - cap_offset + 4, - /*bytes*/2); - pm_control &= ~0x3; - pm_control |= new_state; - ahc_pci_write_config(ahc->dev_softc, - cap_offset + 4, - pm_control, /*bytes*/2); - break; - } - cap_offset = (cap >> 8) & 0xFF; - } -} Index: stable/4/sys/dev/aic7xxx/ahd_pci.c =================================================================== --- stable/4/sys/dev/aic7xxx/ahd_pci.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/ahd_pci.c (revision 125849) @@ -1,297 +1,259 @@ /* * FreeBSD, PCI product support functions * * Copyright (c) 1995-2001 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
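The ahc_power_state_change() routine deleted just above walked the PCI capability list by hand to find the power-management capability before updating its PMCSR register. For readers who have not seen that layout, here is a standalone sketch of the same walk; the read_cfg/write_cfg callbacks and the set_power_state name are illustrative stand-ins for the driver's aic_pci_read_config()/aic_pci_write_config() accessors, not code from this commit.

/*
 * Each capability is a dword in PCI config space: byte 0 is the
 * capability ID, byte 1 links to the next capability, and the
 * remaining bytes are capability-specific (for PM, the PMC register).
 */
#include <stdint.h>

#define PCIR_CAP_PTR    0x34    /* config offset of the first capability */
#define PCIY_PMG        0x01    /* power-management capability ID */

typedef uint32_t (*read_cfg_t)(void *dev, unsigned reg, int bytes);
typedef void (*write_cfg_t)(void *dev, unsigned reg, uint32_t val, int bytes);

void
set_power_state(void *dev, read_cfg_t read_cfg, write_cfg_t write_cfg,
    uint32_t new_state)
{
        uint32_t cap;
        unsigned cap_offset;

        cap_offset = read_cfg(dev, PCIR_CAP_PTR, /*bytes*/1);
        while (cap_offset != 0) {
                cap = read_cfg(dev, cap_offset, /*bytes*/4);
                /* PM capability with a nonzero spec version? */
                if ((cap & 0xFF) == PCIY_PMG && ((cap >> 16) & 0x3) > 0) {
                        uint32_t pm_control;

                        /* PMCSR sits 4 bytes in; bits 0-1 pick D0-D3. */
                        pm_control = read_cfg(dev, cap_offset + 4, /*bytes*/2);
                        pm_control &= ~0x3;
                        pm_control |= new_state;
                        write_cfg(dev, cap_offset + 4, pm_control, /*bytes*/2);
                        break;
                }
                cap_offset = (cap >> 8) & 0xFF;
        }
}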
* - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahd_pci.c#13 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/ahd_pci.c#17 $ */ +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <dev/aic7xxx/aic79xx_osm.h> -#define AHD_PCI_IOADDR0 PCIR_MAPS /* Primary I/O BAR */ -#define AHD_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ -#define AHD_PCI_IOADDR1 (PCIR_MAPS + 12)/* Secondary I/O BAR */ - static int ahd_pci_probe(device_t dev); static int ahd_pci_attach(device_t dev); static device_method_t ahd_pci_device_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ahd_pci_probe), DEVMETHOD(device_attach, ahd_pci_attach), DEVMETHOD(device_detach, ahd_detach), { 0, 0 } }; static driver_t ahd_pci_driver = { "ahd", ahd_pci_device_methods, sizeof(struct ahd_softc) }; static devclass_t ahd_devclass; DRIVER_MODULE(ahd, pci, ahd_pci_driver, ahd_devclass, 0, 0); DRIVER_MODULE(ahd, cardbus, ahd_pci_driver, ahd_devclass, 0, 0); MODULE_DEPEND(ahd_pci, ahd, 1, 1, 1); MODULE_VERSION(ahd_pci, 1); static int ahd_pci_probe(device_t dev) { struct ahd_pci_identity *entry; entry = ahd_find_pci_device(dev); if (entry != NULL) { device_set_desc(dev, entry->name); return (0); } return (ENXIO); } static int ahd_pci_attach(device_t dev) { struct ahd_pci_identity *entry; struct ahd_softc *ahd; char *name; int error; entry = ahd_find_pci_device(dev); if (entry == NULL) return (ENXIO); /* * Allocate a softc for this card and * set it up for attachment by our * common detect routine. */ name = malloc(strlen(device_get_nameunit(dev)) + 1, M_DEVBUF, M_NOWAIT); if (name == NULL) return (ENOMEM); strcpy(name, device_get_nameunit(dev)); ahd = ahd_alloc(dev, name); if (ahd == NULL) return (ENOMEM); ahd_set_unit(ahd, device_get_unit(dev)); /* * Should we bother disabling 39Bit addressing * based on installed memory? */ if (sizeof(bus_addr_t) > 4) ahd->flags |= AHD_39BIT_ADDRESSING; /* Allocate a dmatag for our SCB DMA maps */ /* XXX Should be a child of the PCI bus dma tag */ - error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, + error = aic_dma_tag_create(ahd, /*parent*/NULL, /*alignment*/1, /*boundary*/0, (ahd->flags & AHD_39BIT_ADDRESSING) ?
0x7FFFFFFFFF : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/AHD_NSEG, /*maxsegsz*/AHD_MAXTRANSFER_SIZE, /*flags*/0, &ahd->parent_dmat); if (error != 0) { printf("ahd_pci_attach: Could not allocate DMA tag " "- error %d\n", error); ahd_free(ahd); return (ENOMEM); } ahd->dev_softc = dev; error = ahd_pci_config(ahd, entry); if (error != 0) { ahd_free(ahd); return (error); } ahd_attach(ahd); return (0); } int ahd_pci_map_registers(struct ahd_softc *ahd) { struct resource *regs; struct resource *regs2; u_int command; int regs_type; int regs_id; int regs_id2; int allow_memio; - command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); + command = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/1); regs = NULL; regs2 = NULL; regs_type = 0; regs_id = 0; /* Retrieve the per-device 'allow_memio' hint */ if (resource_int_value(device_get_name(ahd->dev_softc), device_get_unit(ahd->dev_softc), "allow_memio", &allow_memio) != 0) { if (bootverbose) device_printf(ahd->dev_softc, "Defaulting to MEMIO on\n"); } if ((command & PCIM_CMD_MEMEN) != 0 && (ahd->bugs & AHD_PCIX_MMAPIO_BUG) == 0 && allow_memio != 0) { regs_type = SYS_RES_MEMORY; regs_id = AHD_PCI_MEMADDR; regs = bus_alloc_resource(ahd->dev_softc, regs_type, ®s_id, 0, ~0, 1, RF_ACTIVE); if (regs != NULL) { int error; ahd->tags[0] = rman_get_bustag(regs); ahd->bshs[0] = rman_get_bushandle(regs); ahd->tags[1] = ahd->tags[0]; error = bus_space_subregion(ahd->tags[0], ahd->bshs[0], /*offset*/0x100, /*size*/0x100, &ahd->bshs[1]); /* * Do a quick test to see if memory mapped * I/O is functioning correctly. */ if (error != 0 || ahd_pci_test_register_access(ahd) != 0) { device_printf(ahd->dev_softc, "PCI Device %d:%d:%d failed memory " "mapped test. 
Using PIO.\n", - ahd_get_pci_bus(ahd->dev_softc), - ahd_get_pci_slot(ahd->dev_softc), - ahd_get_pci_function(ahd->dev_softc)); + aic_get_pci_bus(ahd->dev_softc), + aic_get_pci_slot(ahd->dev_softc), + aic_get_pci_function(ahd->dev_softc)); bus_release_resource(ahd->dev_softc, regs_type, regs_id, regs); regs = NULL; } else { command &= ~PCIM_CMD_PORTEN; - ahd_pci_write_config(ahd->dev_softc, + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/1); } } } if (regs == NULL && (command & PCIM_CMD_PORTEN) != 0) { regs_type = SYS_RES_IOPORT; regs_id = AHD_PCI_IOADDR0; regs = bus_alloc_resource(ahd->dev_softc, regs_type, ®s_id, 0, ~0, 1, RF_ACTIVE); if (regs == NULL) { device_printf(ahd->dev_softc, "can't allocate register resources\n"); return (ENOMEM); } ahd->tags[0] = rman_get_bustag(regs); ahd->bshs[0] = rman_get_bushandle(regs); /* And now the second BAR */ regs_id2 = AHD_PCI_IOADDR1; regs2 = bus_alloc_resource(ahd->dev_softc, regs_type, ®s_id2, 0, ~0, 1, RF_ACTIVE); if (regs2 == NULL) { device_printf(ahd->dev_softc, "can't allocate register resources\n"); return (ENOMEM); } ahd->tags[1] = rman_get_bustag(regs2); ahd->bshs[1] = rman_get_bushandle(regs2); command &= ~PCIM_CMD_MEMEN; - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/1); ahd->platform_data->regs_res_type[1] = regs_type; ahd->platform_data->regs_res_id[1] = regs_id2; ahd->platform_data->regs[1] = regs2; } ahd->platform_data->regs_res_type[0] = regs_type; ahd->platform_data->regs_res_id[0] = regs_id; ahd->platform_data->regs[0] = regs; return (0); } int ahd_pci_map_int(struct ahd_softc *ahd) { int zero; zero = 0; ahd->platform_data->irq = bus_alloc_resource(ahd->dev_softc, SYS_RES_IRQ, &zero, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (ahd->platform_data->irq == NULL) return (ENOMEM); ahd->platform_data->irq_res_type = SYS_RES_IRQ; return (ahd_map_int(ahd)); -} - -void -ahd_power_state_change(struct ahd_softc *ahd, ahd_power_state new_state) -{ - uint32_t cap; - u_int cap_offset; - - /* - * Traverse the capability list looking for - * the power management capability. - */ - cap = 0; - cap_offset = ahd_pci_read_config(ahd->dev_softc, - PCIR_CAP_PTR, /*bytes*/1); - while (cap_offset != 0) { - - cap = ahd_pci_read_config(ahd->dev_softc, - cap_offset, /*bytes*/4); - if ((cap & 0xFF) == 1 - && ((cap >> 16) & 0x3) > 0) { - uint32_t pm_control; - - pm_control = ahd_pci_read_config(ahd->dev_softc, - cap_offset + 4, - /*bytes*/2); - pm_control &= ~0x3; - pm_control |= new_state; - ahd_pci_write_config(ahd->dev_softc, - cap_offset + 4, - pm_control, /*bytes*/2); - break; - } - cap_offset = (cap >> 8) & 0xFF; - } } Index: stable/4/sys/dev/aic7xxx/aic7770.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic7770.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7770.c (revision 125849) @@ -1,415 +1,415 @@ /* * Product specific probe and attach routines for: * 27/284X and aic7770 motherboard SCSI controllers * * Copyright (c) 1994-1998, 2000, 2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#32 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic7770.c#34 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #else +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); #include <dev/aic7xxx/aic7xxx_osm.h> #include <dev/aic7xxx/aic7xxx_inline.h> #include <dev/aic7xxx/aic7xxx_93cx6.h> #endif #define ID_AIC7770 0x04907770 #define ID_AHA_274x 0x04907771 #define ID_AHA_284xB 0x04907756 /* BIOS enabled */ #define ID_AHA_284x 0x04907757 /* BIOS disabled */ #define ID_OLV_274x 0x04907782 /* Olivetti OEM */ #define ID_OLV_274xD 0x04907783 /* Olivetti OEM (Differential) */ static int aic7770_chip_init(struct ahc_softc *ahc); static int aic7770_suspend(struct ahc_softc *ahc); static int aic7770_resume(struct ahc_softc *ahc); static int aha2840_load_seeprom(struct ahc_softc *ahc); static ahc_device_setup_t ahc_aic7770_VL_setup; static ahc_device_setup_t ahc_aic7770_EISA_setup; static ahc_device_setup_t ahc_aic7770_setup; struct aic7770_identity aic7770_ident_table[] = { { ID_AHA_274x, 0xFFFFFFFF, "Adaptec 274X SCSI adapter", ahc_aic7770_EISA_setup }, { ID_AHA_284xB, 0xFFFFFFFE, "Adaptec 284X SCSI adapter", ahc_aic7770_VL_setup }, { ID_AHA_284x, 0xFFFFFFFE, "Adaptec 284X SCSI adapter (BIOS Disabled)", ahc_aic7770_VL_setup }, { ID_OLV_274x, 0xFFFFFFFF, "Adaptec (Olivetti OEM) 274X SCSI adapter", ahc_aic7770_EISA_setup }, { ID_OLV_274xD, 0xFFFFFFFF, "Adaptec (Olivetti OEM) 274X Differential SCSI adapter", ahc_aic7770_EISA_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7770, 0xFFFFFFFF, "Adaptec aic7770 SCSI adapter", ahc_aic7770_EISA_setup } }; const int ahc_num_aic7770_devs = NUM_ELEMENTS(aic7770_ident_table); struct aic7770_identity * aic7770_find_device(uint32_t id) { struct aic7770_identity *entry; int i; for (i = 0; i < ahc_num_aic7770_devs; i++) { entry = &aic7770_ident_table[i]; if (entry->full_id == (id & entry->id_mask)) return (entry); } return (NULL); } int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) { u_long l; int error; int have_seeprom; u_int hostconf; u_int irq; u_int intdef; error = entry->setup(ahc);
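aic7770_find_device() above compares the EISA product ID against each table entry under that entry's mask, so one entry can cover board variants whose IDs differ only in bits the mask clears: the 0xFFFFFFFE mask on the 284X entries ignores the low ID bit that flips with the BIOS-enabled state. A small self-contained illustration, with a trimmed table and simplified names (not the driver's):

#include <stdint.h>
#include <stdio.h>

struct ident {
        uint32_t full_id;
        uint32_t id_mask;
        const char *name;
};

static const struct ident table[] = {
        { 0x04907756, 0xFFFFFFFE, "Adaptec 284X SCSI adapter" },
        { 0x04907770, 0xFFFFFFFF, "Adaptec aic7770 SCSI adapter" },
};

static const struct ident *
find_device(uint32_t id)
{
        size_t i;

        /* Mask off "don't care" bits before comparing, as above. */
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].full_id == (id & table[i].id_mask))
                        return (&table[i]);
        return (NULL);
}

int
main(void)
{
        /* 0x04907757, the BIOS-disabled 284x, still finds the entry. */
        printf("%s\n", find_device(0x04907757)->name);
        return (0);
}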
have_seeprom = 0; if (error != 0) return (error); error = aic7770_map_registers(ahc, io); if (error != 0) return (error); /* * Before we continue probing the card, ensure that * its interrupts are *disabled*. We don't want * a misstep to hang the machine in an interrupt * storm. */ ahc_intr_enable(ahc, FALSE); ahc->description = entry->name; error = ahc_softc_init(ahc); if (error != 0) return (error); ahc->bus_chip_init = aic7770_chip_init; ahc->bus_suspend = aic7770_suspend; ahc->bus_resume = aic7770_resume; error = ahc_reset(ahc, /*reinit*/FALSE); if (error != 0) return (error); /* Make sure we have a valid interrupt vector */ intdef = ahc_inb(ahc, INTDEF); irq = intdef & VECTOR; switch (irq) { case 9: case 10: case 11: case 12: case 14: case 15: break; default: - printf("aic7770_config: illegal irq setting %d\n", intdef); + printf("aic7770_config: invalid irq setting %d\n", intdef); return (ENXIO); } if ((intdef & EDGE_TRIG) != 0) ahc->flags |= AHC_EDGE_INTERRUPT; switch (ahc->chip & (AHC_EISA|AHC_VL)) { case AHC_EISA: { u_int biosctrl; u_int scsiconf; u_int scsiconf1; biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL); scsiconf = ahc_inb(ahc, SCSICONF); scsiconf1 = ahc_inb(ahc, SCSICONF + 1); /* Get the primary channel information */ if ((biosctrl & CHANNEL_B_PRIMARY) != 0) ahc->flags |= 1; if ((biosctrl & BIOSMODE) == BIOSDISABLED) { ahc->flags |= AHC_USEDEFAULTS; } else { if ((ahc->features & AHC_WIDE) != 0) { ahc->our_id = scsiconf1 & HWSCSIID; if (scsiconf & TERM_ENB) ahc->flags |= AHC_TERM_ENB_A; } else { ahc->our_id = scsiconf & HSCSIID; ahc->our_id_b = scsiconf1 & HSCSIID; if (scsiconf & TERM_ENB) ahc->flags |= AHC_TERM_ENB_A; if (scsiconf1 & TERM_ENB) ahc->flags |= AHC_TERM_ENB_B; } } if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS)) ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B; break; } case AHC_VL: { have_seeprom = aha2840_load_seeprom(ahc); break; } default: break; } if (have_seeprom == 0) { free(ahc->seep_config, M_DEVBUF); ahc->seep_config = NULL; } /* * Ensure autoflush is enabled */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); /* Setup the FIFO threshold and the bus off time */ hostconf = ahc_inb(ahc, HOSTCONF); ahc_outb(ahc, BUSSPD, hostconf & DFTHRSH); ahc_outb(ahc, BUSTIME, (hostconf << 2) & BOFF); ahc->bus_softc.aic7770_softc.busspd = hostconf & DFTHRSH; ahc->bus_softc.aic7770_softc.bustime = (hostconf << 2) & BOFF; /* * Generic aic7xxx initialization. */ error = ahc_init(ahc); if (error != 0) return (error); error = aic7770_map_int(ahc, irq); if (error != 0) return (error); ahc_list_lock(&l); /* * Link this softc in with all other ahc instances. */ ahc_softc_insert(ahc); /* * Enable the board's BUS drivers */ ahc_outb(ahc, BCTL, ENABLE); ahc_list_unlock(&l); return (0); } static int aic7770_chip_init(struct ahc_softc *ahc) { ahc_outb(ahc, BUSSPD, ahc->bus_softc.aic7770_softc.busspd); ahc_outb(ahc, BUSTIME, ahc->bus_softc.aic7770_softc.bustime); ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~AUTOFLUSHDIS); ahc_outb(ahc, BCTL, ENABLE); return (ahc_chip_init(ahc)); } static int aic7770_suspend(struct ahc_softc *ahc) { return (ahc_suspend(ahc)); } static int aic7770_resume(struct ahc_softc *ahc) { return (ahc_resume(ahc)); } /* * Read the 284x SEEPROM. 
*/ static int aha2840_load_seeprom(struct ahc_softc *ahc) { struct seeprom_descriptor sd; struct seeprom_config *sc; int have_seeprom; uint8_t scsi_conf; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL_2840; sd.sd_status_offset = STATUS_2840; sd.sd_dataout_offset = STATUS_2840; sd.sd_chip = C46; sd.sd_MS = 0; sd.sd_RDY = EEPROM_TF; sd.sd_CS = CS_2840; sd.sd_CK = CK_2840; sd.sd_DO = DO_2840; sd.sd_DI = DI_2840; sc = ahc->seep_config; if (bootverbose) printf("%s: Reading SEEPROM...", ahc_name(ahc)); have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, /*start_addr*/0, sizeof(*sc)/2); if (have_seeprom) { if (ahc_verify_cksum(sc) == 0) { if(bootverbose) printf ("checksum error\n"); have_seeprom = 0; } else if (bootverbose) { printf("done.\n"); } } if (!have_seeprom) { if (bootverbose) printf("%s: No SEEPROM available\n", ahc_name(ahc)); ahc->flags |= AHC_USEDEFAULTS; } else { /* * Put the data we've collected down into SRAM * where ahc_init will find it. */ int i; int max_targ; uint16_t discenable; max_targ = (ahc->features & AHC_WIDE) != 0 ? 16 : 8; discenable = 0; for (i = 0; i < max_targ; i++){ uint8_t target_settings; target_settings = (sc->device_flags[i] & CFXFER) << 4; if (sc->device_flags[i] & CFSYNCH) target_settings |= SOFS; if (sc->device_flags[i] & CFWIDEB) target_settings |= WIDEXFER; if (sc->device_flags[i] & CFDISC) discenable |= (0x01 << i); ahc_outb(ahc, TARG_SCSIRATE + i, target_settings); } ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); ahc->our_id = sc->brtime_id & CFSCSIID; scsi_conf = (ahc->our_id & 0x7); if (sc->adapter_control & CFSPARITY) scsi_conf |= ENSPCHK; if (sc->adapter_control & CFRESETB) scsi_conf |= RESET_SCSI; if (sc->bios_control & CF284XEXTEND) ahc->flags |= AHC_EXTENDED_TRANS_A; /* Set SCSICONF info */ ahc_outb(ahc, SCSICONF, scsi_conf); if (sc->adapter_control & CF284XSTERM) ahc->flags |= AHC_TERM_ENB_A; } return (have_seeprom); } static int ahc_aic7770_VL_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7770_setup(ahc); ahc->chip |= AHC_VL; return (error); } static int ahc_aic7770_EISA_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7770_setup(ahc); ahc->chip |= AHC_EISA; return (error); } static int ahc_aic7770_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_AIC7770; ahc->features = AHC_AIC7770_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG; ahc->flags |= AHC_PAGESCBS; ahc->instruction_ram_size = 448; return (0); } Index: stable/4/sys/dev/aic7xxx/aic79xx.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx.c (revision 125849) @@ -1,9889 +1,10205 @@ /* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. 
Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#199 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#238 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aicasm/aicasm_insformat.h" #else +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); #include <dev/aic7xxx/aic79xx_osm.h> #include <dev/aic7xxx/aic79xx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /******************************** Globals *************************************/ struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq); +uint32_t ahd_attach_to_HostRAID_controllers = 1; /***************************** Lookup Tables **********************************/ char *ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" }; static const u_int num_chip_names = NUM_ELEMENTS(ahd_chip_names); /* * Hardware error codes. */ struct ahd_hard_error_entry { uint8_t errno; char *errmesg; }; static struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors); static struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to iterate over real phases, so * exclude the last element from the count.
*/ static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #if AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int 
ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static ahd_callback_t ahd_reset_poll; static ahd_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); +static void ahd_other_scb_timeout(struct ahd_softc *ahd, + struct scb *scb, + struct scb *other_scb); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif /******************************** Private Inlines *****************************/ static __inline void ahd_assert_atn(struct ahd_softc *ahd); static __inline int ahd_currently_packetized(struct ahd_softc *ahd); static __inline int ahd_set_active_fifo(struct ahd_softc *ahd); static __inline void ahd_assert_atn(struct ahd_softc *ahd) { ahd_outb(ahd, SCSISIGO, ATNO); } /* * Determine if the current connection has a packetized * agreement. This does not necessarily mean that we * are currently in a packetized transfer. We could * just as easily be sending or receiving a message. */ static __inline int ahd_currently_packetized(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int packetized; saved_modes = ahd_save_modes(ahd); if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) { /* * The packetized bit refers to the last * connection, not the current one. Check * for non-zero LQISTATE instead. 
*/ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); packetized = ahd_inb(ahd, LQISTATE) != 0; } else { ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED; } ahd_restore_modes(ahd, saved_modes); return (packetized); } static __inline int ahd_set_active_fifo(struct ahd_softc *ahd) { u_int active_fifo; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; switch (active_fifo) { case 0: case 1: ahd_set_modes(ahd, active_fifo, active_fifo); return (1); default: return (0); } } /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ void ahd_restart(struct ahd_softc *ahd) { ahd_pause(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* No more pending messages */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */ ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */ ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET); ahd_outb(ahd, SEQINTCTL, 0); ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SAVED_SCSIID, 0xFF); ahd_outb(ahd, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); /* Always allow reselection */ ahd_outb(ahd, SCSISEQ1, ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); ahd_unpause(ahd); } void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo) { ahd_mode_state saved_modes; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_FIFOS) != 0) printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo); #endif saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, fifo, fifo); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) ahd_outb(ahd, CCSGCTL, CCSGRESET); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_restore_modes(ahd, saved_modes); } /************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up. */ void ahd_flush_qoutfifo(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int saved_scbptr; u_int ccscbctl; u_int scbid; u_int next_scbid; saved_modes = ahd_save_modes(ahd); /* - * Complete any SCBs that just finished being - * DMA'ed into the qoutfifo. + * Flush the good status FIFO for completed packetized commands. */ - ahd_run_qoutfifo(ahd); - - /* - * Flush the good status FIFO for compelted packetized commands. - */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) { u_int fifo_mode; u_int i; - scbid = (ahd_inb(ahd, GSFIFO+1) << 8) - | ahd_inb(ahd, GSFIFO); + scbid = ahd_inw(ahd, GSFIFO); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - GSFIFO SCB %d invalid\n", ahd_name(ahd), scbid); continue; } /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command. 
*/ fifo_mode = 0; +rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. */ fifo_mode ^= 1; ahd_set_modes(ahd, fifo_mode, fifo_mode); + if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue; ahd_run_data_fifo(ahd, scb); /* - * Clearing this transaction in this FIFO may - * cause a CFG4DATA for this same transaction - * to assert in the other FIFO. Make sure we - * loop one more time and check the other FIFO. + * Running this FIFO may cause a CFG4DATA for + * this same transaction to assert in the other + * FIFO or a new snapshot SAVEPTRS interrupt + * in this FIFO. Even running a FIFO may not + * clear the transaction if we are still waiting + * for data to drain to the host. We must loop + * until the transaction is not active in either + * FIFO just to be sure. Reset our loop counter + * so we will visit both FIFOs again before + * declaring this transaction finished. We + * also delay a bit so that status has a chance + * to change before we look at this FIFO again. */ - i = 0; + aic_delay(200); + goto rescan_fifos; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0 || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR) & SG_LIST_NULL) != 0)) { u_int comp_head; /* * The transfer completed with a residual. * Place this SCB on the complete DMA list - * so that we Update our in-core copy of the + * so that we update our in-core copy of the * SCB before completing the command. */ ahd_outb(ahd, SCB_SCSI_STATUS, 0); ahd_outb(ahd, SCB_SGPTR, ahd_inb_scbram(ahd, SCB_SGPTR) | SG_STATUS_VALID); - ahd_outw(ahd, SCB_TAG, SCB_GET_TAG(scb)); + ahd_outw(ahd, SCB_TAG, scbid); + ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL); comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); - ahd_outw(ahd, SCB_NEXT_COMPLETE, comp_head); - if (SCBID_IS_NULL(comp_head)) - ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, - SCB_GET_TAG(scb)); + if (SCBID_IS_NULL(comp_head)) { + ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); + } else { + u_int tail; + + tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL); + ahd_set_scbptr(ahd, tail); + ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid); + ahd_set_scbptr(ahd, scbid); + } } else ahd_complete_scb(ahd, scb); } ahd_set_scbptr(ahd, saved_scbptr); /* * Setup for command channel portion of flush. */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Wait for any inprogress DMA to complete and clear DMA state * if this if for an SCB in the qinfifo. */ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) { if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break; } else if ((ccscbctl & CCSCBDONE) != 0) break; - ahd_delay(200); + aic_delay(200); } - if ((ccscbctl & CCSCBDIR) != 0) + /* + * We leave the sequencer to cleanup in the case of DMA's to + * update the qoutfifo. In all other cases (DMA's to the + * chip or a push of an SCB from the COMPLETE_DMA_SCB list), + * we disable the DMA engine so that the sequencer will not + * attempt to handle the DMA completion. + */ + if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0) ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN)); + /* + * Complete any SCBs that just finished + * being DMA'ed into the qoutfifo. 
+ */ + ahd_run_qoutfifo(ahd); + saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host. */ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); + scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); + while (!SCBID_IS_NULL(scbid)) { + + ahd_set_scbptr(ahd, scbid); + next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + scb = ahd_lookup_scb(ahd, scbid); + if (scb == NULL) { + printf("%s: Warning - Complete Qfrz SCB %d invalid\n", + ahd_name(ahd), scbid); + continue; + } + + ahd_complete_scb(ahd, scb); + scbid = next_scbid; + } + ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); + scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. */ if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb) || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0 && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0)) return (0); return (1); } /* * Run a data fifo to completion for a transaction we know * has completed across the SCSI bus (good status has been * received). We are already set to the correct FIFO mode * on entry to this routine. * * This function attempts to operate exactly as the firmware * would when running this FIFO. Care must be taken to update * this routine any time the firmware's FIFO algorithm is * changed. */ static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb) { u_int seqintsrc; - while (1) { - seqintsrc = ahd_inb(ahd, SEQINTSRC); - if ((seqintsrc & CFG4DATA) != 0) { - uint32_t datacnt; - uint32_t sgptr; + seqintsrc = ahd_inb(ahd, SEQINTSRC); + if ((seqintsrc & CFG4DATA) != 0) { + uint32_t datacnt; + uint32_t sgptr; - /* - * Clear full residual flag. - */ - sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; - ahd_outb(ahd, SCB_SGPTR, sgptr); + /* + * Clear full residual flag. + */ + sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID; + ahd_outb(ahd, SCB_SGPTR, sgptr); - /* - * Load datacnt and address. 
- */ - datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); - if ((datacnt & AHD_DMA_LAST_SEG) != 0) { - sgptr |= LAST_SEG; - ahd_outb(ahd, SG_STATE, 0); - } else - ahd_outb(ahd, SG_STATE, LOADING_NEEDED); - ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); - ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); - ahd_outb(ahd, SG_CACHE_PRE, sgptr); - ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); + /* + * Load datacnt and address. + */ + datacnt = ahd_inl_scbram(ahd, SCB_DATACNT); + if ((datacnt & AHD_DMA_LAST_SEG) != 0) { + sgptr |= LAST_SEG; + ahd_outb(ahd, SG_STATE, 0); + } else + ahd_outb(ahd, SG_STATE, LOADING_NEEDED); + ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR)); + ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK); + ahd_outb(ahd, SG_CACHE_PRE, sgptr); + ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); - /* - * Initialize Residual Fields. - */ - ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); - ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); + /* + * Initialize Residual Fields. + */ + ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24); + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK); - /* - * Mark the SCB as having a FIFO in use. - */ - ahd_outb(ahd, SCB_FIFO_USE_COUNT, - ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); + /* + * Mark the SCB as having a FIFO in use. + */ + ahd_outb(ahd, SCB_FIFO_USE_COUNT, + ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1); - /* - * Install a "fake" handler for this FIFO. - */ - ahd_outw(ahd, LONGJMP_ADDR, 0); + /* + * Install a "fake" handler for this FIFO. + */ + ahd_outw(ahd, LONGJMP_ADDR, 0); + /* + * Notify the hardware that we have satisfied + * this sequencer interrupt. + */ + ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); + } else if ((seqintsrc & SAVEPTRS) != 0) { + uint32_t sgptr; + uint32_t resid; + + if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { /* - * Notify the hardware that we have satisfied - * this sequencer interrupt. + * Snapshot Save Pointers. All that + * is necessary to clear the snapshot + * is a CLRCHN. */ - ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA); - } else if ((seqintsrc & SAVEPTRS) != 0) { - uint32_t sgptr; - uint32_t resid; + goto clrchn; + } - if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { - /* - * Snapshot Save Pointers. Clear - * the snapshot and continue. - */ - ahd_outb(ahd, DFFSXFRCTL, CLRCHN); - continue; - } + /* + * Disable S/G fetch so the DMA engine + * is available to future users. + */ + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) + ahd_outb(ahd, CCSGCTL, 0); + ahd_outb(ahd, SG_STATE, 0); - /* - * Disable S/G fetch so the DMA engine - * is available to future users. - */ - if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) - ahd_outb(ahd, CCSGCTL, 0); - ahd_outb(ahd, SG_STATE, 0); + /* + * Flush the data FIFO. Strickly only + * necessary for Rev A parts. + */ + ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); + /* + * Calculate residual. + */ + sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); + resid = ahd_inl(ahd, SHCNT); + resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; + ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); + if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { /* - * Flush the data FIFO. Strickly only - * necessary for Rev A parts. + * Must back up to the correct S/G element. + * Typically this just means resetting our + * low byte to the offset in the SG_CACHE, + * but if we wrapped, we have to correct + * the other bytes of the sgptr too. 
*/ - ahd_outb(ahd, DFCNTRL, - ahd_inb(ahd, DFCNTRL) | FIFOFLUSH); + if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 + && (sgptr & 0x80) == 0) + sgptr -= 0x100; + sgptr &= ~0xFF; + sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) + & SG_ADDR_MASK; + ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); + ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); + } else if ((resid & AHD_SG_LEN_MASK) == 0) { + ahd_outb(ahd, SCB_RESIDUAL_SGPTR, + sgptr | SG_LIST_NULL); + } + /* + * Save Pointers. + */ + ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); + ahd_outl(ahd, SCB_DATACNT, resid); + ahd_outl(ahd, SCB_SGPTR, sgptr); + ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); + ahd_outb(ahd, SEQIMODE, + ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); + /* + * If the data is to the SCSI bus, we are + * done, otherwise wait for FIFOEMP. + */ + if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) + goto clrchn; + } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { + uint32_t sgptr; + uint64_t data_addr; + uint32_t data_len; + u_int dfcntrl; - /* - * Calculate residual. - */ - sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); - resid = ahd_inl(ahd, SHCNT); - resid |= - ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24; - ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); - if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { - /* - * Must back up to the correct S/G element. - * Typically this just means resetting our - * low byte to the offset in the SG_CACHE, - * but if we wrapped, we have to correct - * the other bytes of the sgptr too. - */ - if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0 - && (sgptr & 0x80) == 0) - sgptr -= 0x100; - sgptr &= ~0xFF; - sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW) - & SG_ADDR_MASK; - ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); - ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0); - } else if ((resid & AHD_SG_LEN_MASK) == 0) { - ahd_outb(ahd, SCB_RESIDUAL_SGPTR, - sgptr | SG_LIST_NULL); - } - /* - * Save Pointers. - */ - ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR)); - ahd_outl(ahd, SCB_DATACNT, resid); - ahd_outl(ahd, SCB_SGPTR, sgptr); - ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS); - ahd_outb(ahd, SEQIMODE, - ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); - /* - * If the data is to the SCSI bus, we are - * done, otherwise wait for FIFOEMP. - */ - if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) - break; - } else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) { - uint32_t sgptr; - uint64_t data_addr; - uint32_t data_len; - u_int dfcntrl; + /* + * Disable S/G fetch so the DMA engine + * is available to future users. We won't + * be using the DMA engine to load segments. + */ + if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { + ahd_outb(ahd, CCSGCTL, 0); + ahd_outb(ahd, SG_STATE, LOADING_NEEDED); + } - /* - * Disable S/G fetch so the DMA engine - * is available to future users. - */ - if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) { - ahd_outb(ahd, CCSGCTL, 0); - ahd_outb(ahd, SG_STATE, LOADING_NEEDED); - } + /* + * Wait for the DMA engine to notice that the + * host transfer is enabled and that there is + * space in the S/G FIFO for new segments before + * loading more segments. + */ + if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0 + && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) { /* - * Wait for the DMA engine to notice that the - * host transfer is enabled and that there is - * space in the S/G FIFO for new segments before - * loading more segments. 
- */ - if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) == 0) - continue; - if ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) == 0) - continue; - - /* * Determine the offset of the next S/G * element to load. */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK; data_addr <<= 8; data_addr |= sg->addr; data_len = sg->len; sgptr += sizeof(*sg); } /* * Update residual information. */ ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); /* * Load the S/G. */ if (data_len & AHD_DMA_LAST_SEG) { sgptr |= LAST_SEG; ahd_outb(ahd, SG_STATE, 0); } ahd_outq(ahd, HADDR, data_addr); ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK); ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF); /* * Advertise the segment to the hardware. */ dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; - if ((ahd->features & AHD_NEW_DFCNTRL_OPTS)!=0) { + if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN * is never modified by this * operation. */ dfcntrl |= SCSIENWRDIS; } ahd_outb(ahd, DFCNTRL, dfcntrl); - } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) - & LAST_SEG_DONE) != 0) { - - /* - * Transfer completed to the end of SG list - * and has flushed to the host. - */ - ahd_outb(ahd, SCB_SGPTR, - ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); - break; - } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { - break; } - ahd_delay(200); + } else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) { + + /* + * Transfer completed to the end of SG list + * and has flushed to the host. + */ + ahd_outb(ahd, SCB_SGPTR, + ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); + goto clrchn; + } else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) { +clrchn: + /* + * Clear any handler for this FIFO, decrement + * the FIFO use count for the SCB, and release + * the FIFO. + */ + ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); + ahd_outb(ahd, SCB_FIFO_USE_COUNT, + ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); + ahd_outb(ahd, DFFSXFRCTL, CLRCHN); } - /* - * Clear any handler for this FIFO, decrement - * the FIFO use count for the SCB, and release - * the FIFO. - */ - ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); - ahd_outb(ahd, SCB_FIFO_USE_COUNT, - ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1); - ahd_outb(ahd, DFFSXFRCTL, CLRCHN); } +/* + * Look for entries in the QoutFIFO that have completed. + * The valid_tag completion field indicates the validity + * of the entry - the valid value toggles each time through + * the queue. We use the sg_status field in the completion + * entry to avoid referencing the hscb if the completion + * occurred with no errors and no residual. sg_status is + * a copy of the first byte (little endian) of the sgptr + * hscb field. 
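
To make the toggling valid_tag scheme concrete before the function itself, here is a minimal consumer for a ring of this shape. The names (entry_t, drain_ring, expected) are illustrative, not the driver's own; only AHD_QOUT_SIZE and QOUTFIFO_ENTRY_VALID come from this change.

        typedef struct {
                uint16_t tag;           /* SCB index */
                uint8_t  sg_status;     /* first byte of the hscb sgptr */
                uint8_t  valid_tag;     /* producer's pass "color" */
        } entry_t;

        /*
         * Drain entries whose color matches our expectation.  The
         * expected color flips each time the index wraps, so entries
         * left over from the previous pass never appear valid.
         */
        static u_int
        drain_ring(entry_t *ring, u_int next, uint8_t *expected)
        {
                while (ring[next].valid_tag == *expected) {
                        /* ... hand ring[next].tag to completion code ... */
                        next = (next + 1) & (AHD_QOUT_SIZE - 1);
                        if (next == 0)
                                *expected ^= QOUTFIFO_ENTRY_VALID;
                }
                return (next);
        }
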
+ */
void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
+       struct ahd_completion *completion;
        struct scb *scb;
        u_int scb_index;

        if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
                panic("ahd_run_qoutfifo recursion");
        ahd->flags |= AHD_RUNNING_QOUTFIFO;
        ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
-       while ((ahd->qoutfifo[ahd->qoutfifonext]
-             & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag) {
+       for (;;) {
+               completion = &ahd->qoutfifo[ahd->qoutfifonext];

-               scb_index = ahd_le16toh(ahd->qoutfifo[ahd->qoutfifonext]
-                                     & ~QOUTFIFO_ENTRY_VALID_LE);
+               if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
+                       break;
+
+               scb_index = aic_le16toh(completion->tag);
                scb = ahd_lookup_scb(ahd, scb_index);
                if (scb == NULL) {
                        printf("%s: WARNING no command for scb %d "
                               "(cmdcmplt)\nQOUTPOS = %d\n",
                               ahd_name(ahd), scb_index,
                               ahd->qoutfifonext);
                        ahd_dump_card_state(ahd);
-               } else
-                       ahd_complete_scb(ahd, scb);
+               } else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
+                       ahd_handle_scb_status(ahd, scb);
+               } else {
+                       ahd_done(ahd, scb);
+               }

                ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
                if (ahd->qoutfifonext == 0)
-                       ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID_LE;
+                       ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
        }
        ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}

/************************* Interrupt Handling *********************************/
void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
        /*
         * Some catastrophic hardware error has occurred.
         * Print it for the user and disable the controller.
         */
        int i;
        int error;

        error = ahd_inb(ahd, ERROR);
        for (i = 0; i < num_errors; i++) {
                if ((error & ahd_hard_errors[i].errno) != 0)
                        printf("%s: hwerrint, %s\n",
                               ahd_name(ahd), ahd_hard_errors[i].errmesg);
        }

        ahd_dump_card_state(ahd);
        panic("BRKADRINT");

        /* Tell everyone that this HBA is no longer available */
        ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
                       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
                       CAM_NO_HBA);

        /* Tell the system that this controller has gone away. */
        ahd_free(ahd);
}

void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
        u_int seqintcode;

        /*
         * Save the sequencer interrupt code and clear the SEQINT
         * bit. We will unpause the sequencer, if appropriate,
         * after servicing the request.
         */
        seqintcode = ahd_inb(ahd, SEQINTCODE);
        ahd_outb(ahd, CLRINT, CLRSEQINT);
        if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
                /*
                 * Unpause the sequencer and let it clear
                 * SEQINT by writing NO_SEQINT to it. This
                 * will cause the sequencer to be paused again,
                 * which is the expected state of this routine.
*/ ahd_unpause(ahd); while (!ahd_is_paused(ahd)) ; ahd_outb(ahd, CLRINT, CLRSEQINT); } ahd_update_modes(ahd); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Handle Seqint Called for code %d\n", ahd_name(ahd), seqintcode); #endif switch (seqintcode) { - case BAD_SCB_STATUS: - { - struct scb *scb; - u_int scbid; - int cmds_pending; - - scbid = ahd_get_scbptr(ahd); - scb = ahd_lookup_scb(ahd, scbid); - if (scb != NULL) { - ahd_complete_scb(ahd, scb); - } else { - printf("%s: WARNING no command for scb %d " - "(bad status)\n", ahd_name(ahd), scbid); - ahd_dump_card_state(ahd); - } - cmds_pending = ahd_inw(ahd, CMDS_PENDING); - if (cmds_pending > 0) - ahd_outw(ahd, CMDS_PENDING, cmds_pending - 1); - break; - } case ENTERING_NONPACK: { struct scb *scb; u_int scbid; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { /* * Somehow need to know if this * is from a selection or reselection. * From that, we can determine target * ID so we at least have an I_T nexus. */ } else { ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); ahd_outb(ahd, SAVED_LUN, scb->hscb->lun); ahd_outb(ahd, SEQ_FLAGS, 0x0); } if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * Phase change after read stream with * CRC error with P0 asserted on last * packet. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printf("%s: Assuming LQIPHASE_NLQ with " "P0 assertion\n", ahd_name(ahd)); #endif } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) printf("%s: Entering NONPACK\n", ahd_name(ahd)); #endif break; } case INVALID_SEQINT: printf("%s: Invalid Sequencer interrupt occurred.\n", ahd_name(ahd)); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; case STATUS_OVERRUN: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) ahd_print_path(ahd, scb); else printf("%s: ", ahd_name(ahd)); printf("SCB %d Packetized Status Overrun", scbid); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; } case CFG4ISTAT_INTR: { struct scb *scb; u_int scbid; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { ahd_dump_card_state(ahd); printf("CFG4ISTAT: Free SCB %d referenced", scbid); panic("For safety"); } ahd_outq(ahd, HADDR, scb->sense_busaddr); ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE); ahd_outb(ahd, HCNT + 2, 0); ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG); ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN); break; } case ILLEGAL_PHASE: { u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; printf("%s: ILLEGAL_PHASE 0x%x\n", ahd_name(ahd), bus_phase); switch (bus_phase) { case P_DATAOUT: case P_DATAIN: case P_DATAOUT_DT: case P_DATAIN_DT: case P_MESGOUT: case P_STATUS: case P_MESGIN: ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); printf("%s: Issued Bus Reset.\n", ahd_name(ahd)); break; case P_COMMAND: { struct ahd_devinfo devinfo; struct scb *scb; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; u_int scbid; /* * If a target takes us into the command phase * assume that it has been externally reset and * has thus lost our previous packetized negotiation * agreement. 
Since we have not sent an identify * message and may not have fully qualified the * connection, we change our command to TUR, assert * ATN and ABORT the task when we go to message in * phase. The OSM will see the REQUEUE_REQUEST * status and retry the command. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("Invalid phase with no valid SCB. " "Resetting bus.\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; } ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE, /*paused*/TRUE); ahd_outb(ahd, SCB_CDB_STORE, 0); ahd_outb(ahd, SCB_CDB_STORE+1, 0); ahd_outb(ahd, SCB_CDB_STORE+2, 0); ahd_outb(ahd, SCB_CDB_STORE+3, 0); ahd_outb(ahd, SCB_CDB_STORE+4, 0); ahd_outb(ahd, SCB_CDB_STORE+5, 0); ahd_outb(ahd, SCB_CDB_LEN, 6); scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE); scb->hscb->control |= MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, scb->hscb->control); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); /* * The lun is 0, regardless of the SCB's lun * as we have not sent an identify message. */ ahd_outb(ahd, SAVED_LUN, 0); ahd_outb(ahd, SEQ_FLAGS, 0); ahd_assert_atn(ahd); - scb->flags &= ~(SCB_PACKETIZED); + scb->flags &= ~SCB_PACKETIZED; scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT; ahd_freeze_devq(ahd, scb); - ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); - ahd_freeze_scb(scb); + aic_set_transaction_status(scb, CAM_REQUEUE_REQ); + aic_freeze_scb(scb); /* * Allow the sequencer to continue with * non-pack processing. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT1, 0); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); printf("Unexpected command phase from " "packetized target\n"); } #endif break; } } break; } case CFG4OVERRUN: { struct scb *scb; u_int scb_index; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd), ahd_inb(ahd, MODE_PTR)); } #endif scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { /* * Attempt to transfer to an SCB that is * not outstanding. */ ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; /* * Clear status received flag to prevent any * attempt to complete this bogus SCB. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~STATUS_RCVD); } break; } case DUMP_CARD_STATE: { ahd_dump_card_state(ahd); break; } case PDATA_REINIT: { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: PDATA_REINIT - DFCNTRL = 0x%x " "SG_CACHE_SHADOW = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, DFCNTRL), ahd_inb(ahd, SG_CACHE_SHADOW)); } #endif ahd_reinitialize_dataptrs(ahd); break; } case HOST_MSG_LOOP: { struct ahd_devinfo devinfo; /* * The sequencer has encountered a message phase * that requires host assistance for completion. 
* While handling the message phase(s), we will be * notified by the sequencer after each byte is * transfered so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. */ ahd_fetch_devinfo(ahd, &devinfo); if (ahd->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printf("ahd_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. */ ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_restart(ahd); return; } scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (bus_phase == P_MESGOUT) ahd_setup_initiator_msgout(ahd, &devinfo, scb); else { ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; } } #if AHD_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd->msgin_index = 0; } else ahd_setup_target_msgin(ahd, &devinfo, scb); } #endif } ahd_handle_message_phase(ahd); break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); printf("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "REG0 == 0x%x ACCUM = 0x%x\n", ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN), ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd), ahd_find_busy_tcl(ahd, BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN))), ahd_inw(ahd, SINDEX)); printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_CONTROL == 0x%x\n", ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inb_scbram(ahd, SCB_LUN), ahd_inb_scbram(ahd, SCB_CONTROL)); printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n", ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI)); printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0)); printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0)); ahd_dump_card_state(ahd); ahd->msgout_buf[0] = MSG_BUS_DEV_RESET; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_assert_atn(ahd); break; } case PROTO_VIOLATION: { ahd_handle_proto_violation(ahd); break; } case IGN_WIDE_RES: { struct ahd_devinfo devinfo; ahd_fetch_devinfo(ahd, &devinfo); ahd_handle_ign_wide_residue(ahd, &devinfo); break; } case BAD_PHASE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printf("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahd_inb(ahd, LASTPHASE); printf("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahd_name(ahd), 'A', SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)), lastphase, ahd_inb(ahd, SCSISIGI)); ahd_restart(ahd); return; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. 
* Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ struct scb *scb; u_int scbindex; #ifdef AHD_DEBUG u_int lastphase; #endif scbindex = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbindex); #ifdef AHD_DEBUG lastphase = ahd_inb(ahd, LASTPHASE); if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { ahd_print_path(ahd, scb); printf("data overrun detected %s. Tag == 0x%x.\n", ahd_lookup_phase_entry(lastphase)->phasemsg, SCB_GET_TAG(scb)); ahd_print_path(ahd, scb); printf("%s seen Data Phase. Length = %ld. " "NumSGs = %d.\n", ahd_inb(ahd, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't", - ahd_get_transfer_length(scb), scb->sg_count); + aic_get_transfer_length(scb), scb->sg_count); ahd_dump_sglist(scb); } #endif /* * Set this and it will take effect when the * target does a command complete. */ ahd_freeze_devq(ahd, scb); - ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); - ahd_freeze_scb(scb); + aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); + aic_freeze_scb(scb); break; } case MKMSG_FAILED: { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; ahd_fetch_devinfo(ahd, &devinfo); printf("%s:%c:%d:%d: Attempt to issue message failed\n", ahd_name(ahd), devinfo.channel, devinfo.target, devinfo.lun); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); break; } case TASKMGMT_FUNC_COMPLETE: { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { u_int lun; u_int tag; cam_status error; ahd_print_path(ahd, scb); printf("Task Management Func 0x%x Complete\n", scb->hscb->task_management); lun = CAM_LUN_WILDCARD; tag = SCB_LIST_NULL; switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; error = CAM_REQ_ABORTED; ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', lun, tag, ROLE_INITIATOR, error); break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; ahd_scb_devinfo(ahd, &devinfo, scb); error = CAM_BDR_SENT; ahd_handle_devreset(ahd, &devinfo, lun, CAM_BDR_SENT, lun != CAM_LUN_WILDCARD ? "Lun Reset" : "Target Reset", /*verbose_level*/0); break; } default: panic("Unexpected TaskMgmt Func\n"); break; } } break; } case TASKMGMT_CMD_CMPLT_OKAY: { u_int scbid; struct scb *scb; /* * An ABORT TASK TMF failed to be delivered before * the targeted command completed normally. */ scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL) { /* * Remove the second instance of this SCB from * the QINFIFO if it is still there. */ ahd_print_path(ahd, scb); printf("SCB completes before TMF\n"); /* * Handle losing the race. Wait until any * current selection completes. We will then * set the TMF back to zero in this SCB so that * the sequencer doesn't bother to issue another * sequencer interrupt for its completion. 
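
The wait that follows this comment polls three registers; spelled out as a predicate (an illustrative restatement of the loop condition, not new driver code):

        /*
         * A selection is still "in flight" while the sequencer has
         * selection enabled (ENSELO) but the hardware has reported
         * neither a completed selection (SELDO) nor a selection
         * timeout (SELTO).
         */
        static int
        selection_in_flight(struct ahd_softc *ahd)
        {
                return ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
                     && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
                     && (ahd_inb(ahd, SSTAT1) & SELTO) == 0);
        }
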
*/ while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 && (ahd_inb(ahd, SSTAT0) & SELDO) == 0 && (ahd_inb(ahd, SSTAT1) & SELTO) == 0) ; ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0); ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); } break; } case TRACEPOINT0: case TRACEPOINT1: case TRACEPOINT2: case TRACEPOINT3: printf("%s: Tracepoint %d\n", ahd_name(ahd), seqintcode - TRACEPOINT0); break; case NO_SEQINT: break; case SAW_HWERR: ahd_handle_hwerrint(ahd); break; default: printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd), seqintcode); break; } /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. */ ahd_unpause(ahd); } void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) { struct scb *scb; u_int status0; u_int status3; u_int status; u_int lqistat1; u_int lqostat0; u_int scbid; u_int busfreetime; ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR); status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO); status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); lqistat1 = ahd_inb(ahd, LQISTAT1); lqostat0 = ahd_inb(ahd, LQOSTAT0); busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; if ((status0 & (SELDI|SELDO)) != 0) { u_int simode0; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); if ((status0 & IOERR) != 0) { u_int now_lvd; now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40; printf("%s: Transceiver State Has Changed to %s mode\n", ahd_name(ahd), now_lvd ? "LVD" : "SE"); ahd_outb(ahd, CLRSINT0, CLRIOERR); /* * A change in I/O mode is equivalent to a bus reset. */ ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); ahd_pause(ahd); ahd_setup_iocell_workaround(ahd); ahd_unpause(ahd); } else if ((status0 & OVERRUN) != 0) { printf("%s: SCSI offset overrun detected. Resetting bus.\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } else if ((status & SCSIRSTI) != 0) { printf("%s: Someone reset channel A\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { ahd_handle_transmission_error(ahd); } else if (lqostat0 != 0) { printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0); ahd_outb(ahd, CLRLQOINT0, lqostat0); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT1, 0); } } else if ((status & SELTO) != 0) { u_int scbid; /* Stop the selection */ ahd_outb(ahd, SCSISEQ0, 0); /* No more pending messages */ ahd_clear_msg_state(ahd); /* Clear interrupt state */ ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy * LED does. SELINGO is only cleared by a sucessfull * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). 
*/ ahd_outb(ahd, CLRSINT0, CLRSELINGO); scbid = ahd_inw(ahd, WAITING_TID_HEAD); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: ahd_intr - referenced scb not " "valid during SELTO scb(0x%x)\n", ahd_name(ahd), scbid); ahd_dump_card_state(ahd); } else { struct ahd_devinfo devinfo; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SELTO) != 0) { ahd_print_path(ahd, scb); printf("Saw Selection Timeout for SCB 0x%x\n", scbid); } #endif /* * Force a renegotiation with this target just in * case the cable was pulled and will later be * re-attached. The target may forget its negotiation * settings with us should it attempt to reselect * during the interruption. The target will not issue * a unit attention in this case, so we must always * renegotiate. */ ahd_scb_devinfo(ahd, &devinfo, scb); ahd_force_renegotiation(ahd, &devinfo); - ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT); + aic_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahd_freeze_devq(ahd, scb); } ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if ((status0 & (SELDI|SELDO)) != 0) { ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if (status3 != 0) { printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", ahd_name(ahd), status3); ahd_outb(ahd, CLRSINT3, status3); } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { ahd_handle_lqiphase_error(ahd, lqistat1); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * This status can be delayed during some * streaming operations. The SCSIPHASE * handler has already dealt with this case * so just clear the error. */ ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); } else if ((status & BUSFREE) != 0) { u_int lqostat1; int restart; int clear_fifo; int packetized; u_int mode; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahd_outb(ahd, SCSISEQ0, 0); /* * Determine what we were up to at the time of * the busfree. */ mode = AHD_MODE_SCSI; busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; lqostat1 = ahd_inb(ahd, LQOSTAT1); switch (busfreetime) { case BUSFREE_DFF0: case BUSFREE_DFF1: { u_int scbid; struct scb *scb; mode = busfreetime == BUSFREE_DFF0 ? AHD_MODE_DFF0 : AHD_MODE_DFF1; ahd_set_modes(ahd, mode, mode); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: Invalid SCB %d in DFF%d " "during unexpected busfree\n", ahd_name(ahd), scbid, mode); packetized = 0; } else packetized = (scb->flags & SCB_PACKETIZED) != 0; clear_fifo = 1; break; } case BUSFREE_LQO: clear_fifo = 0; packetized = 1; break; default: clear_fifo = 0; packetized = (lqostat1 & LQOBUSFREE) != 0; if (!packetized - && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) + && ahd_inb(ahd, LASTPHASE) == P_BUSFREE + && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 + || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) + /* + * Assume packetized if we are not + * on the bus in a non-packetized + * capacity and any pending selection + * was a packetized selection. + */ packetized = 1; break; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("Saw Busfree. Busfreetime = 0x%x.\n", busfreetime); #endif /* * Busfrees that occur in non-packetized phases are * handled by the nonpkt_busfree handler. 
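
The busfreetime switch above boils down to the following classification (an illustrative sketch, not code from the change; note that the revised default case additionally treats a busfree seen while a packetized selection may still be pending as packetized):

        struct busfree_class { int clear_fifo; int packetized; };

        static struct busfree_class
        classify_busfree(u_int busfreetime, u_int lqostat1, int scb_packetized)
        {
                struct busfree_class c = { 0, 0 };

                switch (busfreetime) {
                case BUSFREE_DFF0:
                case BUSFREE_DFF1:
                        /* A data FIFO owned the bus; trust its SCB. */
                        c.clear_fifo = 1;
                        c.packetized = scb_packetized;
                        break;
                case BUSFREE_LQO:
                        /* The LQO manager was mid-packet. */
                        c.packetized = 1;
                        break;
                default:
                        c.packetized = (lqostat1 & LQOBUSFREE) != 0;
                        break;
                }
                return (c);
        }
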
*/ if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { restart = ahd_handle_pkt_busfree(ahd, busfreetime); } else { packetized = 0; restart = ahd_handle_nonpkt_busfree(ahd); } /* * Clear the busfree interrupt status. The setting of * the interrupt is a pulse, so in a perfect world, we * would not need to muck with the ENBUSFREE logic. This * would ensure that if the bus moves on to another * connection, busfree protection is still in force. If * BUSFREEREV is broken, however, we must manually clear * the ENBUSFREE if the busfree occurred during a non-pack * connection so that we don't get false positives during * future, packetized, connections. */ ahd_outb(ahd, CLRSINT1, CLRBUSFREE); if (packetized == 0 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); if (clear_fifo) ahd_clear_fifo(ahd, mode); ahd_clear_msg_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); if (restart) { ahd_restart(ahd); } else { ahd_unpause(ahd); } } else { printf("%s: Missing case in ahd_handle_scsiint. status = %x\n", ahd_name(ahd), status); ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_unpause(ahd); } } static void ahd_handle_transmission_error(struct ahd_softc *ahd) { struct scb *scb; u_int scbid; u_int lqistat1; u_int lqistat2; u_int msg_out; u_int curphase; u_int lastphase; u_int perrdiag; u_int cur_col; int silent; scb = NULL; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); lqistat2 = ahd_inb(ahd, LQISTAT2); if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { u_int lqistate; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); lqistate = ahd_inb(ahd, LQISTATE); if ((lqistate >= 0x1E && lqistate <= 0x24) || (lqistate == 0x29)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printf("%s: NLQCRC found via LQISTATE\n", ahd_name(ahd)); } #endif lqistat1 |= LQICRCI_NLQ; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } ahd_outb(ahd, CLRLQIINT1, lqistat1); lastphase = ahd_inb(ahd, LASTPHASE); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; perrdiag = ahd_inb(ahd, PERRDIAG); msg_out = MSG_INITIATOR_DET_ERR; ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); /* * Try to find the SCB associated with this error. */ silent = FALSE; if (lqistat1 == 0 || (lqistat1 & LQICRCI_NLQ) != 0) { if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) ahd_set_active_fifo(ahd); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && SCB_IS_SILENT(scb)) silent = TRUE; } cur_col = 0; if (silent == FALSE) { printf("%s: Transmission error detected\n", ahd_name(ahd)); ahd_lqistat1_print(lqistat1, &cur_col, 50); ahd_lastphase_print(lastphase, &cur_col, 50); ahd_scsisigi_print(curphase, &cur_col, 50); ahd_perrdiag_print(perrdiag, &cur_col, 50); printf("\n"); ahd_dump_card_state(ahd); } if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { if (silent == FALSE) { printf("%s: Gross protocol error during incoming " "packet. lqistat1 == 0x%x. Resetting bus.\n", ahd_name(ahd), lqistat1); } ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((lqistat1 & LQICRCI_LQ) != 0) { /* * A CRC error has been detected on an incoming LQ. * The bus is currently hung on the last ACK. * Hit LQIRETRY to release the last ack, and * wait for the sequencer to determine that ATNO * is asserted while in message out to take us * to our host message loop. No NONPACKREQ or * LQIPHASE type errors will occur in this * scenario. 
After this first LQIRETRY, the LQI
                 * manager will be in ISELO where it will
                 * happily sit until another packet phase begins.
                 * Unexpected bus free detection is enabled
                 * through any phases that occur after we release
                 * this last ack until the LQI manager sees a
                 * packet phase.  This implies we may have to
                 * ignore a perfectly valid "unexpected busfree"
                 * after our "initiator detected error" message is
                 * sent.  A busfree is the expected response after
                 * we tell the target that its L_Q was corrupted.
                 * (SPI4R09 10.7.3.3.3)
                 */
                ahd_outb(ahd, LQCTL2, LQIRETRY);
                printf("LQIRetry for LQICRCI_LQ to release ACK\n");
        } else if ((lqistat1 & LQICRCI_NLQ) != 0) {
                /*
                 * We detected a CRC error in a NON-LQ packet.
                 * The hardware has varying behavior in this situation
                 * depending on whether this packet was part of a
                 * stream or not.
                 *
                 * PKT by PKT mode:
                 * The hardware has already acked the complete packet.
                 * If the target honors our outstanding ATN condition,
                 * we should be (or soon will be) in MSGOUT phase.
                 * This will trigger the LQIPHASE_LQ status bit as the
                 * hardware was expecting another LQ.  Unexpected
                 * busfree detection is enabled.  Once LQIPHASE_LQ is
                 * true (first entry into host message loop is much
                 * the same), we must clear LQIPHASE_LQ and hit
                 * LQIRETRY so the hardware is ready to handle
                 * a future LQ.  NONPACKREQ will not be asserted again
                 * once we hit LQIRETRY until another packet is
                 * processed.  The target may either go busfree
                 * or start another packet in response to our message.
                 *
                 * Read Streaming P0 asserted:
                 * If we raise ATN and the target completes the entire
                 * stream (P0 asserted during the last packet), the
                 * hardware will ack all data and return to the ISTART
                 * state.  When the target responds to our ATN condition,
                 * LQIPHASE_LQ will be asserted.  We should respond to
                 * this with an LQIRETRY to prepare for any future
                 * packets.  NONPACKREQ will not be asserted again
                 * once we hit LQIRETRY until another packet is
                 * processed.  The target may either go busfree or
                 * start another packet in response to our message.
                 * Busfree detection is enabled.
                 *
                 * Read Streaming P0 not asserted:
                 * If we raise ATN and the target transitions to
                 * MSGOUT in or after a packet where P0 is not
                 * asserted, the hardware will assert LQIPHASE_NLQ.
                 * We should respond to the LQIPHASE_NLQ with an
                 * LQIRETRY.  Should the target stay in a non-pkt
                 * phase after we send our message, the hardware
                 * will assert LQIPHASE_LQ.  Recovery is then just as
                 * listed above for the read streaming with P0 asserted.
                 * Busfree detection is enabled.
                 */
                if (silent == FALSE)
                        printf("LQICRC_NLQ\n");
                if (scb == NULL) {
                        printf("%s: No SCB valid for LQICRC_NLQ.  "
                               "Resetting bus\n", ahd_name(ahd));
                        ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
                        return;
                }
        } else if ((lqistat1 & LQIBADLQI) != 0) {
                printf("Need to handle BADLQI!\n");
                ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
                return;
        } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
                if ((curphase & ~P_DATAIN_DT) != 0) {
                        /* Ack the byte.  So we can continue. */
                        if (silent == FALSE)
                                printf("Acking %s to clear perror\n",
                                    ahd_lookup_phase_entry(curphase)->phasemsg);
                        ahd_inb(ahd, SCSIDAT);
                }

                if (curphase == P_MESGIN)
                        msg_out = MSG_PARITY_ERROR;
        }

        /*
         * We've set the hardware to assert ATN if we
         * get a parity error on "in" phases, so all we
         * need to do is stuff the message buffer with
         * the appropriate message.  "In" phases have set
         * mesg_out to something other than MSG_NOP.
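
For a plain parity error the choice above reduces to a single test (an illustrative one-liner, equivalent to the assignments in this routine):

        msg_out = (curphase == P_MESGIN) ? MSG_PARITY_ERROR
                                         : MSG_INITIATOR_DET_ERR;
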
         */
        ahd->send_msg_perror = msg_out;
        if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
                scb->flags |= SCB_TRANSMISSION_ERROR;
        ahd_outb(ahd, MSG_OUT, HOST_MSG);
        ahd_outb(ahd, CLRINT, CLRSCSIINT);
        ahd_unpause(ahd);
}

static void
ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
{
        /*
         * Clear the sources of the interrupts.
         */
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
        ahd_outb(ahd, CLRLQIINT1, lqistat1);

        /*
         * If the "illegal" phase changes were in response
         * to our ATN to flag a CRC error, AND we ended up
         * on packet boundaries, clear the error, restart the
         * LQI manager as appropriate, and go on our merry
         * way toward sending the message.  Otherwise, reset
         * the bus to clear the error.
         */
        ahd_set_active_fifo(ahd);
        if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
         && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
                if ((lqistat1 & LQIPHASE_LQ) != 0) {
                        printf("LQIRETRY for LQIPHASE_LQ\n");
                        ahd_outb(ahd, LQCTL2, LQIRETRY);
                } else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
                        printf("LQIRETRY for LQIPHASE_NLQ\n");
                        ahd_outb(ahd, LQCTL2, LQIRETRY);
                } else
                        panic("ahd_handle_lqiphase_error: No phase errors\n");
                ahd_dump_card_state(ahd);
                ahd_outb(ahd, CLRINT, CLRSCSIINT);
                ahd_unpause(ahd);
        } else {
                printf("Resetting Channel for LQI Phase error\n");
                ahd_dump_card_state(ahd);
                ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
        }
}

/*
 * Packetized unexpected or expected busfree.
 * Entered in mode based on busfreetime.
 */
static int
ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
{
        u_int lqostat1;

        AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
                         ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
        lqostat1 = ahd_inb(ahd, LQOSTAT1);
        if ((lqostat1 & LQOBUSFREE) != 0) {
                struct scb *scb;
                u_int scbid;
                u_int saved_scbptr;
                u_int waiting_h;
                u_int waiting_t;
                u_int next;

                if ((busfreetime & BUSFREE_LQO) == 0)
                        printf("%s: Warning, BUSFREE time is 0x%x.  "
                               "Expected BUSFREE_LQO.\n",
                               ahd_name(ahd), busfreetime);
                /*
                 * The LQO manager detected an unexpected busfree
                 * either:
                 *
                 * 1) During an outgoing LQ.
                 * 2) After an outgoing LQ but before the first
                 *    REQ of the command packet.
                 * 3) During an outgoing command packet.
                 *
                 * In all cases, CURRSCB is pointing to the
                 * SCB that encountered the failure.  Clean
                 * up the queue, clear SELDO and LQOBUSFREE,
                 * and allow the sequencer to restart the select
                 * out at its leisure.
                 */
                ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
                scbid = ahd_inw(ahd, CURRSCB);
                scb = ahd_lookup_scb(ahd, scbid);
                if (scb == NULL)
                        panic("SCB not valid during LQOBUSFREE");
                /*
                 * Clear the status.
                 */
                ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
                if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
                        ahd_outb(ahd, CLRLQOINT1, 0);
                ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
                ahd_flush_device_writes(ahd);
                ahd_outb(ahd, CLRSINT0, CLRSELDO);

                /*
                 * Return the LQO manager to its idle loop.  It will
                 * not do this automatically if the busfree occurs
                 * after the first REQ of either the LQ or command
                 * packet or between the LQ and command packet.
                 */
                ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);

                /*
                 * Update the waiting for selection queue so
                 * we restart on the correct SCB.
*/ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); saved_scbptr = ahd_get_scbptr(ahd); if (waiting_h != scbid) { ahd_outw(ahd, WAITING_TID_HEAD, scbid); waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); if (waiting_t == waiting_h) { ahd_outw(ahd, WAITING_TID_TAIL, scbid); next = SCB_LIST_NULL; } else { ahd_set_scbptr(ahd, waiting_h); next = ahd_inw_scbram(ahd, SCB_NEXT2); } ahd_set_scbptr(ahd, scbid); ahd_outw(ahd, SCB_NEXT2, next); } ahd_set_scbptr(ahd, saved_scbptr); if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { if (SCB_IS_SILENT(scb) == FALSE) { ahd_print_path(ahd, scb); printf("Probable outgoing LQ CRC error. " "Retrying command\n"); } scb->crc_retry_count++; } else { - ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); - ahd_freeze_scb(scb); + aic_set_transaction_status(scb, CAM_UNCOR_PARITY); + aic_freeze_scb(scb); ahd_freeze_devq(ahd, scb); } /* Return unpausing the sequencer. */ return (0); } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { /* * Ignore what are really parity errors that * occur on the last REQ of a free running * clock prior to going busfree. Some drives * do not properly active negate just before * going busfree resulting in a parity glitch. */ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) printf("%s: Parity on last REQ detected " "during busfree phase.\n", ahd_name(ahd)); #endif /* Return unpausing the sequencer. */ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printf("Unexpected PKT busfree condition\n"); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printf("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. */ return (1); } sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; ahd_print_path(ahd, scb); printf("SCB %d - Abort%s Completed.\n", SCB_GET_TAG(scb), sent_msg == MSG_ABORT_TAG ? 
"" : " Tag"); if (sent_msg == MSG_ABORT_TAG) tag = SCB_GET_TAG(scb); if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) { /* * This abort is in response to an * unexpected switch to command phase * for a packetized connection. Since * the identify message was never sent, * "saved lun" is 0. We really want to * abort only the SCB that encountered * this error, which could have a different * lun. The SCB will be retried so the OS * will see the UA after renegotiating to * packetized. */ tag = SCB_GET_TAG(scb); saved_lun = scb->hscb->lun; } found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printf("found == 0x%x\n", found); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahd_match_scb(ahd, scb, target, 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) - ahd_set_transaction_status(scb, CAM_REQ_CMP); + aic_set_transaction_status(scb, CAM_REQ_CMP); #endif ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE) && ppr_busfree == 0) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; /* * PPR Rejected. Try non-ppr negotiation * and retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR negotiation rejected busfree.\n"); #endif tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-narrow and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("WDTR negotiation rejected busfree.\n"); #endif ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-async and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("SDTR negotiation rejected busfree.\n"); #endif ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_qinfifo_requeue_tail(ahd, scb); printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, MSG_INITIATOR_DET_ERR, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Expected IDE Busfree\n"); #endif printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) && ahd_sent_msg(ahd, AHDMSG_1B, MSG_MESSAGE_REJECT, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Expected QAS Reject Busfree\n"); #endif printerror = 0; } } /* * The busfree required flag is honored at the end of * the message phases. We check it last in case we * had to send some other message that caused a busfree. 
*/ if (printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { ahd_freeze_devq(ahd, scb); - ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); - ahd_freeze_scb(scb); + aic_set_transaction_status(scb, CAM_REQUEUE_REQ); + aic_freeze_scb(scb); if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQ_ABORTED); } else { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR Negotiation Busfree.\n"); #endif ahd_done(ahd, scb); } printerror = 0; } if (printerror != 0) { int aborted; aborted = 0; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = SCB_GET_TAG(scb); else tag = SCB_LIST_NULL; ahd_print_path(ahd, scb); aborted = ahd_abort_scbs(ahd, target, 'A', SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printf("%s: ", ahd_name(ahd)); } if (lastphase != P_BUSFREE) ahd_force_renegotiation(ahd, &devinfo); printf("Unexpected busfree %s, %d SCBs aborted, " "PRGMCNT == 0x%x\n", ahd_lookup_phase_entry(lastphase)->phasemsg, aborted, - ahd_inb(ahd, PRGMCNT) - | (ahd_inb(ahd, PRGMCNT+1) << 8)); + ahd_inw(ahd, PRGMCNT)); ahd_dump_card_state(ahd); } /* Always restart the sequencer. */ return (1); } static void ahd_handle_proto_violation(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahd_fetch_devinfo(ahd, &devinfo); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); seq_flags = ahd_inb(ahd, SEQ_FLAGS); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; lastphase = ahd_inb(ahd, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahd_print_devinfo(ahd, &devinfo); printf("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahd_print_devinfo(ahd, &devinfo); printf("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { - ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); + aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahd_print_path(ahd, scb); printf("No or incomplete CDB sent to device.\n"); } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahd_print_path(ahd, scb); printf("Completed command without status.\n"); } else { ahd_print_path(ahd, scb); printf("Unknown protocol violation.\n"); ahd_dump_card_state(ahd); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahd_reset_channel(ahd, 'A', TRUE); printf("%s: Issued Channel %c Bus Reset. 
" "%d SCBs aborted\n", ahd_name(ahd), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahd_print_path(ahd, scb); scb->flags |= SCB_ABORT; } printf("Protocol violation %s. Attempting to abort.\n", ahd_lookup_phase_entry(curphase)->phasemsg); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printf("Forcing renegotiation\n"); } #endif targ_info = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahd_update_neg_request(ahd, devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } #define AHD_MAX_STEPS 2000 void ahd_clear_critical_section(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int stepping; int steps; int first_instr; u_int simode0; u_int simode1; u_int simode3; u_int lqimode0; u_int lqimode1; u_int lqomode0; u_int lqomode1; if (ahd->num_critical_sections == 0) return; stepping = FALSE; steps = 0; first_instr = 0; simode0 = 0; simode1 = 0; simode3 = 0; lqimode0 = 0; lqimode1 = 0; lqomode0 = 0; lqomode1 = 0; saved_modes = ahd_save_modes(ahd); for (;;) { struct cs *cs; u_int seqaddr; u_int i; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); - seqaddr = ahd_inb(ahd, CURADDR) - | (ahd_inb(ahd, CURADDR+1) << 8); + seqaddr = ahd_inw(ahd, CURADDR); cs = ahd->critical_sections; for (i = 0; i < ahd->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahd->num_critical_sections) break; if (steps > AHD_MAX_STEPS) { printf("%s: Infinite loop in critical section\n" "%s: First Instruction 0x%x now 0x%x\n", ahd_name(ahd), ahd_name(ahd), first_instr, seqaddr); ahd_dump_card_state(ahd); panic("critical section loop"); } steps++; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Single stepping at 0x%x\n", ahd_name(ahd), seqaddr); #endif if (stepping == FALSE) { first_instr = seqaddr; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); simode3 = ahd_inb(ahd, SIMODE3); lqimode0 = ahd_inb(ahd, LQIMODE0); lqimode1 = ahd_inb(ahd, LQIMODE1); lqomode0 = ahd_inb(ahd, LQOMODE0); lqomode1 = ahd_inb(ahd, LQOMODE1); ahd_outb(ahd, SIMODE0, 0); ahd_outb(ahd, SIMODE3, 0); ahd_outb(ahd, LQIMODE0, 0); ahd_outb(ahd, LQIMODE1, 0); ahd_outb(ahd, LQOMODE0, 0); ahd_outb(ahd, LQOMODE1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); simode1 = ahd_inb(ahd, SIMODE1); /* * We don't clear ENBUSFREE. Unfortunately * we cannot re-enable busfree detection within * the current connection, so we must leave it * on while single stepping. 
                         */
                        ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
                        ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
                        stepping = TRUE;
                }
                ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
                ahd_outb(ahd, CLRINT, CLRSCSIINT);
                ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
                ahd_outb(ahd, HCNTRL, ahd->unpause);
                while (!ahd_is_paused(ahd))
-                       ahd_delay(200);
+                       aic_delay(200);
                ahd_update_modes(ahd);
        }
        if (stepping) {
                ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
                ahd_outb(ahd, SIMODE0, simode0);
                ahd_outb(ahd, SIMODE3, simode3);
                ahd_outb(ahd, LQIMODE0, lqimode0);
                ahd_outb(ahd, LQIMODE1, lqimode1);
                ahd_outb(ahd, LQOMODE0, lqomode0);
                ahd_outb(ahd, LQOMODE1, lqomode1);
                ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
                ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
                ahd_outb(ahd, SIMODE1, simode1);
                /*
                 * SCSIINT seems to glitch occasionally when
                 * the interrupt masks are restored.  Clear SCSIINT
                 * one more time so that only persistent errors
                 * are seen as a real interrupt.
                 */
                ahd_outb(ahd, CLRINT, CLRSCSIINT);
        }
        ahd_restore_modes(ahd, saved_modes);
}

/*
 * Clear any pending interrupt status.
 */
void
ahd_clear_intstat(struct ahd_softc *ahd)
{
        AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
                         ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
        /* Clear any interrupt conditions this may have caused */
        ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
                                 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
        ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
                                 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
                                 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
        ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
                                 |CLRLQOATNPKT|CLRLQOTCRC);
        ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
                                 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
        ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
        ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
                                |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
        ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
                               |CLRIOERR|CLROVERRUN);
        ahd_outb(ahd, CLRINT, CLRSCSIINT);
        if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
                ahd_outb(ahd, CLRLQOINT0, 0);
                ahd_outb(ahd, CLRLQOINT1, 0);
        }
}

/**************************** Debugging Routines ******************************/
#ifdef AHD_DEBUG
uint32_t ahd_debug = AHD_DEBUG_OPTS;
#endif

void
ahd_print_scb(struct scb *scb)
{
        struct hardware_scb *hscb;
        int i;

        hscb = scb->hscb;
        printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
               (void *)scb, hscb->control, hscb->scsiid,
               hscb->lun, hscb->cdb_len);
        printf("Shared Data: ");
        for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
                printf("%#02x", hscb->shared_data.idata.cdb[i]);
        printf(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
-              (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
-              (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
-              ahd_le32toh(hscb->datacnt),
-              ahd_le32toh(hscb->sgptr),
+              (uint32_t)((aic_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
+              (uint32_t)(aic_le64toh(hscb->dataptr) & 0xFFFFFFFF),
+              aic_le32toh(hscb->datacnt),
+              aic_le32toh(hscb->sgptr),
               SCB_GET_TAG(scb));
        ahd_dump_sglist(scb);
}

void
ahd_dump_sglist(struct scb *scb)
{
        int i;

        if (scb->sg_count > 0) {
                if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
                        struct ahd_dma64_seg *sg_list;

                        sg_list = (struct ahd_dma64_seg*)scb->sg_list;
                        for (i = 0; i < scb->sg_count; i++) {
                                uint64_t addr;
                                uint32_t len;

-                               addr = ahd_le64toh(sg_list[i].addr);
-                               len = ahd_le32toh(sg_list[i].len);
+                               addr = aic_le64toh(sg_list[i].addr);
+                               len = aic_le32toh(sg_list[i].len);
                                printf("sg[%d] - Addr 0x%x%x : Length
%d%s\n", i, (uint32_t)((addr >> 32) & 0xFFFFFFFF), (uint32_t)(addr & 0xFFFFFFFF), sg_list[i].len & AHD_SG_LEN_MASK, (sg_list[i].len & AHD_DMA_LAST_SEG) ? " Last" : ""); } } else { struct ahd_dma_seg *sg_list; sg_list = (struct ahd_dma_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) { uint32_t len; - len = ahd_le32toh(sg_list[i].len); + len = aic_le32toh(sg_list[i].len); printf("sg[%d] - Addr 0x%x%x : Length %d%s\n", i, (len & AHD_SG_HIGH_ADDR_MASK) >> 24, - ahd_le32toh(sg_list[i].addr), + aic_le32toh(sg_list[i].addr), len & AHD_SG_LEN_MASK, len & AHD_DMA_LAST_SEG ? " Last" : ""); } } } } /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static struct ahd_tmode_tstate * ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) { struct ahd_tmode_tstate *master_tstate; struct ahd_tmode_tstate *tstate; int i; master_tstate = ahd->enabled_targets[ahd->our_id]; if (ahd->enabled_targets[scsi_id] != NULL && ahd->enabled_targets[scsi_id] != master_tstate) panic("%s: ahd_alloc_tstate - Target already allocated", ahd_name(ahd)); tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); for (i = 0; i < 16; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahd->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHD_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) { struct ahd_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (scsi_id == ahd->our_id && force == FALSE) return; tstate = ahd->enabled_targets[scsi_id]; if (tstate != NULL) free(tstate, M_DEVBUF); ahd->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest period to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahd_transinfo *transinfo; u_int maxsync; if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHD_SYNCRATE_PACED; } else { maxsync = AHD_SYNCRATE_ULTRA; /* Can't do DT related options on an SE bus */ *ppr_options &= MSG_EXT_PPR_QAS_REQ; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. 
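
As the code after this comment implements, the clamp can be restated in a few lines (an illustrative sketch; the name "limit" is not from the driver, and the narrow-bus maxsync adjustment is omitted here):

        const struct ahd_transinfo *limit;

        /* Target role: the remote end initiated, so honor user limits. */
        limit = (role == ROLE_TARGET) ? &tinfo->user : &tinfo->goal;
        *ppr_options &= (limit->ppr_options | MSG_EXT_PPR_PCOMP_EN);
        if (limit->period == 0) {
                *period = 0;                    /* async only */
                *ppr_options = 0;
        } else {
                *period = MAX(*period, limit->period);  /* never faster */
        }
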
*/ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN); if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; } else { *period = MAX(*period, transinfo->period); ahd_find_syncrate(ahd, period, ppr_options, maxsync); } } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. */ void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, u_int *ppr_options, u_int maxsync) { if (*period < maxsync) *period = maxsync; if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0 && *period > AHD_SYNCRATE_MIN_DT) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; if (*period > AHD_SYNCRATE_MIN) *period = 0; /* Honor PPR option conformance rules. */ if (*period > AHD_SYNCRATE_PACED) *ppr_options &= ~MSG_EXT_PPR_RTI; if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0) *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ); if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0) *ppr_options &= MSG_EXT_PPR_QAS_REQ; /* Skip all PACED only entries if IU is not available */ if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0 && *period < AHD_SYNCRATE_DT) *period = AHD_SYNCRATE_DT; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && *period < AHD_SYNCRATE_ULTRA2) *period = AHD_SYNCRATE_ULTRA2; } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ void ahd_validate_offset(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int period, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (period == 0) maxoffset = 0; else if (period <= AHD_SYNCRATE_PACED) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) maxoffset = MAX_OFFSET_PACED_BUG; else maxoffset = MAX_OFFSET_PACED; } else maxoffset = MAX_OFFSET_NON_PACED; *offset = MIN(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = MIN(*offset, tinfo->user.offset); else *offset = MIN(*offset, tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahd->features & AHD_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = MIN(tinfo->user.width, *bus_width); else *bus_width = MIN(tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient oportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_tmode_tstate *tstate, struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHD_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. 
*/ if ((ahd->features & AHD_WIDE) != 0) tinfo->curr.width = AHD_WIDTH_UNKNOWN; tinfo->curr.period = AHD_PERIOD_UNKNOWN; tinfo->curr.offset = AHD_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHD_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; if (period == 0 || offset == 0) { period = 0; offset = 0; } tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHD_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHD_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { update_needed++; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { if (offset != 0) { int options; printf("%s: target %d synchronous with " "period = 0x%x, offset = 0x%x", ahd_name(ahd), devinfo->target, period, offset); options = 0; if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { printf("(RDSTRM"); options++; } if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { printf("%s", options ? "|DT" : "(DT"); options++; } if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { printf("%s", options ? "|IU" : "(IU"); options++; } if ((ppr_options & MSG_EXT_PPR_RTI) != 0) { printf("%s", options ? "|RTI" : "(RTI"); options++; } if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { printf("%s", options ? "|QAS" : "(QAS"); options++; } if (options != 0) printf(")\n"); else printf("\n"); } else { printf("%s: target %d using " "asynchronous transfers%s\n", ahd_name(ahd), devinfo->target, (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0 ? "(QAS)" : ""); } } } /* * Always refresh the neg-table to handle the case of the * sequencer setting the ENATNO bit for a MK_MESSAGE request. * We will always renegotiate in that case if this is a * packetized request. Also manage the busfree expected flag * from this common routine so that we catch changes due to * WDTR or SDTR messages. 
*/ if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); if (ahd->msg_type != MSG_TYPE_NONE) { if ((old_ppr & MSG_EXT_PPR_IU_REQ) != (ppr_options & MSG_EXT_PPR_IU_REQ)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printf("Expecting IU Change busfree\n"); } #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; } if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("PPR with IU_REQ outstanding\n"); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE; } } } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHD_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { update_needed++; tinfo->curr.width = width; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { printf("%s: target %d using %dbit transfers\n", ahd_name(ahd), devinfo->target, 8 * (0x01 << width)); } } if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the current state of tagged queuing for a given target. 
*/ void ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, ahd_queue_alg alg) { ahd_platform_set_tags(ahd, devinfo, alg); ahd_send_async(ahd, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG, &alg); } static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo) { ahd_mode_state saved_modes; u_int period; u_int ppr_opts; u_int con_opts; u_int offset; u_int saved_negoaddr; uint8_t iocell_opts[sizeof(ahd->iocell_opts)]; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_negoaddr = ahd_inb(ahd, NEGOADDR); ahd_outb(ahd, NEGOADDR, devinfo->target); period = tinfo->period; offset = tinfo->offset; memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts)); ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI); con_opts = 0; if (period == 0) period = AHD_SYNCRATE_ASYNC; if (period == AHD_SYNCRATE_160) { if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { /* * When the SPI4 spec was finalized, PACE transfers * was not made a configurable option in the PPR * message. Instead it is assumed to be enabled for * any syncrate faster than 80MHz. Nevertheless, * Harpoon2A4 allows this to be configurable. * * Harpoon2A4 also assumes at most 2 data bytes per * negotiated REQ/ACK offset. Paced transfers take * 4, so we must adjust our offset. */ ppr_opts |= PPROPT_PACE; offset *= 2; /* * Harpoon2A assumed that there would be a * fallback rate between 160MHz and 80Mhz, * so 7 is used as the period factor rather * than 8 for 160MHz. */ period = AHD_SYNCRATE_REVA_160; } if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0) iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; } else { /* * Precomp should be disabled for non-paced transfers. */ iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0 - && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0) { + && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0 + && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) { /* * Slow down our CRC interval to be - * compatible with devices that can't - * handle a CRC at full speed. + * compatible with non-packetized + * U160 devices that can't handle a + * CRC at full speed. */ con_opts |= ENSLOWCRC; } + + if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) { + /* + * On H2A4, revert to a slower slewrate + * on non-paced transfers. + */ + iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= + ~AHD_SLEWRATE_MASK; + } } ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE); ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]); ahd_outb(ahd, NEGPERIOD, period); ahd_outb(ahd, NEGPPROPTS, ppr_opts); ahd_outb(ahd, NEGOFFSET, offset); if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT) con_opts |= WIDEXFER; /* * During packetized transfers, the target will * give us the oportunity to send command packets * without us asserting attention. */ if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0) con_opts |= ENAUTOATNO; ahd_outb(ahd, NEGCONOPTS, con_opts); ahd_outb(ahd, NEGOADDR, saved_negoaddr); ahd_restore_modes(ahd, saved_modes); } /* * When the transfer settings for a connection change, setup for * negotiation in pending SCBs to effect the change as quickly as * possible. We also cancel any negotiations that are scheduled * for inflight SCBs that have not been started yet. 
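 *
 * For example, after a transfer agreement is changed mid-stream with
 *
 *	ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
 *			 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, paused);
 *
 * this routine runs so that SCBs already queued to the controller
 * have their MK_MESSAGE setting brought into line with the new
 * auto_negotiate mask.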
*/ static void ahd_update_pending_scbs(struct ahd_softc *ahd) { struct scb *pending_scb; int pending_scb_count; u_int scb_tag; int paused; u_int saved_scbptr; ahd_mode_state saved_modes; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. We can only safely * clear the negotiation required flag (setting requires the * execution queue to be modified) and this is only possible * if we are not already attempting to select out for this * SCB. For this reason, all callers only call this routine * if we are changing the negotiation settings for the currently * active transaction on the bus. */ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) { struct ahd_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; ahd_scb_devinfo(ahd, &devinfo, pending_scb); tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahd_sync_scb(ahd, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } /* * Force the sequencer to reinitialize the selection for * the command at the head of the execution queue if it * has already been setup. The negotiation changes may * effect whether we select-out with ATN. */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); saved_scbptr = ahd_get_scbptr(ahd); /* Ensure that the hscbs down on the card match the new information */ for (scb_tag = 0; scb_tag < ahd->scb_data.maxhscbs; scb_tag++) { struct hardware_scb *pending_hscb; u_int control; pending_scb = ahd_lookup_scb(ahd, scb_tag); if (pending_scb == NULL) continue; ahd_set_scbptr(ahd, scb_tag); pending_hscb = pending_scb->hscb; control = ahd_inb_scbram(ahd, SCB_CONTROL); control &= ~MK_MESSAGE; control |= pending_hscb->control & MK_MESSAGE; ahd_outb(ahd, SCB_CONTROL, control); } ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } /**************************** Pathing Information *****************************/ static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { ahd_mode_state saved_modes; u_int saved_scsiid; role_t role; int our_id; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if (ahd_inb(ahd, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahd_inb(ahd, TARGIDIN) & OID; } else if (role == ROLE_TARGET) our_id = ahd_inb(ahd, TOWNID); else our_id = ahd_inb(ahd, IOWNID); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); ahd_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahd, saved_scsiid), ahd_inb(ahd, SAVED_LUN), SCSIID_CHANNEL(ahd, saved_scsiid), role); ahd_restore_modes(ahd, saved_modes); } void ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { printf("%s:%c:%d:%d: ", ahd_name(ahd), 'A', devinfo->target, devinfo->lun); } struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase) { struct 
ahd_phase_table_entry *entry; struct ahd_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. */ last_entry = &ahd_phase_table[num_phases]; for (entry = ahd_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->hscb->control & TARGET_SCB) != 0) role = ROLE_TARGET; ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role); } /************************ Message Phase Processing ****************************/ /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and beging handing * the message phase(s) manually. */ static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (ahd_currently_packetized(ahd)) ahd->msg_flags |= MSG_FLAG_PACKETIZED; if (ahd->send_msg_perror && ahd_inb(ahd, MSG_OUT) == HOST_MSG) { ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("Setting up for Parity Error delivery\n"); #endif return; } else if (scb == NULL) { printf("%s: WARNING. No pending message for " "I_T msgin. Issuing NO-OP\n", ahd_name(ahd)); ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP; ahd->msgout_len++; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; return; } if ((scb->flags & SCB_DEVICE_RESET) == 0 && (scb->flags & SCB_PACKETIZED) == 0 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahd->msgout_buf[ahd->msgout_index++] = identify_msg; ahd->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb); ahd->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET; ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. 
*/ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG; } else { ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT; } ahd->msgout_len++; ahd_print_path(ahd, scb); printf("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahd_outb(ahd, SCSISEQ0, 0); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahd_build_transfer_msg(ahd, devinfo); /* * Clear our selection hardware in advance of potential * PPR IU status change busfree. We may have an entry in * the waiting Q for this target, and we don't want to go * about selecting while we handle the busfree and blow * it away. */ ahd_outb(ahd, SCSISEQ0, 0); } else { printf("ahd_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x " "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control, ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. 
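 *
 * On a narrow controller the forced-async case reduces, via
 * ahd_construct_sdtr() below, to the standard five byte extended
 * message
 *
 *	0x01 0x03 0x01 <AHD_ASYNC_XFER_PERIOD> 0x00
 *
 * where the zero REQ/ACK offset explicitly requests asynchronous
 * transfers.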
*/ if ((ahd->features & AHD_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahd_print_devinfo(ahd, devinfo); printf("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahd_validate_offset(ahd, tinfo, period, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahd_construct_ppr(ahd, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahd_construct_sdtr(ahd, devinfo, period, offset); } } else { ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. */ static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR; ahd->msgout_buf[ahd->msgout_index++] = period; ahd->msgout_buf[ahd->msgout_index++] = offset; ahd->msgout_len += 5; if (bootverbose) { printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiateion message in our message * buffer based on the input parameters. */ static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width) { ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR; ahd->msgout_buf[ahd->msgout_index++] = bus_width; ahd->msgout_len += 4; if (bootverbose) { printf("(%s:%c:%d:%d): Sending WDTR %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { /* * Always request precompensation from * the other target if we are running * at paced syncrates. */ if (period <= AHD_SYNCRATE_PACED) ppr_options |= MSG_EXT_PPR_PCOMP_EN; if (offset == 0) period = AHD_ASYNC_XFER_PERIOD; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN; ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR; ahd->msgout_buf[ahd->msgout_index++] = period; ahd->msgout_buf[ahd->msgout_index++] = 0; ahd->msgout_buf[ahd->msgout_index++] = offset; ahd->msgout_buf[ahd->msgout_index++] = bus_width; ahd->msgout_buf[ahd->msgout_index++] = ppr_options; ahd->msgout_len += 8; if (bootverbose) { printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. 
*/ static void ahd_clear_msg_state(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd->send_msg_perror = 0; ahd->msg_flags = MSG_FLAG_NONE; ahd->msgout_len = 0; ahd->msgin_index = 0; ahd->msg_type = MSG_TYPE_NONE; if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahd_outb(ahd, CLRSINT1, CLRATNO); } ahd_outb(ahd, MSG_OUT, MSG_NOOP); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); ahd_restore_modes(ahd, saved_modes); } /* * Manual message loop handler. */ static void ahd_handle_message_phase(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; u_int bus_phase; int end_session; ahd_fetch_devinfo(ahd, &devinfo); end_session = FALSE; bus_phase = ahd_inb(ahd, LASTPHASE); if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) { printf("LQIRETRY for LQIPHASE_OUTPKT\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } reswitch: switch (ahd->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd_outb(ahd, CLRSINT1, CLRATNO); ahd->send_msg_perror = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahd->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahd->send_msg_perror) { ahd_outb(ahd, CLRSINT1, CLRATNO); ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->send_msg_perror); #endif /* * If we are notifying the target of a CRC error * during packetized operations, the target is * within its rights to acknowledge our message * with a busfree. */ if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR) ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE; ahd_outb(ahd, RETURN_2, ahd->send_msg_perror); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. */ ahd->msgout_index = 0; ahd_assert_atn(ahd); } lastbyte = ahd->msgout_index == (ahd->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahd_outb(ahd, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. 
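 *
 * The hand-off to the sequencer used throughout this loop places
 * the outgoing byte in RETURN_2 and the action code in RETURN_1,
 * so one step of the write side looks like
 *
 *	ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
 *	ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
 *
 * leaving the actual REQ/ACK handshake to the sequencer.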
*/ ahd_outb(ahd, CLRSINT1, CLRREQINIT); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgout_buf[ahd->msgout_index]); #endif ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahd_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahd->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahd->send_msg_perror != 0 || (ahd->msgout_len != 0 && ahd->msgout_index == 0))) { ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahd->msgin_buf[ahd->msgin_index]); #endif message_done = ahd_parse_msg(ahd, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahd->msgin_index = 0; /* * If this message illicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahd->msgout_len != 0) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, &devinfo); printf("Asserting ATN for response\n"); } #endif ahd_assert_atn(ahd); } } else ahd->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahd_outb(ahd, CLRSINT1, CLRREQINIT); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); if (ahd->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0 && ahd->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahd->msg_type = MSG_TYPE_TARGET_MSGOUT; ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO); ahd->msgin_index = 0; /* Dummy read to REQ for first byte */ ahd_inb(ahd, SCSIDAT); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); break; } msgdone = ahd->msgout_index == ahd->msgout_len; if (msgdone) { ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * By default, the message loop will continue. */ ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte. 
*/ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN); ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT); msgdone = ahd_parse_msg(ahd, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahd->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahd->msgin_index = 0; /* * If this message illicited a response, transition * to the Message in phase and send it. */ if (ahd->msgout_len != 0) { ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO); ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); ahd->msg_type = MSG_TYPE_TARGET_MSGIN; ahd->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) { printf("%s: Returning to Idle Loop\n", ahd_name(ahd)); ahd_clear_msg_state(ahd); /* * Perform the equivalent of a clear_target_state. */ ahd_outb(ahd, LASTPHASE, P_BUSFREE); ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT); ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET); } else { ahd_clear_msg_state(ahd); ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP); } } } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahd->msgout_len) { if (ahd->msgout_buf[index] == MSG_EXTENDED) { u_int end_index; end_index = index + 1 + ahd->msgout_buf[index + 1]; if (ahd->msgout_buf[index+2] == msgval && type == AHDMSG_EXT) { if (full) { if (ahd->msgout_index > end_index) found = TRUE; } else if (ahd->msgout_index > index) found = TRUE; } index = end_index; } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param*/ index += 2; } else { /* Single byte message */ if (type == AHDMSG_1B && ahd->msgout_index > index && (ahd->msgout_buf[index] == msgval || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0 && msgval == MSG_IDENTIFYFLAG))) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; int reject; int done; int response; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. 
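 *
 * For reference, the extended messages handled below all share the
 * standard SCSI framing of byte 0 = MSG_EXTENDED (0x01), byte 1 =
 * length, byte 2 = type.  An SDTR, for instance, arrives as
 *
 *	msgin_buf[0]	0x01	MSG_EXTENDED
 *	msgin_buf[1]	0x03	MSG_EXT_SDTR_LEN
 *	msgin_buf[2]	0x01	MSG_EXT_SDTR
 *	msgin_buf[3]	period factor
 *	msgin_buf[4]	REQ/ACK offset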
*/ switch (ahd->msgin_buf[0]) { case MSG_DISCONNECT: case MSG_SAVEDATAPOINTER: case MSG_CMDCOMPLETE: case MSG_RESTOREPOINTERS: case MSG_IGN_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own. */ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahd->msgin_index < 2) break; switch (ahd->msgin_buf[2]) { case MSG_EXT_SDTR: { u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahd->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahd->msgin_buf[4]; ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, tinfo->curr.width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, ahd->msgin_buf[3], saved_offset, period, offset); } ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_sdtr(ahd, devinfo, period, offset); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahd->msgin_buf[3]; saved_width = bus_width; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printf("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_wdtr(ahd, devinfo, bus_width); ahd->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_ALWAYS); ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahd->msgin_buf[3]; offset = ahd->msgin_buf[5]; bus_width = ahd->msgin_buf[6]; saved_width = bus_width; ppr_options = ahd->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period <= 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Transfer options are only available if we * are negotiating wide. */ if (bus_width == 0) ppr_options &= MSG_EXT_PPR_QAS_REQ; ahd_validate_width(ahd, tinfo, &bus_width, devinfo->role); ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options, devinfo->role); ahd_validate_offset(ahd, tinfo, period, &offset, bus_width, devinfo->role); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. 
*/ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; } } else { if (devinfo->role != ROLE_TARGET) printf("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); else printf("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_construct_ppr(ahd, devinfo, period, offset, bus_width, ppr_options); ahd->msgout_index = 0; response = TRUE; } if (bootverbose) { printf("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahd->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahd_set_width(ahd, devinfo, bus_width, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. */ reject = TRUE; break; } break; } #ifdef AHD_TARGET_MODE case MSG_BUS_DEV_RESET: ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahd->msgin_buf[0] == MSG_ABORT_TAG) tag = ahd_inb(ahd, INITIATOR_TAG); ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, ahd->msgin_buf[0], /*arg*/tag); ahd_send_lstate_events(ahd, lstate); } } ahd_restart(ahd); done = MSGLOOP_TERMINATED; break; } #endif case MSG_QAS_REQUEST: #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printf("%s: QAS request. SCSISIGI == 0x%x\n", ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; /* FALLTHROUGH */ case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahd->msgout_index = 0; ahd->msgout_len = 1; ahd->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahd->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. 
*/ struct scb *scb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahd_inb(ahd, LAST_MSG); if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE) && tinfo->goal.period <= AHD_SYNCRATE_PACED) { /* * Target may not like our SPI-4 PPR Options. * Attempt to negotiate 80MHz which will turn * off these options. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying simple U160 PPR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.period = AHD_SYNCRATE_DT; tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; } else { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. " "Trying WDTR/SDTR\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; } ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_ACTIVE|AHD_TRANS_GOAL, /*paused*/TRUE); printf("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printf("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun); ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE); mask = ~0x23; } else { printf("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahd_name(ahd), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. 
*/ ahd_outb(ahd, SCB_CONTROL, ahd_inb_scbram(ahd, SCB_CONTROL) & mask); scb->hscb->control &= mask; - ahd_set_transaction_tag(scb, /*enabled*/FALSE, + aic_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG); ahd_assert_atn(ahd); ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), SCB_GET_TAG(scb)); /* * Requeue all tagged commands for this target * currently in our posession so they can be * converted to untagged commands. */ ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) { /* * Most likely the device believes that we had * previously negotiated packetized. */ ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE | MSG_FLAG_IU_REQ_CHANGED; ahd_force_renegotiation(ahd, devinfo); ahd->msgout_index = 0; ahd->msgout_len = 0; ahd_build_transfer_msg(ahd, devinfo); ahd->msgout_index = 0; response = 1; } else { /* * Otherwise, we ignore it. */ printf("%s:%c:%d: Message reject for %x -- ignored\n", ahd_name(ahd), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. */ static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0 - || ahd_get_transfer_dir(scb) != CAM_DIR_IN) { + || aic_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { uint32_t data_cnt; uint64_t data_addr; uint32_t sglen; /* Pull in the rest of the sgptr */ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHD_SG_LEN_MASK; } data_addr = ahd_inq(ahd, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; - sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; - sglen = ahd_le32toh(sg->len); + sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); - data_addr = ahd_le64toh(sg->addr) + data_addr = aic_le64toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. 
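 *
 * To make the rewind arithmetic above concrete (hypothetical
 * numbers): for a previous segment of 0x1000 bytes at bus address
 * 0x20000, the ignore wide residue adjustment leaves
 *
 *	data_cnt  == 1 (plus the preserved high address/SG_LIST bits)
 *	data_addr == 0x20000 + 0x1000 - 1 == 0x20fff
 *
 * so exactly the final byte of that segment remains outstanding.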
*/ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; - sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + sglen = aic_le32toh(sg->len) & AHD_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHD_SG_LEN_MASK)) { sg--; - sglen = ahd_le32toh(sg->len); + sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST * bits while setting the count to 1. */ data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK)); - data_addr = ahd_le32toh(sg->addr) + data_addr = aic_le32toh(sg->addr) + (sglen & AHD_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahd_sg_virt_to_bus(ahd, scb, sg); } } /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahd_outb(ahd, SCB_TASK_ATTRIBUTE, ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE) ^ SCB_XFERLEN_ODD); ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr); ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt); /* * The FIFO's pointers will be updated if/when the * sequencer re-enters a data phase. */ } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. */ static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int scb_index; u_int wait; uint32_t sgptr; uint32_t resid; uint64_t dataptr; AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK); scb_index = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scb_index); /* * Release and reacquire the FIFO so we * have a clean slate. */ ahd_outb(ahd, DFFSXFRCTL, CLRCHN); wait = 1000; while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE)) - ahd_delay(100); + aic_delay(100); if (wait == 0) { ahd_print_path(ahd, scb); printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n"); ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, ahd_inb(ahd, DFFSTAT) | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0)); /* * Determine initial values for data_addr and data_cnt * for resuming the data phase. 
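 *
 * The recovery arithmetic is data_addr = sg.addr + sg.len - resid,
 * applied to the S/G element that was actually in flight.  With
 * hypothetical values sg.addr == 0x10000, sg.len == 0x2000 and a
 * residual of 0x800 bytes, the data phase resumes at address
 * 0x11800 with 0x800 bytes left to transfer.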
 */
-	sgptr = (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24)
-	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16)
-	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8)
-	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
+	sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
	sgptr &= SG_PTR_MASK;
	resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);

	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

-		dataptr = ahd_le64toh(sg->addr)
-			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+		dataptr = aic_le64toh(sg->addr)
+			+ (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
-		ahd_outb(ahd, HADDR + 7, dataptr >> 56);
-		ahd_outb(ahd, HADDR + 6, dataptr >> 48);
-		ahd_outb(ahd, HADDR + 5, dataptr >> 40);
-		ahd_outb(ahd, HADDR + 4, dataptr >> 32);
+		ahd_outl(ahd, HADDR + 4, dataptr >> 32);
	} else {
		struct ahd_dma_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

-		dataptr = ahd_le32toh(sg->addr)
-			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
+		dataptr = aic_le32toh(sg->addr)
+			+ (aic_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
		ahd_outb(ahd, HADDR + 4,
-			(ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
+			(aic_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
	}
-	ahd_outb(ahd, HADDR + 3, dataptr >> 24);
-	ahd_outb(ahd, HADDR + 2, dataptr >> 16);
-	ahd_outb(ahd, HADDR + 1, dataptr >> 8);
-	ahd_outb(ahd, HADDR, dataptr);
+	ahd_outl(ahd, HADDR, dataptr);
	ahd_outb(ahd, HCNT + 2, resid >> 16);
	ahd_outb(ahd, HCNT + 1, resid >> 8);
	ahd_outb(ahd, HCNT, resid);
}

/*
 * Handle the effects of issuing a bus device reset message.
 */
static void
ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		    u_int lun, cam_status status, char *message,
		    int verbose_level)
{
#ifdef AHD_TARGET_MODE
	struct ahd_tmode_tstate* tstate;
#endif
	int found;

	found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
			       lun, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHD_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahd->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		u_int cur_lun;
		u_int max_lun;

		if (lun != CAM_LUN_WILDCARD) {
			cur_lun = lun;
			max_lun = lun;
		} else {
			cur_lun = 0;
			max_lun = AHD_NUM_LUNS - 1;
		}
		for (; cur_lun <= max_lun; cur_lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[cur_lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHD_TRANS_CUR, /*paused*/TRUE);
	ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
			 /*ppr_options*/0, AHD_TRANS_CUR,
			 /*paused*/TRUE);

	ahd_send_async(ahd, devinfo->channel, devinfo->target,
		       lun, AC_SENT_BDR, NULL);

	if (message != NULL && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
		       message, devinfo->channel, devinfo->target, found);
}

#ifdef AHD_TARGET_MODE
static void
ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		       struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
*/ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages. */ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initializion. 
*/ struct ahd_softc * ahd_alloc(void *platform_arg, char *name) { struct ahd_softc *ahd; #ifndef __FreeBSD__ ahd = malloc(sizeof(*ahd), M_DEVBUF, M_NOWAIT); if (!ahd) { printf("aic7xxx: cannot malloc softc!\n"); free(name, M_DEVBUF); return NULL; } #else ahd = device_get_softc((device_t)platform_arg); #endif memset(ahd, 0, sizeof(*ahd)); ahd->seep_config = malloc(sizeof(*ahd->seep_config), M_DEVBUF, M_NOWAIT); if (ahd->seep_config == NULL) { #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif free(name, M_DEVBUF); return (NULL); } LIST_INIT(&ahd->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahd->name = name; ahd->unit = -1; ahd->description = NULL; ahd->bus_description = NULL; ahd->channel = 'A'; ahd->chip = AHD_NONE; ahd->features = AHD_FENONE; ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; - ahd_timer_init(&ahd->reset_timer); - ahd_timer_init(&ahd->stat_timer); + aic_timer_init(&ahd->reset_timer); + aic_timer_init(&ahd->stat_timer); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT; ahd->int_coalescing_stop_threshold = AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT; if (ahd_platform_alloc(ahd, platform_arg) != 0) { ahd_free(ahd); ahd = NULL; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) { printf("%s: scb size = 0x%x, hscb size = 0x%x\n", ahd_name(ahd), (u_int)sizeof(struct scb), (u_int)sizeof(struct hardware_scb)); } #endif return (ahd); } int ahd_softc_init(struct ahd_softc *ahd) { ahd->unpause = 0; ahd->pause = PAUSE; return (0); } void ahd_softc_insert(struct ahd_softc *ahd) { struct ahd_softc *list_ahd; -#if AHD_PCI_CONFIG > 0 +#if AIC_PCI_CONFIG > 0 /* * Second Function PCI devices need to inherit some * settings from function 0. */ if ((ahd->features & AHD_MULTI_FUNC) != 0) { TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { - ahd_dev_softc_t list_pci; - ahd_dev_softc_t pci; + aic_dev_softc_t list_pci; + aic_dev_softc_t pci; list_pci = list_ahd->dev_softc; pci = ahd->dev_softc; - if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci) - && ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) { + if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci) + && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) { struct ahd_softc *master; struct ahd_softc *slave; - if (ahd_get_pci_function(list_pci) == 0) { + if (aic_get_pci_function(list_pci) == 0) { master = list_ahd; slave = ahd; } else { master = ahd; slave = list_ahd; } slave->flags &= ~AHD_BIOS_ENABLED; slave->flags |= master->flags & AHD_BIOS_ENABLED; break; } } } #endif /* * Insertion sort into our list of softcs. */ list_ahd = TAILQ_FIRST(&ahd_tailq); while (list_ahd != NULL && ahd_softc_comp(ahd, list_ahd) <= 0) list_ahd = TAILQ_NEXT(list_ahd, links); if (list_ahd != NULL) TAILQ_INSERT_BEFORE(list_ahd, ahd, links); else TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links); ahd->init_level++; } /* * Verify that the passed in softc pointer is for a * controller that is still configured. 
*/ struct ahd_softc * ahd_find_softc(struct ahd_softc *ahd) { struct ahd_softc *list_ahd; TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { if (list_ahd == ahd) return (ahd); } return (NULL); } void ahd_set_unit(struct ahd_softc *ahd, int unit) { ahd->unit = unit; } void ahd_set_name(struct ahd_softc *ahd, char *name) { if (ahd->name != NULL) free(ahd->name, M_DEVBUF); ahd->name = name; } void ahd_free(struct ahd_softc *ahd) { int i; + ahd_terminate_recovery_thread(ahd); switch (ahd->init_level) { default: case 5: ahd_shutdown(ahd); - TAILQ_REMOVE(&ahd_tailq, ahd, links); /* FALLTHROUGH */ case 4: - ahd_dmamap_unload(ahd, ahd->shared_data_dmat, - ahd->shared_data_dmamap); + aic_dmamap_unload(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 3: - ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, - ahd->shared_data_dmamap); - ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, - ahd->shared_data_dmamap); + aic_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, + ahd->shared_data_map.dmamap); + aic_dmamap_destroy(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap); /* FALLTHROUGH */ case 2: - ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); + aic_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: #ifndef __linux__ - ahd_dma_tag_destroy(ahd, ahd->buffer_dmat); + aic_dma_tag_destroy(ahd, ahd->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ - ahd_dma_tag_destroy(ahd, ahd->parent_dmat); + aic_dma_tag_destroy(ahd, ahd->parent_dmat); #endif ahd_platform_free(ahd); ahd_fini_scbdata(ahd); for (i = 0; i < AHD_NUM_TARGETS; i++) { struct ahd_tmode_tstate *tstate; tstate = ahd->enabled_targets[i]; if (tstate != NULL) { #if AHD_TARGET_MODE int j; for (j = 0; j < AHD_NUM_LUNS; j++) { struct ahd_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_DEVBUF); } } #endif free(tstate, M_DEVBUF); } } #if AHD_TARGET_MODE if (ahd->black_hole != NULL) { xpt_free_path(ahd->black_hole->path); free(ahd->black_hole, M_DEVBUF); } #endif if (ahd->name != NULL) free(ahd->name, M_DEVBUF); if (ahd->seep_config != NULL) free(ahd->seep_config, M_DEVBUF); if (ahd->saved_stack != NULL) free(ahd->saved_stack, M_DEVBUF); #ifndef __FreeBSD__ free(ahd, M_DEVBUF); #endif return; } void ahd_shutdown(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; /* * Stop periodic timer callbacks. */ - ahd_timer_stop(&ahd->reset_timer); - ahd_timer_stop(&ahd->stat_timer); + aic_timer_stop(&ahd->reset_timer); + aic_timer_stop(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runnable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahd_intr_enable(). */ int ahd_reset(struct ahd_softc *ahd, int reinit) { u_int sxfrctl1; int wait; uint32_t cmd; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus.
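 *
 * The overall sequence below: save SXFRCTL1, optionally mask off
 * PERR/SERR reporting (the Razor #632 workaround), assert CHIPRST,
 * poll for CHIPRSTACK, then restore the PCI command register and
 * SXFRCTL1.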
*/ ahd_pause(ahd); ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sxfrctl1 = ahd_inb(ahd, SXFRCTL1); - cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { uint32_t mod_cmd; /* * A4 Razor #632 * During the assertion of CHIPRST, the chip * does not disable its parity logic prior to * the start of the reset. This may cause a * parity error to be detected and thus a * spurious SERR or PERR assertion. Disable * PERR and SERR responses during the CHIPRST. */ mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, mod_cmd, /*bytes*/2); } ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { - ahd_delay(1000); + aic_delay(1000); } while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printf("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahd_name(ahd)); } ahd_outb(ahd, HCNTRL, ahd->pause); if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) { /* * Clear any latched PCI error status and restore * previous SERR and PERR response enables. */ - ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, 0xFF, /*bytes*/1); - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); } /* * Mode should be SCSI after a chip reset, but let's * set it just to be safe. We touch the MODE_PTR * register directly so as to bypass the lazy update * code in ahd_set_modes(). */ ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI)); /* * Restore SXFRCTL1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); /* Determine chip configuration */ ahd->features &= ~AHD_WIDE; if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0) ahd->features |= AHD_WIDE; /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ if (reinit != 0) ahd_chip_init(ahd); return (0); } /* * Determine the number of SCBs available on the controller */ int ahd_probe_scbs(struct ahd_softc *ahd) { int i; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); for (i = 0; i < AHD_SCB_MAX; i++) { int j; ahd_set_scbptr(ahd, i); ahd_outw(ahd, SCB_BASE, i); for (j = 2; j < 64; j++) ahd_outb(ahd, SCB_BASE+j, 0); /* Start out life as unallocated (needing an abort) */ ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE); if (ahd_inw_scbram(ahd, SCB_BASE) != i) break; ahd_set_scbptr(ahd, 0); if (ahd_inw_scbram(ahd, SCB_BASE) != 0) break; } return (i); } static void ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static void ahd_initialize_hscbs(struct ahd_softc *ahd) { int i; for (i = 0; i < ahd->scb_data.maxhscbs; i++) { ahd_set_scbptr(ahd, i); /* Clear the control byte.
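 * ahd_probe_scbs() left MK_MESSAGE set in every control byte, so this
 * returns each hardware SCB to a benign state.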
*/ ahd_outb(ahd, SCB_CONTROL, 0); /* Set the next pointer */ ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); } } static int ahd_init_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; int i; scb_data = &ahd->scb_data; TAILQ_INIT(&scb_data->free_scbs); for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) LIST_INIT(&scb_data->free_scb_lists[i]); LIST_INIT(&scb_data->any_dev_free_scb_list); SLIST_INIT(&scb_data->hscb_maps); SLIST_INIT(&scb_data->sg_maps); SLIST_INIT(&scb_data->sense_maps); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahd_probe_scbs(ahd); if (scb_data->maxhscbs == 0) { printf("%s: No SCB space found\n", ahd_name(ahd)); return (ENXIO); } ahd_initialize_hscbs(ahd); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ - if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* DMA tag for our S/G structures. */ - if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, + if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, ahd_sglist_allocsize(ahd), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) printf("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), ahd_sglist_allocsize(ahd)); #endif scb_data->init_level++; /* DMA tag for our sense buffers. We allocate in page sized chunks */ - if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ ahd_alloc_scbs(ahd); if (scb_data->numscbs == 0) { printf("%s: ahd_init_scbdata - " "Unable to allocate initial scbs\n", ahd_name(ahd)); goto error_exit; } /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static struct scb * ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) { struct scb *scb; /* * Look on the pending list. */ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (SCB_GET_TAG(scb) == tag) return (scb); } /* * Then on all of the collision free lists. */ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { if (SCB_GET_TAG(list_scb) == tag) return (list_scb); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb); } /* * And finally on the generic free list.
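 * ahd_alloc_scbs() relies on this lookup to pair each new SCB with its
 * collision partner (tag ^ 0x100); at that point every previously
 * created SCB is on one of the three lists searched here.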
*/ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (SCB_GET_TAG(scb) == tag) return (scb); } return (NULL); } static void ahd_fini_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; scb_data = &ahd->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct map_node *sns_map; while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); - ahd_dmamap_unload(ahd, scb_data->sense_dmat, + aic_dmamap_unload(ahd, scb_data->sense_dmat, sns_map->dmamap); - ahd_dmamem_free(ahd, scb_data->sense_dmat, + aic_dmamem_free(ahd, scb_data->sense_dmat, sns_map->vaddr, sns_map->dmamap); free(sns_map, M_DEVBUF); } - ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); + aic_dma_tag_destroy(ahd, scb_data->sense_dmat); /* FALLTHROUGH */ } case 6: { struct map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); - ahd_dmamap_unload(ahd, scb_data->sg_dmat, + aic_dmamap_unload(ahd, scb_data->sg_dmat, sg_map->dmamap); - ahd_dmamem_free(ahd, scb_data->sg_dmat, + aic_dmamem_free(ahd, scb_data->sg_dmat, sg_map->vaddr, sg_map->dmamap); free(sg_map, M_DEVBUF); } - ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); + aic_dma_tag_destroy(ahd, scb_data->sg_dmat); /* FALLTHROUGH */ } case 5: { struct map_node *hscb_map; while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); - ahd_dmamap_unload(ahd, scb_data->hscb_dmat, + aic_dmamap_unload(ahd, scb_data->hscb_dmat, hscb_map->dmamap); - ahd_dmamem_free(ahd, scb_data->hscb_dmat, + aic_dmamem_free(ahd, scb_data->hscb_dmat, hscb_map->vaddr, hscb_map->dmamap); free(hscb_map, M_DEVBUF); } - ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); + aic_dma_tag_destroy(ahd, scb_data->hscb_dmat); /* FALLTHROUGH */ } case 4: case 3: case 2: case 1: case 0: break; } } /* * DSP filter Bypass must be enabled until the first selection * after a change in bus mode (Razor #491 and #493). 
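 * ahd_setup_iocell_workaround() below arms the bypass and enables the
 * ENSELDO/ENSELDI status interrupts; ahd_iocell_first_selection() then
 * drops the bypass (in the ENAB40 case) once the first selection in
 * either direction is seen.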
*/ static void ahd_setup_iocell_workaround(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: Setting up iocell workaround\n", ahd_name(ahd)); #endif ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_HAD_FIRST_SEL; } static void ahd_iocell_first_selection(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int sblkctl; if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) return; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sblkctl = ahd_inb(ahd, SBLKCTL); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: iocell first selection\n", ahd_name(ahd)); #endif if ((sblkctl & ENAB40) != 0) { ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: BYPASS now disabled\n", ahd_name(ahd)); #endif } ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_HAD_FIRST_SEL; } /*************************** SCB Management ***********************************/ static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; scb->flags |= SCB_ON_COL_LIST; AHD_SET_SCB_COL_IDX(scb, col_idx); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb != NULL) { LIST_INSERT_AFTER(first_scb, scb, collision_links); } else { LIST_INSERT_HEAD(free_list, scb, collision_links); TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); } } static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; u_int col_idx; scb->flags &= ~SCB_ON_COL_LIST; col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb == scb) { struct scb *next_scb; /* * Maintain order in the collision free * lists for fairness if this device has * other colliding tags active. */ next_scb = LIST_NEXT(scb, collision_links); if (next_scb != NULL) { TAILQ_INSERT_AFTER(free_tailq, scb, next_scb, links.tqe); } TAILQ_REMOVE(free_tailq, scb, links.tqe); } LIST_REMOVE(scb, collision_links); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) { struct scb *scb; int tries; tries = 0; look_again: TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { ahd_rem_col_list(ahd, scb); goto found; } } if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { if (tries++ != 0) return (NULL); ahd_alloc_scbs(ahd); goto look_again; } LIST_REMOVE(scb, links.le); if (col_idx != AHD_NEVER_COL_IDX && (scb->col_scb != NULL) && (scb->col_scb->flags & SCB_ACTIVE) == 0) { LIST_REMOVE(scb->col_scb, links.le); ahd_add_col_list(ahd, scb->col_scb, col_idx); } found: scb->flags |= SCB_ACTIVE; return (scb); } /* * Return an SCB resource to the free list. 
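 *
 * The SCB lands either on the generic any_dev_free_scb_list or, when a
 * tagged non-packetized transaction is still active in its collision
 * partner, on that partner's per-column collision list; the four cases
 * are spelled out below.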
*/ void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) { /* Clean up for the next user */ scb->flags = SCB_FLAG_NONE; scb->hscb->control = 0; ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; if (scb->col_scb == NULL) { /* * No collision possible. Just free normally. */ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { /* * The SCB we might have collided with is on * a free collision list. Put both SCBs on * the generic list. */ ahd_rem_col_list(ahd, scb->col_scb); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb->col_scb, links.le); } else if ((scb->col_scb->flags & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE && (scb->col_scb->hscb->control & TAG_ENB) != 0) { /* * The SCB we might collide with on the next allocation * is still active in a non-packetized, tagged, context. * Put us on the SCB collision list. */ ahd_add_col_list(ahd, scb, AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); } else { /* * The SCB we might collide with on the next allocation * is either active in a packetized context, or free. * Since we can't collide, put this SCB on the generic * free list. */ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } - ahd_platform_scb_free(ahd, scb); + aic_platform_scb_free(ahd, scb); } void ahd_alloc_scbs(struct ahd_softc *ahd) { struct scb_data *scb_data; struct scb *next_scb; struct hardware_scb *hscb; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; uint8_t *segs; uint8_t *sense_data; bus_addr_t hscb_busaddr; bus_addr_t sg_busaddr; bus_addr_t sense_busaddr; int newcount; int i; scb_data = &ahd->scb_data; if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) /* Can't allocate any more */ return; if (scb_data->scbs_left != 0) { int offset; offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; hscb_map = SLIST_FIRST(&scb_data->hscb_maps); hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; - hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); + hscb_busaddr = hscb_map->busaddr + (offset * sizeof(*hscb)); } else { hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_NOWAIT); if (hscb_map == NULL) return; /* Allocate the next batch of hardware SCBs */ - if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, + if (aic_dmamem_alloc(ahd, scb_data->hscb_dmat, (void **)&hscb_map->vaddr, BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { free(hscb_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); - ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, + aic_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, - &hscb_map->physaddr, /*flags*/0); + &hscb_map->busaddr, /*flags*/0); hscb = (struct hardware_scb *)hscb_map->vaddr; - hscb_busaddr = hscb_map->physaddr; + hscb_busaddr = hscb_map->busaddr; scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); + if (ahd->next_queued_hscb == NULL) { + /* + * We need one HSCB to serve as the "next HSCB". Since + * the tag identifier in this HSCB will never be used, + * there is no point in using a valid SCB from the + * free pool for it. So, we allocate this "sentinel" + * specially. 
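+ * (Its hscb_busaddr is still initialized below, since the sequencer
+ * fetches the next queued SCB's bus address through this sentinel.)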
+ */ + ahd->next_queued_hscb = hscb; + ahd->next_queued_hscb_map = hscb_map; + memset(hscb, 0, sizeof(*hscb)); + hscb->hscb_busaddr = aic_htole32(hscb_busaddr); + hscb++; + hscb_busaddr += sizeof(*hscb); + scb_data->scbs_left--; + } } if (scb_data->sgs_left != 0) { int offset; offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) - scb_data->sgs_left) * ahd_sglist_size(ahd); sg_map = SLIST_FIRST(&scb_data->sg_maps); segs = sg_map->vaddr + offset; - sg_busaddr = sg_map->physaddr + offset; + sg_busaddr = sg_map->busaddr + offset; } else { sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return; /* Allocate the next batch of S/G lists */ - if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, + if (aic_dmamem_alloc(ahd, scb_data->sg_dmat, (void **)&sg_map->vaddr, BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { free(sg_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); - ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, + aic_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, sg_map->vaddr, ahd_sglist_allocsize(ahd), - ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); + ahd_dmamap_cb, &sg_map->busaddr, /*flags*/0); segs = sg_map->vaddr; - sg_busaddr = sg_map->physaddr; + sg_busaddr = sg_map->busaddr; scb_data->sgs_left = ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printf("Mapped SG data\n"); #endif } if (scb_data->sense_left != 0) { int offset; offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); sense_map = SLIST_FIRST(&scb_data->sense_maps); sense_data = sense_map->vaddr + offset; - sense_busaddr = sense_map->physaddr + offset; + sense_busaddr = sense_map->busaddr + offset; } else { sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_NOWAIT); if (sense_map == NULL) return; /* Allocate the next batch of sense buffers */ - if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, + if (aic_dmamem_alloc(ahd, scb_data->sense_dmat, (void **)&sense_map->vaddr, BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { free(sense_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); - ahd_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, + aic_dmamap_load(ahd, scb_data->sense_dmat, sense_map->dmamap, sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, - &sense_map->physaddr, /*flags*/0); + &sense_map->busaddr, /*flags*/0); sense_data = sense_map->vaddr; - sense_busaddr = sense_map->physaddr; + sense_busaddr = sense_map->busaddr; scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printf("Mapped sense data\n"); #endif } newcount = MIN(scb_data->sense_left, scb_data->scbs_left); newcount = MIN(newcount, scb_data->sgs_left); newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); scb_data->sense_left -= newcount; scb_data->scbs_left -= newcount; scb_data->sgs_left -= newcount; for (i = 0; i < newcount; i++) { - u_int col_tag; - struct scb_platform_data *pdata; + u_int col_tag; + struct scb_platform_data *pdata; #ifndef __linux__ int error; #endif + next_scb = (struct scb *)malloc(sizeof(*next_scb), M_DEVBUF, M_NOWAIT); if (next_scb == NULL) break; pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), M_DEVBUF, M_NOWAIT); if (pdata == NULL) { free(next_scb, M_DEVBUF); break; } next_scb->platform_data = pdata; next_scb->hscb_map = hscb_map; next_scb->sg_map = sg_map; next_scb->sense_map = sense_map; next_scb->sg_list = segs; next_scb->sense_data = sense_data; next_scb->sense_busaddr = sense_busaddr; memset(hscb, 0, sizeof(*hscb)); next_scb->hscb =
hscb; - hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); + hscb->hscb_busaddr = aic_htole32(hscb_busaddr); /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. */ next_scb->sg_list_busaddr = sg_busaddr; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) next_scb->sg_list_busaddr += sizeof(struct ahd_dma64_seg); else next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); next_scb->ahd_softc = ahd; next_scb->flags = SCB_FLAG_NONE; #ifndef __linux__ - error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, + error = aic_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) { free(next_scb, M_DEVBUF); free(pdata, M_DEVBUF); break; } #endif - next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); + next_scb->hscb->tag = aic_htole16(scb_data->numscbs); col_tag = scb_data->numscbs ^ 0x100; next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); if (next_scb->col_scb != NULL) next_scb->col_scb->col_scb = next_scb; ahd_free_scb(ahd, next_scb); hscb++; hscb_busaddr += sizeof(*hscb); segs += ahd_sglist_size(ahd); sg_busaddr += ahd_sglist_size(ahd); sense_data += AHD_SENSE_BUFSIZE; sense_busaddr += AHD_SENSE_BUFSIZE; scb_data->numscbs++; } } void ahd_controller_info(struct ahd_softc *ahd, char *buf) { const char *speed; const char *type; int len; len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); buf += len; speed = "Ultra320 "; if ((ahd->features & AHD_WIDE) != 0) { type = "Wide "; } else { type = "Single "; } len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", speed, type, ahd->channel, ahd->our_id); buf += len; sprintf(buf, "%s, %d SCBs", ahd->bus_description, ahd->scb_data.maxhscbs); } static const char *channel_strings[] = { "Primary Low", "Primary High", "Secondary Low", "Secondary High" }; static const char *termstat_strings[] = { "Terminated Correctly", "Over Terminated", "Under Terminated", "Not Configured" }; /* * Start the board, ready for normal operation */ int ahd_init(struct ahd_softc *ahd) { - uint8_t *base_vaddr; uint8_t *next_vaddr; bus_addr_t next_baddr; size_t driver_data_size; int i; int error; u_int warn_user; uint8_t current_sensing; uint8_t fstat; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd->stack_size = ahd_probe_stack_size(ahd); ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t), M_DEVBUF, M_NOWAIT); if (ahd->saved_stack == NULL) return (ENOMEM); /* * Verify that the compiler hasn't over-aggressively * padded important structures. */ if (sizeof(struct hardware_scb) != 64) panic("Hardware SCB size is incorrect"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) ahd->flags |= AHD_SEQUENCER_DEBUG; #endif /* * Default to allowing initiator operations. */ ahd->flags |= AHD_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) ahd->features &= ~AHD_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ - if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING ?
(bus_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHD_NSEG, /*maxsegsz*/AHD_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahd->buffer_dmat) != 0) { return (ENOMEM); } #endif ahd->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qoutfifo. When providing * for the target mode role, we must additionally provide space for * the incoming target command fifo. */ - driver_data_size = AHD_SCB_MAX * sizeof(uint16_t) - + sizeof(struct hardware_scb); + driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo); if ((ahd->features & AHD_TARGETMODE) != 0) driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) driver_data_size += PKT_OVERRUN_BUFSIZE; - if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahd->shared_data_dmat) != 0) { return (ENOMEM); } ahd->init_level++; /* Allocation of driver data */ - if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, - (void **)&base_vaddr, - BUS_DMA_NOWAIT, &ahd->shared_data_dmamap) != 0) { + if (aic_dmamem_alloc(ahd, ahd->shared_data_dmat, + (void **)&ahd->shared_data_map.vaddr, + BUS_DMA_NOWAIT, + &ahd->shared_data_map.dmamap) != 0) { return (ENOMEM); } ahd->init_level++; /* And permanently map it in */ - ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, - base_vaddr, driver_data_size, ahd_dmamap_cb, - &ahd->shared_data_busaddr, /*flags*/0); - ahd->qoutfifo = (uint16_t *)base_vaddr; + aic_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + ahd->shared_data_map.vaddr, driver_data_size, + ahd_dmamap_cb, &ahd->shared_data_map.busaddr, + /*flags*/0); + ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; - next_baddr = ahd->shared_data_busaddr + AHD_QOUT_SIZE*sizeof(uint16_t); + next_baddr = ahd->shared_data_map.busaddr + + AHD_QOUT_SIZE*sizeof(struct ahd_completion); if ((ahd->features & AHD_TARGETMODE) != 0) { ahd->targetcmds = (struct target_cmd *)next_vaddr; next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); } if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { ahd->overrun_buf = next_vaddr; next_vaddr += PKT_OVERRUN_BUFSIZE; next_baddr += PKT_OVERRUN_BUFSIZE; } - /* - * We need one SCB to serve as the "next SCB". Since the - * tag identifier in this SCB will never be used, there is - * no point in using a valid HSCB tag from an SCB pulled from - * the standard free pool. So, we allocate this "sentinel" - * specially from the DMA safe memory chunk used for the QOUTFIFO. 
- */ - ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; - ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); - ahd->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahd_init_scbdata(ahd) != 0) return (ENOMEM); if ((ahd->flags & AHD_INITIATORROLE) == 0) ahd->flags &= ~AHD_RESET_BUS_A; /* * Before committing these settings to the chip, give * the OSM one last chance to modify our configuration. */ ahd_platform_init(ahd); /* Bring up the chip. */ ahd_chip_init(ahd); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if ((ahd->flags & AHD_CURRENT_SENSING) == 0) goto init_done; /* * Verify termination based on current draw and * warn the user if the bus is over/under terminated. */ error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, CURSENSE_ENB); if (error != 0) { printf("%s: current sensing timeout 1\n", ahd_name(ahd)); goto init_done; } for (i = 20, fstat = FLX_FSTAT_BUSY; (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); if (error != 0) { printf("%s: current sensing timeout 2\n", ahd_name(ahd)); goto init_done; } } if (i == 0) { printf("%s: Timed out during current-sensing test\n", ahd_name(ahd)); goto init_done; } /* Latch Current Sensing status. */ error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); if (error != 0) { printf("%s: current sensing timeout 3\n", ahd_name(ahd)); goto init_done; } /* Disable current sensing. */ ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { printf("%s: current_sensing == 0x%x\n", ahd_name(ahd), current_sensing); } #endif warn_user = 0; for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { u_int term_stat; term_stat = (current_sensing & FLX_CSTAT_MASK); switch (term_stat) { case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) break; printf("%s: %s Channel %s\n", ahd_name(ahd), channel_strings[i], termstat_strings[term_stat]); break; } } if (warn_user) { printf("%s: WARNING. Termination is not configured correctly.\n" "%s: WARNING. SCSI bus operations may FAIL.\n", ahd_name(ahd), ahd_name(ahd)); } init_done: ahd_restart(ahd); - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, + aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, ahd_stat_timer, ahd); return (0); } /* * (Re)initialize chip state after a chip reset. */ static void ahd_chip_init(struct ahd_softc *ahd) { uint32_t busaddr; u_int sxfrctl1; u_int scsiseq_template; u_int wait; u_int i; u_int target; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Take the LED out of diagnostic mode */ ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); /* * Return HS_MAILBOX to its default value. */ ahd->hs_mailbox = 0; ahd_outb(ahd, HS_MAILBOX, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ ahd_outb(ahd, IOWNID, ahd->our_id); ahd_outb(ahd, TOWNID, ahd->our_id); sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; if ((ahd->bugs & AHD_LONG_SETIMO_BUG) && (ahd->seltime != STIMESEL_MIN)) { /* * The selection timer duration is twice as long * as it should be. Halve it by adding "1" to * the user-specified setting.
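 *
 * A worked example, assuming each STIMESEL step halves the timeout: a
 * setting that nominally selects 256ms yields 512ms on afflicted
 * chips, so STIMESEL_BUG_ADJ advances the field to the nominal 128ms
 * encoding and the intended 256ms results.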
*/ sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; } else { sxfrctl1 |= ahd->seltime; } ahd_outb(ahd, SXFRCTL0, DFON); ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); /* * Now that termination is set, wait for up * to 500ms for our transceivers to settle. If * the adapter does not have a cable attached, * the transceivers may never settle, so don't * complain if we fail here. */ for (wait = 10000; (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) - ahd_delay(100); + aic_delay(100); /* Clear any false bus resets due to the transceivers settling */ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); ahd_outb(ahd, CLRINT, CLRSCSIINT); /* Initialize mode specific S/G state. */ for (i = 0; i < 2; i++) { ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_outb(ahd, CLRSEQINTSRC, 0xFF); ahd_outb(ahd, SEQIMODE, ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); } ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); } else { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); } ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) /* * Do not issue a target abort when a split completion * error occurs. Let our PCIX interrupt handler deal * with it instead. H2A4 Razor #625 */ ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); /* * Tweak IOCELL settings. */ if ((ahd->flags & AHD_HP_BOARD) != 0) { for (i = 0; i < NUMDSPS; i++) { ahd_outb(ahd, DSPSELECT, i); ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), WRTBIASCTL_HP_DEFAULT); #endif } ahd_setup_iocell_workaround(ahd); /* * Enable LQI Manager interrupts. */ ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); /* * An interrupt from LQOBUSFREE is made redundant by the * BUSFREE interrupt. We choose to have the sequencer catch * LQOPHCHGINPKT errors manually for the command phase at the * start of a packetized selection case. ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT); */ ahd_outb(ahd, LQOMODE1, 0); /* * Setup sequencer interrupt handlers. */ ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); /* * Setup SCB Offset registers. 
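 * These registers tell the sequencer where, within struct
 * hardware_scb, to find the lun, cdb length, task attribute, and so
 * on, so the firmware need not hard-code structure offsets.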
*/ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, pkt_long_lun)); } else { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); } ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, shared_data.idata.cdb)); ahd_outb(ahd, QNEXTPTR, offsetof(struct hardware_scb, next_hscb_busaddr)); ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); } else { ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); } ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); ahd_outb(ahd, MAXCMD, 0xFF); ahd_outb(ahd, SCBAUTOPTR, AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); /* We haven't been enabled for target mode yet. */ ahd_outb(ahd, MULTARGID, 0); ahd_outb(ahd, MULTARGID + 1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* Initialize the negotiation table. */ if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { /* * Clear the spare bytes in the neg table to avoid * spurious parity errors. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { ahd_outb(ahd, NEGOADDR, target); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) ahd_outb(ahd, ANNEXDAT, 0); } } for (target = 0; target < AHD_NUM_TARGETS; target++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, target, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); } ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); ahd_outb(ahd, CLRINT, CLRSCSIINT); #if NEEDS_MORE_TESTING /* * Always enable abort on incoming L_Qs if this feature is * supported. We use this to catch invalid SCB references. */ if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) ahd_outb(ahd, LQCTL1, ABORTPENDING); else #endif ahd_outb(ahd, LQCTL1, 0); /* All of our queues are empty */ ahd->qoutfifonext = 0; - ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID_LE; - ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID >> 8); + ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; + ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); for (i = 0; i < AHD_QOUT_SIZE; i++) - ahd->qoutfifo[i] = 0; + ahd->qoutfifo[i].valid_tag = 0; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); ahd->qinfifonext = 0; for (i = 0; i < AHD_QIN_SIZE; i++) ahd->qinfifo[i] = SCB_LIST_NULL; if ((ahd->features & AHD_TARGETMODE) != 0) { /* All target command blocks start out invalid. */ for (i = 0; i < AHD_TMODE_CMDS; i++) ahd->targetcmds[i].cmd_valid = 0; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); ahd->tqinfifonext = 1; ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); } /* Initialize Scratch Ram. */ ahd_outb(ahd, SEQ_FLAGS, 0); ahd_outb(ahd, SEQ_FLAGS2, 0); /* We don't have any waiting selections */ ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL); ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL); for (i = 0; i < AHD_NUM_TARGETS; i++) ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL); /* * Nobody is waiting to be DMAed into the QOUTFIFO. 
*/ ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); + ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); /* * The Freeze Count is 0. */ + ahd->qfreeze_cnt = 0; ahd_outw(ahd, QFREEZE_COUNT, 0); + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0); /* * Tell the sequencer where it can find our arrays in memory. */ - busaddr = ahd->shared_data_busaddr; - ahd_outb(ahd, SHARED_DATA_ADDR, busaddr & 0xFF); - ahd_outb(ahd, SHARED_DATA_ADDR + 1, (busaddr >> 8) & 0xFF); - ahd_outb(ahd, SHARED_DATA_ADDR + 2, (busaddr >> 16) & 0xFF); - ahd_outb(ahd, SHARED_DATA_ADDR + 3, (busaddr >> 24) & 0xFF); - ahd_outb(ahd, QOUTFIFO_NEXT_ADDR, busaddr & 0xFF); - ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 1, (busaddr >> 8) & 0xFF); - ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 2, (busaddr >> 16) & 0xFF); - ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 3, (busaddr >> 24) & 0xFF); + busaddr = ahd->shared_data_map.busaddr; + ahd_outl(ahd, SHARED_DATA_ADDR, busaddr); + ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr); /* * Setup the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENAUTOATNP; if ((ahd->flags & AHD_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template); /* There are no busy SCBs yet. */ for (target = 0; target < AHD_NUM_TARGETS; target++) { int lun; for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++) ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun)); } /* * Initialize the group code to command length table. * Vendor Unique codes are set to 0 so we only capture * the first byte of the cdb. These can be overridden * when target mode is enabled. */ ahd_outb(ahd, CMDSIZE_TABLE, 5); ahd_outb(ahd, CMDSIZE_TABLE + 1, 9); ahd_outb(ahd, CMDSIZE_TABLE + 2, 9); ahd_outb(ahd, CMDSIZE_TABLE + 3, 0); ahd_outb(ahd, CMDSIZE_TABLE + 4, 15); ahd_outb(ahd, CMDSIZE_TABLE + 5, 11); ahd_outb(ahd, CMDSIZE_TABLE + 6, 0); ahd_outb(ahd, CMDSIZE_TABLE + 7, 0); /* Tell the sequencer of our initial queue positions */ ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512); ahd->qinfifonext = 0; ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_set_hescb_qoff(ahd, 0); ahd_set_snscb_qoff(ahd, 0); ahd_set_sescb_qoff(ahd, 0); ahd_set_sdscb_qoff(ahd, 0); /* * Tell the sequencer which SCB will be the next one it receives. */ - busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); /* * Default to coalescing disabled. */ ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0); ahd_outw(ahd, CMDS_PENDING, 0); ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer, ahd->int_coalescing_maxcmds, ahd->int_coalescing_mincmds); ahd_enable_coalescing(ahd, FALSE); ahd_loadseq(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } /* * Setup default device and controller settings. * This should only be called if our probe has * determined that no configuration data is available. 
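 *
 * The defaults advertise the full feature set (wide, DT, packetized,
 * QAS) as the per-target user goal, while the current and goal
 * negotiation state starts out async/narrow/untagged; real rates are
 * negotiated with each device later.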
*/ int ahd_default_config(struct ahd_softc *ahd) { int targ; ahd->our_id = 7; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printf("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); return (ENOMEM); } for (targ = 0; targ < AHD_NUM_TARGETS; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable |= target_mask; tstate->discenable |= target_mask; ahd->user_tagenable |= target_mask; #ifdef AHD_FORCE_160 tinfo->user.period = AHD_SYNCRATE_DT; #else tinfo->user.period = AHD_SYNCRATE_160; #endif tinfo->user.offset = MAX_OFFSET; tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ | MSG_EXT_PPR_QAS_REQ | MSG_EXT_PPR_DT_REQ; if ((ahd->features & AHD_RTI) != 0) tinfo->user.ppr_options |= MSG_EXT_PPR_RTI; tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); tstate->tagenable &= ~target_mask; ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } return (0); } /* * Parse device configuration information. */ int ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc) { int targ; int max_targ; max_targ = sc->max_targets & CFMAXTARG; ahd->our_id = sc->brtime_id & CFSCSIID; /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) { printf("%s: unable to allocate ahd_tmode_tstate. " "Failing attach\n", ahd_name(ahd)); return (ENOMEM); } for (targ = 0; targ < max_targ; targ++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_transinfo *user_tinfo; struct ahd_tmode_tstate *tstate; uint16_t target_mask; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, targ, &tstate); user_tinfo = &tinfo->user; /* * We support SPC2 and SPI4. */ tinfo->user.protocol_version = 4; tinfo->user.transport_version = 4; target_mask = 0x01 << targ; ahd->user_discenable &= ~target_mask; tstate->discenable &= ~target_mask; ahd->user_tagenable &= ~target_mask; if (sc->device_flags[targ] & CFDISC) { tstate->discenable |= target_mask; ahd->user_discenable |= target_mask; ahd->user_tagenable |= target_mask; } else { /* * Cannot be packetized without disconnection. 
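 * (Packetized transfers require the target to disconnect between
 * information units, so CFPACKETIZED is cleared whenever CFDISC is
 * not set.)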
*/ sc->device_flags[targ] &= ~CFPACKETIZED; } user_tinfo->ppr_options = 0; user_tinfo->period = (sc->device_flags[targ] & CFXFER); if (user_tinfo->period < CFXFER_ASYNC) { if (user_tinfo->period <= AHD_PERIOD_10MHz) user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ; user_tinfo->offset = MAX_OFFSET; } else { user_tinfo->offset = 0; user_tinfo->period = AHD_ASYNC_XFER_PERIOD; } #ifdef AHD_FORCE_160 if (user_tinfo->period <= AHD_SYNCRATE_160) user_tinfo->period = AHD_SYNCRATE_DT; #endif if ((sc->device_flags[targ] & CFPACKETIZED) != 0) { user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM | MSG_EXT_PPR_WR_FLOW | MSG_EXT_PPR_HOLD_MCS | MSG_EXT_PPR_IU_REQ; if ((ahd->features & AHD_RTI) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_RTI; } if ((sc->device_flags[targ] & CFQAS) != 0) user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ; if ((sc->device_flags[targ] & CFWIDEB) != 0) user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT; else user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width, user_tinfo->period, user_tinfo->offset, user_tinfo->ppr_options); #endif /* * Start out Async/Narrow/Untagged and with * conservative protocol support. */ tstate->tagenable &= ~target_mask; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; ahd_compile_devinfo(&devinfo, ahd->our_id, targ, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); } ahd->flags &= ~AHD_SPCHK_ENB_A; if (sc->bios_control & CFSPARITY) ahd->flags |= AHD_SPCHK_ENB_A; ahd->flags &= ~AHD_RESET_BUS_A; if (sc->bios_control & CFRESETB) ahd->flags |= AHD_RESET_BUS_A; ahd->flags &= ~AHD_EXTENDED_TRANS_A; if (sc->bios_control & CFEXTEND) ahd->flags |= AHD_EXTENDED_TRANS_A; ahd->flags &= ~AHD_BIOS_ENABLED; if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED) ahd->flags |= AHD_BIOS_ENABLED; ahd->flags &= ~AHD_STPWLEVEL_A; if ((sc->adapter_control & CFSTPWLEVEL) != 0) ahd->flags |= AHD_STPWLEVEL_A; return (0); } /* * Parse device configuration information. 
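 * Here the source is the VPD block; note that ahd_verify_vpd_cksum()
 * returns nonzero for a valid checksum, hence the inverted error test
 * below.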
*/ int ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd) { int error; error = ahd_verify_vpd_cksum(vpd); if (error == 0) return (EINVAL); if ((vpd->bios_flags & VPDBOOTHOST) != 0) ahd->flags |= AHD_BOOT_CHANNEL; return (0); } void ahd_intr_enable(struct ahd_softc *ahd, int enable) { u_int hcntrl; hcntrl = ahd_inb(ahd, HCNTRL); hcntrl &= ~INTEN; ahd->pause &= ~INTEN; ahd->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahd->pause |= INTEN; ahd->unpause |= INTEN; } ahd_outb(ahd, HCNTRL, hcntrl); } void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds) { if (timer > AHD_TIMER_MAX_US) timer = AHD_TIMER_MAX_US; ahd->int_coalescing_timer = timer; if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX) maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX; if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX) mincmds = AHD_INT_COALESCING_MINCMDS_MAX; ahd->int_coalescing_maxcmds = maxcmds; ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK); ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds); ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds); } void ahd_enable_coalescing(struct ahd_softc *ahd, int enable) { ahd->hs_mailbox &= ~ENINT_COALESCE; if (enable) ahd->hs_mailbox |= ENINT_COALESCE; ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox); ahd_flush_device_writes(ahd); ahd_run_qoutfifo(ahd); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahd_pause_and_flushwork(struct ahd_softc *ahd) { u_int intstat; u_int maxloops; - u_int qfreeze_cnt; maxloops = 1000; ahd->flags |= AHD_ALL_INTERRUPTS; ahd_pause(ahd); /* - * Increment the QFreeze Count so that the sequencer - * will not start new selections. We do this only + * Freeze the outgoing selections. We do this only * until we are safely paused without further selections * pending. */ - ahd_outw(ahd, QFREEZE_COUNT, ahd_inw(ahd, QFREEZE_COUNT) + 1); + ahd->qfreeze_cnt--; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN); do { struct scb *waiting_scb; ahd_unpause(ahd); + /* + * Give the sequencer some time to service + * any active selections. + */ + aic_delay(200); + ahd_intr(ahd); ahd_pause(ahd); ahd_clear_critical_section(ahd); intstat = ahd_inb(ahd, INTSTAT); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); if ((ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0) ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); /* * In the non-packetized case, the sequencer (for Rev A), * relies on ENSELO remaining set after SELDO. The hardware * auto-clears ENSELO in the packetized case. 
*/ waiting_scb = ahd_lookup_scb(ahd, ahd_inw(ahd, WAITING_TID_HEAD)); if (waiting_scb != NULL && (waiting_scb->flags & SCB_PACKETIZED) == 0 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0) ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) | ENSELO); } while (--maxloops && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0 || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printf("Infinite interrupt loop, INTSTAT = %x", ahd_inb(ahd, INTSTAT)); } - qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT); - if (qfreeze_cnt == 0) { - printf("%s: ahd_pause_and_flushwork with 0 qfreeze count!\n", - ahd_name(ahd)); - } else { - qfreeze_cnt--; - } - ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt); - if (qfreeze_cnt == 0) - ahd_outb(ahd, SEQ_FLAGS2, - ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN); + ahd->qfreeze_cnt++; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); ahd_flush_qoutfifo(ahd); ahd_platform_flushwork(ahd); ahd->flags &= ~AHD_ALL_INTERRUPTS; } int ahd_suspend(struct ahd_softc *ahd) { ahd_pause_and_flushwork(ahd); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ahd_unpause(ahd); return (EBUSY); } ahd_shutdown(ahd); return (0); } int ahd_resume(struct ahd_softc *ahd) { ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, TRUE); ahd_restart(ahd); return (0); } /************************** Busy Target Table *********************************/ /* * Set SCBPTR to the SCB that contains the busy * table entry for TCL. Return the offset into * the SCB that contains the entry for TCL. * saved_scbid is dereferenced and set to the * scbid that should be restored once manipulation * of the TCL entry is complete. */ static __inline u_int ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl) { /* * Index to the SCB that contains the busy entry. */ AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); *saved_scbid = ahd_get_scbptr(ahd); ahd_set_scbptr(ahd, TCL_LUN(tcl) | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4)); /* * And now calculate the SCB offset to the entry. * Each entry is 2 bytes wide, hence the * multiplication by 2. */ return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS); } /* * Return the untagged transaction id for a given target/channel lun.
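 *
 * A hedged usage sketch (assuming idle untagged slots hold
 * SCB_LIST_NULL, as ahd_unbusy_tcl() arranges):
 *
 *	scbid = ahd_find_busy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
 *	if (!SCBID_IS_NULL(scbid))
 *		;	/* an untagged command is already outstanding */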
*/ u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl) { u_int scbid; u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); scbid = ahd_inw_scbram(ahd, scb_offset); ahd_set_scbptr(ahd, saved_scbptr); return (scbid); } void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid) { u_int scb_offset; u_int saved_scbptr; scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl); ahd_outw(ahd, scb_offset, scbid); ahd_set_scbptr(ahd, saved_scbptr); } /************************** SCB and SCB queue management **********************/ int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahd, scb); char chan = SCB_GET_CHANNEL(ahd, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #if AHD_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHD_TARGET_MODE */ match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL)); #endif /* AHD_TARGET_MODE */ } return match; } void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahd, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahd, scb); ahd_search_qinfifo(ahd, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahd_platform_freeze_devq(ahd, scb); } void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb) { struct scb *prev_scb; ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); prev_scb = NULL; if (ahd_qinfifo_count(ahd) != 0) { u_int prev_tag; u_int prev_pos; prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1); prev_tag = ahd->qinfifo[prev_pos]; prev_scb = ahd_lookup_scb(ahd, prev_tag); } ahd_qinfifo_requeue(ahd, prev_scb, scb); ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); ahd_restore_modes(ahd, saved_modes); } static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { uint32_t busaddr; - busaddr = ahd_le32toh(scb->hscb->hscb_busaddr); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + busaddr = aic_le32toh(scb->hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); } else { prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; ahd_sync_scb(ahd, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr; ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahd_qinfifo_count(struct ahd_softc *ahd) { u_int qinpos; u_int wrap_qinpos; u_int wrap_qinfifonext; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); qinpos = ahd_get_snscb_qoff(ahd); wrap_qinpos = 
AHD_QIN_WRAP(qinpos); wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext); if (wrap_qinfifonext >= wrap_qinpos) return (wrap_qinfifonext - wrap_qinpos); else return (wrap_qinfifonext + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos); } void ahd_reset_cmds_pending(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int pending_cmds; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Don't count any commands as outstanding that the * sequencer has already marked for completion. */ ahd_flush_qoutfifo(ahd); pending_cmds = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { pending_cmds++; } ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd)); ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_UPDATE_PEND_CMDS; } int ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action) { struct scb *scb; struct scb *prev_scb; ahd_mode_state saved_modes; u_int qinstart; u_int qinpos; u_int qintail; u_int tid_next; u_int tid_prev; u_int scbid; u_int savedscbptr; uint32_t busaddr; int found; int targets; /* Must be in CCHAN mode */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); /* * Halt any pending SCB DMA. The sequencer will reinitiate * this dma if the qinfifo is not empty once we unpause. */ if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR)) == (CCARREN|CCSCBEN|CCSCBDIR)) { ahd_outb(ahd, CCSCBCTL, ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN)); while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0) ; } /* Determine sequencer's position in the qinfifo. */ qintail = AHD_QIN_WRAP(ahd->qinfifonext); qinstart = ahd_get_snscb_qoff(ahd); qinpos = AHD_QIN_WRAP(qinstart); found = 0; prev_scb = NULL; if (action == SEARCH_PRINT) { printf("qinstart = %d qinfifonext = %d\nQINFIFO:", qinstart, ahd->qinfifonext); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahd->qinfifonext = qinstart; - busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF); - ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF); + busaddr = aic_le32toh(ahd->next_queued_hscb->hscb_busaddr); + ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr); while (qinpos != qintail) { scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]); if (scb == NULL) { printf("qinpos = %d, SCB index = %d\n", qinpos, ahd->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. 
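 * SEARCH_COMPLETE finishes the command with the caller's status,
 * SEARCH_REMOVE drops it from the queue, SEARCH_PRINT logs its tag,
 * and SEARCH_COUNT only tallies it; the latter two requeue the SCB
 * unchanged.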
*/ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; - ostat = ahd_get_transaction_status(scb); + ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) - ahd_set_transaction_status(scb, + aic_set_transaction_status(scb, status); - cstat = ahd_get_transaction_status(scb); + cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) - ahd_freeze_scb(scb); + aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in qinfifo\n"); ahd_done(ahd, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: break; case SEARCH_PRINT: printf(" 0x%x", ahd->qinfifo[qinpos]); /* FALLTHROUGH */ case SEARCH_COUNT: ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; break; } } else { ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; } qinpos = AHD_QIN_WRAP(qinpos+1); } ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); if (action == SEARCH_PRINT) printf("\nWAITING_TID_QUEUES:\n"); /* * Search waiting for selection lists. We traverse the * list of "their ids" waiting for selection and, if * appropriate, traverse the SCBs of each "their id" * looking for matches. */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); savedscbptr = ahd_get_scbptr(ahd); tid_next = ahd_inw(ahd, WAITING_TID_HEAD); tid_prev = SCB_LIST_NULL; targets = 0; for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) { u_int tid_head; /* * We limit based on the number of SCBs since * MK_MESSAGE SCBs are not in the per-tid lists. */ targets++; if (targets > AHD_SCB_MAX) { panic("TID LIST LOOP"); } if (scbid >= ahd->scb_data.numscbs) { printf("%s: Waiting TID List inconsistency. " "SCB index == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: SCB = 0x%x Not Active!\n", ahd_name(ahd), scbid); panic("Waiting TID List traversal\n"); } ahd_set_scbptr(ahd, scbid); tid_next = ahd_inw_scbram(ahd, SCB_NEXT2); if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN) == 0) { tid_prev = scbid; continue; } /* * We found a list of scbs that needs to be searched. */ if (action == SEARCH_PRINT) printf(" %d ( ", SCB_GET_TARGET(ahd, scb)); tid_head = scbid; found += ahd_search_scb_list(ahd, target, channel, lun, tag, role, status, action, &tid_head, SCB_GET_TARGET(ahd, scb)); if (tid_head != scbid) ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next); if (!SCBID_IS_NULL(tid_head)) tid_prev = tid_head; if (action == SEARCH_PRINT) printf(")\n"); } ahd_set_scbptr(ahd, savedscbptr); ahd_restore_modes(ahd, saved_modes); return (found); } static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int tid) { struct scb *scb; u_int scbid; u_int next; u_int prev; int found; - AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); found = 0; prev = SCB_LIST_NULL; next = *list_head; for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) { if (scbid >= ahd->scb_data.numscbs) { printf("%s:SCB List inconsistency. 
" "SCB == 0x%x, yet numscbs == 0x%x.", ahd_name(ahd), scbid, ahd->scb_data.numscbs); ahd_dump_card_state(ahd); panic("for safety"); } scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printf("%s: SCB = %d Not Active!\n", ahd_name(ahd), scbid); panic("Waiting List traversal\n"); } ahd_set_scbptr(ahd, scbid); next = ahd_inw_scbram(ahd, SCB_NEXT); if (ahd_match_scb(ahd, scb, target, channel, lun, SCB_LIST_NULL, role) == 0) { prev = scbid; continue; } found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; - ostat = ahd_get_transaction_status(scb); + ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) - ahd_set_transaction_status(scb, status); - cstat = ahd_get_transaction_status(scb); + aic_set_transaction_status(scb, status); + cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) - ahd_freeze_scb(scb); + aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in Waiting List\n"); ahd_done(ahd, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); if (prev == SCB_LIST_NULL) *list_head = next; break; case SEARCH_PRINT: printf("0x%x ", scbid); case SEARCH_COUNT: prev = scbid; break; } if (found > AHD_SCB_MAX) panic("SCB LIST LOOP"); } if (action == SEARCH_COMPLETE || action == SEARCH_REMOVE) ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found); return (found); } static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next) { - AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (SCBID_IS_NULL(tid_cur)) { /* Bypass current TID list */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_next); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_next); } if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_prev); } else { /* Stitch through tid_cur */ if (SCBID_IS_NULL(tid_prev)) { ahd_outw(ahd, WAITING_TID_HEAD, tid_cur); } else { ahd_set_scbptr(ahd, tid_prev); ahd_outw(ahd, SCB_NEXT2, tid_cur); } ahd_set_scbptr(ahd, tid_cur); ahd_outw(ahd, SCB_NEXT2, tid_next); if (SCBID_IS_NULL(tid_next)) ahd_outw(ahd, WAITING_TID_TAIL, tid_cur); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid) { u_int tail_offset; - AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); + AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (!SCBID_IS_NULL(prev)) { ahd_set_scbptr(ahd, prev); ahd_outw(ahd, SCB_NEXT, next); } /* * SCBs that had MK_MESSAGE set in them will not * be queued to the per-target lists, so don't * blindly clear the tail pointer. */ tail_offset = WAITING_SCB_TAILS + (2 * tid); if (SCBID_IS_NULL(next) && ahd_inw(ahd, tail_offset) == scbid) ahd_outw(ahd, tail_offset, prev); ahd_add_scb_to_free_list(ahd, scbid); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid) { /* XXX Need some other mechanism to designate "free". */ /* * Invalidate the tag so that our abort * routines don't think it's active. 
ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL); */ } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int i, j; u_int maxtarget; u_int minlun; u_int maxlun; int found; ahd_mode_state saved_modes; /* restore this when we're done */ saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { minlun = 0; maxlun = AHD_NUM_LUNS_NONPKT; } else if (lun >= AHD_NUM_LUNS_NONPKT) { minlun = maxlun = 0; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL_RAW(i, 'A', j); scbid = ahd_find_busy_tcl(ahd, tcl); scbp = ahd_lookup_scb(ahd, scbid); if (scbp == NULL || ahd_match_scb(ahd, scbp, target, channel, lun, tag, role) == 0) continue; ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j)); } } } /* * Don't abort commands that have already completed, * but haven't quite made it up to the host yet. */ ahd_flush_qoutfifo(ahd); /* * Go through the pending CCB list and look for * commands for this target that are still active. * These are other tagged commands that were * disconnected when the reset occurred. */ scbp_next = LIST_FIRST(&ahd->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) { cam_status ostat; - ostat = ahd_get_transaction_status(scbp); + ostat = aic_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) - ahd_set_transaction_status(scbp, status); - if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP) - ahd_freeze_scb(scbp); + aic_set_transaction_status(scbp, status); + if (aic_get_transaction_status(scbp) != CAM_REQ_CMP) + aic_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printf("Inactive SCB on pending list\n"); ahd_done(ahd, scbp); found++; } } ahd_restore_modes(ahd, saved_modes); ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status); ahd->flags |= AHD_UPDATE_PEND_CMDS; return found; } static void ahd_reset_current_bus(struct ahd_softc *ahd) { uint8_t scsiseq; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST); scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO); ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO); ahd_flush_device_writes(ahd); - ahd_delay(AHD_BUSRESET_DELAY); + aic_delay(AHD_BUSRESET_DELAY); /* Turn off the bus reset */ ahd_outb(ahd, SCSISEQ0, scsiseq); ahd_flush_device_writes(ahd); - ahd_delay(AHD_BUSRESET_DELAY); + aic_delay(AHD_BUSRESET_DELAY); if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) { /* * 2A Razor #474 * Certain chip state is not cleared for * SCSI bus resets that we initiate, so * we must reset the chip. 
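The wildcard handling at the top of ahd_abort_scbs() reduces to a tiny helper: a CAM wildcard widens the scan to the full range, anything else pins it to one value (the driver additionally empties the lun range when a specific lun falls outside the non-packetized table). A sketch, with WILDCARD and scan_range standing in for the CAM constants and the driver's inline logic:

#define WILDCARD (-1)           /* stand-in for the CAM wildcard values */

struct range {
        unsigned min;           /* first index to scan */
        unsigned max;           /* one past the last index */
};

/* Turn a possibly-wildcarded id into half-open iteration bounds. */
static struct range
scan_range(int id, unsigned limit)
{
        struct range r;

        if (id == WILDCARD) {
                r.min = 0;
                r.max = limit;
        } else {
                r.min = (unsigned)id;
                r.max = (unsigned)id + 1;
        }
        return (r);
}

/*
 * Usage mirroring the nested target/lun loops in ahd_abort_scbs():
 *
 *      struct range t = scan_range(target, 16);
 *      struct range l = scan_range(lun, AHD_NUM_LUNS_NONPKT);
 *      for (i = t.min; i < t.max; i++)
 *              for (j = l.min; j < l.max; j++)
 *                      ... check the busy TCL table ...
 */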
*/ ahd_reset(ahd, /*reinit*/TRUE); ahd_intr_enable(ahd, /*enable*/TRUE); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); } ahd_clear_intstat(ahd); } int ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) { struct ahd_devinfo devinfo; u_int initiator; u_int target; u_int max_scsiid; int found; u_int fifo; u_int next_fifo; ahd->pending_device = NULL; ahd_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahd_pause(ahd); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); #if AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_run_tqinfifo(ahd, /*paused*/TRUE); } #endif ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Disable selections so no automatic hardware * functions will modify chip state. */ ahd_outb(ahd, SCSISEQ0, 0); ahd_outb(ahd, SCSISEQ1, 0); /* * Safely shut down our DMA engines. Always start with * the FIFO that is not currently active (if any are * actively connected). */ next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO; if (next_fifo > CURRFIFO_1) /* If disconneced, arbitrarily start with FIFO1. */ next_fifo = fifo = 0; do { next_fifo ^= CURRFIFO_1; ahd_set_modes(ahd, next_fifo, next_fifo); ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN)); while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) - ahd_delay(10); + aic_delay(10); /* * Set CURRFIFO to the now inactive channel. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, DFFSTAT, next_fifo); } while (next_fifo != fifo); /* * Reset the bus if we are initiating this reset */ ahd_clear_msg_state(ahd); ahd_outb(ahd, SIMODE1, - ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST|ENBUSFREE)); + ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST)); if (initiate_reset) ahd_reset_current_bus(ahd); ahd_clear_intstat(ahd); /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); /* * Cleanup anything left in the FIFOs. */ ahd_clear_fifo(ahd, 0); ahd_clear_fifo(ahd, 1); /* * Revert to async/narrow transfers until we renegotiate. */ max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; for (target = 0; target <= max_scsiid; target++) { if (ahd->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahd_devinfo devinfo; ahd_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, 'A', ROLE_UNKNOWN); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); } } #ifdef AHD_TARGET_MODE max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7; /* * Send an immediate notify ccb to all target more peripheral * drivers affected by this action. 
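The FIFO shutdown loop in ahd_reset_channel() uses a small trick worth isolating: with exactly two FIFOs, XOR-ing with CURRFIFO_1 flips between them, and starting from the currently inactive one guarantees the active FIFO is drained last. Stripped of the register accesses, and with illustrative names, the control flow is:

#include <stdio.h>

/* Illustrative stand-ins for the two data FIFO identifiers. */
#define FIFO_0 0u
#define FIFO_1 1u

static void
drain_fifo(unsigned which)
{
        /* The driver clears SCSIEN/HDMAEN here and polls HDMAENACK. */
        printf("draining FIFO%u\n", which);
}

static void
shutdown_both_fifos(unsigned active_fifo)
{
        unsigned fifo = active_fifo;
        unsigned next_fifo = fifo;

        do {
                next_fifo ^= FIFO_1;    /* flip 0 <-> 1 */
                drain_fifo(next_fifo);
        } while (next_fifo != fifo);
}

int
main(void)
{
        shutdown_both_fifos(FIFO_0);    /* drains FIFO1, then FIFO0 */
        return (0);
}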
*/ for (target = 0; target <= max_scsiid; target++) { struct ahd_tmode_tstate* tstate; u_int lun; tstate = ahd->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHD_NUM_LUNS; lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); ahd_restart(ahd); /* * Freeze the SIMQ until our poller can determine that * the bus reset has really gone away. We set the initial * timer to 0 to have the check performed as soon as possible * from the timer context. */ if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) { ahd->flags |= AHD_RESET_POLL_ACTIVE; - ahd_freeze_simq(ahd); - ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd); + aic_freeze_simq(ahd); + aic_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd); } return (found); } #define AHD_RESET_POLL_US 1000 static void ahd_reset_poll(void *arg) { struct ahd_softc *ahd; u_int scsiseq1; u_long l; u_long s; ahd_list_lock(&l); ahd = ahd_find_softc((struct ahd_softc *)arg); if (ahd == NULL) { printf("ahd_reset_poll: Instance %p no longer exists\n", arg); ahd_list_unlock(&l); return; } ahd_lock(ahd, &s); ahd_pause(ahd); ahd_update_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) { - ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US, + aic_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US, ahd_reset_poll, ahd); ahd_unpause(ahd); ahd_unlock(ahd, &s); ahd_list_unlock(&l); return; } /* Reset is now low. Complete chip reinitialization. */ ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST); scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP)); ahd_unpause(ahd); ahd->flags &= ~AHD_RESET_POLL_ACTIVE; ahd_unlock(ahd, &s); - ahd_release_simq(ahd); + aic_release_simq(ahd); ahd_list_unlock(&l); } /**************************** Statistics Processing ***************************/ static void ahd_stat_timer(void *arg) { struct ahd_softc *ahd; u_long l; u_long s; int enint_coal; ahd_list_lock(&l); ahd = ahd_find_softc((struct ahd_softc *)arg); if (ahd == NULL) { printf("ahd_stat_timer: Instance %p no longer exists\n", arg); ahd_list_unlock(&l); return; } ahd_lock(ahd, &s); enint_coal = ahd->hs_mailbox & ENINT_COALESCE; if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold) enint_coal |= ENINT_COALESCE; else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold) enint_coal &= ~ENINT_COALESCE; if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) { ahd_enable_coalescing(ahd, enint_coal); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0) printf("%s: Interrupt coalescing " "now %sabled. Cmds %d\n", ahd_name(ahd), (enint_coal & ENINT_COALESCE) ? 
"en" : "dis", ahd->cmdcmplt_total); #endif } ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, + aic_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, ahd_stat_timer, ahd); ahd_unlock(ahd, &s); ahd_list_unlock(&l); } /****************************** Status Processing *****************************/ void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb) { if (scb->hscb->shared_data.istatus.scsi_status != 0) { ahd_handle_scsi_status(ahd, scb); } else { ahd_calc_residual(ahd, scb); ahd_done(ahd, scb); } } void ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) { - struct hardware_scb *hscb; - u_int qfreeze_cnt; + struct hardware_scb *hscb; + int paused; /* * The sequencer freezes its select-out queue * anytime a SCSI status error occurs. We must - * handle the error and decrement the QFREEZE count - * to allow the sequencer to continue. + * handle the error and increment our qfreeze count + * to allow the sequencer to continue. We don't + * bother clearing critical sections here since all + * operations are on data structures that the sequencer + * is not touching once the queue is frozen. */ hscb = scb->hscb; - /* Freeze the queue until the client sees the error. */ - ahd_freeze_devq(ahd, scb); - ahd_freeze_scb(scb); - qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT); - if (qfreeze_cnt == 0) { - printf("%s: Bad status with 0 qfreeze count!\n", ahd_name(ahd)); + if (ahd_is_paused(ahd)) { + paused = 1; } else { - qfreeze_cnt--; - ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt); + paused = 0; + ahd_pause(ahd); } - if (qfreeze_cnt == 0) - ahd_outb(ahd, SEQ_FLAGS2, - ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN); + /* Freeze the queue until the client sees the error. */ + ahd_freeze_devq(ahd, scb); + aic_freeze_scb(scb); + ahd->qfreeze_cnt++; + ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt); + + if (paused == 0) + ahd_unpause(ahd); + /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and perform * a normal command completion. 
*/ scb->flags &= ~SCB_SENSE; - ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); ahd_done(ahd, scb); return; } - ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); - ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); + aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); + aic_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status); switch (hscb->shared_data.istatus.scsi_status) { case STATUS_PKT_SENSE: { struct scsi_status_iu_header *siu; ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD); siu = (struct scsi_status_iu_header *)scb->sense_data; - ahd_set_scsi_status(scb, siu->status); + aic_set_scsi_status(scb, siu->status); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) { ahd_print_path(ahd, scb); printf("SCB 0x%x Received PKT Status of 0x%x\n", SCB_GET_TAG(scb), siu->status); printf("\tflags = 0x%x, sense len = 0x%x, " "pktfail = 0x%x\n", siu->flags, scsi_4btoul(siu->sense_length), scsi_4btoul(siu->pkt_failures_length)); } #endif if ((siu->flags & SIU_RSPVALID) != 0) { ahd_print_path(ahd, scb); if (scsi_4btoul(siu->pkt_failures_length) < 4) { printf("Unable to parse pkt_failures\n"); } else { switch (SIU_PKTFAIL_CODE(siu)) { case SIU_PFC_NONE: printf("No packet failure found\n"); break; case SIU_PFC_CIU_FIELDS_INVALID: printf("Invalid Command IU Field\n"); break; case SIU_PFC_TMF_NOT_SUPPORTED: printf("TMF not supportd\n"); break; case SIU_PFC_TMF_FAILED: printf("TMF failed\n"); break; case SIU_PFC_INVALID_TYPE_CODE: printf("Invalid L_Q Type code\n"); break; case SIU_PFC_ILLEGAL_REQUEST: printf("Illegal request\n"); default: break; } } if (siu->status == SCSI_STATUS_OK) - ahd_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_REQ_CMP_ERR); } if ((siu->flags & SIU_SNSVALID) != 0) { scb->flags |= SCB_PKT_SENSE; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SENSE) != 0) printf("Sense data available\n"); #endif } ahd_done(ahd, scb); break; } case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: { struct ahd_devinfo devinfo; struct ahd_dma_seg *sg; struct scsi_sense *sc; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printf("SCB %d: requests Check Status\n", SCB_GET_TAG(scb)); } #endif - if (ahd_perform_autosense(scb) == 0) + if (aic_perform_autosense(scb) == 0) break; ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb), SCB_GET_TARGET(ahd, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), ROLE_INITIATOR); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)hscb->shared_data.idata.cdb; /* * Save off the residual if there is one. */ ahd_update_residual(ahd, scb); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_SENSE) { ahd_print_path(ahd, scb); printf("Sending Sense\n"); } #endif scb->sg_count = 0; sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb), - ahd_get_sense_bufsize(ahd, scb), + aic_get_sense_bufsize(ahd, scb), /*last*/TRUE); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; - sc->length = ahd_get_sense_bufsize(ahd, scb); + sc->length = aic_get_sense_bufsize(ahd, scb); sc->control = 0; /* * We can't allow the target to disconnect. 
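The status IU parsing goes through scsi_4btoul() because sense_length and pkt_failures_length are stored as 4-byte big-endian fields in the IU rather than host-order integers. As a generic sketch (this is not CAM's implementation), the decode is a plain shift-and-or that works regardless of host endianness:

#include <stdint.h>

/* Decode a 4-byte big-endian field, independent of host byte order. */
static uint32_t
be32_field(const uint8_t b[4])
{
        return ((uint32_t)b[0] << 24) |
               ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |
                (uint32_t)b[3];
}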
* This will be an untagged transaction * and having the target disconnect will make this * transaction indistinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur. */ - if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) { + if (aic_get_residual(scb) == aic_get_transfer_length(scb)) { ahd_update_neg_request(ahd, &devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET); scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); ahd_setup_data_scb(ahd, scb); scb->flags |= SCB_SENSE; ahd_queue_scb(ahd, scb); /* * Ensure we have enough time to actually * retrieve the sense. */ - ahd_scb_timer_reset(scb, 5 * 1000000); + aic_scb_timer_reset(scb, 5 * 1000000); break; } case SCSI_STATUS_OK: printf("%s: Interrupted for status of 0???\n", ahd_name(ahd)); /* FALLTHROUGH */ default: ahd_done(ahd, scb); break; } } /* * Calculate the residual for a just completed SCB. */ void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *hscb; struct initiator_status *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_STATUS_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are. */ hscb = scb->hscb; - sgptr = ahd_le32toh(hscb->sgptr); + sgptr = aic_le32toh(hscb->sgptr); if ((sgptr & SG_STATUS_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_STATUS_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; /* * Residual fields are the same in both * target and initiator status packets, * so we can always use the initiator fields * regardless of the role for this SCB. */ spkt = &hscb->shared_data.istatus; - resid_sgptr = ahd_le32toh(spkt->residual_sgptr); + resid_sgptr = aic_le32toh(spkt->residual_sgptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ - resid = ahd_get_transfer_length(scb); + resid = aic_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { ahd_print_path(ahd, scb); printf("data overrun detected Tag == 0x%x.\n", SCB_GET_TAG(scb)); ahd_freeze_devq(ahd, scb); - ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); - ahd_freeze_scb(scb); + aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); + aic_freeze_scb(scb); return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); /* NOTREACHED */ } else { struct ahd_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ - resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; + resid = aic_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped.
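The sense recovery path above hand-builds a six-byte REQUEST SENSE CDB, including the old SCSI-2 convention of carrying the LUN in the top three bits of byte 1. The same construction as a standalone sketch; the function name and parameters are illustrative:

#include <stdint.h>
#include <string.h>

#define REQUEST_SENSE_OP 0x03

/*
 * Fill a 6-byte REQUEST SENSE CDB.  'scsi2_lun_in_cdb' mirrors the
 * driver's protocol_version <= SCSI_REV_2 check: pre-SCSI-3 targets
 * expect the LUN in bits 5-7 of byte 1.
 */
static void
build_request_sense(uint8_t cdb[6], uint8_t lun, uint8_t alloc_len,
    int scsi2_lun_in_cdb)
{
        memset(cdb, 0, 6);
        cdb[0] = REQUEST_SENSE_OP;
        if (scsi2_lun_in_cdb && lun < 8)
                cdb[1] = lun << 5;
        cdb[4] = alloc_len;     /* bytes of sense data requested */
        /* cdb[5] is the control byte, left zero as in the driver. */
}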
*/ - while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { + while ((aic_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { sg++; - resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; + resid += aic_le32toh(sg->len) & AHD_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) - ahd_set_residual(scb, resid); + aic_set_residual(scb, resid); else - ahd_set_sense_residual(scb, resid); + aic_set_sense_residual(scb, resid); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) { ahd_print_path(ahd, scb); printf("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHD_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahd_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printf("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
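Case 5 of the residual logic walks the tail of the S/G list, accumulating segment lengths until it sees the last-segment flag. The sketch below mirrors that walk over an array whose length words carry flags above the byte count, as AHD_DMA_LAST_SEG and AHD_SG_LEN_MASK do; the constants and names here are illustrative:

#include <stdint.h>

#define SG_LEN_MASK  0x00FFFFFFu        /* low 24 bits: byte count   */
#define SG_LAST_SEG  0x80000000u        /* high bit: end of the list */

struct dma_seg {
        uint32_t addr;
        uint32_t len;                   /* flags live above the mask */
};

/*
 * 'stopped' points at the segment in which the transfer halted and
 * 'partial' is the untransferred remainder of that segment.  The
 * total residual also includes every segment after it.
 */
static uint32_t
tail_residual(const struct dma_seg *stopped, uint32_t partial)
{
        uint32_t resid = partial & SG_LEN_MASK;
        const struct dma_seg *sg = stopped;

        while ((sg->len & SG_LAST_SEG) == 0) {
                sg++;
                resid += sg->len & SG_LEN_MASK;
        }
        return (resid);
}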
*/ void ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immed_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahd_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immed_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->message_args[0] = event->event_type; inot->message_args[1] = event->event_arg; break; } inot->initiator_id = event->initiator_id; inot->sense_len = 0; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHD_DUMP_SEQ void ahd_dumpseq(struct ahd_softc* ahd) { int i; int max_prog; max_prog = 2048; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); - ahd_outb(ahd, PRGMCNT, 0); - ahd_outb(ahd, PRGMCNT+1, 0); + ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < max_prog; i++) { uint8_t ins_bytes[4]; ahd_insb(ahd, SEQRAM, ins_bytes, 4); printf("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static void ahd_loadseq(struct ahd_softc *ahd) { struct cs cs_table[num_critical_sections]; u_int begin_set[num_critical_sections]; u_int end_set[num_critical_sections]; struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; int downloaded; u_int skip_addr; u_int sg_prefetch_cnt; u_int sg_prefetch_cnt_limit; u_int sg_prefetch_align; u_int sg_size; uint8_t download_consts[DOWNLOAD_CONST_COUNT]; if (bootverbose) printf("%s: Downloading Sequencer Program...", ahd_name(ahd)); #if DOWNLOAD_CONST_COUNT != 7 #error "Download Const Mismatch" #endif /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* * Setup downloadable constant table. * * The computation for the S/G prefetch variables is * a bit complicated. We would like to always fetch * in terms of cachelined sized increments. However, * if the cacheline is not an even multiple of the * SG element size or is larger than our SG RAM, using * just the cache size might leave us with only a portion * of an SG element at the tail of a prefetch. If the * cacheline is larger than our S/G prefetch buffer less * the size of an SG element, we may round down to a cacheline * that doesn't contain any or all of the S/G of interest * within the bounds of our S/G ram. Provide variables to * the sequencer that will allow it to handle these edge * cases. */ /* Start by aligning to the nearest cacheline. */ sg_prefetch_align = ahd->pci_cachesize; if (sg_prefetch_align == 0) sg_prefetch_align = 8; /* Round down to the nearest power of 2. */ while (powerof2(sg_prefetch_align) == 0) sg_prefetch_align--; /* * If the cacheline boundary is greater than half our prefetch RAM * we risk not being able to fetch even a single complete S/G * segment if we align to that boundary. */ if (sg_prefetch_align > CCSGADDR_MAX/2) sg_prefetch_align = CCSGADDR_MAX/2; /* Start by fetching a single cacheline. */ sg_prefetch_cnt = sg_prefetch_align; /* * Increment the prefetch count by cachelines until * at least one S/G element will fit. 
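The lstate event buffer above is a fixed ring with separate read and write indices that drops the oldest event on overflow rather than the newest, and is cleared outright on a bus reset since older events become moot. A generic ring with the same overwrite-oldest policy (the driver's index arithmetic differs in detail, and the size here is arbitrary):

#define EVT_RING_SIZE 8

struct evt {
        unsigned type;
        unsigned arg;
};

struct evt_ring {
        struct evt buf[EVT_RING_SIZE];
        unsigned   r_idx;       /* next event to deliver  */
        unsigned   w_idx;       /* next free slot to fill */
};

static unsigned
evt_pending(const struct evt_ring *rg)
{
        if (rg->w_idx >= rg->r_idx)
                return (rg->w_idx - rg->r_idx);
        return (EVT_RING_SIZE - (rg->r_idx - rg->w_idx));
}

static void
evt_queue(struct evt_ring *rg, unsigned type, unsigned arg)
{
        /* Full: sacrifice the oldest entry, as the driver does. */
        if (evt_pending(rg) == EVT_RING_SIZE - 1)
                rg->r_idx = (rg->r_idx + 1) % EVT_RING_SIZE;

        rg->buf[rg->w_idx].type = type;
        rg->buf[rg->w_idx].arg = arg;
        rg->w_idx = (rg->w_idx + 1) % EVT_RING_SIZE;
}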
*/ sg_size = sizeof(struct ahd_dma_seg); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) sg_size = sizeof(struct ahd_dma64_seg); while (sg_prefetch_cnt < sg_size) sg_prefetch_cnt += sg_prefetch_align; /* * If the cacheline is not an even multiple of * the S/G size, we may only get a partial S/G when * we align. Add a cacheline if this is the case. */ if ((sg_prefetch_align % sg_size) != 0 && (sg_prefetch_cnt < CCSGADDR_MAX)) sg_prefetch_cnt += sg_prefetch_align; /* * Lastly, compute a value that the sequencer can use * to determine if the remainder of the CCSGRAM buffer * has a full S/G element in it. */ sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1); download_consts[SG_SIZEOF] = sg_size; download_consts[PKT_OVERRUN_BUFOFFSET] = (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256; download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN; cur_patch = patches; downloaded = 0; skip_addr = 0; ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); - ahd_outb(ahd, PRGMCNT, 0); - ahd_outb(ahd, PRGMCNT+1, 0); + ahd_outw(ahd, PRGMCNT, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } /* * Move through the CS table until we find a CS * that might apply to this instruction. */ for (; cur_cs < num_critical_sections; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahd_download_instr(ahd, i, download_consts); downloaded++; } ahd->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); if (ahd->critical_sections == NULL) panic("ahd_loadseq: Could not malloc"); memcpy(ahd->critical_sections, cs_table, cs_count); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printf(" %d instructions downloaded\n", downloaded); printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags); } } static int ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch, u_int start_instr, u_int *skip_addr) { struct patch *cur_patch; struct patch *last_patch; u_int num_patches; num_patches = sizeof(patches)/sizeof(struct patch); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahd) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next * one and wait for our intruction pointer to * hit this point. 
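The S/G prefetch constants boil down to three steps: round the cache line size down to a power of two, clamp it so at least half the prefetch RAM stays usable, then grow the fetch size by whole cache lines until one S/G element is guaranteed to fit, with one extra line when a misaligned straddle is possible. Factored out as a sketch with illustrative parameters:

/* Round down to the nearest power of two (v > 0). */
static unsigned
round_down_pow2(unsigned v)
{
        while (v & (v - 1))     /* clear bits until one remains */
                v &= v - 1;
        return (v);
}

/*
 * Compute how many bytes to prefetch per burst so that fetches stay
 * cache-line aligned yet always contain at least one whole S/G entry.
 */
static unsigned
prefetch_bytes(unsigned cacheline, unsigned sg_size, unsigned ram_size)
{
        unsigned align;
        unsigned cnt;

        align = cacheline != 0 ? round_down_pow2(cacheline) : 8;
        if (align > ram_size / 2)
                align = ram_size / 2;

        /* Grow by whole cache lines until an S/G element fits. */
        cnt = align;
        while (cnt < sg_size)
                cnt += align;

        /* A misaligned S/G straddle may need one extra line. */
        if ((align % sg_size) != 0 && cnt < ram_size)
                cnt += align;
        return (cnt);
}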
*/ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address) { struct patch *cur_patch; int address_offset; u_int skip_addr; u_int i; address_offset = 0; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahd_check_patch(ahd, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = MIN(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } return (address - address_offset); } static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ - instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); + instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); /* FALLTHROUGH */ } case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; /* FALLTHROUGH */ case AIC_OP_ROL: { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; /* The sequencer is a little endian cpu */ - instr.integer = ahd_htole32(instr.integer); + instr.integer = aic_htole32(instr.integer); ahd_outsb(ahd, SEQRAM, instr.bytes, 4); break; } default: panic("Unknown opcode encountered in seq program"); break; } } static int ahd_probe_stack_size(struct ahd_softc *ahd) { int last_probe; last_probe = 0; while (1) { int i; /* * We avoid using 0 as a pattern to avoid * confusion if the stack implementation * "back-fills" with zeros when "poping' * entries. */ for (i = 1; i <= last_probe+1; i++) { ahd_outb(ahd, STACK, i & 0xFF); ahd_outb(ahd, STACK, (i >> 8) & 0xFF); } /* Verify */ for (i = last_probe+1; i > 0; i--) { u_int stack_entry; stack_entry = ahd_inb(ahd, STACK) |(ahd_inb(ahd, STACK) << 8); if (stack_entry != i) goto sized; } last_probe++; } sized: return (last_probe); } void ahd_dump_all_cards_state(void) { struct ahd_softc *list_ahd; TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { ahd_dump_card_state(list_ahd); } } int ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printf("\n"); *cur_column = 0; } printed = printf("%s[0x%x]", name, value); if (table == NULL) { printed += printf(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printf("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printf(") "); else printed += printf(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahd_dump_card_state(struct ahd_softc *ahd) { struct scb *scb; ahd_mode_state saved_modes; u_int dffstat; int paused; u_int scb_index; u_int saved_scb_index; u_int cur_col; int i; if (ahd_is_paused(ahd)) { paused = 1; } else { paused = 0; ahd_pause(ahd); } saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State at program address 0x%x Mode 0x%x\n", ahd_name(ahd), - ahd_inb(ahd, CURADDR) | (ahd_inb(ahd, CURADDR+1) << 8), + ahd_inw(ahd, CURADDR), ahd_build_mode_state(ahd, ahd->saved_src_mode, ahd->saved_dst_mode)); if (paused) printf("Card was paused\n"); if (ahd_check_cmdcmpltqueues(ahd)) printf("Completions are pending\n"); /* * Mode independent registers. */ cur_col = 0; ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50); ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50); ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50); ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50); ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50); ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50); ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50); ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50); ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50); ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50); ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50); ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50); ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50); ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50); ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50); ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50); ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50); ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50); ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50); ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50); ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50); ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50); ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50); ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50); ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50); ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50); ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50); printf("\n"); printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x " "CURRSCB 0x%x NEXTSCB 0x%x\n", ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING), ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB), ahd_inw(ahd, NEXTSCB)); cur_col = 0; /* QINFIFO */ ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT); saved_scb_index = ahd_get_scbptr(ahd); printf("Pending list:"); i = 0; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (i++ > AHD_SCB_MAX) break; cur_col = printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb), ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT)); ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL), &cur_col, 60); ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID), &cur_col, 60); } printf("\nTotal %d\n", i); printf("Kernel Free SCB list: "); i = 0; TAILQ_FOREACH(scb, 
&ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { printf("%d ", SCB_GET_TAG(list_scb)); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb && i++ < AHD_SCB_MAX); } LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (i++ > AHD_SCB_MAX) break; printf("%d ", SCB_GET_TAG(scb)); } printf("\n"); printf("Sequencer Complete DMA-inprog list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); printf("Sequencer Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); printf("Sequencer DMA-Up and Complete list: "); scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); i = 0; while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { ahd_set_scbptr(ahd, scb_index); printf("%d ", scb_index); scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); } printf("\n"); + printf("Sequencer On QFreeze and Complete list: "); + scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); + i = 0; + while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) { + ahd_set_scbptr(ahd, scb_index); + printf("%d ", scb_index); + scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); + } + printf("\n"); ahd_set_scbptr(ahd, saved_scb_index); dffstat = ahd_inb(ahd, DFFSTAT); for (i = 0; i < 2; i++) { #ifdef AHD_DEBUG struct scb *fifo_scb; #endif u_int fifo_scbptr; ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); fifo_scbptr = ahd_get_scbptr(ahd); printf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n", ahd_name(ahd), i, (dffstat & (FIFO0FREE << i)) ? 
"Free" : "Active", ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr); cur_col = 0; ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50); ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50); ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50); ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50); ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW), &cur_col, 50); ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50); ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50); ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50); ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50); if (cur_col > 50) { printf("\n"); cur_col = 0; } cur_col += printf("SHADDR = 0x%x%x, SHCNT = 0x%x ", ahd_inl(ahd, SHADDR+4), ahd_inl(ahd, SHADDR), (ahd_inb(ahd, SHCNT) | (ahd_inb(ahd, SHCNT + 1) << 8) | (ahd_inb(ahd, SHCNT + 2) << 16))); if (cur_col > 50) { printf("\n"); cur_col = 0; } cur_col += printf("HADDR = 0x%x%x, HCNT = 0x%x ", ahd_inl(ahd, HADDR+4), ahd_inl(ahd, HADDR), (ahd_inb(ahd, HCNT) | (ahd_inb(ahd, HCNT + 1) << 8) | (ahd_inb(ahd, HCNT + 2) << 16))); ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_SG) != 0) { fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr); if (fifo_scb != NULL) ahd_dump_sglist(fifo_scb); } #endif } printf("\nLQIN: "); for (i = 0; i < 20; i++) printf("0x%x ", ahd_inb(ahd, LQIN + i)); printf("\n"); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE), ahd_inb(ahd, OPTIONMODE)); printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n", ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT), ahd_inb(ahd, MAXCMDCNT)); ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50); printf("\n"); ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN); cur_col = 0; ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50); printf("\n"); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n", ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX), ahd_inw(ahd, DINDEX)); printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n", ahd_name(ahd), ahd_get_scbptr(ahd), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, SCB_NEXT2)); printf("CDB %x %x %x %x %x %x\n", ahd_inb_scbram(ahd, SCB_CDB_STORE), ahd_inb_scbram(ahd, SCB_CDB_STORE+1), ahd_inb_scbram(ahd, SCB_CDB_STORE+2), ahd_inb_scbram(ahd, SCB_CDB_STORE+3), ahd_inb_scbram(ahd, SCB_CDB_STORE+4), ahd_inb_scbram(ahd, SCB_CDB_STORE+5)); printf("STACK:"); for (i = 0; i < ahd->stack_size; i++) { ahd->saved_stack[i] = ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8); printf(" 0x%x", ahd->saved_stack[i]); } for (i = ahd->stack_size-1; i >= 0; i--) { ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF); ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); } printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahd_platform_dump_card_state(ahd); ahd_restore_modes(ahd, saved_modes); if (paused == 0) ahd_unpause(ahd); } void ahd_dump_scbs(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int saved_scb_index; int i; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); saved_scb_index = ahd_get_scbptr(ahd); for (i = 0; i < AHD_SCB_MAX; i++) { ahd_set_scbptr(ahd, i); printf("%3d", i); printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n", ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb_scbram(ahd, SCB_SCSIID), ahd_inw_scbram(ahd, SCB_NEXT), ahd_inw_scbram(ahd, 
SCB_NEXT2), ahd_inl_scbram(ahd, SCB_SGPTR), ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR)); } printf("\n"); ahd_set_scbptr(ahd, saved_scb_index); ahd_restore_modes(ahd, saved_modes); } + +/*************************** Timeout Handling *********************************/ +void +ahd_timeout(struct scb *scb) +{ + struct ahd_softc *ahd; + + ahd = scb->ahd_softc; + if ((scb->flags & SCB_ACTIVE) != 0) { + if ((scb->flags & SCB_TIMEDOUT) == 0) { + LIST_INSERT_HEAD(&ahd->timedout_scbs, scb, + timedout_links); + scb->flags |= SCB_TIMEDOUT; + } + ahd_wakeup_recovery_thread(ahd); + } +} + +/* + * ahd_recover_commands determines if any of the commands that have currently + * timedout are the root cause for this timeout. Innocent commands are given + * a new timeout while we wait for the command executing on the bus to timeout. + * This routine is invoked from a thread context so we are allowed to sleep. + * Our lock is not held on entry. + */ +void +ahd_recover_commands(struct ahd_softc *ahd) +{ + struct scb *scb; + struct scb *active_scb; + long s; + int found; + int was_paused; + u_int active_scbptr; + u_int last_phase; + + ahd_lock(ahd, &s); + + /* + * Pause the controller and manually flush any + * commands that have just completed but that our + * interrupt handler has yet to see. + */ + was_paused = ahd_is_paused(ahd); + ahd_pause_and_flushwork(ahd); + + if (LIST_EMPTY(&ahd->timedout_scbs) != 0) { + /* + * The timedout commands have already + * completed. This typically means + * that either the timeout value was on + * the hairy edge of what the device + * requires or - more likely - interrupts + * are not happening. + */ + printf("%s: Timedout SCBs already complete. " + "Interrupts may not be functioning.\n", ahd_name(ahd)); + ahd_unpause(ahd); + ahd_unlock(ahd, &s); + return; + } + + printf("%s: Recovery Initiated - Card was %spaused\n", ahd_name(ahd), + was_paused ? "" : "not "); + ahd_dump_card_state(ahd); + + /* + * Determine identity of SCB acting on the bus. + * This test only catches non-packetized transactions. + * Due to the fleeting nature of packetized operations, + * we can't easily determine that a packetized operation + * is on the bus. + */ + ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); + last_phase = ahd_inb(ahd, LASTPHASE); + active_scbptr = ahd_get_scbptr(ahd); + active_scb = NULL; + if (last_phase != P_BUSFREE + || (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) + active_scb = ahd_lookup_scb(ahd, active_scbptr); + + while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) { + int target; + int lun; + char channel; + + target = SCB_GET_TARGET(ahd, scb); + channel = SCB_GET_CHANNEL(ahd, scb); + lun = SCB_GET_LUN(scb); + + ahd_print_path(ahd, scb); + printf("SCB 0x%x - timed out\n", scb->hscb->tag); + + if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { + /* + * Been down this road before. + * Do a full bus reset. + */ + aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); +bus_reset: + found = ahd_reset_channel(ahd, channel, + /*Initiate Reset*/TRUE); + printf("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahd_name(ahd), channel, + found); + continue; + } + + /* + * Remove the command from the timedout list in + * preparation for requeing it. + */ + LIST_REMOVE(scb, timedout_links); + scb->flags &= ~SCB_TIMEDOUT; + + if (active_scb != NULL) { + + if (active_scb != scb) { + /* + * If the active SCB is not us, assume that + * the active SCB has a longer timeout than + * the timedout SCB, and wait for the active + * SCB to timeout. 
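ahd_timeout() deliberately does almost nothing in timer context: it flags the SCB, links it onto a list, and wakes a recovery thread that is free to sleep while untangling the bus. That split is a common pattern; below is a reduced userland sketch using a pthread condition variable in place of the kernel wakeup. Every name in it is illustrative, not the driver's:

#include <pthread.h>
#include <sys/queue.h>

struct cmd {
        int             flags;
#define CMD_TIMEDOUT 0x1
        LIST_ENTRY(cmd) timedout_links;
};

static LIST_HEAD(, cmd) timedout_cmds =
    LIST_HEAD_INITIALIZER(timedout_cmds);
static pthread_mutex_t  recovery_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   recovery_cv = PTHREAD_COND_INITIALIZER;

/* Timer context: cheap, non-sleeping, idempotent per command. */
static void
cmd_timeout(struct cmd *c)
{
        pthread_mutex_lock(&recovery_mtx);
        if ((c->flags & CMD_TIMEDOUT) == 0) {
                c->flags |= CMD_TIMEDOUT;
                LIST_INSERT_HEAD(&timedout_cmds, c, timedout_links);
        }
        pthread_cond_signal(&recovery_cv);      /* wake the thread */
        pthread_mutex_unlock(&recovery_mtx);
}

/* Thread context: free to sleep while performing recovery. */
static void *
recovery_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&recovery_mtx);
        for (;;) {
                while (LIST_EMPTY(&timedout_cmds))
                        pthread_cond_wait(&recovery_cv, &recovery_mtx);
                struct cmd *c = LIST_FIRST(&timedout_cmds);
                LIST_REMOVE(c, timedout_links);
                c->flags &= ~CMD_TIMEDOUT;
                /* ... examine and recover the command ... */
        }
        return (NULL);
}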
+ */ + ahd_other_scb_timeout(ahd, scb, active_scb); + continue; + } + + /* + * We're active on the bus, so assert ATN + * and hope that the target responds. + */ + ahd_set_recoveryscb(ahd, active_scb); + active_scb->flags |= SCB_RECOVERY_SCB|SCB_DEVICE_RESET; + ahd_outb(ahd, MSG_OUT, HOST_MSG); + ahd_outb(ahd, SCSISIGO, last_phase|ATNO); + ahd_print_path(ahd, active_scb); + printf("BDR message in message buffer\n"); + aic_scb_timer_reset(scb, 2 * 1000000); + break; + } else if (last_phase != P_BUSFREE + && ahd_inb(ahd, SCSIPHASE) == 0) { + /* + * SCB is not identified, there + * is no pending REQ, and the sequencer + * has not seen a busfree. Looks like + * a stuck connection waiting to + * go busfree. Reset the bus. + */ + printf("%s: Connection stuck awaiting busfree or " + "Identify Msg.\n", ahd_name(ahd)); + goto bus_reset; + } else if (ahd_search_qinfifo(ahd, target, channel, lun, + scb->hscb->tag, ROLE_INITIATOR, + /*status*/0, SEARCH_COUNT) > 0) { + + /* + * We haven't even gone out on the bus + * yet, so the timeout must be due to + * some other command. Reset the timer + * and go on. + */ + ahd_other_scb_timeout(ahd, scb, scb); + } else { + /* + * This SCB is for a disconnected transaction + * and we haven't found a better candidate on + * the bus to explain this timeout. + */ + ahd_set_recoveryscb(ahd, scb); + + /* + * Actually re-queue this SCB in an attempt + * to select the device before it reconnects. + * In either case (selection or reselection), + * we will now issue a target reset to the + * timed-out device. + * + * Set the MK_MESSAGE control bit indicating + * that we desire to send a message. We + * also set the disconnected flag since + * in the paging case there is no guarantee + * that our SCB control byte matches the + * version on the card. We don't want the + * sequencer to abort the command thinking + * an unsolicited reselection occurred. + */ + scb->flags |= SCB_DEVICE_RESET; + scb->hscb->cdb_len = 0; + scb->hscb->task_attribute = 0; + scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK; + + ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); + if ((scb->flags & SCB_PACKETIZED) != 0) { + /* + * Mark the SCB as having an outstanding + * task management function. Should the command + * complete normally before the task management + * function can be sent, the host will be + * notified to abort our requeued SCB. + */ + ahd_outb(ahd, SCB_TASK_MANAGEMENT, + scb->hscb->task_management); + } else { + /* + * If non-packetized, set the MK_MESSAGE control + * bit indicating that we desire to send a + * message. We also set the disconnected flag + * since there is no guarantee that our SCB + * control byte matches the version on the + * card. We don't want the sequencer to abort + * the command thinking an unsolicited + * reselection occurred. + */ + scb->hscb->control |= MK_MESSAGE|DISCONNECTED; + + /* + * The sequencer will never re-reference the + * in-core SCB. To make sure we are notified + * during reselection, set the MK_MESSAGE flag in + * the card's copy of the SCB. + */ + ahd_outb(ahd, SCB_CONTROL, + ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); + } + + /* + * Clear out any entries in the QINFIFO first + * so we are the next SCB for this target + * to run.
+ */ + ahd_search_qinfifo(ahd, target, channel, lun, + SCB_LIST_NULL, ROLE_INITIATOR, + CAM_REQUEUE_REQ, SEARCH_COMPLETE); + ahd_qinfifo_requeue_tail(ahd, scb); + ahd_set_scbptr(ahd, active_scbptr); + ahd_print_path(ahd, scb); + printf("Queuing a BDR SCB\n"); + aic_scb_timer_reset(scb, 2 * 1000000); + break; + } + } + + /* + * Any remaining SCBs were not the "culprit", so remove + * them from the timeout list. The timer for these commands + * will be reset once the recovery SCB completes. + */ + while ((scb = LIST_FIRST(&ahd->timedout_scbs)) != NULL) { + + LIST_REMOVE(scb, timedout_links); + scb->flags &= ~SCB_TIMEDOUT; + } + + ahd_unpause(ahd); + ahd_unlock(ahd, &s); +} + +static void +ahd_other_scb_timeout(struct ahd_softc *ahd, struct scb *scb, + struct scb *other_scb) +{ + u_int newtimeout; + + ahd_print_path(ahd, scb); + printf("Other SCB Timeout%s", + (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 + ? " again\n" : "\n"); + scb->flags |= SCB_OTHERTCL_TIMEOUT; + newtimeout = MAX(aic_get_timeout(other_scb), + aic_get_timeout(scb)); + aic_scb_timer_reset(scb, newtimeout); +} + /**************************** Flexport Logic **********************************/ /* * Read count 16bit words from 16bit word address start_addr from the * SEEPROM attached to the controller, into buf, using the controller's * SEEPROM reading state machine. Optionally treat the data as a byte * stream in terms of byte order. */ int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bytestream) { u_int cur_addr; u_int end_addr; int error; /* * If we never make it through the loop even once, * we were passed invalid arguments. */ error = EINVAL; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); error = ahd_wait_seeprom(ahd); if (error) break; if (bytestream != 0) { uint8_t *bytestream_ptr; bytestream_ptr = (uint8_t *)buf; *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); } else { /* * ahd_inw() already handles machine byte order. */ *buf = ahd_inw(ahd, SEEDAT); } buf++; } return (error); } /* * Write count 16bit words from buf, into SEEPROM attached to the * controller starting at 16bit word address start_addr, using the * controller's SEEPROM writing state machine. */ int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count) { u_int cur_addr; u_int end_addr; int error; int retval; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); error = ENOENT; /* Place the chip into write-enable mode */ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); /* * Write the data. If we don't get through the loop at * least once, the arguments were invalid. */ retval = EINVAL; end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outw(ahd, SEEDAT, *buf++); ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); retval = ahd_wait_seeprom(ahd); if (retval) break; } /* * Disable writes. */ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); return (retval); } /* * Wait ~100us for the serial eeprom to satisfy our request.
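ahd_other_scb_timeout() above encodes the innocent-bystander rule: when a command times out while another command owns the bus, the bystander's timer is re-armed to at least the bus owner's timeout so that the true culprit expires first. Reduced to its arithmetic, with timer_rearm as a hypothetical stand-in for aic_scb_timer_reset():

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct xfer {
        unsigned timeout_us;    /* current timeout for this command */
};

/* Hypothetical re-arm hook standing in for aic_scb_timer_reset(). */
void timer_rearm(struct xfer *x, unsigned usecs);

/*
 * 'victim' timed out while 'owner' holds the bus: give the victim at
 * least as long as the owner so the owner's timeout fires first.
 */
static void
defer_to_bus_owner(struct xfer *victim, const struct xfer *owner)
{
        unsigned newtimeout;

        newtimeout = MAX(owner->timeout_us, victim->timeout_us);
        timer_rearm(victim, newtimeout);
}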
*/ int ahd_wait_seeprom(struct ahd_softc *ahd) { int cnt; - cnt = 20; + cnt = 5000; while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) - ahd_delay(5); + aic_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /* * Validate the two checksums in the per_channel * vital product data struct. */ int ahd_verify_vpd_cksum(struct vpd_config *vpd) { int i; int maxaddr; uint32_t checksum; uint8_t *vpdarray; vpdarray = (uint8_t *)vpd; maxaddr = offsetof(struct vpd_config, vpd_checksum); checksum = 0; for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->vpd_checksum) return (0); checksum = 0; maxaddr = offsetof(struct vpd_config, checksum); for (i = offsetof(struct vpd_config, default_target_flags); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->checksum) return (0); return (1); } int ahd_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return (1); } } int ahd_acquire_seeprom(struct ahd_softc *ahd) { /* * We should be able to determine the SEEPROM type * from the flexport logic, but unfortunately not * all implementations have this logic and there is * no programmatic method for determining if the logic * is present. */ return (1); #if 0 uint8_t seetype; int error; error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); if (error != 0 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) return (0); return (1); #endif } void ahd_release_seeprom(struct ahd_softc *ahd) { /* Currently a no-op */ } int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_write_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); ahd_outb(ahd, BRDDAT, value); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_read_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); *value = ahd_inb(ahd, BRDDAT); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } /* * Wait at most 2 seconds for flexport arbitration to succeed.
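 * * (Editorial illustration, not part of this change: the flexport accessors above are commonly used to poll the termination current-sensing logic, e.g.: * * uint8_t cur; * * if (ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &cur) == 0 * && (cur & FLX_CSTAT_PRI_LOW) != FLX_CSTAT_OKAY) * printf("%s: primary low termination fault\n", ahd_name(ahd)); * * The FLXADDR_* and FLX_CSTAT_* definitions live in aic79xx.h, and callers must be in SCSI mode, per the AHD_ASSERT_MODES checks above.)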
*/ int ahd_wait_flexport(struct ahd_softc *ahd) { int cnt; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); cnt = 1000000 * 2 / 5; while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) - ahd_delay(5); + aic_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /************************* Target Mode ****************************************/ #ifdef AHD_TARGET_MODE cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate **lstate, int notfound_failure) { if ((ahd->features & AHD_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahd->black_hole; } else { u_int max_id; max_id = (ahd->features & AHD_WIDE) ? 15 : 7; if (ccb->ccb_h.target_id > max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) { #if NOT_YET struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_int target; u_int lun; u_int target_mask; u_long s; char channel; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if ((ahd->features & AHD_MULTIROLE) != 0) { u_int our_id; our_id = ahd->our_id; if (ccb->ccb_h.target_id != our_id) { if ((ahd->features & AHD_MULTI_TID) != 0 && (ahd->flags & AHD_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahd->flags & AHD_INITIATORROLE) != 0 || ahd->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahd->flags & AHD_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; printf("Configuring Target Mode\n"); ahd_lock(ahd, &s); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahd_unlock(ahd, &s); return; } ahd->flags |= AHD_TARGETROLE; if ((ahd->features & AHD_MULTIROLE) == 0) ahd->flags &= ~AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); ahd_unlock(ahd, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahd, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq1; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printf("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. 
*/ ccb->ccb_h.status = CAM_REQ_INVALID; printf("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahd_alloc_tstate(ahd, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { free(lstate, M_DEVBUF); xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahd_lock(ahd, &s); ahd_pause(ahd); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahd->enabled_luns++; if ((ahd->features & AHD_MULTI_TID) != 0) { u_int targid_mask; - targid_mask = ahd_inb(ahd, TARGID) - | (ahd_inb(ahd, TARGID + 1) << 8); - + targid_mask = ahd_inw(ahd, TARGID); targid_mask |= target_mask; - ahd_outb(ahd, TARGID, targid_mask); - ahd_outb(ahd, TARGID+1, (targid_mask >> 8)); - + ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahd, sim); our_id = SIM_SCSI_ID(ahd, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahd_inb(ahd, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 'B' : 'A'; if ((ahd->features & AHD_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; ahd->our_id = target; if (swap) ahd_outb(ahd, SBLKCTL, sblkctl ^ SELBUSB); ahd_outb(ahd, SCSIID, target); if (swap) ahd_outb(ahd, SBLKCTL, sblkctl); } } } else ahd->black_hole = lstate; /* Allow select-in operations */ if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); } ahd_unpause(ahd); ahd_unlock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printf("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahd_lock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printf("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahd_unlock(ahd, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printf("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printf("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahd_unlock(ahd, &s); return; } xpt_print_path(ccb->ccb_h.path); printf("Target mode disabled\n"); xpt_free_path(lstate->path); free(lstate, M_DEVBUF); ahd_pause(ahd); /* Can we clean up the target too? 
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahd->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahd_free_tstate(ahd, target, channel, /*force*/FALSE); if (ahd->features & AHD_MULTI_TID) { u_int targid_mask; - targid_mask = ahd_inb(ahd, TARGID) - | (ahd_inb(ahd, TARGID + 1) - << 8); - + targid_mask = ahd_inw(ahd, TARGID); targid_mask &= ~target_mask; - ahd_outb(ahd, TARGID, targid_mask); - ahd_outb(ahd, TARGID+1, - (targid_mask >> 8)); + ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } } } else { ahd->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahd->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq1; scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); if ((ahd->features & AHD_MULTIROLE) == 0) { printf("Configuring Initiator Mode\n"); ahd->flags &= ~AHD_TARGETROLE; ahd->flags |= AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahd_unpause(ahd); ahd_unlock(ahd, &s); } #endif } static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) { #if NOT_YET u_int scsiid_mask; u_int scsiid; if ((ahd->features & AHD_MULTI_TID) == 0) panic("ahd_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. */ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. 
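 * * (Editorial note, not part of this change: ahd->tqinfifonext is a uint8_t and AHD_TMODE_CMDS is 256, so the increment above wraps with no explicit modulo. Assuming HOST_TQINPOS is a power of two, the test below rewrites HS_MAILBOX only once every HOST_TQINPOS commands rather than on every pass.)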
*/ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printf("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif Index: stable/4/sys/dev/aic7xxx/aic79xx.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx.h (revision 125849) @@ -1,1535 +1,1567 @@ /* * Core definitions and data structures shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#92 $ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#106 $ * * $FreeBSD$ */ #ifndef _AIC79XX_H_ #define _AIC79XX_H_ /* Register Definitions */ #include "aic79xx_reg.h" /************************* Forward Declarations *******************************/ struct ahd_platform_data; struct scb_platform_data; /****************************** Useful Macros *********************************/ #ifndef MAX #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) (((a) < (b)) ? 
(a) : (b)) #endif #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define ALL_CHANNELS '\0' #define ALL_TARGETS_MASK 0xFFFF #define INITIATOR_WILDCARD (~0) #define SCB_LIST_NULL 0xFF00 -#define SCB_LIST_NULL_LE (ahd_htole16(SCB_LIST_NULL)) -#define QOUTFIFO_ENTRY_VALID 0x8000 -#define QOUTFIFO_ENTRY_VALID_LE (ahd_htole16(0x8000)) +#define SCB_LIST_NULL_LE (aic_htole16(SCB_LIST_NULL)) +#define QOUTFIFO_ENTRY_VALID 0x80 #define SCBID_IS_NULL(scbid) (((scbid) & 0xFF00 ) == SCB_LIST_NULL) #define SCSIID_TARGET(ahd, scsiid) \ (((scsiid) & TID) >> TID_SHIFT) #define SCSIID_OUR_ID(scsiid) \ ((scsiid) & OID) #define SCSIID_CHANNEL(ahd, scsiid) ('A') #define SCB_IS_SCSIBUS_B(ahd, scb) (0) #define SCB_GET_OUR_ID(scb) \ SCSIID_OUR_ID((scb)->hscb->scsiid) #define SCB_GET_TARGET(ahd, scb) \ SCSIID_TARGET((ahd), (scb)->hscb->scsiid) #define SCB_GET_CHANNEL(ahd, scb) \ SCSIID_CHANNEL(ahd, (scb)->hscb->scsiid) #define SCB_GET_LUN(scb) \ ((scb)->hscb->lun) #define SCB_GET_TARGET_OFFSET(ahd, scb) \ SCB_GET_TARGET(ahd, scb) #define SCB_GET_TARGET_MASK(ahd, scb) \ (0x01 << (SCB_GET_TARGET_OFFSET(ahd, scb))) #ifdef AHD_DEBUG #define SCB_IS_SILENT(scb) \ ((ahd_debug & AHD_SHOW_MASKED_ERRORS) == 0 \ && (((scb)->flags & SCB_SILENT) != 0)) #else #define SCB_IS_SILENT(scb) \ (((scb)->flags & SCB_SILENT) != 0) #endif /* * TCLs have the following format: TTTTLLLLLLLL */ #define TCL_TARGET_OFFSET(tcl) \ ((((tcl) >> 4) & TID) >> 4) #define TCL_LUN(tcl) \ (tcl & (AHD_NUM_LUNS - 1)) #define BUILD_TCL(scsiid, lun) \ ((lun) | (((scsiid) & TID) << 4)) #define BUILD_TCL_RAW(target, channel, lun) \ ((lun) | ((target) << 8)) #define SCB_GET_TAG(scb) \ - ahd_le16toh(scb->hscb->tag) + aic_le16toh(scb->hscb->tag) #ifndef AHD_TARGET_MODE #undef AHD_TMODE_ENABLE #define AHD_TMODE_ENABLE 0 #endif #define AHD_BUILD_COL_IDX(target, lun) \ (((lun) << 4) | target) #define AHD_GET_SCB_COL_IDX(ahd, scb) \ ((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb)) #define AHD_SET_SCB_COL_IDX(scb, col_idx) \ do { \ (scb)->hscb->scsiid = ((col_idx) << TID_SHIFT) & TID; \ (scb)->hscb->lun = ((col_idx) >> 4) & (AHD_NUM_LUNS_NONPKT-1); \ } while (0) #define AHD_COPY_SCB_COL_IDX(dst, src) \ do { \ dst->hscb->scsiid = src->hscb->scsiid; \ dst->hscb->lun = src->hscb->lun; \ } while (0) #define AHD_NEVER_COL_IDX 0xFFFF /**************************** Driver Constants ********************************/ /* * The maximum number of supported targets. */ #define AHD_NUM_TARGETS 16 /* * The maximum number of supported luns. * The identify message only supports 64 luns in non-packetized transfers. * You can have 2^64 luns when information unit transfers are enabled, * but until we see a need to support that many, we support 256. */ #define AHD_NUM_LUNS_NONPKT 64 #define AHD_NUM_LUNS 256 /* * The maximum transfer per S/G segment. */ #define AHD_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ /* * The maximum amount of SCB storage in hardware on a controller. * This value represents an upper bound. Due to software design, * we may not be able to use this number. */ #define AHD_SCB_MAX 512 /* * The maximum number of concurrent transactions supported per driver instance. * Sequencer Control Blocks (SCBs) store per-transaction information. */ #define AHD_MAX_QUEUE AHD_SCB_MAX /* * Define the size of our QIN and QOUT FIFOs. They must be a power of 2 * in size and accommodate as many transactions as can be queued concurrently. 
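 * * (Editorial note, not part of this change: the power-of-2 requirement lets AHD_QIN_WRAP() below reduce an index with a mask instead of a divide; with AHD_QIN_SIZE == 512, AHD_QIN_WRAP(513) == (513 & 511) == 1, which matches 513 % 512.)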
*/ #define AHD_QIN_SIZE AHD_MAX_QUEUE #define AHD_QOUT_SIZE AHD_MAX_QUEUE #define AHD_QIN_WRAP(x) ((x) & (AHD_QIN_SIZE-1)) /* * The maximum amount of SCB storage we allocate in host memory. */ #define AHD_SCB_MAX_ALLOC AHD_MAX_QUEUE /* * Ring Buffer of incoming target commands. * We allocate 256 to simplify the logic in the sequencer * by using the natural wrap point of an 8bit counter. */ #define AHD_TMODE_CMDS 256 /* Reset line assertion time in us */ #define AHD_BUSRESET_DELAY 25 /******************* Chip Characteristics/Operating Settings *****************/ +extern uint32_t ahd_attach_to_HostRAID_controllers; + /* * Chip Type * The chip order is from least sophisticated to most sophisticated. */ typedef enum { AHD_NONE = 0x0000, AHD_CHIPID_MASK = 0x00FF, AHD_AIC7901 = 0x0001, AHD_AIC7902 = 0x0002, AHD_AIC7901A = 0x0003, AHD_PCI = 0x0100, /* Bus type PCI */ AHD_PCIX = 0x0200, /* Bus type PCIX */ AHD_BUS_MASK = 0x0F00 } ahd_chip; /* * Features available in each chip type. */ typedef enum { AHD_FENONE = 0x00000, AHD_WIDE = 0x00001,/* Wide Channel */ AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */ AHD_TARGETMODE = 0x01000,/* Has tested target mode support */ AHD_MULTIROLE = 0x02000,/* Space for two roles at a time */ AHD_RTI = 0x04000,/* Retained Training Support */ AHD_NEW_IOCELL_OPTS = 0x08000,/* More Signal knobs in the IOCELL */ AHD_NEW_DFCNTRL_OPTS = 0x10000,/* SCSIENWRDIS bit */ + AHD_FAST_CDB_DELIVERY = 0x20000,/* CDB acks released to Output Sync */ AHD_REMOVABLE = 0x00000,/* Hot-Swap supported - None so far*/ AHD_AIC7901_FE = AHD_FENONE, + AHD_AIC7901A_FE = AHD_FENONE, AHD_AIC7902_FE = AHD_MULTI_FUNC } ahd_feature; /* * Bugs in the silicon that we work around in software. */ typedef enum { AHD_BUGNONE = 0x0000, /* * Rev A hardware fails to update LAST/CURR/NEXTSCB * correctly in certain packetized selection cases. */ AHD_SENT_SCB_UPDATE_BUG = 0x0001, /* The wrong SCB is accessed to check the abort pending bit. */ AHD_ABORT_LQI_BUG = 0x0002, /* Packetized bitbucket crosses packet boundaries. */ AHD_PKT_BITBUCKET_BUG = 0x0004, /* The selection timer runs twice as long as its setting. */ AHD_LONG_SETIMO_BUG = 0x0008, /* The Non-LQ CRC error status is delayed until phase change. */ AHD_NLQICRC_DELAYED_BUG = 0x0010, /* The chip must be reset for all outgoing bus resets. */ AHD_SCSIRST_BUG = 0x0020, /* Some PCIX fields must be saved and restored across chip reset. */ AHD_PCIX_CHIPRST_BUG = 0x0040, /* MMAPIO is not functional in PCI-X mode. */ AHD_PCIX_MMAPIO_BUG = 0x0080, /* Reads to SCBRAM fail to reset the discard timer. */ AHD_PCIX_SCBRAM_RD_BUG = 0x0100, /* Bug workarounds that can be disabled on non-PCIX busses. */ AHD_PCIX_BUG_MASK = AHD_PCIX_CHIPRST_BUG | AHD_PCIX_MMAPIO_BUG | AHD_PCIX_SCBRAM_RD_BUG, /* * LQOSTOP0 status set even for forced selections with ATN * to perform non-packetized message delivery. */ AHD_LQO_ATNO_BUG = 0x0200, /* FIFO auto-flush does not always trigger. */ AHD_AUTOFLUSH_BUG = 0x0400, /* The CLRLQO registers are not self-clearing. */ AHD_CLRLQO_AUTOCLR_BUG = 0x0800, /* The PACKETIZED status bit refers to the previous connection. */ AHD_PKTIZED_STATUS_BUG = 0x1000, /* "Short Luns" are not placed into outgoing LQ packets correctly. */ AHD_PKT_LUN_BUG = 0x2000, /* * Only the FIFO allocated to the non-packetized connection may * be in use during a non-packetized connection. */ AHD_NONPACKFIFO_BUG = 0x4000, /* * Writing to a DFF SCBPTR register may fail if concurrent with * a hardware write to the other DFF SCBPTR register.
This is * not currently a concern in our sequencer since all chips with * this bug have the AHD_NONPACKFIFO_BUG and all writes of concern * occur in non-packetized connections. */ AHD_MDFF_WSCBPTR_BUG = 0x8000, /* SGHADDR updates are slow. */ AHD_REG_SLOW_SETTLE_BUG = 0x10000, /* * Changing the MODE_PTR coincident with an interrupt that * switches to a different mode will cause the interrupt to * be in the mode written outside of interrupt context. */ AHD_SET_MODE_BUG = 0x20000, /* Non-packetized busfree revision does not work. */ AHD_BUSFREEREV_BUG = 0x40000, /* * Paced transfers are indicated with a non-standard PPR * option bit in the neg table, 160MHz is indicated by * sync factor 0x7, and the offset if off by a factor of 2. */ AHD_PACED_NEGTABLE_BUG = 0x80000, /* LQOOVERRUN false positives. */ AHD_LQOOVERRUN_BUG = 0x100000, /* * Controller write to INTSTAT will lose to a host * write to CLRINT. */ AHD_INTCOLLISION_BUG = 0x200000, /* * The GEM318 violates the SCSI spec by not waiting * the mandated bus settle delay between phase changes * in some situations. Some aic79xx chip revs. are more * strict in this regard and will treat REQ assertions * that fall within the bus settle delay window as * glitches. This flag tells the firmware to tolerate * early REQ assertions. */ AHD_EARLY_REQ_BUG = 0x400000, /* * The LED does not stay on long enough in packetized modes. */ AHD_FAINT_LED_BUG = 0x800000 } ahd_bug; /* * Configuration specific settings. * The driver determines these settings by probing the * chip/controller's configuration. */ typedef enum { AHD_FNONE = 0x00000, AHD_BOOT_CHANNEL = 0x00001,/* We were set as the boot channel. */ AHD_USEDEFAULTS = 0x00004,/* * For cards without an seeprom * or a BIOS to initialize the chip's * SRAM, we use the default target * settings. */ AHD_SEQUENCER_DEBUG = 0x00008, AHD_RESET_BUS_A = 0x00010, AHD_EXTENDED_TRANS_A = 0x00020, AHD_TERM_ENB_A = 0x00040, AHD_SPCHK_ENB_A = 0x00080, AHD_STPWLEVEL_A = 0x00100, AHD_INITIATORROLE = 0x00200,/* * Allow initiator operations on * this controller. */ AHD_TARGETROLE = 0x00400,/* * Allow target operations on this * controller. */ AHD_RESOURCE_SHORTAGE = 0x00800, AHD_TQINFIFO_BLOCKED = 0x01000,/* Blocked waiting for ATIOs */ AHD_INT50_SPEEDFLEX = 0x02000,/* * Internal 50pin connector * sits behind an aic3860 */ AHD_BIOS_ENABLED = 0x04000, AHD_ALL_INTERRUPTS = 0x08000, AHD_39BIT_ADDRESSING = 0x10000,/* Use 39 bit addressing scheme. */ AHD_64BIT_ADDRESSING = 0x20000,/* Use 64 bit addressing scheme. */ AHD_CURRENT_SENSING = 0x40000, AHD_SCB_CONFIG_USED = 0x80000,/* No SEEPROM but SCB had info. */ AHD_HP_BOARD = 0x100000, AHD_RESET_POLL_ACTIVE = 0x200000, AHD_UPDATE_PEND_CMDS = 0x400000, AHD_RUNNING_QOUTFIFO = 0x800000, - AHD_HAD_FIRST_SEL = 0x1000000 + AHD_HAD_FIRST_SEL = 0x1000000, + AHD_SHUTDOWN_RECOVERY = 0x2000000, /* Terminate recovery thread. */ + AHD_HOSTRAID_BOARD = 0x4000000 } ahd_flag; /************************* Hardware SCB Definition ***************************/ /* * The driver keeps up to MAX_SCB scb structures per card in memory. The SCB * consists of a "hardware SCB" mirroring the fields available on the card * and additional information the kernel stores for each transaction. * * To minimize space utilization, a portion of the hardware scb stores * different data during different portions of a SCSI transaction. * As initialized by the host driver for the initiator role, this area * contains the SCSI cdb (or a pointer to the cdb) to be executed. 
After * the cdb has been presented to the target, this area serves to store * residual transfer information and the SCSI status byte. * For the target role, the contents of this area do not change, but * still serve a different purpose than for the initiator role. See * struct target_data for details. */ /* * Status information embedded in the shared portion of * an SCB after passing the cdb to the target. The kernel * driver will only read this data for transactions that * complete abnormally. */ struct initiator_status { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sgptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* Standard SCSI status byte */ }; struct target_status { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sgptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; /* * Initiator mode SCB shared data area. * If the embedded CDB is 12 bytes or less, we embed * the sense buffer address in the SCB. This allows * us to retrieve sense information without interrupting * the host in packetized mode. */ typedef uint32_t sense_addr_t; #define MAX_CDB_LEN 16 #define MAX_CDB_LEN_WITH_SENSE_ADDR (MAX_CDB_LEN - sizeof(sense_addr_t)) union initiator_data { struct { uint64_t cdbptr; uint8_t cdblen; } cdb_from_host; uint8_t cdb[MAX_CDB_LEN]; struct { uint8_t cdb[MAX_CDB_LEN_WITH_SENSE_ADDR]; sense_addr_t sense_addr; } cdb_plus_saddr; }; /* * Target mode version of the shared data SCB segment. */ struct target_data { uint32_t spare[2]; uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; struct hardware_scb { /*0*/ union { union initiator_data idata; struct target_data tdata; struct initiator_status istatus; struct target_status tstatus; } shared_data; /* * A word about residuals. * The scb is presented to the sequencer with the dataptr and datacnt * fields initialized to the contents of the first S/G element to * transfer. The sgptr field is initialized to the bus address for * the S/G element that follows the first in the in core S/G array * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid * S/G entry for this transfer (single S/G element transfer with the * first element's address and length preloaded in the dataptr/datacnt * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL. * The SG_FULL_RESID flag ensures that the residual will be correctly * noted even if no data transfers occur. Once the data phase is entered, * the residual sgptr and datacnt are loaded from the sgptr and the * datacnt fields. After each S/G element's dataptr and length are * loaded into the hardware, the residual sgptr is advanced. After * each S/G element is expired, its datacnt field is checked to see * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the * residual sg ptr and the transfer is considered complete. If the * sequencer determines that there is a residual in the transfer, or * there is non-zero status, it will set the SG_STATUS_VALID flag in * sgptr and dma the scb back into host memory.
To summarize: * * Sequencer: * o A residual has occurred if SG_FULL_RESID is set in sgptr, * or residual_sgptr does not have SG_LIST_NULL set. * * o We are transferring the last segment if residual_datacnt has * the SG_LAST_SEG flag set. * * Host: * o A residual can only have occurred if a completed scb has the * SG_STATUS_VALID flag set. Inspection of the SCSI status field, * the residual_datacnt, and the residual_sgptr field will tell * for sure. * * o residual_sgptr and sgptr refer to the "next" sg entry * and so may point beyond the last valid sg entry for the * transfer. */ #define SG_PTR_MASK 0xFFFFFFF8 /*16*/ uint16_t tag; /* Reused by Sequencer. */ /*18*/ uint8_t control; /* See SCB_CONTROL in aic79xx.reg for details */ /*19*/ uint8_t scsiid; /* * Selection out Id * Our Id (bits 0-3) Their ID (bits 4-7) */ /*20*/ uint8_t lun; /*21*/ uint8_t task_attribute; /*22*/ uint8_t cdb_len; /*23*/ uint8_t task_management; /*24*/ uint64_t dataptr; /*32*/ uint32_t datacnt; /* Byte 3 is spare. */ /*36*/ uint32_t sgptr; /*40*/ uint32_t hscb_busaddr; /*44*/ uint32_t next_hscb_busaddr; /********** Long lun field only downloaded for full 8 byte lun support ********/ /*48*/ uint8_t pkt_long_lun[8]; /******* Fields below are not Downloaded (Sequencer may use for scratch) ******/ /*56*/ uint8_t spare[8]; }; /************************ Kernel SCB Definitions ******************************/ /* * Some fields of the SCB are OS dependent. Here we collect the * definitions for elements that all OS platforms need to include * in their SCB definition. */ /* * Definition of a scatter/gather element as transferred to the controller. * The aic7xxx chips only support a 24bit length. We use the top byte of * the length to store additional address bits and a flag to indicate * that a given segment terminates the transfer. This gives us an * addressable range of 512GB on machines with 64bit PCI or with chips * that can support dual address cycles on 32bit PCI busses. */ struct ahd_dma_seg { uint32_t addr; uint32_t len; #define AHD_DMA_LAST_SEG 0x80000000 #define AHD_SG_HIGH_ADDR_MASK 0x7F000000 #define AHD_SG_LEN_MASK 0x00FFFFFF }; struct ahd_dma64_seg { uint64_t addr; uint32_t len; uint32_t pad; }; struct map_node { bus_dmamap_t dmamap; - bus_addr_t physaddr; + bus_addr_t busaddr; uint8_t *vaddr; SLIST_ENTRY(map_node) links; }; /* * The current state of this SCB. */ typedef enum { SCB_FLAG_NONE = 0x00000, SCB_TRANSMISSION_ERROR = 0x00001,/* * We detected a parity or CRC * error that has affected the * payload of the command. This * flag is checked when normal * status is returned to catch * the case of a target not * responding to our attempt * to report the error. */ SCB_OTHERTCL_TIMEOUT = 0x00002,/* * Another device was active * during the first timeout for * this SCB so we gave ourselves * an additional timeout period * in case it was hogging the * bus. */ SCB_DEVICE_RESET = 0x00004, SCB_SENSE = 0x00008, SCB_CDB32_PTR = 0x00010, SCB_RECOVERY_SCB = 0x00020, SCB_AUTO_NEGOTIATE = 0x00040,/* Negotiate to achieve goal. */ SCB_NEGOTIATE = 0x00080,/* Negotiation forced for command. */ SCB_ABORT = 0x00100, SCB_ACTIVE = 0x00200, SCB_TARGET_IMMEDIATE = 0x00400, SCB_PACKETIZED = 0x00800, SCB_EXPECT_PPR_BUSFREE = 0x01000, SCB_PKT_SENSE = 0x02000, SCB_CMDPHASE_ABORT = 0x04000, SCB_ON_COL_LIST = 0x08000, - SCB_SILENT = 0x10000 /* + SCB_SILENT = 0x10000,/* * Be quiet about transmission type * errors. They are expected and we * don't want to upset the user. This * flag is typically used during DV.
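 * ("DV" above refers to SCSI Domain Validation.)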
*/ + SCB_TIMEDOUT = 0x20000/* + * SCB has timed out and is on the + * timedout list. + */ } scb_flag; struct scb { struct hardware_scb *hscb; union { SLIST_ENTRY(scb) sle; LIST_ENTRY(scb) le; TAILQ_ENTRY(scb) tqe; } links; union { SLIST_ENTRY(scb) sle; LIST_ENTRY(scb) le; TAILQ_ENTRY(scb) tqe; } links2; #define pending_links links2.le #define collision_links links2.le + LIST_ENTRY(scb) timedout_links; struct scb *col_scb; - ahd_io_ctx_t io_ctx; + aic_io_ctx_t io_ctx; struct ahd_softc *ahd_softc; scb_flag flags; #ifndef __linux__ bus_dmamap_t dmamap; #endif struct scb_platform_data *platform_data; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; void *sg_list; uint8_t *sense_data; bus_addr_t sg_list_busaddr; bus_addr_t sense_busaddr; u_int sg_count;/* How full ahd_dma_seg is */ #define AHD_MAX_LQ_CRC_ERRORS 5 u_int crc_retry_count; }; TAILQ_HEAD(scb_tailq, scb); LIST_HEAD(scb_list, scb); struct scb_data { /* * TAILQ of lists of free SCBs grouped by device * collision domains. */ struct scb_tailq free_scbs; /* * Per-device lists of SCBs whose tag ID would collide * with an already active tag on the device. */ struct scb_list free_scb_lists[AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT]; /* * SCBs that will not collide with any active device. */ struct scb_list any_dev_free_scb_list; /* * Mapping from tag to SCB. */ struct scb *scbindex[AHD_SCB_MAX]; /* * "Bus" addresses of our data structures. */ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */ SLIST_HEAD(, map_node) hscb_maps; SLIST_HEAD(, map_node) sg_maps; SLIST_HEAD(, map_node) sense_maps; int scbs_left; /* unallocated scbs in head map_node */ int sgs_left; /* unallocated sgs in head map_node */ int sense_left; /* unallocated sense in head map_node */ uint16_t numscbs; uint16_t maxhscbs; /* Number of SCBs on the card */ uint8_t init_level; /* * How far we've initialized * this structure. */ }; /************************ Target Mode Definitions *****************************/ /* * Connection desciptor for select-in requests in target mode. */ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ uint8_t identify; /* Identify message */ uint8_t bytes[22]; /* * Bytes contains any additional message * bytes terminated by 0xFF. The remainder * is the cdb to execute. */ uint8_t cmd_valid; /* * When a command is complete, the firmware * will set cmd_valid to all bits set. * After the host has seen the command, * the bits are cleared. This allows us * to just peek at host memory to determine * if more work is complete. cmd_valid is on * an 8 byte boundary to simplify setting * it on aic7880 hardware which only has * limited direct access to the DMA FIFO. */ uint8_t pad[7]; }; /* * Number of events we can buffer up if we run out * of immediate notify ccbs. */ #define AHD_TMODE_EVENT_BUFFER_SIZE 8 struct ahd_tmode_event { uint8_t initiator_id; uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ #define EVENT_TYPE_BUS_RESET 0xFF uint8_t event_arg; }; /* * Per enabled lun target mode state. * As this state is directly influenced by the host OS'es target mode * environment, we let the OS module define it. Forward declare the * structure here so we can store arrays of them, etc. in OS neutral * data structures. 
*/ #ifdef AHD_TARGET_MODE struct ahd_tmode_lstate { struct cam_path *path; struct ccb_hdr_slist accept_tios; struct ccb_hdr_slist immed_notifies; struct ahd_tmode_event event_buffer[AHD_TMODE_EVENT_BUFFER_SIZE]; uint8_t event_r_idx; uint8_t event_w_idx; }; #else struct ahd_tmode_lstate; #endif /******************** Transfer Negotiation Datastructures *********************/ #define AHD_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define AHD_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ #define AHD_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define AHD_TRANS_USER 0x08 /* Modify user negotiation settings */ #define AHD_PERIOD_10MHz 0x19 #define AHD_WIDTH_UNKNOWN 0xFF #define AHD_PERIOD_UNKNOWN 0xFF #define AHD_OFFSET_UNKNOWN 0xFF #define AHD_PPR_OPTS_UNKNOWN 0xFF /* * Transfer Negotiation Information. */ struct ahd_transinfo { uint8_t protocol_version; /* SCSI Revision level */ uint8_t transport_version; /* SPI Revision level */ uint8_t width; /* Bus width */ uint8_t period; /* Sync rate factor */ uint8_t offset; /* Sync offset */ uint8_t ppr_options; /* Parallel Protocol Request options */ }; /* * Per-initiator current, goal and user transfer negotiation information. */ struct ahd_initiator_tinfo { struct ahd_transinfo curr; struct ahd_transinfo goal; struct ahd_transinfo user; }; /* * Per enabled target ID state. * Pointers to lun target state as well as sync/wide negotiation information * for each initiator<->target mapping. For the initiator role we pretend * that we are the target and the targets are the initiators since the * negotiation is the same regardless of role. */ struct ahd_tmode_tstate { struct ahd_tmode_lstate* enabled_luns[AHD_NUM_LUNS]; struct ahd_initiator_tinfo transinfo[AHD_NUM_TARGETS]; /* * Per initiator state bitmasks. */ uint16_t auto_negotiate;/* Auto Negotiation Required */ uint16_t discenable; /* Disconnection allowed */ uint16_t tagenable; /* Tagged Queuing allowed */ }; /* * Points of interest along the negotiated transfer scale. */ #define AHD_SYNCRATE_160 0x8 #define AHD_SYNCRATE_PACED 0x8 #define AHD_SYNCRATE_DT 0x9 #define AHD_SYNCRATE_ULTRA2 0xa #define AHD_SYNCRATE_ULTRA 0xc #define AHD_SYNCRATE_FAST 0x19 #define AHD_SYNCRATE_MIN_DT AHD_SYNCRATE_FAST #define AHD_SYNCRATE_SYNC 0x32 #define AHD_SYNCRATE_MIN 0x60 #define AHD_SYNCRATE_ASYNC 0xFF #define AHD_SYNCRATE_MAX AHD_SYNCRATE_160 /* Safe and valid period for async negotiations. */ #define AHD_ASYNC_XFER_PERIOD 0x44 /* * In RevA, the synctable uses a 120MHz rate for the period * factor 8 and 160MHz for the period factor 7. The 120MHz * rate never made it into the official SCSI spec, so we must * compensate when setting the negotiation table for Rev A * parts. */ #define AHD_SYNCRATE_REVA_120 0x8 #define AHD_SYNCRATE_REVA_160 0x7 /***************************** Lookup Tables **********************************/ /* * Phase -> name and message out response * to parity errors in each phase table. 
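 * * (Editorial illustration, not part of this change; a sketch of how a parity-error handler might consume such an entry, assuming the current bus phase is in curphase: * * struct ahd_phase_table_entry *entry; * * entry = ahd_lookup_phase_entry(curphase); * printf("%s: parity error during %s\n", ahd_name(ahd), entry->phasemsg); * ahd->send_msg_perror = entry->mesg_out; * * ahd_lookup_phase_entry() is declared with the other utility functions later in this header.)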
*/ struct ahd_phase_table_entry { uint8_t phase; uint8_t mesg_out; /* Message response to parity errors */ char *phasemsg; }; /************************** Serial EEPROM Format ******************************/ struct seeprom_config { /* * Per SCSI ID Configuration Flags */ uint16_t device_flags[16]; /* words 0-15 */ #define CFXFER 0x003F /* synchronous transfer rate */ #define CFXFER_ASYNC 0x3F #define CFQAS 0x0040 /* Negotiate QAS */ #define CFPACKETIZED 0x0080 /* Negotiate Packetized Transfers */ #define CFSTART 0x0100 /* send start unit SCSI command */ #define CFINCBIOS 0x0200 /* include in BIOS scan */ #define CFDISC 0x0400 /* enable disconnection */ #define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ #define CFWIDEB 0x1000 /* wide bus device */ #define CFHOSTMANAGED 0x8000 /* Managed by a RAID controller */ /* * BIOS Control Bits */ uint16_t bios_control; /* word 16 */ #define CFSUPREM 0x0001 /* support all removeable drives */ #define CFSUPREMB 0x0002 /* support removeable boot drives */ #define CFBIOSSTATE 0x000C /* BIOS Action State */ #define CFBS_DISABLED 0x00 #define CFBS_ENABLED 0x04 #define CFBS_DISABLED_SCAN 0x08 #define CFENABLEDV 0x0010 /* Perform Domain Validation */ #define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ #define CFSPARITY 0x0040 /* SCSI parity */ #define CFEXTEND 0x0080 /* extended translation enabled */ #define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */ #define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ #define CFMSG_VERBOSE 0x0000 #define CFMSG_SILENT 0x0200 #define CFMSG_DIAG 0x0400 #define CFRESETB 0x0800 /* reset SCSI bus at boot */ /* UNUSED 0xf000 */ /* * Host Adapter Control Bits */ uint16_t adapter_control; /* word 17 */ #define CFAUTOTERM 0x0001 /* Perform Auto termination */ #define CFSTERM 0x0002 /* SCSI low byte termination */ #define CFWSTERM 0x0004 /* SCSI high byte termination */ #define CFSEAUTOTERM 0x0008 /* Ultra2 Perform secondary Auto Term*/ #define CFSELOWTERM 0x0010 /* Ultra2 secondary low term */ #define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */ #define CFSTPWLEVEL 0x0040 /* Termination level control */ #define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */ #define CFTERM_MENU 0x0100 /* BIOS displays termination menu */ #define CFCLUSTERENB 0x8000 /* Cluster Enable */ /* * Bus Release Time, Host Adapter ID */ uint16_t brtime_id; /* word 18 */ #define CFSCSIID 0x000f /* host adapter SCSI ID */ /* UNUSED 0x00f0 */ #define CFBRTIME 0xff00 /* bus release time/PCI Latency Time */ /* * Maximum targets */ uint16_t max_targets; /* word 19 */ #define CFMAXTARG 0x00ff /* maximum targets */ #define CFBOOTLUN 0x0f00 /* Lun to boot from */ #define CFBOOTID 0xf000 /* Target to boot from */ uint16_t res_1[10]; /* words 20-29 */ uint16_t signature; /* BIOS Signature */ #define CFSIGNATURE 0x400 uint16_t checksum; /* word 31 */ }; /* * Vital Product Data used during POST and by the BIOS. 
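 * * (Editorial illustration, not part of this change: after a VPD image has been fetched into a struct vpd_config, both embedded checksums should pass before the data is trusted: * * if (ahd_verify_vpd_cksum(&vpd) == 0) * printf("%s: invalid VPD data\n", ahd_name(ahd)); * * ahd_verify_vpd_cksum(), defined earlier in this change, returns 1 when both checksums are valid and 0 otherwise.)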
*/ struct vpd_config { uint8_t bios_flags; #define VPDMASTERBIOS 0x0001 #define VPDBOOTHOST 0x0002 uint8_t reserved_1[21]; uint8_t resource_type; uint8_t resource_len[2]; uint8_t resource_data[8]; uint8_t vpd_tag; uint16_t vpd_len; uint8_t vpd_keyword[2]; uint8_t length; uint8_t revision; uint8_t device_flags; uint8_t termnation_menus[2]; uint8_t fifo_threshold; uint8_t end_tag; uint8_t vpd_checksum; uint16_t default_target_flags; uint16_t default_bios_flags; uint16_t default_ctrl_flags; uint8_t default_irq; uint8_t pci_lattime; uint8_t max_target; uint8_t boot_lun; uint16_t signature; uint8_t reserved_2; uint8_t checksum; uint8_t reserved_3[4]; }; /****************************** Flexport Logic ********************************/ #define FLXADDR_TERMCTL 0x0 #define FLX_TERMCTL_ENSECHIGH 0x8 #define FLX_TERMCTL_ENSECLOW 0x4 #define FLX_TERMCTL_ENPRIHIGH 0x2 #define FLX_TERMCTL_ENPRILOW 0x1 #define FLXADDR_ROMSTAT_CURSENSECTL 0x1 #define FLX_ROMSTAT_SEECFG 0xF0 #define FLX_ROMSTAT_EECFG 0x0F #define FLX_ROMSTAT_SEE_93C66 0x00 #define FLX_ROMSTAT_SEE_NONE 0xF0 #define FLX_ROMSTAT_EE_512x8 0x0 #define FLX_ROMSTAT_EE_1MBx8 0x1 #define FLX_ROMSTAT_EE_2MBx8 0x2 #define FLX_ROMSTAT_EE_4MBx8 0x3 #define FLX_ROMSTAT_EE_16MBx8 0x4 #define CURSENSE_ENB 0x1 #define FLXADDR_FLEXSTAT 0x2 #define FLX_FSTAT_BUSY 0x1 #define FLXADDR_CURRENT_STAT 0x4 #define FLX_CSTAT_SEC_HIGH 0xC0 #define FLX_CSTAT_SEC_LOW 0x30 #define FLX_CSTAT_PRI_HIGH 0x0C #define FLX_CSTAT_PRI_LOW 0x03 #define FLX_CSTAT_MASK 0x03 #define FLX_CSTAT_SHIFT 2 #define FLX_CSTAT_OKAY 0x0 #define FLX_CSTAT_OVER 0x1 #define FLX_CSTAT_UNDER 0x2 #define FLX_CSTAT_INVALID 0x3 int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bstream); int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count); int ahd_wait_seeprom(struct ahd_softc *ahd); int ahd_verify_vpd_cksum(struct vpd_config *vpd); int ahd_verify_cksum(struct seeprom_config *sc); int ahd_acquire_seeprom(struct ahd_softc *ahd); void ahd_release_seeprom(struct ahd_softc *ahd); /**************************** Message Buffer *********************************/ typedef enum { MSG_FLAG_NONE = 0x00, MSG_FLAG_EXPECT_PPR_BUSFREE = 0x01, MSG_FLAG_IU_REQ_CHANGED = 0x02, MSG_FLAG_EXPECT_IDE_BUSFREE = 0x04, MSG_FLAG_EXPECT_QASREJ_BUSFREE = 0x08, MSG_FLAG_PACKETIZED = 0x10 } ahd_msg_flags; typedef enum { MSG_TYPE_NONE = 0x00, MSG_TYPE_INITIATOR_MSGOUT = 0x01, MSG_TYPE_INITIATOR_MSGIN = 0x02, MSG_TYPE_TARGET_MSGOUT = 0x03, MSG_TYPE_TARGET_MSGIN = 0x04 } ahd_msg_type; typedef enum { MSGLOOP_IN_PROG, MSGLOOP_MSGCOMPLETE, MSGLOOP_TERMINATED } msg_loop_stat; /*********************** Software Configuration Structure *********************/ struct ahd_suspend_channel_state { uint8_t scsiseq; uint8_t sxfrctl0; uint8_t sxfrctl1; uint8_t simode0; uint8_t simode1; uint8_t seltimer; uint8_t seqctl; }; struct ahd_suspend_state { struct ahd_suspend_channel_state channel[2]; uint8_t optionmode; uint8_t dscommand0; uint8_t dspcistatus; /* hsmailbox */ uint8_t crccontrol1; uint8_t scbbaddr; /* Host and sequencer SCB counts */ uint8_t dff_thrsh; uint8_t *scratch_ram; uint8_t *btt; }; typedef void (*ahd_bus_intr_t)(struct ahd_softc *); typedef enum { AHD_MODE_DFF0, AHD_MODE_DFF1, AHD_MODE_CCHAN, AHD_MODE_SCSI, AHD_MODE_CFG, AHD_MODE_UNKNOWN } ahd_mode; #define AHD_MK_MSK(x) (0x01 << (x)) #define AHD_MODE_DFF0_MSK AHD_MK_MSK(AHD_MODE_DFF0) #define AHD_MODE_DFF1_MSK AHD_MK_MSK(AHD_MODE_DFF1) #define AHD_MODE_CCHAN_MSK AHD_MK_MSK(AHD_MODE_CCHAN) #define 
AHD_MODE_SCSI_MSK AHD_MK_MSK(AHD_MODE_SCSI) #define AHD_MODE_CFG_MSK AHD_MK_MSK(AHD_MODE_CFG) #define AHD_MODE_UNKNOWN_MSK AHD_MK_MSK(AHD_MODE_UNKNOWN) #define AHD_MODE_ANY_MSK (~0) typedef uint8_t ahd_mode_state; typedef void ahd_callback_t (void *); +struct ahd_completion +{ + uint16_t tag; + uint8_t sg_status; + uint8_t pad[4]; + uint8_t valid_tag; +}; + struct ahd_softc { bus_space_tag_t tags[2]; bus_space_handle_t bshs[2]; #ifndef __linux__ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ #endif struct scb_data scb_data; struct hardware_scb *next_queued_hscb; + struct map_node *next_queued_hscb_map; /* * SCBs that have been sent to the controller */ LIST_HEAD(, scb) pending_scbs; /* + * SCBs whose timeout routine has been called. + */ + LIST_HEAD(, scb) timedout_scbs; + + /* * Current register window mode information. */ ahd_mode dst_mode; ahd_mode src_mode; /* * Saved register window mode information * used for restore on next unpause. */ ahd_mode saved_dst_mode; ahd_mode saved_src_mode; /* * Platform specific data. */ struct ahd_platform_data *platform_data; /* * Platform specific device information. */ - ahd_dev_softc_t dev_softc; + aic_dev_softc_t dev_softc; /* * Bus specific device information. */ ahd_bus_intr_t bus_intr; /* * Target mode related state kept on a per enabled lun basis. * Targets that are not enabled will have null entries. * As an initiator, we keep one target entry for our initiator * ID to store our sync/wide transfer settings. */ struct ahd_tmode_tstate *enabled_targets[AHD_NUM_TARGETS]; /* * The black hole device responsible for handling requests for * disabled luns on enabled targets. */ struct ahd_tmode_lstate *black_hole; /* * Device instance currently on the bus awaiting a continue TIO * for a command that was not given the disconnect priveledge. */ struct ahd_tmode_lstate *pending_device; /* * Timer handles for timer driven callbacks. */ - ahd_timer_t reset_timer; - ahd_timer_t stat_timer; + aic_timer_t reset_timer; + aic_timer_t stat_timer; /* * Statistics. */ #define AHD_STAT_UPDATE_US 250000 /* 250ms */ #define AHD_STAT_BUCKETS 4 u_int cmdcmplt_bucket; uint32_t cmdcmplt_counts[AHD_STAT_BUCKETS]; uint32_t cmdcmplt_total; /* * Card characteristics */ ahd_chip chip; ahd_feature features; ahd_bug bugs; ahd_flag flags; struct seeprom_config *seep_config; - /* Values to store in the SEQCTL register for pause and unpause */ - uint8_t unpause; - uint8_t pause; - /* Command Queues */ + struct ahd_completion *qoutfifo; uint16_t qoutfifonext; uint16_t qoutfifonext_valid_tag; uint16_t qinfifonext; uint16_t qinfifo[AHD_SCB_MAX]; - uint16_t *qoutfifo; + /* + * Our qfreeze count. The sequencer compares + * this value with its own counter to determine + * whether to allow selections to occur. + */ + uint16_t qfreeze_cnt; + + /* Values to store in the SEQCTL register for pause and unpause */ + uint8_t unpause; + uint8_t pause; + /* Critical Section Data */ struct cs *critical_sections; u_int num_critical_sections; /* Buffer for handling packetized bitbucket. */ uint8_t *overrun_buf; /* Links for chaining softcs */ TAILQ_ENTRY(ahd_softc) links; /* Channel Names ('A', 'B', etc.) */ char channel; /* Initiator Bus ID */ uint8_t our_id; /* * Target incoming command FIFO. */ struct target_cmd *targetcmds; uint8_t tqinfifonext; /* * Cached verson of the hs_mailbox so we can avoid * pausing the sequencer during mailbox updates. */ uint8_t hs_mailbox; /* * Incoming and outgoing message handling. 
*/ uint8_t send_msg_perror; ahd_msg_flags msg_flags; ahd_msg_type msg_type; uint8_t msgout_buf[12];/* Message we are sending */ uint8_t msgin_buf[12];/* Message we are receiving */ u_int msgout_len; /* Length of message to send */ u_int msgout_index; /* Current index in msgout */ u_int msgin_index; /* Current index in msgin */ /* * Mapping information for data structures shared * between the sequencer and kernel. */ bus_dma_tag_t parent_dmat; bus_dma_tag_t shared_data_dmat; - bus_dmamap_t shared_data_dmamap; - bus_addr_t shared_data_busaddr; + struct map_node shared_data_map; /* Information saved through suspend/resume cycles */ struct ahd_suspend_state suspend_state; /* Number of enabled target mode device on this card */ u_int enabled_luns; /* Initialization level of this data structure */ u_int init_level; /* PCI cacheline size. */ u_int pci_cachesize; /* IO Cell Parameters */ uint8_t iocell_opts[AHD_NUM_PER_DEV_ANNEXCOLS]; u_int stack_size; uint16_t *saved_stack; /* Per-Unit descriptive information */ const char *description; const char *bus_description; char *name; int unit; /* Selection Timer settings */ int seltime; /* * Interrupt coalescing settings. */ #define AHD_INT_COALESCING_TIMER_DEFAULT 250 /*us*/ #define AHD_INT_COALESCING_MAXCMDS_DEFAULT 10 #define AHD_INT_COALESCING_MAXCMDS_MAX 127 #define AHD_INT_COALESCING_MINCMDS_DEFAULT 5 #define AHD_INT_COALESCING_MINCMDS_MAX 127 #define AHD_INT_COALESCING_THRESHOLD_DEFAULT 2000 #define AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT 1000 u_int int_coalescing_timer; u_int int_coalescing_maxcmds; u_int int_coalescing_mincmds; u_int int_coalescing_threshold; u_int int_coalescing_stop_threshold; uint16_t user_discenable;/* Disconnection allowed */ uint16_t user_tagenable;/* Tagged Queuing allowed */ }; TAILQ_HEAD(ahd_softc_tailq, ahd_softc); extern struct ahd_softc_tailq ahd_tailq; /*************************** IO Cell Configuration ****************************/ #define AHD_PRECOMP_SLEW_INDEX \ (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) #define AHD_AMPLITUDE_INDEX \ (AHD_ANNEXCOL_AMPLITUDE - AHD_ANNEXCOL_PER_DEV0) #define AHD_SET_SLEWRATE(ahd, new_slew) \ do { \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_SLEWRATE_MASK; \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ (((new_slew) << AHD_SLEWRATE_SHIFT) & AHD_SLEWRATE_MASK); \ } while (0) #define AHD_SET_PRECOMP(ahd, new_pcomp) \ do { \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK; \ (ahd)->iocell_opts[AHD_PRECOMP_SLEW_INDEX] |= \ (((new_pcomp) << AHD_PRECOMP_SHIFT) & AHD_PRECOMP_MASK); \ } while (0) #define AHD_SET_AMPLITUDE(ahd, new_amp) \ do { \ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] &= ~AHD_AMPLITUDE_MASK; \ (ahd)->iocell_opts[AHD_AMPLITUDE_INDEX] |= \ (((new_amp) << AHD_AMPLITUDE_SHIFT) & AHD_AMPLITUDE_MASK); \ } while (0) /************************ Active Device Information ***************************/ typedef enum { ROLE_UNKNOWN, ROLE_INITIATOR, ROLE_TARGET } role_t; struct ahd_devinfo { int our_scsiid; int target_offset; uint16_t target_mask; u_int target; u_int lun; char channel; role_t role; /* * Only guaranteed to be correct if not * in the busfree state. 
*/ }; /****************************** PCI Structures ********************************/ -#define AHD_PCI_IOADDR0 PCIR_MAPS /* I/O BAR*/ -#define AHD_PCI_MEMADDR (PCIR_MAPS + 4) /* Memory BAR */ -#define AHD_PCI_IOADDR1 (PCIR_MAPS + 12)/* Second I/O BAR */ +#define AHD_PCI_IOADDR0 PCIR_BAR(0) /* I/O BAR*/ +#define AHD_PCI_MEMADDR PCIR_BAR(1) /* Memory BAR */ +#define AHD_PCI_IOADDR1 PCIR_BAR(3) /* Second I/O BAR */ typedef int (ahd_device_setup_t)(struct ahd_softc *); struct ahd_pci_identity { uint64_t full_id; uint64_t id_mask; char *name; ahd_device_setup_t *setup; }; extern struct ahd_pci_identity ahd_pci_ident_table []; extern const u_int ahd_num_pci_devs; /***************************** VL/EISA Declarations ***************************/ struct aic7770_identity { uint32_t full_id; uint32_t id_mask; char *name; ahd_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table []; extern const int ahd_num_aic7770_devs; #define AHD_EISA_SLOT_OFFSET 0xc00 #define AHD_EISA_IOSIZE 0x100 /*************************** Function Declarations ****************************/ /******************************************************************************/ void ahd_reset_cmds_pending(struct ahd_softc *ahd); u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int busyid); static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl); static __inline void ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl) { ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL); } /***************************** PCI Front End *********************************/ -struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t); +struct ahd_pci_identity *ahd_find_pci_device(aic_dev_softc_t); int ahd_pci_config(struct ahd_softc *, struct ahd_pci_identity *); int ahd_pci_test_register_access(struct ahd_softc *); /************************** SCB and SCB queue management **********************/ int ahd_probe_scbs(struct ahd_softc *); void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb); int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); /****************************** Initialization ********************************/ struct ahd_softc *ahd_alloc(void *platform_arg, char *name); int ahd_softc_init(struct ahd_softc *); void ahd_controller_info(struct ahd_softc *ahd, char *buf); int ahd_init(struct ahd_softc *ahd); int ahd_default_config(struct ahd_softc *ahd); int ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd); int ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc); void ahd_intr_enable(struct ahd_softc *ahd, int enable); void ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds); void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); void ahd_pause_and_flushwork(struct ahd_softc *ahd); int ahd_suspend(struct ahd_softc *ahd); int ahd_resume(struct ahd_softc *ahd); void ahd_softc_insert(struct ahd_softc *); struct ahd_softc *ahd_find_softc(struct ahd_softc *ahd); void ahd_set_unit(struct ahd_softc *, int); void ahd_set_name(struct ahd_softc *, char *); struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb); void ahd_alloc_scbs(struct ahd_softc *ahd); void ahd_free(struct ahd_softc *ahd); int ahd_reset(struct ahd_softc *ahd, int reinit); void ahd_shutdown(void *arg); int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value); int 
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value); int ahd_wait_flexport(struct ahd_softc *ahd); /*************************** Interrupt Services *******************************/ void ahd_pci_intr(struct ahd_softc *ahd); void ahd_clear_intstat(struct ahd_softc *ahd); void ahd_flush_qoutfifo(struct ahd_softc *ahd); void ahd_run_qoutfifo(struct ahd_softc *ahd); #ifdef AHD_TARGET_MODE void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); #endif void ahd_handle_hwerrint(struct ahd_softc *ahd); void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat); void ahd_clear_critical_section(struct ahd_softc *ahd); /***************************** Error Recovery *********************************/ typedef enum { SEARCH_COMPLETE, SEARCH_COUNT, SEARCH_REMOVE, SEARCH_PRINT } ahd_search_action; int ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action); int ahd_search_disc_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state); void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); int ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset); int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); void ahd_restart(struct ahd_softc *ahd); void ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo); void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); void ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb); void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); +void ahd_timeout(struct scb *scb); +void ahd_recover_commands(struct ahd_softc *ahd); /*************************** Utility Functions ********************************/ struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); void ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role); /************************** Transfer Negotiation ******************************/ void ahd_find_syncrate(struct ahd_softc *ahd, u_int *period, u_int *ppr_options, u_int maxsync); void ahd_validate_offset(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int period, u_int *offset, int wide, role_t role); void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role); /* * Negotiation types. These are used to qualify if we should renegotiate * even if our goal and current transport parameters are identical. */ typedef enum { AHD_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ AHD_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ AHD_NEG_ALWAYS /* Renegotiate even if goal is async.
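 *
 * A hedged sketch of how a caller might choose between
 * these (the condition is hypothetical, not lifted from
 * the driver):
 *
 *	neg_type = force_renegotiation
 *		 ? AHD_NEG_ALWAYS : AHD_NEG_TO_GOAL;
 *	ahd_update_neg_request(ahd, &devinfo, tstate,
 *			       tinfo, neg_type);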
*/ } ahd_neg_type; int ahd_update_neg_request(struct ahd_softc*, struct ahd_devinfo*, struct ahd_tmode_tstate*, struct ahd_initiator_tinfo*, ahd_neg_type); void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused); void ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int ppr_options, u_int type, int paused); typedef enum { AHD_QUEUE_NONE, AHD_QUEUE_BASIC, AHD_QUEUE_TAGGED } ahd_queue_alg; void ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, ahd_queue_alg alg); /**************************** Target Mode *************************************/ #ifdef AHD_TARGET_MODE void ahd_send_lstate_events(struct ahd_softc *, struct ahd_tmode_lstate *); void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb); cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate **lstate, int notfound_failure); #ifndef AHD_TMODE_ENABLE #define AHD_TMODE_ENABLE 0 #endif #endif /******************************* Debug ***************************************/ #ifdef AHD_DEBUG extern uint32_t ahd_debug; #define AHD_SHOW_MISC 0x00001 #define AHD_SHOW_SENSE 0x00002 #define AHD_SHOW_RECOVERY 0x00004 #define AHD_DUMP_SEEPROM 0x00008 #define AHD_SHOW_TERMCTL 0x00010 #define AHD_SHOW_MEMORY 0x00020 #define AHD_SHOW_MESSAGES 0x00040 #define AHD_SHOW_MODEPTR 0x00080 #define AHD_SHOW_SELTO 0x00100 #define AHD_SHOW_FIFOS 0x00200 #define AHD_SHOW_QFULL 0x00400 #define AHD_SHOW_DV 0x00800 #define AHD_SHOW_MASKED_ERRORS 0x01000 #define AHD_SHOW_QUEUE 0x02000 #define AHD_SHOW_TQIN 0x04000 #define AHD_SHOW_SG 0x08000 #define AHD_SHOW_INT_COALESCING 0x10000 #define AHD_DEBUG_SEQUENCER 0x20000 #endif void ahd_print_scb(struct scb *scb); void ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); void ahd_dump_sglist(struct scb *scb); void ahd_dump_all_cards_state(void); void ahd_dump_card_state(struct ahd_softc *ahd); int ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point); void ahd_dump_scbs(struct ahd_softc *ahd); #endif /* _AIC79XX_H_ */ Index: stable/4/sys/dev/aic7xxx/aic79xx.reg =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx.reg (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx.reg (revision 125849) @@ -1,3958 +1,3975 @@ /* * Aic79xx register and scratch ram definitions. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ -VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#70 $" +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#75 $" /* * This file is processed by the aic7xxx_asm utility for use in assembling * firmware for the aic79xx family of SCSI host adapters as well as to generate * a C header file for use in the kernel portion of the Aic79xx driver. */ /* Register window Modes */ #define M_DFF0 0 #define M_DFF1 1 #define M_CCHAN 2 #define M_SCSI 3 #define M_CFG 4 #define M_DST_SHIFT 4 #define MK_MODE(src, dst) ((src) | ((dst) << M_DST_SHIFT)) #define SET_MODE(src, dst) \ SET_SRC_MODE src; \ SET_DST_MODE dst; \ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ mvi MK_MODE(src, dst) call set_mode_work_around; \ } else { \ mvi MODE_PTR, MK_MODE(src, dst); \ } -#define TOGGLE_DFF_MODE \ - if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ - call toggle_dff_mode_work_around; \ - } else { \ - xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); \ - } - #define RESTORE_MODE(mode) \ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { \ mov mode call set_mode_work_around; \ } else { \ mov MODE_PTR, mode; \ } #define SET_SEQINTCODE(code) \ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { \ mvi code call set_seqint_work_around; \ } else { \ mvi SEQINTCODE, code; \ } /* * Mode Pointer * Controls which of the 5, 512byte, address spaces should be used * as the source and destination of any register accesses in our * register window. */ register MODE_PTR { address 0x000 access_mode RW field DST_MODE 0x70 field SRC_MODE 0x07 mode_pointer } const SRC_MODE_SHIFT 0 const DST_MODE_SHIFT 4 /* * Host Interrupt Status */ register INTSTAT { address 0x001 access_mode RW field HWERRINT 0x80 field BRKADRINT 0x40 field SWTMINT 0x20 field PCIINT 0x10 field SCSIINT 0x08 field SEQINT 0x04 field CMDCMPLT 0x02 field SPLTINT 0x01 mask INT_PEND 0xFF } /* * Sequencer Interrupt Code */ register SEQINTCODE { address 0x002 access_mode RW field { NO_SEQINT, /* No seqint pending. */ BAD_PHASE, /* unknown scsi bus phase */ SEND_REJECT, /* sending a message reject */ PROTO_VIOLATION, /* Protocol Violation */ NO_MATCH, /* no cmd match for reconnect */ IGN_WIDE_RES, /* Complex IGN Wide Res Msg */ PDATA_REINIT, /* * Returned to data phase * that requires data * transfer pointers to be * recalculated from the * transfer residual. */ HOST_MSG_LOOP, /* * The bus is ready for the * host to perform another * message transaction. This * mechanism is used for things * like sync/wide negotiation * that require a kernel based * message state engine. 
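 * For example, WDTR/SDTR/PPR negotiation
 * is handled this way: each message byte
 * re-enters the kernel's message loop
 * until the exchange completes.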
*/ BAD_STATUS, /* Bad status from target */ DATA_OVERRUN, /* * Target attempted to write * beyond the bounds of its * command. */ MKMSG_FAILED, /* * Target completed command * without honoring our ATN * request to issue a message. */ MISSED_BUSFREE, /* * The sequencer never saw * the bus go free after * either a command complete * or disconnect message. */ DUMP_CARD_STATE, ILLEGAL_PHASE, INVALID_SEQINT, CFG4ISTAT_INTR, STATUS_OVERRUN, CFG4OVERRUN, ENTERING_NONPACK, TASKMGMT_FUNC_COMPLETE, /* * Task management function * request completed with * an expected busfree. */ TASKMGMT_CMD_CMPLT_OKAY, /* * A command with a non-zero * task management function * has completed via the normal * command completion method * for commands with a zero * task management function. * This happens when an attempt * to abort a command loses * the race for the command to * complete normally. */ TRACEPOINT0, TRACEPOINT1, TRACEPOINT2, TRACEPOINT3, SAW_HWERR, BAD_SCB_STATUS } } /* * Clear Host Interrupt */ register CLRINT { address 0x003 access_mode WO field CLRHWERRINT 0x80 /* Rev B or greater */ field CLRBRKADRINT 0x40 field CLRSWTMINT 0x20 field CLRPCIINT 0x10 field CLRSCSIINT 0x08 field CLRSEQINT 0x04 field CLRCMDINT 0x02 field CLRSPLTINT 0x01 } /* * Error Register */ register ERROR { address 0x004 access_mode RO field CIOPARERR 0x80 field CIOACCESFAIL 0x40 /* Rev B or greater */ field MPARERR 0x20 field DPARERR 0x10 field SQPARERR 0x08 field ILLOPCODE 0x04 field DSCTMOUT 0x02 } /* * Clear Error */ register CLRERR { address 0x004 access_mode WO field CLRCIOPARERR 0x80 field CLRCIOACCESFAIL 0x40 /* Rev B or greater */ field CLRMPARERR 0x20 field CLRDPARERR 0x10 field CLRSQPARERR 0x08 field CLRILLOPCODE 0x04 field CLRDSCTMOUT 0x02 } /* * Host Control Register * Overall host control of the device. 
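 *
 * Pausing the sequencer is a handshake on this register: the kernel
 * sets PAUSE and spins until the chip acknowledges by reading PAUSE
 * back as set.  A minimal sketch using the driver's byte accessors
 * (and assuming the softc caches its pause value, as in the ahc
 * driver):
 *
 *	ahd_outb(ahd, HCNTRL, ahd->pause);
 *	while ((ahd_inb(ahd, HCNTRL) & PAUSE) == 0)
 *		;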
*/ register HCNTRL { address 0x005 access_mode RW field SEQ_RESET 0x80 /* Rev B or greater */ field POWRDN 0x40 field SWINT 0x10 field SWTIMER_START_B 0x08 /* Rev B or greater */ field PAUSE 0x04 field INTEN 0x02 field CHIPRST 0x01 field CHIPRSTACK 0x01 } /* * Host New SCB Queue Offset */ register HNSCB_QOFF { address 0x006 access_mode RW size 2 } /* * Host Empty SCB Queue Offset */ register HESCB_QOFF { address 0x008 access_mode RW } /* * Host Mailbox */ register HS_MAILBOX { address 0x00B access_mode RW mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ mask ENINT_COALESCE 0x40 /* Perform interrupt coalescing */ } /* * Sequencer Interrupt Status */ register SEQINTSTAT { address 0x00C access_mode RO field SEQ_SWTMRTO 0x10 field SEQ_SEQINT 0x08 field SEQ_SCSIINT 0x04 field SEQ_PCIINT 0x02 field SEQ_SPLTINT 0x01 } /* * Clear SEQ Interrupt */ register CLRSEQINTSTAT { address 0x00C access_mode WO field CLRSEQ_SWTMRTO 0x10 field CLRSEQ_SEQINT 0x08 field CLRSEQ_SCSIINT 0x04 field CLRSEQ_PCIINT 0x02 field CLRSEQ_SPLTINT 0x01 } /* * Software Timer */ register SWTIMER { address 0x00E access_mode RW size 2 } /* * SEQ New SCB Queue Offset */ register SNSCB_QOFF { address 0x010 access_mode RW size 2 modes M_CCHAN } /* * SEQ Empty SCB Queue Offset */ register SESCB_QOFF { address 0x012 access_mode RW modes M_CCHAN } /* * SEQ Done SCB Queue Offset */ register SDSCB_QOFF { address 0x014 access_mode RW modes M_CCHAN size 2 } /* * Queue Offset Control & Status */ register QOFF_CTLSTA { address 0x016 access_mode RW modes M_CCHAN field EMPTY_SCB_AVAIL 0x80 field NEW_SCB_AVAIL 0x40 field SDSCB_ROLLOVR 0x20 field HS_MAILBOX_ACT 0x10 field SCB_QSIZE 0x0F { SCB_QSIZE_4, SCB_QSIZE_8, SCB_QSIZE_16, SCB_QSIZE_32, SCB_QSIZE_64, SCB_QSIZE_128, SCB_QSIZE_256, SCB_QSIZE_512, SCB_QSIZE_1024, SCB_QSIZE_2048, SCB_QSIZE_4096, SCB_QSIZE_8192, SCB_QSIZE_16384 } } /* * Interrupt Control */ register INTCTL { address 0x018 access_mode RW field SWTMINTMASK 0x80 field SWTMINTEN 0x40 field SWTIMER_START 0x20 field AUTOCLRCMDINT 0x10 field PCIINTEN 0x08 field SCSIINTEN 0x04 field SEQINTEN 0x02 field SPLTINTEN 0x01 } /* * Data FIFO Control */ register DFCNTRL { address 0x019 access_mode RW modes M_DFF0, M_DFF1 field PRELOADEN 0x80 field SCSIENWRDIS 0x40 /* Rev B only.
*/ field SCSIEN 0x20 field SCSIENACK 0x20 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 field DIRECTIONACK 0x04 field FIFOFLUSH 0x02 field FIFOFLUSHACK 0x02 field DIRECTIONEN 0x01 } /* * Device Space Command 0 */ register DSCOMMAND0 { address 0x019 access_mode RW modes M_CFG field CACHETHEN 0x80 /* Cache Threshold enable */ field DPARCKEN 0x40 /* Data Parity Check Enable */ field MPARCKEN 0x20 /* Memory Parity Check Enable */ field EXTREQLCK 0x10 /* External Request Lock */ field DISABLE_TWATE 0x02 /* Rev B or greater */ field CIOPARCKEN 0x01 /* Internal bus parity error enable */ } /* * Data FIFO Status */ register DFSTATUS { address 0x01A access_mode RO modes M_DFF0, M_DFF1 field PRELOAD_AVAIL 0x80 field PKT_PRELOAD_AVAIL 0x40 field MREQPEND 0x10 field HDONE 0x08 field DFTHRESH 0x04 field FIFOFULL 0x02 field FIFOEMP 0x01 } /* * S/G Cache Pointer */ register SG_CACHE_PRE { address 0x01B access_mode WO modes M_DFF0, M_DFF1 field SG_ADDR_MASK 0xf8 field ODD_SEG 0x04 field LAST_SEG 0x02 } register SG_CACHE_SHADOW { address 0x01B access_mode RO modes M_DFF0, M_DFF1 field SG_ADDR_MASK 0xf8 field ODD_SEG 0x04 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } /* * Arbiter Control */ register ARBCTL { address 0x01B access_mode RW modes M_CFG field RESET_HARB 0x80 field RETRY_SWEN 0x08 field USE_TIME 0x07 } /* * Data Channel Host Address */ register HADDR { address 0x070 access_mode RW size 8 modes M_DFF0, M_DFF1 } /* * Host Overlay DMA Address */ register HODMAADR { address 0x070 access_mode RW size 8 modes M_SCSI } /* * PCI PLL Delay. */ register PLLDELAY { address 0x070 access_mode RW size 1 modes M_CFG field SPLIT_DROP_REQ 0x80 } /* * Data Channel Host Count */ register HCNT { address 0x078 access_mode RW size 3 modes M_DFF0, M_DFF1 } /* * Host Overlay DMA Count */ register HODMACNT { address 0x078 access_mode RW size 2 modes M_SCSI } /* * Host Overlay DMA Enable */ register HODMAEN { address 0x07A access_mode RW modes M_SCSI } /* * Scatter/Gather Host Address */ register SGHADDR { address 0x07C access_mode RW size 8 modes M_DFF0, M_DFF1 } /* * SCB Host Address */ register SCBHADDR { address 0x07C access_mode RW size 8 modes M_CCHAN } /* * Scatter/Gather Host Count */ register SGHCNT { address 0x084 access_mode RW modes M_DFF0, M_DFF1 } /* * SCB Host Count */ register SCBHCNT { address 0x084 access_mode RW modes M_CCHAN } /* * Data FIFO Threshold */ register DFF_THRSH { address 0x088 access_mode RW modes M_CFG field WR_DFTHRSH 0x70 { WR_DFTHRSH_MIN, WR_DFTHRSH_25, WR_DFTHRSH_50, WR_DFTHRSH_63, WR_DFTHRSH_75, WR_DFTHRSH_85, WR_DFTHRSH_90, WR_DFTHRSH_MAX } field RD_DFTHRSH 0x07 { RD_DFTHRSH_MIN, RD_DFTHRSH_25, RD_DFTHRSH_50, RD_DFTHRSH_63, RD_DFTHRSH_75, RD_DFTHRSH_85, RD_DFTHRSH_90, RD_DFTHRSH_MAX } } /* * ROM Address */ register ROMADDR { address 0x08A access_mode RW size 3 } /* * ROM Control */ register ROMCNTRL { address 0x08D access_mode RW field ROMOP 0xE0 field ROMSPD 0x18 field REPEAT 0x02 field RDY 0x01 } /* * ROM Data */ register ROMDATA { address 0x08E access_mode RW } /* * Data Channel Receive Message 0 */ register DCHRXMSG0 { address 0x090 access_mode RO modes M_DFF0, M_DFF1 field CDNUM 0xF8 field CFNUM 0x07 } /* * CMC Receive Message 0 */ register CMCRXMSG0 { address 0x090 access_mode RO modes M_CCHAN field CDNUM 0xF8 field CFNUM 0x07 } /* * Overlay Receive Message 0 */ register OVLYRXMSG0 { address 0x090 access_mode RO modes M_SCSI field CDNUM 0xF8 field CFNUM 0x07 } /* * Relaxed Order Enable */ register ROENABLE { address 0x090 access_mode RW modes M_CFG field MSIROEN
0x20 field OVLYROEN 0x10 field CMCROEN 0x08 field SGROEN 0x04 field DCH1ROEN 0x02 field DCH0ROEN 0x01 } /* * Data Channel Receive Message 1 */ register DCHRXMSG1 { address 0x091 access_mode RO modes M_DFF0, M_DFF1 field CBNUM 0xFF } /* * CMC Receive Message 1 */ register CMCRXMSG1 { address 0x091 access_mode RO modes M_CCHAN field CBNUM 0xFF } /* * Overlay Receive Message 1 */ register OVLYRXMSG1 { address 0x091 access_mode RO modes M_SCSI field CBNUM 0xFF } /* * No Snoop Enable */ register NSENABLE { address 0x091 access_mode RW modes M_CFG field MSINSEN 0x20 field OVLYNSEN 0x10 field CMCNSEN 0x08 field SGNSEN 0x04 field DCH1NSEN 0x02 field DCH0NSEN 0x01 } /* * Data Channel Receive Message 2 */ register DCHRXMSG2 { address 0x092 access_mode RO modes M_DFF0, M_DFF1 field MINDEX 0xFF } /* * CMC Receive Message 2 */ register CMCRXMSG2 { address 0x092 access_mode RO modes M_CCHAN field MINDEX 0xFF } /* * Overlay Receive Message 2 */ register OVLYRXMSG2 { address 0x092 access_mode RO modes M_SCSI field MINDEX 0xFF } /* * Outstanding Split Transactions */ register OST { address 0x092 access_mode RW modes M_CFG } /* * Data Channel Receive Message 3 */ register DCHRXMSG3 { address 0x093 access_mode RO modes M_DFF0, M_DFF1 field MCLASS 0x0F } /* * CMC Receive Message 3 */ register CMCRXMSG3 { address 0x093 access_mode RO modes M_CCHAN field MCLASS 0x0F } /* * Overlay Receive Message 3 */ register OVLYRXMSG3 { address 0x093 access_mode RO modes M_SCSI field MCLASS 0x0F } /* * PCI-X Control */ register PCIXCTL { address 0x093 access_mode RW modes M_CFG field SERRPULSE 0x80 field UNEXPSCIEN 0x20 field SPLTSMADIS 0x10 field SPLTSTADIS 0x08 field SRSPDPEEN 0x04 field TSCSERREN 0x02 field CMPABCDIS 0x01 } /* * CMC Sequencer Byte Count */ register CMCSEQBCNT { address 0x094 access_mode RO modes M_CCHAN } /* * Overlay Sequencer Byte Count */ register OVLYSEQBCNT { address 0x094 access_mode RO modes M_SCSI } /* * Data Channel Sequencer Byte Count */ register DCHSEQBCNT { address 0x094 access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Data Channel Split Status 0 */ register DCHSPLTSTAT0 { address 0x096 access_mode RW modes M_DFF0, M_DFF1 field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * CMC Split Status 0 */ register CMCSPLTSTAT0 { address 0x096 access_mode RW modes M_CCHAN field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * Overlay Split Status 0 */ register OVLYSPLTSTAT0 { address 0x096 access_mode RW modes M_SCSI field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * Data Channel Split Status 1 */ register DCHSPLTSTAT1 { address 0x097 access_mode RW modes M_DFF0, M_DFF1 field RXDATABUCKET 0x01 } /* * CMC Split Status 1 */ register CMCSPLTSTAT1 { address 0x097 access_mode RW modes M_CCHAN field RXDATABUCKET 0x01 } /* * Overlay Split Status 1 */ register OVLYSPLTSTAT1 { address 0x097 access_mode RW modes M_SCSI field RXDATABUCKET 0x01 } /* * S/G Receive Message 0 */ register SGRXMSG0 { address 0x098 access_mode RO modes M_DFF0, M_DFF1 field CDNUM 0xF8 field CFNUM 0x07 } /* * S/G Receive Message 1 */ register SGRXMSG1 { address 0x099 access_mode RO modes M_DFF0, M_DFF1 field CBNUM 0xFF } /* * S/G Receive Message 2 */ register SGRXMSG2 { address 0x09A
access_mode RO modes M_DFF0, M_DFF1 field MINDEX 0xFF } /* * S/G Receive Message 3 */ register SGRXMSG3 { address 0x09B access_mode RO modes M_DFF0, M_DFF1 field MCLASS 0x0F } /* * Slave Split Out Address 0 */ register SLVSPLTOUTADR0 { address 0x098 access_mode RO modes M_SCSI field LOWER_ADDR 0x7F } /* * Slave Split Out Address 1 */ register SLVSPLTOUTADR1 { address 0x099 access_mode RO modes M_SCSI field REQ_DNUM 0xF8 field REQ_FNUM 0x07 } /* * Slave Split Out Address 2 */ register SLVSPLTOUTADR2 { address 0x09A access_mode RO modes M_SCSI field REQ_BNUM 0xFF } /* * Slave Split Out Address 3 */ register SLVSPLTOUTADR3 { address 0x09B access_mode RO modes M_SCSI field RLXORD 020 field TAG_NUM 0x1F } /* * SG Sequencer Byte Count */ register SGSEQBCNT { address 0x09C access_mode RO modes M_DFF0, M_DFF1 } /* * Slave Split Out Attribute 0 */ register SLVSPLTOUTATTR0 { address 0x09C access_mode RO modes M_SCSI field LOWER_BCNT 0xFF } /* * Slave Split Out Attribute 1 */ register SLVSPLTOUTATTR1 { address 0x09D access_mode RO modes M_SCSI field CMPLT_DNUM 0xF8 field CMPLT_FNUM 0x07 } /* * Slave Split Out Attribute 2 */ register SLVSPLTOUTATTR2 { address 0x09E access_mode RO size 2 modes M_SCSI field CMPLT_BNUM 0xFF } /* * S/G Split Status 0 */ register SGSPLTSTAT0 { address 0x09E access_mode RW modes M_DFF0, M_DFF1 field STAETERM 0x80 field SCBCERR 0x40 field SCADERR 0x20 field SCDATBUCKET 0x10 field CNTNOTCMPLT 0x08 field RXOVRUN 0x04 field RXSCEMSG 0x02 field RXSPLTRSP 0x01 } /* * S/G Split Status 1 */ register SGSPLTSTAT1 { address 0x09F access_mode RW modes M_DFF0, M_DFF1 field RXDATABUCKET 0x01 } /* * Special Function */ register SFUNCT { address 0x09f access_mode RW modes M_CFG field TEST_GROUP 0xF0 field TEST_NUM 0x0F } /* * Data FIFO 0 PCI Status */ register DF0PCISTAT { address 0x0A0 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * Data FIFO 1 PCI Status */ register DF1PCISTAT { address 0x0A1 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * S/G PCI Status */ register SGPCISTAT { address 0x0A2 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field DPR 0x01 } /* * CMC PCI Status */ register CMCPCISTAT { address 0x0A3 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field TWATERR 0x02 field DPR 0x01 } /* * Overlay PCI Status */ register OVLYPCISTAT { address 0x0A4 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field RMA 0x20 field RTA 0x10 field SCAAPERR 0x08 field RDPERR 0x04 field DPR 0x01 } /* * PCI Status for MSI Master DMA Transfer */ register MSIPCISTAT { address 0x0A6 access_mode RW modes M_CFG field SSE 0x40 field RMA 0x20 field RTA 0x10 field CLRPENDMSI 0x08 field TWATERR 0x02 field DPR 0x01 } /* * PCI Status for Target */ register TARGPCISTAT { address 0x0A7 access_mode RW modes M_CFG field DPE 0x80 field SSE 0x40 field STA 0x08 field TWATERR 0x02 } /* * LQ Packet In * The last LQ Packet received */ register LQIN { address 0x020 access_mode RW size 20 modes M_DFF0, M_DFF1, M_SCSI } /* * SCB Type Pointer * SCB offset for Target Mode SCB type information */ register TYPEPTR { address 0x020 access_mode RW modes M_CFG } /* * Queue Tag Pointer * SCB offset to the Two Byte tag identifier used for
target mode. */ register TAGPTR { address 0x021 access_mode RW modes M_CFG } /* * Logical Unit Number Pointer * SCB offset to the LSB (little endian) of the lun field. */ register LUNPTR { address 0x022 access_mode RW modes M_CFG } /* * Data Length Pointer * SCB offset for the 4 byte data length field in target mode. */ register DATALENPTR { address 0x023 access_mode RW modes M_CFG } /* * Status Length Pointer * SCB offset to the two byte status field in target SCBs. */ register STATLENPTR { address 0x024 access_mode RW modes M_CFG } /* * Command Length Pointer * Scb offset for the CDB length field in initiator SCBs. */ register CMDLENPTR { address 0x025 access_mode RW modes M_CFG } /* * Task Attribute Pointer * Scb offset for the byte field specifying the attribute byte * to be used in command packets. */ register ATTRPTR { address 0x026 access_mode RW modes M_CFG } /* * Task Management Flags Pointer * Scb offset for the byte field specifying the attribute flags * byte to be used in command packets. */ register FLAGPTR { address 0x027 access_mode RW modes M_CFG } /* * Command Pointer * Scb offset for the first byte in the CDB for initiator SCBs. */ register CMDPTR { address 0x028 access_mode RW modes M_CFG } /* * Queue Next Pointer * Scb offset for the 2 byte "next scb link". */ register QNEXTPTR { address 0x029 access_mode RW modes M_CFG } /* * SCSI ID Pointer * Scb offset to the value to place in the SCSIID register * during target mode connections. */ register IDPTR { address 0x02A access_mode RW modes M_CFG } /* * Command Aborted Byte Pointer * Offset to the SCB flags field that includes the * "SCB aborted" status bit. */ register ABRTBYTEPTR { address 0x02B access_mode RW modes M_CFG } /* * Command Aborted Bit Pointer * Bit offset in the SCB flags field for "SCB aborted" status. */ register ABRTBITPTR { address 0x02C access_mode RW modes M_CFG } /* * Rev B or greater. */ register MAXCMDBYTES { address 0x02D access_mode RW modes M_CFG } /* * Rev B or greater. */ register MAXCMD2RCV { address 0x02E access_mode RW modes M_CFG } /* * Rev B or greater. */ register SHORTTHRESH { address 0x02F access_mode RW modes M_CFG } /* * Logical Unit Number Length * The length, in bytes, of the SCB lun field. */ register LUNLEN { address 0x030 access_mode RW modes M_CFG mask ILUNLEN 0x0F mask TLUNLEN 0xF0 } const LUNLEN_SINGLE_LEVEL_LUN 0xF /* * CDB Limit * The size, in bytes, of the embedded CDB field in initiator SCBs. */ register CDBLIMIT { address 0x031 access_mode RW modes M_CFG } /* * Maximum Commands * The maximum number of commands to issue during a * single packetized connection. */ register MAXCMD { address 0x032 access_mode RW modes M_CFG } /* * Maximum Command Counter * The number of commands already sent during this connection */ register MAXCMDCNT { address 0x033 access_mode RW modes M_CFG } /* * LQ Packet Reserved Bytes * The bytes to be sent in the currently reserved fields * of all LQ packets. */ register LQRSVD01 { address 0x034 access_mode RW modes M_SCSI } register LQRSVD16 { address 0x035 access_mode RW modes M_SCSI } register LQRSVD17 { address 0x036 access_mode RW modes M_SCSI } /* * Command Reserved 0 * The byte to be sent for the reserved byte 0 of * outgoing command packets.
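 *
 * Like the other setup registers above, this one is only visible in
 * the configuration window (M_CFG).  A sketch of how the kernel
 * reaches it, assuming the driver's usual mode-switch helper:
 *
 *	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
 *	ahd_outb(ahd, CMDRSVD0, 0);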
*/ register CMDRSVD0 { address 0x037 access_mode RW modes M_CFG } /* * LQ Manager Control 0 */ register LQCTL0 { address 0x038 access_mode RW modes M_CFG field LQITARGCLT 0xC0 field LQIINITGCLT 0x30 field LQ0TARGCLT 0x0C field LQ0INITGCLT 0x03 } /* * LQ Manager Control 1 */ register LQCTL1 { address 0x038 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field PCI2PCI 0x04 field SINGLECMD 0x02 field ABORTPENDING 0x01 } /* * LQ Manager Control 2 */ register LQCTL2 { address 0x039 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field LQIRETRY 0x80 field LQICONTINUE 0x40 field LQITOIDLE 0x20 field LQIPAUSE 0x10 field LQORETRY 0x08 field LQOCONTINUE 0x04 field LQOTOIDLE 0x02 field LQOPAUSE 0x01 } /* * SCSI RAM BIST0 */ register SCSBIST0 { address 0x039 access_mode RW modes M_CFG field GSBISTERR 0x40 field GSBISTDONE 0x20 field GSBISTRUN 0x10 field OSBISTERR 0x04 field OSBISTDONE 0x02 field OSBISTRUN 0x01 } /* * SCSI Sequence Control0 */ register SCSISEQ0 { address 0x03A access_mode RW modes M_DFF0, M_DFF1, M_SCSI field TEMODEO 0x80 field ENSELO 0x40 field ENARBO 0x20 field FORCEBUSFREE 0x10 field SCSIRSTO 0x01 } /* * SCSI RAM BIST 1 */ register SCSBIST1 { address 0x03A access_mode RW modes M_CFG field NTBISTERR 0x04 field NTBISTDONE 0x02 field NTBISTRUN 0x01 } /* * SCSI Sequence Control 1 */ register SCSISEQ1 { address 0x03B access_mode RW modes M_DFF0, M_DFF1, M_SCSI field MANUALCTL 0x40 field ENSELI 0x20 field ENRSELI 0x10 field MANUALP 0x0C field ENAUTOATNP 0x02 field ALTSTIM 0x01 } /* * SCSI Transfer Control 0 */ register SXFRCTL0 { address 0x03C access_mode RW modes M_SCSI field DFON 0x80 field DFPEXP 0x40 field BIOSCANCELEN 0x10 field SPIOEN 0x08 } /* * SCSI Transfer Control 1 */ register SXFRCTL1 { address 0x03D access_mode RW modes M_SCSI field BITBUCKET 0x80 field ENSACHK 0x40 field ENSPCHK 0x20 field STIMESEL 0x18 field ENSTIMER 0x04 field ACTNEGEN 0x02 field STPWEN 0x01 } /* * SCSI Transfer Control 2 */ register SXFRCTL2 { address 0x03E access_mode RW modes M_SCSI field AUTORSTDIS 0x10 field CMDDMAEN 0x08 field ASU 0x07 } /* * SCSI Bus Initiator IDs * Bitmask of observed initiators on the bus. */ register BUSINITID { address 0x03C access_mode RW modes M_CFG size 2 } /* * Data Length Counters * Packet byte counter. */ register DLCOUNT { address 0x03C access_mode RW modes M_DFF0, M_DFF1 size 3 } /* * Data FIFO Status */ register DFFSTAT { address 0x03F access_mode RW modes M_SCSI field FIFO1FREE 0x20 field FIFO0FREE 0x10 /* * On the B, this enum only works * in the read direction. For writes, * you must use the B version of the * CURRFIFO_0 definition which is defined * as a constant outside of this register * definition to avoid confusing the * register pretty printing code. */ enum CURRFIFO 0x03 { CURRFIFO_0, CURRFIFO_1, CURRFIFO_NONE 0x3 } } const B_CURRFIFO_0 0x2 /* * SCSI Bus Target IDs * Bitmask of observed targets on the bus. 
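 *
 * One bit per ID across the two bytes; a value of 0x0081, for
 * example, would indicate that targets 0 and 7 have been observed.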
*/ register BUSTARGID { address 0x03E access_mode RW modes M_CFG size 2 } /* * SCSI Control Signal Out */ register SCSISIGO { address 0x040 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field CDO 0x80 field IOO 0x40 field MSGO 0x20 field ATNO 0x10 field SELO 0x08 field BSYO 0x04 field REQO 0x02 field ACKO 0x01 /* * Possible phases to write into SCSISIG0 */ enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } register SCSISIGI { address 0x041 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field CDI 0x80 field IOI 0x40 field MSGI 0x20 field ATNI 0x10 field SELI 0x08 field BSYI 0x04 field REQI 0x02 field ACKI 0x01 /* * Possible phases in SCSISIGI */ enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } /* * Multiple Target IDs * Bitmask of ids to respond as a target. */ register MULTARGID { address 0x040 access_mode RW modes M_CFG size 2 } /* * SCSI Phase */ register SCSIPHASE { address 0x042 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field STATUS_PHASE 0x20 field COMMAND_PHASE 0x10 field MSG_IN_PHASE 0x08 field MSG_OUT_PHASE 0x04 field DATA_PHASE_MASK 0x03 { DATA_OUT_PHASE 0x01, DATA_IN_PHASE 0x02 } } /* * SCSI Data 0 Image */ register SCSIDAT0_IMG { address 0x043 access_mode RW modes M_DFF0, M_DFF1, M_SCSI } /* * SCSI Latched Data */ register SCSIDAT { address 0x044 access_mode RW modes M_DFF0, M_DFF1, M_SCSI size 2 } /* * SCSI Data Bus */ register SCSIBUS { address 0x046 access_mode RW modes M_DFF0, M_DFF1, M_SCSI size 2 } /* * Target ID In */ register TARGIDIN { address 0x048 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field CLKOUT 0x80 field TARGID 0x0F } /* * Selection/Reselection ID * Upper four bits are the device id. The ONEBIT is set when the re/selecting * device did not set its own ID. */ register SELID { address 0x049 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field SELID_MASK 0xf0 field ONEBIT 0x08 } /* * SCSI Block Control * Controls Bus type and channel selection. SELWIDE allows for the * coexistence of 8bit and 16bit devices on a wide bus. */ register SBLKCTL { address 0x04A access_mode RW modes M_DFF0, M_DFF1, M_SCSI field DIAGLEDEN 0x80 field DIAGLEDON 0x40 field ENAB40 0x08 /* LVD transceiver active */ field ENAB20 0x04 /* SE/HVD transceiver active */ field SELWIDE 0x02 } /* * Option Mode */ register OPTIONMODE { address 0x04A access_mode RW modes M_CFG field BIOSCANCTL 0x80 field AUTOACKEN 0x40 field BIASCANCTL 0x20 field BUSFREEREV 0x10 field ENDGFORMCHK 0x04 field AUTO_MSGOUT_DE 0x02 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE } /* * SCSI Status 0 */ register SSTAT0 { address 0x04B access_mode RO modes M_DFF0, M_DFF1, M_SCSI field TARGET 0x80 /* Board acting as target */ field SELDO 0x40 /* Selection Done */ field SELDI 0x20 /* Board has been selected */ field SELINGO 0x10 /* Selection In Progress */ field IOERR 0x08 /* LVD Transceiver mode changed */ field OVERRUN 0x04 /* SCSI Offset overrun detected */ field SPIORDY 0x02 /* SCSI PIO Ready */ field ARBDO 0x01 /* Arbitration Done Out */ } /* * Clear SCSI Interrupt 0 * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
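 *
 * Because the register is write-1-to-clear, an acknowledgement only
 * touches the bits actually written; a sketch that clears just the
 * two selection events:
 *
 *	ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI);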
*/ register CLRSINT0 { address 0x04B access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRSELDO 0x40 field CLRSELDI 0x20 field CLRSELINGO 0x10 field CLRIOERR 0x08 field CLROVERRUN 0x04 field CLRSPIORDY 0x02 field CLRARBDO 0x01 } /* * SCSI Interrupt Mode 0 * Setting any bit will enable the corresponding function * in SIMODE0 to interrupt via the IRQ pin. */ register SIMODE0 { address 0x04B access_mode RW modes M_CFG field ENSELDO 0x40 field ENSELDI 0x20 field ENSELINGO 0x10 field ENIOERR 0x08 field ENOVERRUN 0x04 field ENSPIORDY 0x02 field ENARBDO 0x01 } /* * SCSI Status 1 */ register SSTAT1 { address 0x04C access_mode RO modes M_DFF0, M_DFF1, M_SCSI field SELTO 0x80 field ATNTARG 0x40 field SCSIRSTI 0x20 field PHASEMIS 0x10 field BUSFREE 0x08 field SCSIPERR 0x04 field STRB2FAST 0x02 field REQINIT 0x01 } /* * Clear SCSI Interrupt 1 * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. */ register CLRSINT1 { address 0x04C access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRSELTIMEO 0x80 field CLRATNO 0x40 field CLRSCSIRSTI 0x20 field CLRBUSFREE 0x08 field CLRSCSIPERR 0x04 field CLRSTRB2FAST 0x02 field CLRREQINIT 0x01 } /* * SCSI Status 2 */ register SSTAT2 { address 0x04d access_mode RO modes M_DFF0, M_DFF1, M_SCSI field BUSFREETIME 0xc0 { BUSFREE_LQO 0x40, BUSFREE_DFF0 0x80, BUSFREE_DFF1 0xC0 } field NONPACKREQ 0x20 field EXP_ACTIVE 0x10 /* SCSI Expander Active */ field BSYX 0x08 /* Busy Expander */ field WIDE_RES 0x04 /* Modes 0 and 1 only */ field SDONE 0x02 /* Modes 0 and 1 only */ field DMADONE 0x01 /* Modes 0 and 1 only */ } /* * Clear SCSI Interrupt 2 */ register CLRSINT2 { address 0x04D access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRNONPACKREQ 0x20 field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */ field CLRSDONE 0x02 /* Modes 0 and 1 only */ field CLRDMADONE 0x01 /* Modes 0 and 1 only */ } /* * SCSI Interrupt Mode 2 */ register SIMODE2 { address 0x04D access_mode RW modes M_CFG field ENWIDE_RES 0x04 field ENSDONE 0x02 field ENDMADONE 0x01 } /* * Physical Error Diagnosis */ register PERRDIAG { address 0x04E access_mode RO modes M_DFF0, M_DFF1, M_SCSI field HIZERO 0x80 field HIPERR 0x40 field PREVPHASE 0x20 field PARITYERR 0x10 field AIPERR 0x08 field CRCERR 0x04 field DGFORMERR 0x02 field DTERR 0x01 } /* * LQI Manager Current State */ register LQISTATE { address 0x04E access_mode RO modes M_CFG } /* * SCSI Offset Count */ register SOFFCNT { address 0x04F access_mode RO modes M_DFF0, M_DFF1, M_SCSI } /* * LQO Manager Current State */ register LQOSTATE { address 0x04F access_mode RO modes M_CFG } /* * LQI Manager Status */ register LQISTAT0 { address 0x050 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQIATNQAS 0x20 field LQICRCT1 0x10 field LQICRCT2 0x08 field LQIBADLQT 0x04 field LQIATNLQ 0x02 field LQIATNCMD 0x01 } /* * Clear LQI Interrupts 0 */ register CLRLQIINT0 { address 0x050 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQIATNQAS 0x20 field CLRLQICRCT1 0x10 field CLRLQICRCT2 0x08 field CLRLQIBADLQT 0x04 field CLRLQIATNLQ 0x02 field CLRLQIATNCMD 0x01 } /* * LQI Manager Interrupt Mode 0 */ register LQIMODE0 { address 0x050 access_mode RW modes M_CFG field ENLQIATNQASK 0x20 field ENLQICRCT1 0x10 field ENLQICRCT2 0x08 field ENLQIBADLQT 0x04 field ENLQIATNLQ 0x02 field ENLQIATNCMD 0x01 } /* * LQI Manager Status 1 */ register LQISTAT1 { address 0x051 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQIPHASE_LQ 0x80 field LQIPHASE_NLQ 0x40 field LQIABORT 0x20 field LQICRCI_LQ 0x10 field LQICRCI_NLQ 0x08 field LQIBADLQI 0x04 field LQIOVERI_LQ 0x02 
field LQIOVERI_NLQ 0x01 } /* * Clear LQI Manager Interrupts 1 */ register CLRLQIINT1 { address 0x051 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQIPHASE_LQ 0x80 field CLRLQIPHASE_NLQ 0x40 field CLRLIQABORT 0x20 field CLRLQICRCI_LQ 0x10 field CLRLQICRCI_NLQ 0x08 field CLRLQIBADLQI 0x04 field CLRLQIOVERI_LQ 0x02 field CLRLQIOVERI_NLQ 0x01 } /* * LQI Manager Interrupt Mode 1 */ register LQIMODE1 { address 0x051 access_mode RW modes M_CFG field ENLQIPHASE_LQ 0x80 /* LQIPHASE1 */ field ENLQIPHASE_NLQ 0x40 /* LQIPHASE2 */ field ENLIQABORT 0x20 field ENLQICRCI_LQ 0x10 /* LQICRCI1 */ field ENLQICRCI_NLQ 0x08 /* LQICRCI2 */ field ENLQIBADLQI 0x04 field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */ field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */ } /* * LQI Manager Status 2 */ register LQISTAT2 { address 0x052 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field PACKETIZED 0x80 field LQIPHASE_OUTPKT 0x40 field LQIWORKONLQ 0x20 field LQIWAITFIFO 0x10 field LQISTOPPKT 0x08 field LQISTOPLQ 0x04 field LQISTOPCMD 0x02 field LQIGSAVAIL 0x01 } /* * SCSI Status 3 */ register SSTAT3 { address 0x053 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field NTRAMPERR 0x02 field OSRAMPERR 0x01 } /* * Clear SCSI Status 3 */ register CLRSINT3 { address 0x053 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRNTRAMPERR 0x02 field CLROSRAMPERR 0x01 } /* * SCSI Interrupt Mode 3 */ register SIMODE3 { address 0x053 access_mode RW modes M_CFG field ENNTRAMPERR 0x02 field ENOSRAMPERR 0x01 } /* * LQO Manager Status 0 */ register LQOSTAT0 { address 0x054 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOTARGSCBPERR 0x10 field LQOSTOPT2 0x08 field LQOATNLQ 0x04 field LQOATNPKT 0x02 field LQOTCRC 0x01 } /* * Clear LQO Manager interrupt 0 */ register CLRLQOINT0 { address 0x054 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQOTARGSCBPERR 0x10 field CLRLQOSTOPT2 0x08 field CLRLQOATNLQ 0x04 field CLRLQOATNPKT 0x02 field CLRLQOTCRC 0x01 } /* * LQO Manager Interrupt Mode 0 */ register LQOMODE0 { address 0x054 access_mode RW modes M_CFG field ENLQOTARGSCBPERR 0x10 field ENLQOSTOPT2 0x08 field ENLQOATNLQ 0x04 field ENLQOATNPKT 0x02 field ENLQOTCRC 0x01 } /* * LQO Manager Status 1 */ register LQOSTAT1 { address 0x055 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOINITSCBPERR 0x10 field LQOSTOPI2 0x08 field LQOBADQAS 0x04 field LQOBUSFREE 0x02 field LQOPHACHGINPKT 0x01 } /* * Clear LQO Interrupt 1 */ register CLRLQOINT1 { address 0x055 access_mode WO modes M_DFF0, M_DFF1, M_SCSI field CLRLQOINITSCBPERR 0x10 field CLRLQOSTOPI2 0x08 field CLRLQOBADQAS 0x04 field CLRLQOBUSFREE 0x02 field CLRLQOPHACHGINPKT 0x01 } /* * LQO Manager Interrupt Mode 1 */ register LQOMODE1 { address 0x055 access_mode RW modes M_CFG field ENLQOINITSCBPERR 0x10 field ENLQOSTOPI2 0x08 field ENLQOBADQAS 0x04 field ENLQOBUSFREE 0x02 field ENLQOPHACHGINPKT 0x01 } /* * LQO Manager Status 2 */ register LQOSTAT2 { address 0x056 access_mode RO modes M_DFF0, M_DFF1, M_SCSI field LQOPKT 0xE0 field LQOWAITFIFO 0x10 field LQOPHACHGOUTPKT 0x02 /* outside of packet boundaries. */ field LQOSTOP0 0x01 /* Stopped after sending all packets */ } /* * Output Synchronizer Space Count */ register OS_SPACE_CNT { address 0x056 access_mode RO modes M_CFG } /* * SCSI Interrupt Mode 1 * Setting any bit will enable the corresponding function * in SIMODE1 to interrupt via the IRQ pin.
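 *
 * A sketch of unmasking a few typical initiator-side sources (the
 * particular mask is illustrative only):
 *
 *	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENBUSFREE);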
*/ register SIMODE1 { address 0x057 access_mode RW modes M_DFF0, M_DFF1, M_SCSI field ENSELTIMO 0x80 field ENATNTARG 0x40 field ENSCSIRST 0x20 field ENPHASEMIS 0x10 field ENBUSFREE 0x08 field ENSCSIPERR 0x04 field ENSTRB2FAST 0x02 field ENREQINIT 0x01 } /* * Good Status FIFO */ register GSFIFO { address 0x058 access_mode RO size 2 modes M_DFF0, M_DFF1, M_SCSI } /* * Data FIFO SCSI Transfer Control */ register DFFSXFRCTL { address 0x05A access_mode RW modes M_DFF0, M_DFF1 field DFFBITBUCKET 0x08 field CLRSHCNT 0x04 field CLRCHN 0x02 field RSTCHN 0x01 } /* * Next SCSI Control Block */ register NEXTSCB { address 0x05A access_mode RW size 2 modes M_SCSI } /* Rev B only. */ register LQOSCSCTL { address 0x05A access_mode RW size 1 modes M_CFG field LQOH2A_VERSION 0x80 field LQONOCHKOVER 0x01 } /* * SEQ Interrupts */ register SEQINTSRC { address 0x05B access_mode RO modes M_DFF0, M_DFF1 field CTXTDONE 0x40 field SAVEPTRS 0x20 field CFG4DATA 0x10 field CFG4ISTAT 0x08 field CFG4TSTAT 0x04 field CFG4ICMD 0x02 field CFG4TCMD 0x01 } /* * Clear Arp Interrupts */ register CLRSEQINTSRC { address 0x05B access_mode WO modes M_DFF0, M_DFF1 field CLRCTXTDONE 0x40 field CLRSAVEPTRS 0x20 field CLRCFG4DATA 0x10 field CLRCFG4ISTAT 0x08 field CLRCFG4TSTAT 0x04 field CLRCFG4ICMD 0x02 field CLRCFG4TCMD 0x01 } /* * SEQ Interrupt Enabled (Shared) */ register SEQIMODE { address 0x05C access_mode RW modes M_DFF0, M_DFF1 field ENCTXTDONE 0x40 field ENSAVEPTRS 0x20 field ENCFG4DATA 0x10 field ENCFG4ISTAT 0x08 field ENCFG4TSTAT 0x04 field ENCFG4ICMD 0x02 field ENCFG4TCMD 0x01 } /* * Current SCSI Control Block */ register CURRSCB { address 0x05C access_mode RW size 2 modes M_SCSI } /* * Data FIFO Status */ register MDFFSTAT { address 0x05D access_mode RO modes M_DFF0, M_DFF1 field SHCNTNEGATIVE 0x40 /* Rev B or higher */ field SHCNTMINUS1 0x20 /* Rev B or higher */ field LASTSDONE 0x10 field SHVALID 0x08 field DLZERO 0x04 /* FIFO data ends on packet boundary. */ field DATAINFIFO 0x02 field FIFOFREE 0x01 } /* * CRC Control */ register CRCCONTROL { address 0x05d access_mode RW modes M_CFG field CRCVALCHKEN 0x40 } /* * SCSI Test Control */ register SCSITEST { address 0x05E access_mode RW modes M_CFG field CNTRTEST 0x08 field SEL_TXPLL_DEBUG 0x04 } /* * Data FIFO Queue Tag */ register DFFTAG { address 0x05E access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * Last SCSI Control Block */ register LASTSCB { address 0x05E access_mode RW size 2 modes M_SCSI } /* * SCSI I/O Cell Power-down Control */ register IOPDNCTL { address 0x05F access_mode RW modes M_CFG field DISABLE_OE 0x80 field PDN_IDIST 0x04 field PDN_DIFFSENSE 0x01 } /* * Shadow Host Address. */ register SHADDR { address 0x060 access_mode RO size 8 modes M_DFF0, M_DFF1 } /* * Data Group CRC Interval.
*/ register DGRPCRCI { address 0x060 access_mode RW size 2 modes M_CFG } /* * Data Transfer Negotiation Address */ register NEGOADDR { address 0x060 access_mode RW modes M_SCSI } /* * Data Transfer Negotiation Data - Period Byte */ register NEGPERIOD { address 0x061 access_mode RW modes M_SCSI } /* * Packetized CRC Interval */ register PACKCRCI { address 0x062 access_mode RW size 2 modes M_CFG } /* * Data Transfer Negotiation Data - Offset Byte */ register NEGOFFSET { address 0x062 access_mode RW modes M_SCSI } /* * Data Transfer Negotiation Data - PPR Options */ register NEGPPROPTS { address 0x063 access_mode RW modes M_SCSI field PPROPT_PACE 0x08 field PPROPT_QAS 0x04 field PPROPT_DT 0x02 field PPROPT_IUT 0x01 } /* * Data Transfer Negotiation Data - Connection Options */ register NEGCONOPTS { address 0x064 access_mode RW modes M_SCSI field ENSNAPSHOT 0x40 field RTI_WRTDIS 0x20 field RTI_OVRDTRN 0x10 field ENSLOWCRC 0x08 field ENAUTOATNI 0x04 field ENAUTOATNO 0x02 field WIDEXFER 0x01 } /* * Negotiation Table Annex Column Index. */ register ANNEXCOL { address 0x065 access_mode RW modes M_SCSI } register SCSCHKN { address 0x066 access_mode RW modes M_CFG field STSELSKIDDIS 0x40 field CURRFIFODEF 0x20 field WIDERESEN 0x10 field SDONEMSKDIS 0x08 field DFFACTCLR 0x04 field SHVALIDSTDIS 0x02 field LSTSGCLRDIS 0x01 } const AHD_ANNEXCOL_PER_DEV0 4 const AHD_NUM_PER_DEV_ANNEXCOLS 4 const AHD_ANNEXCOL_PRECOMP_SLEW 4 const AHD_PRECOMP_MASK 0x07 const AHD_PRECOMP_SHIFT 0 const AHD_PRECOMP_CUTBACK_17 0x04 const AHD_PRECOMP_CUTBACK_29 0x06 const AHD_PRECOMP_CUTBACK_37 0x07 const AHD_SLEWRATE_MASK 0x78 const AHD_SLEWRATE_SHIFT 3 /* * Rev A has only a single bit (high bit of field) of slew adjustment. * Rev B has 4 bits. The current default happens to be the same for both. */ const AHD_SLEWRATE_DEF_REVA 0x08 const AHD_SLEWRATE_DEF_REVB 0x08 /* Rev A does not have any amplitude setting. */ const AHD_ANNEXCOL_AMPLITUDE 6 const AHD_AMPLITUDE_MASK 0x7 const AHD_AMPLITUDE_SHIFT 0 const AHD_AMPLITUDE_DEF 0x7 /* * Negotiation Table Annex Data Port. */ register ANNEXDAT { address 0x066 access_mode RW modes M_SCSI } /* * Initiator's Own Id. * The SCSI ID to use for Selection Out and seen during a reselection.
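 *
 * (An aside on the negotiation table registers above: an entry is
 * programmed by first selecting it through NEGOADDR and then writing
 * the per-target bytes.  A hedged sketch of the sequence:
 *
 *	ahd_outb(ahd, NEGOADDR, devinfo->target);
 *	ahd_outb(ahd, NEGPERIOD, period);
 *	ahd_outb(ahd, NEGOFFSET, offset);
 *
 * which mirrors, in outline, what the driver's negotiation-table
 * update routine does.)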
*/ register IOWNID { address 0x067 access_mode RW modes M_SCSI } /* * 960MHz Phase-Locked Loop Control 0 */ register PLL960CTL0 { address 0x068 access_mode RW modes M_CFG field PLL_VCOSEL 0x80 field PLL_PWDN 0x40 field PLL_NS 0x30 field PLL_ENLUD 0x08 field PLL_ENLPF 0x04 field PLL_DLPF 0x02 field PLL_ENFBM 0x01 } /* * Target Own Id */ register TOWNID { address 0x069 access_mode RW modes M_SCSI } /* * 960MHz Phase-Locked Loop Control 1 */ register PLL960CTL1 { address 0x069 access_mode RW modes M_CFG field PLL_CNTEN 0x80 field PLL_CNTCLR 0x40 field PLL_RST 0x01 } /* * Expander Signature */ register XSIG { address 0x06A access_mode RW modes M_SCSI } /* * Shadow Byte Count */ register SHCNT { address 0x068 access_mode RW size 3 modes M_DFF0, M_DFF1 } /* * Selection Out ID */ register SELOID { address 0x06B access_mode RW modes M_SCSI } /* * 960-MHz Phase-Locked Loop Test Count */ register PLL960CNT0 { address 0x06A access_mode RO size 2 modes M_CFG } /* * 400-MHz Phase-Locked Loop Control 0 */ register PLL400CTL0 { address 0x06C access_mode RW modes M_CFG field PLL_VCOSEL 0x80 field PLL_PWDN 0x40 field PLL_NS 0x30 field PLL_ENLUD 0x08 field PLL_ENLPF 0x04 field PLL_DLPF 0x02 field PLL_ENFBM 0x01 } /* * Arbitration Fairness */ register FAIRNESS { address 0x06C access_mode RW size 2 modes M_SCSI } /* * 400-MHz Phase-Locked Loop Control 1 */ register PLL400CTL1 { address 0x06D access_mode RW modes M_CFG field PLL_CNTEN 0x80 field PLL_CNTCLR 0x40 field PLL_RST 0x01 } /* * Arbitration Unfairness */ register UNFAIRNESS { address 0x06E access_mode RW size 2 modes M_SCSI } /* * 400-MHz Phase-Locked Loop Test Count */ register PLL400CNT0 { address 0x06E access_mode RO size 2 modes M_CFG } /* * SCB Page Pointer */ register SCBPTR { address 0x0A8 access_mode RW size 2 modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI } /* * CMC SCB Array Count * Number of bytes to transfer between CMC SCB memory and SCBRAM. * Transfers must be 8byte aligned and sized. */ register CCSCBACNT { address 0x0AB access_mode RW modes M_CCHAN } /* * SCB Autopointer * SCB-Next Address Snooping logic. When an SCB is transferred to * the card, the next SCB address to be used by the CMC array can * be autoloaded from that transfer. */ register SCBAUTOPTR { address 0x0AB access_mode RW modes M_CFG field AUSCBPTR_EN 0x80 field SCBPTR_ADDR 0x38 field SCBPTR_OFF 0x07 } /* * CMC SG Ram Address Pointer */ register CCSGADDR { address 0x0AC access_mode RW modes M_DFF0, M_DFF1 } /* * CMC SCB RAM Address Pointer */ register CCSCBADDR { address 0x0AC access_mode RW modes M_CCHAN } /* * CMC SCB Ram Back-up Address Pointer * Indicates the true stop location of transfers halted prior * to SCBHCNT going to 0. 
*/ register CCSCBADR_BK { address 0x0AC access_mode RO modes M_CFG } /* * CMC SG Control */ register CCSGCTL { address 0x0AD access_mode RW modes M_DFF0, M_DFF1 field CCSGDONE 0x80 field SG_CACHE_AVAIL 0x10 field CCSGENACK 0x08 mask CCSGEN 0x0C field SG_FETCH_REQ 0x02 field CCSGRESET 0x01 } /* * CMD SCB Control */ register CCSCBCTL { address 0x0AD access_mode RW modes M_CCHAN field CCSCBDONE 0x80 field ARRDONE 0x40 field CCARREN 0x10 field CCSCBEN 0x08 field CCSCBDIR 0x04 field CCSCBRESET 0x01 } /* * CMC Ram BIST */ register CMC_RAMBIST { address 0x0AD access_mode RW modes M_CFG field SG_ELEMENT_SIZE 0x80 field SCBRAMBIST_FAIL 0x40 field SG_BIST_FAIL 0x20 field SG_BIST_EN 0x10 field CMC_BUFFER_BIST_FAIL 0x02 field CMC_BUFFER_BIST_EN 0x01 } /* * CMC SG RAM Data Port */ register CCSGRAM { address 0x0B0 access_mode RW modes M_DFF0, M_DFF1 } /* * CMC SCB RAM Data Port */ register CCSCBRAM { address 0x0B0 access_mode RW modes M_CCHAN } /* * Flex DMA Address. */ register FLEXADR { address 0x0B0 access_mode RW size 3 modes M_SCSI } /* * Flex DMA Byte Count */ register FLEXCNT { address 0x0B3 access_mode RW size 2 modes M_SCSI } /* * Flex DMA Status */ register FLEXDMASTAT { address 0x0B5 access_mode RW modes M_SCSI field FLEXDMAERR 0x02 field FLEXDMADONE 0x01 } /* * Flex DMA Data Port */ register FLEXDATA { address 0x0B6 access_mode RW modes M_SCSI } /* * Board Data */ register BRDDAT { address 0x0B8 access_mode RW modes M_SCSI } /* * Board Control */ register BRDCTL { address 0x0B9 access_mode RW modes M_SCSI field FLXARBACK 0x80 field FLXARBREQ 0x40 field BRDADDR 0x38 field BRDEN 0x04 field BRDRW 0x02 field BRDSTB 0x01 } /* * Serial EEPROM Address */ register SEEADR { address 0x0BA access_mode RW modes M_SCSI } /* * Serial EEPROM Data */ register SEEDAT { address 0x0BC access_mode RW size 2 modes M_SCSI } /* * Serial EEPROM Status */ register SEESTAT { address 0x0BE access_mode RO modes M_SCSI field INIT_DONE 0x80 field SEEOPCODE 0x70 field LDALTID_L 0x08 field SEEARBACK 0x04 field SEEBUSY 0x02 field SEESTART 0x01 } /* * Serial EEPROM Control */ register SEECTL { address 0x0BE access_mode RW modes M_SCSI field SEEOPCODE 0x70 { SEEOP_ERASE 0x70, SEEOP_READ 0x60, SEEOP_WRITE 0x50, /* * The following four commands use special * addresses for differentiation. */ SEEOP_ERAL 0x40 } mask SEEOP_EWEN 0x40 mask SEEOP_WALL 0x40 mask SEEOP_EWDS 0x40 field SEERST 0x02 field SEESTART 0x01 } const SEEOP_ERAL_ADDR 0x80 const SEEOP_EWEN_ADDR 0xC0 const SEEOP_WRAL_ADDR 0x40 const SEEOP_EWDS_ADDR 0x00 /* * SCB Counter */ register SCBCNT { address 0x0BF access_mode RW modes M_SCSI } /* * Data FIFO Write Address * Pointer to the next QWD location to be written to the data FIFO. */ register DFWADDR { address 0x0C0 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * DSP Filter Control */ register DSPFLTRCTL { address 0x0C0 access_mode RW modes M_CFG field FLTRDISABLE 0x20 field EDGESENSE 0x10 field DSPFCNTSEL 0x0F } /* * DSP Data Channel Control */ register DSPDATACTL { address 0x0C1 access_mode RW modes M_CFG field BYPASSENAB 0x80 field DESQDIS 0x10 field RCVROFFSTDIS 0x04 field XMITOFFSTDIS 0x02 } /* * Data FIFO Read Address * Pointer to the next QWD location to be read from the data FIFO. 
*/ register DFRADDR { address 0x0C2 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * DSP REQ Control */ register DSPREQCTL { address 0x0C2 access_mode RW modes M_CFG field MANREQCTL 0xC0 field MANREQDLY 0x3F } /* * DSP ACK Control */ register DSPACKCTL { address 0x0C3 access_mode RW modes M_CFG field MANACKCTL 0xC0 field MANACKDLY 0x3F } /* * Data FIFO Data * Read/Write byte port into the data FIFO. The read and write * FIFO pointers increment with each read and write respectively * to this port. */ register DFDAT { address 0x0C4 access_mode RW modes M_DFF0, M_DFF1 } /* * DSP Channel Select */ register DSPSELECT { address 0x0C4 access_mode RW modes M_CFG field AUTOINCEN 0x80 field DSPSEL 0x1F } const NUMDSPS 0x14 /* * Write Bias Control */ register WRTBIASCTL { address 0x0C5 access_mode WO modes M_CFG field AUTOXBCDIS 0x80 field XMITMANVAL 0x3F } /* * Currently the WRTBIASCTL is the same as the default. */ const WRTBIASCTL_HP_DEFAULT 0x0 /* * Receiver Bias Control */ register RCVRBIOSCTL { address 0x0C6 access_mode WO modes M_CFG field AUTORBCDIS 0x80 field RCVRMANVAL 0x3F } /* * Write Bias Calculator */ register WRTBIASCALC { address 0x0C7 access_mode RO modes M_CFG } /* * Data FIFO Pointers * Contains the byte offset from DFWADDR and DFRADDR to the current * FIFO write/read locations. */ register DFPTRS { address 0x0C8 access_mode RW modes M_DFF0, M_DFF1 } /* * Receiver Bias Calculator */ register RCVRBIASCALC { address 0x0C8 access_mode RO modes M_CFG } /* * Data FIFO Backup Read Pointer * Contains the data FIFO address to be restored if the last * data accessed from the data FIFO was not transferred successfully. */ register DFBKPTR { address 0x0C9 access_mode RW size 2 modes M_DFF0, M_DFF1 } /* * Skew Calculator */ register SKEWCALC { address 0x0C9 access_mode RO modes M_CFG } /* * Data FIFO Debug Control */ register DFDBCTL { address 0x0CB access_mode RW modes M_DFF0, M_DFF1 field DFF_CIO_WR_RDY 0x20 field DFF_CIO_RD_RDY 0x10 field DFF_DIR_ERR 0x08 field DFF_RAMBIST_FAIL 0x04 field DFF_RAMBIST_DONE 0x02 field DFF_RAMBIST_EN 0x01 } /* * Data FIFO Space Count * Number of FIFO locations that are free. */ register DFSCNT { address 0x0CC access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Data FIFO Byte Count * Number of filled FIFO locations. */ register DFBCNT { address 0x0CE access_mode RO size 2 modes M_DFF0, M_DFF1 } /* * Sequencer Program Overlay Address. * Low address must be written prior to high address. */ register OVLYADDR { address 0x0D4 modes M_SCSI size 2 access_mode RW } /* * Sequencer Control 0 * Error detection mode, speed configuration, * single step, breakpoints and program load. */ register SEQCTL0 { address 0x0D6 access_mode RW field PERRORDIS 0x80 field PAUSEDIS 0x40 field FAILDIS 0x20 field FASTMODE 0x10 field BRKADRINTEN 0x08 field STEP 0x04 field SEQRESET 0x02 field LOADRAM 0x01 } /* * Sequencer Control 1 * Instruction RAM Diagnostics */ register SEQCTL1 { address 0x0D7 access_mode RW field OVRLAY_DATA_CHK 0x08 field RAMBIST_DONE 0x04 field RAMBIST_FAIL 0x02 field RAMBIST_EN 0x01 } /* * Sequencer Flags * Zero and Carry state of the ALU.
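 *
 * The sequencer's conditional branches (jz/jnz, jc/jnc) test these
 * bits; after an add whose result wraps, for instance, a jc branch
 * would be taken.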
*/ register FLAGS { address 0x0D8 access_mode RO field ZERO 0x02 field CARRY 0x01 } /* * Sequencer Interrupt Control */ register SEQINTCTL { address 0x0D9 access_mode RW field INTVEC1DSL 0x80 field INT1_CONTEXT 0x20 field SCS_SEQ_INT1M1 0x10 field SCS_SEQ_INT1M0 0x08 field INTMASK2 0x04 field INTMASK1 0x02 field IRET 0x01 } /* * Sequencer RAM Data Port * Single byte window into the Sequencer Instruction Ram area starting * at the address specified by OVLYADDR. To write a full instruction word, * simply write four bytes in succession. OVLYADDR will increment after the * most significant instruction byte (the byte with the parity bit) is written. */ register SEQRAM { address 0x0DA access_mode RW } /* * Sequencer Program Counter * Low byte must be written prior to high byte. */ register PRGMCNT { address 0x0DE access_mode RW size 2 } /* * Accumulator */ register ACCUM { address 0x0E0 access_mode RW accumulator } /* * Source Index Register * Incrementing index for reads of SINDIR and the destination (low byte only) * for any immediate operands passed in jmp, jc, jnc, call instructions. * Example: * mvi 0xFF call some_routine; * * Will set SINDEX[0] to 0xFF and call the routine "some_routine". */ register SINDEX { address 0x0E2 access_mode RW size 2 sindex } /* * Destination Index Register * Incrementing index for writes to DINDIR. Can be used as a scratch register. */ register DINDEX { address 0x0E4 access_mode RW size 2 } /* * Break Address * Sequencer instruction breakpoint address. */ register BRKADDR0 { address 0x0E6 access_mode RW } register BRKADDR1 { address 0x0E6 access_mode RW field BRKDIS 0x80 /* Disable Breakpoint */ } /* * All Ones * All reads to this register return the value 0xFF. */ register ALLONES { address 0x0E8 access_mode RO allones } /* * All Zeros * All reads to this register return the value 0. */ register ALLZEROS { address 0x0EA access_mode RO allzeros } /* * No Destination * Writes to this register have no effect. */ register NONE { address 0x0EA access_mode WO none } /* * Source Index Indirect * Reading this register is equivalent to reading (register_base + SINDEX) and * incrementing SINDEX by 1. */ register SINDIR { address 0x0EC access_mode RO } /* * Destination Index Indirect * Writing this register is equivalent to writing to (register_base + DINDEX) * and incrementing DINDEX by 1. */ register DINDIR { address 0x0ED access_mode WO } /* * Function One * 2's complement to bit value conversion. Write the 2's complement value * (0-7 only) to the top nibble and retrieve the bit indexed by that value * on the next read of this register. * Example: * Write 0x60 * Read 0x40 */ register FUNCTION1 { address 0x0F0 access_mode RW } /* * Stack * Window into the stack. Each stack location is 10 bits wide reported * low byte followed by high byte. There are 8 stack locations. */ register STACK { address 0x0F2 access_mode RW } /* * Interrupt Vector 1 Address * Interrupt branch address for SCS SEQ_INT1 mode 0 and 1 interrupts. */ register INTVEC1_ADDR { address 0x0F4 access_mode RW size 2 modes M_CFG } /* * Current Address * Address of the SEQRAM instruction currently executing. */ register CURADDR { address 0x0F4 access_mode RW size 2 modes M_SCSI } /* * Interrupt Vector 2 Address * Interrupt branch address for HST_SEQ_INT2 interrupts. */ register INTVEC2_ADDR { address 0x0F6 access_mode RW size 2 modes M_CFG } /* * Last Address * Address of the SEQRAM instruction executed prior to the current instruction.
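 *
 * Together with CURADDR this provides a two-deep trace of sequencer
 * execution.  A debugging sketch, assuming the driver's ahd_inw() and
 * ahd_name() helpers (the chip must be paused for stable values):
 *
 *	u_int cur = ahd_inw(ahd, CURADDR);
 *	u_int last = ahd_inw(ahd, LASTADDR);
 *	printf("%s: seq pc 0x%x, prev 0x%x\n", ahd_name(ahd), cur, last);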
/* * Last Address * Address of the SEQRAM instruction executed prior to the current instruction. */ register LASTADDR { address 0x0F6 access_mode RW size 2 modes M_SCSI } register AHD_PCI_CONFIG_BASE { address 0x100 access_mode RW size 256 modes M_CFG } /* ---------------------- Scratch RAM Offsets ------------------------- */ scratch_ram { /* Mode Specific */ address 0x0A0 size 8 modes 0, 1, 2, 3 REG0 { size 2 } REG1 { size 2 } REG_ISR { size 2 } SG_STATE { size 1 field SEGS_AVAIL 0x01 field LOADING_NEEDED 0x02 field FETCH_INPROG 0x04 } /* * Track whether the transfer byte count for * the current data phase is odd. */ DATA_COUNT_ODD { size 1 } } scratch_ram { /* Mode Specific */ address 0x0F8 size 8 modes 0, 1, 2, 3 LONGJMP_ADDR { size 2 } ACCUM_SAVE { size 1 } } scratch_ram { address 0x100 size 128 modes 0, 1, 2, 3 /* * Per "other-id" execution queues. We use an array of * tail pointers into lists of SCBs sorted by "other-id". * The execution head pointer threads the head SCBs for * each list. */ WAITING_SCB_TAILS { size 32 } WAITING_TID_HEAD { size 2 } WAITING_TID_TAIL { size 2 } /* * SCBID of the next SCB in the new SCB queue. */ NEXT_QUEUED_SCB_ADDR { size 4 } /* * head of list of SCBs that have * completed but have not been * put into the qoutfifo. */ COMPLETE_SCB_HEAD { size 2 } /* * The list of completed SCBs in * the active DMA. */ COMPLETE_SCB_DMAINPROG_HEAD { size 2 } /* * head of list of SCBs that have * completed but need to be uploaded * to the host prior to being completed. */ COMPLETE_DMA_SCB_HEAD { size 2 } - /* Counting semaphore to prevent new select-outs */ + /* + * tail of list of SCBs that have + * completed but need to be uploaded + * to the host prior to being completed. + */ + COMPLETE_DMA_SCB_TAIL { + size 2 + } + /* + * head of list of SCBs that have + * been uploaded to the host, but cannot + * be completed until the QFREEZE is in + * full effect (i.e. no selections pending). + */ + COMPLETE_ON_QFREEZE_HEAD { + size 2 + } + /* + * Counting semaphore to prevent new select-outs + * The queue is frozen so long as the sequencer + * and kernel freeze counts differ. + */ QFREEZE_COUNT { size 2 } + KERNEL_QFREEZE_COUNT { + size 2 + } /* * Mode to restore on legacy idle loop exit. */ SAVED_MODE { size 1 } /* * Single byte buffer used to designate the type of message * to send to a target. */ MSG_OUT { size 1 } /* Parameters for DMA Logic */ DMAPARAMS { size 1 field PRELOADEN 0x80 field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ field FIFOFLUSH 0x02 field FIFORESET 0x01 } SEQ_FLAGS { size 1 field NOT_IDENTIFIED 0x80 field NO_CDB_SENT 0x40 field TARGET_CMD_IS_TAGGED 0x40 field DPHASE 0x20 /* Target flags */ field TARG_CMD_PENDING 0x10 field CMDPHASE_PENDING 0x08 field DPHASE_PENDING 0x04 field SPHASE_PENDING 0x02 field NO_DISCONNECT 0x01 } /* * Temporary storage for the * target/channel/lun of a * reconnecting target */ SAVED_SCSIID { size 1 } SAVED_LUN { size 1 } /* * The last bus phase as seen by the sequencer. */ LASTPHASE { size 1 field CDI 0x80 field IOI 0x40 field MSGI 0x20 field P_BUSFREE 0x01 enum PHASE_MASK CDO|IOO|MSGO { P_DATAOUT 0x0, P_DATAIN IOO, P_DATAOUT_DT P_DATAOUT|MSGO, P_DATAIN_DT P_DATAIN|MSGO, P_COMMAND CDO, P_MESGOUT CDO|MSGO, P_STATUS CDO|IOO, P_MESGIN CDO|IOO|MSGO } } /* * Value to "or" into the SCBPTR[1] value to * indicate that an entry in the QOUTFIFO is valid. */ QOUTFIFO_ENTRY_VALID_TAG { size 1 }
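QFREEZE_COUNT and the new KERNEL_QFREEZE_COUNT defined above act as a split counting semaphore: the sequencer advances one side when it freezes the queue and the kernel advances the other when it releases a freeze. The idle loop (later in this patch) only thaws select-outs once the two 16-bit counts match, comparing them one byte at a time as the 8-bit ALU requires. A host-side model of that test, as an editorial sketch:

#include <stdint.h>

/*
 * Sketch only, not driver code: select-outs stay frozen while the
 * sequencer's freeze count and the kernel's release count differ.
 */
static int
qfreeze_thawed(const uint8_t qfreeze_count[2],
    const uint8_t kernel_qfreeze_count[2])
{
	return (qfreeze_count[0] == kernel_qfreeze_count[0] &&
	    qfreeze_count[1] == kernel_qfreeze_count[1]);
}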
/* + * Kernel and sequencer offsets into the queue of + * incoming target mode command descriptors. The + * queue is full when the KERNEL_TQINPOS == TQINPOS. + */ + KERNEL_TQINPOS { + size 1 + } + TQINPOS { + size 1 + } + /* * Base address of our shared data with the kernel driver in host * memory. This includes the qoutfifo and target mode * incoming command queue. */ SHARED_DATA_ADDR { size 4 } /* * Pointer to location in host memory for next * position in the qoutfifo. */ QOUTFIFO_NEXT_ADDR { size 4 - } - /* - * Kernel and sequencer offsets into the queue of - * incoming target mode command descriptors. The - * queue is full when the KERNEL_TQINPOS == TQINPOS. - */ - KERNEL_TQINPOS { - size 1 - } - TQINPOS { - size 1 } ARG_1 { size 1 mask SEND_MSG 0x80 mask SEND_SENSE 0x40 mask SEND_REJ 0x20 mask MSGOUT_PHASEMIS 0x10 mask EXIT_MSG_LOOP 0x08 mask CONT_MSG_LOOP_WRITE 0x04 mask CONT_MSG_LOOP_READ 0x03 mask CONT_MSG_LOOP_TARG 0x02 alias RETURN_1 } ARG_2 { size 1 alias RETURN_2 } /* * Snapshot of MSG_OUT taken after each message is sent. */ LAST_MSG { size 1 } /* * Sequences the kernel driver has okayed for us. This allows * the driver to do things like prevent initiator or target * operations. */ SCSISEQ_TEMPLATE { size 1 field MANUALCTL 0x40 field ENSELI 0x20 field ENRSELI 0x10 field MANUALP 0x0C field ENAUTOATNP 0x02 field ALTSTIM 0x01 } /* * The initiator specified tag for this target mode transaction. */ INITIATOR_TAG { size 1 } SEQ_FLAGS2 { size 1 field TARGET_MSG_PENDING 0x02 field SELECTOUT_QFROZEN 0x04 } ALLOCFIFO_SCBPTR { size 2 } /* * The maximum amount of time to wait, when interrupt coalescing * is enabled, before issuing a CMDCMPLT interrupt for a completed * command. */ INT_COALESCING_TIMER { size 2 } /* * The maximum number of commands to coalesce into a single interrupt. * Actually the 2's complement of that value to simplify sequencer * code. */ INT_COALESCING_MAXCMDS { size 1 } /* * The minimum number of commands still outstanding required * to continue coalescing (2's complement of value). */ INT_COALESCING_MINCMDS { size 1 } /* * Number of commands "in-flight". */ CMDS_PENDING { size 2 } /* * The count of commands that have been coalesced. */ INT_COALESCING_CMDCOUNT { size 1 } /* * Since the HS_MAILBOX is self clearing, copy its contents to * this position in scratch ram every time it changes. */ LOCAL_HS_MAILBOX { size 1 } /* * Target-mode CDB type to CDB length table used * in non-packetized operation. */ CMDSIZE_TABLE { size 8 } } /************************* Hardware SCB Definition ****************************/ scb { address 0x180 size 64 modes 0, 1, 2, 3 SCB_RESIDUAL_DATACNT { size 4 alias SCB_CDB_STORE alias SCB_HOST_CDB_PTR } SCB_RESIDUAL_SGPTR { size 4 field SG_ADDR_MASK 0xf8 /* In the last byte */ field SG_OVERRUN_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_SCSI_STATUS { size 1 alias SCB_HOST_CDB_LEN } SCB_TARGET_PHASES { size 1 } SCB_TARGET_DATA_DIR { size 1 } SCB_TARGET_ITAG { size 1 } SCB_SENSE_BUSADDR { /* * Only valid if CDB length is less than 13 bytes or * we are using a CDB pointer. Otherwise contains * the last 4 bytes of embedded cdb information. */ size 4 alias SCB_NEXT_COMPLETE } SCB_TAG { alias SCB_FIFO_USE_COUNT size 2 } SCB_CONTROL { size 1 field TARGET_SCB 0x80 field DISCENB 0x40 field TAG_ENB 0x20 field MK_MESSAGE 0x10 field STATUS_RCVD 0x08 field DISCONNECTED 0x04 field SCB_TAG_TYPE 0x03 } SCB_SCSIID { size 1 field TID 0xF0 field OID 0x0F } SCB_LUN { size 1 field LID 0xff } SCB_TASK_ATTRIBUTE { size 1 /* * Overloaded field for non-packetized * ignore wide residue message handling.
*/ field SCB_XFERLEN_ODD 0x01 } SCB_CDB_LEN { size 1 field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */ } SCB_TASK_MANAGEMENT { size 1 } SCB_DATAPTR { size 8 } SCB_DATACNT { /* * The last byte is really the high address bits for * the data address. */ size 4 field SG_LAST_SEG 0x80 /* In the fourth byte */ field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ } SCB_SGPTR { size 4 field SG_STATUS_VALID 0x04 /* In the first byte */ field SG_FULL_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_BUSADDR { size 4 } SCB_NEXT { alias SCB_NEXT_SCB_BUSADDR size 2 } SCB_NEXT2 { size 2 } SCB_SPARE { size 8 alias SCB_PKT_LUN } SCB_DISCONNECTED_LISTS { size 8 } } /*********************************** Constants ********************************/ const MK_MESSAGE_BIT_OFFSET 4 const TID_SHIFT 4 const TARGET_CMD_CMPLT 0xfe const INVALID_ADDR 0x80 #define SCB_LIST_NULL 0xff #define QOUTFIFO_ENTRY_VALID_TOGGLE 0x80 const CCSGADDR_MAX 0x80 const CCSCBADDR_MAX 0x80 const CCSGRAM_MAXSEGS 16 /* Selection Timeout Timer Constants */ const STIMESEL_SHIFT 3 const STIMESEL_MIN 0x18 const STIMESEL_BUG_ADJ 0x8 /* WDTR Message values */ const BUS_8_BIT 0x00 const BUS_16_BIT 0x01 const BUS_32_BIT 0x02 /* Offset maximums */ const MAX_OFFSET 0xfe const MAX_OFFSET_PACED 0xfe const MAX_OFFSET_PACED_BUG 0x7f /* * Some 160 devices incorrectly accept 0xfe as a * sync offset, but will overrun this value. Limit * to 0x7f for speeds lower than U320 which will * avoid the persistent sync offset overruns. */ const MAX_OFFSET_NON_PACED 0x7f const HOST_MSG 0xff /* * The size of our sense buffers. * Sense buffer mapping can be handled in either of two ways. * The first is to allocate a dmamap for each transaction. * Depending on the architecture, dmamaps can be costly. The * alternative is to statically map the buffers in much the same * way we handle our scatter gather lists. The driver implements * the latter. */ const AHD_SENSE_BUFSIZE 256 /* Target mode command processing constants */ const CMD_GROUP_CODE_SHIFT 0x05 const STATUS_BUSY 0x08 const STATUS_QUEUE_FULL 0x28 const STATUS_PKT_SENSE 0xFF const TARGET_DATA_IN 1 const SCB_TRANSFER_SIZE_FULL_LUN 56 const SCB_TRANSFER_SIZE_1BYTE_LUN 48 /* PKT_OVERRUN_BUFSIZE must be a multiple of 256 less than 64K */ const PKT_OVERRUN_BUFSIZE 512 /* * Timer parameters. */ const AHD_TIMER_US_PER_TICK 25 const AHD_TIMER_MAX_TICKS 0xFFFF const AHD_TIMER_MAX_US (AHD_TIMER_MAX_TICKS * AHD_TIMER_US_PER_TICK) /* * Downloaded (kernel inserted) constants */ const SG_PREFETCH_CNT download const SG_PREFETCH_CNT_LIMIT download const SG_PREFETCH_ALIGN_MASK download const SG_PREFETCH_ADDR_MASK download const SG_SIZEOF download const PKT_OVERRUN_BUFOFFSET download const SCB_TRANSFER_SIZE download /* * BIOS SCB offsets */ const NVRAM_SCB_OFFSET 0x2C Index: stable/4/sys/dev/aic7xxx/aic79xx.seq =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx.seq (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx.seq (revision 125849) @@ -1,2031 +1,2181 @@ /* * Adaptec U320 device driver firmware for Linux and FreeBSD. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2.
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ -VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#94 $" +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#118 $" PATCH_ARG_LIST = "struct ahd_softc *ahd" PREFIX = "ahd_" #include "aic79xx.reg" #include "scsi_message.h" restart: if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { test SEQINTCODE, 0xFF jz idle_loop; SET_SEQINTCODE(NO_SEQINT) } idle_loop: if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { /* * Convert ERROR status into a sequencer * interrupt to handle the case of an * interrupt collision on the hardware * setting of HWERR. */ test ERROR, 0xFF jz no_error_set; SET_SEQINTCODE(SAW_HWERR) no_error_set: } SET_MODE(M_SCSI, M_SCSI) test SCSISEQ0, ENSELO|ENARBO jnz idle_loop_checkbus; - test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz idle_loop_checkbus; + test SEQ_FLAGS2, SELECTOUT_QFROZEN jz check_waiting_list; + /* + * If the kernel has caught up with us, thaw the queue. + */ + mov A, KERNEL_QFREEZE_COUNT; + cmp QFREEZE_COUNT, A jne check_frozen_completions; + mov A, KERNEL_QFREEZE_COUNT[1]; + cmp QFREEZE_COUNT[1], A jne check_frozen_completions; + and SEQ_FLAGS2, ~SELECTOUT_QFROZEN; + jmp check_waiting_list; +check_frozen_completions: + test SSTAT0, SELDO|SELINGO jnz idle_loop_checkbus; +BEGIN_CRITICAL; + /* + * If we have completions stalled waiting for the qfreeze + * to take effect, move them over to the complete_scb list + * now that no selections are pending. + */ + cmp COMPLETE_ON_QFREEZE_HEAD[1],SCB_LIST_NULL je idle_loop_checkbus; + /* + * Find the end of the qfreeze list. The first element has + * to be treated specially. + */ + bmov SCBPTR, COMPLETE_ON_QFREEZE_HEAD, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je join_lists; + /* + * Now the normal loop. + */ + bmov SCBPTR, SCB_NEXT_COMPLETE, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . 
- 1; +join_lists: + bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; + bmov COMPLETE_SCB_HEAD, COMPLETE_ON_QFREEZE_HEAD, 2; + mvi COMPLETE_ON_QFREEZE_HEAD[1], SCB_LIST_NULL; + jmp idle_loop_checkbus; +check_waiting_list: cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je idle_loop_checkbus; /* * ENSELO is cleared by a SELDO, so we must test for SELDO * one last time. */ -BEGIN_CRITICAL; test SSTAT0, SELDO jnz select_out; END_CRITICAL; call start_selection; idle_loop_checkbus: BEGIN_CRITICAL; test SSTAT0, SELDO jnz select_out; END_CRITICAL; test SSTAT0, SELDI jnz select_in; test SCSIPHASE, ~DATA_PHASE_MASK jz idle_loop_check_nonpackreq; test SCSISIGO, ATNO jz idle_loop_check_nonpackreq; call unexpected_nonpkt_phase_find_ctxt; idle_loop_check_nonpackreq: test SSTAT2, NONPACKREQ jz . + 2; call unexpected_nonpkt_phase_find_ctxt; if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automatically during selections + * and re-selections. Make the LED status + * more useful by forcing it to be on so + * long as one of our data FIFOs is active. + */ and A, FIFO0FREE|FIFO1FREE, DFFSTAT; cmp A, FIFO0FREE|FIFO1FREE jne . + 3; and SBLKCTL, ~DIAGLEDEN|DIAGLEDON; jmp . + 2; or SBLKCTL, DIAGLEDEN|DIAGLEDON; } call idle_loop_gsfifo_in_scsi_mode; call idle_loop_service_fifos; call idle_loop_cchan; jmp idle_loop; -BEGIN_CRITICAL; idle_loop_gsfifo: SET_MODE(M_SCSI, M_SCSI) +BEGIN_CRITICAL; idle_loop_gsfifo_in_scsi_mode: test LQISTAT2, LQIGSAVAIL jz return; /* * We have received good status for this transaction. There may * still be data in our FIFOs draining to the host. Complete * the SCB only if all data has transferred to the host. */ good_status_IU_done: bmov SCBPTR, GSFIFO, 2; clr SCB_SCSI_STATUS; /* * If a command completed before an attempted task management * function completed, notify the host after disabling any * pending select-outs. */ test SCB_TASK_MANAGEMENT, 0xFF jz gsfifo_complete_normally; test SSTAT0, SELDO|SELINGO jnz . + 2; and SCSISEQ0, ~ENSELO; SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) gsfifo_complete_normally: or SCB_CONTROL, STATUS_RCVD; /* * Since this status did not consume a FIFO, we have to * be a bit more diligent in how we check for FIFOs pertaining * to this transaction. There are two states that a FIFO still * transferring data may be in. * * 1) Configured and draining to the host, with a FIFO handler. * 2) Pending cfg4data, fifo not empty. * * Case 1 can be detected by noticing a non-zero FIFO active * count in the SCB. In this case, we allow the routine servicing * the FIFO to complete the SCB. * * Case 2 implies either a pending or yet to occur save data * pointers for this same context in the other FIFO. So, if * we detect case 1, we will properly defer the post of the SCB * and achieve the desired result. The pending cfg4data will * notice that status has been received and complete the SCB.
*/ test SCB_FIFO_USE_COUNT, 0xFF jnz idle_loop_gsfifo_in_scsi_mode; call complete; END_CRITICAL; jmp idle_loop_gsfifo_in_scsi_mode; idle_loop_service_fifos: SET_MODE(M_DFF0, M_DFF0) +BEGIN_CRITICAL; test LONGJMP_ADDR[1], INVALID_ADDR jnz idle_loop_next_fifo; call longjmp; +END_CRITICAL; idle_loop_next_fifo: SET_MODE(M_DFF1, M_DFF1) +BEGIN_CRITICAL; test LONGJMP_ADDR[1], INVALID_ADDR jz longjmp; +END_CRITICAL; return: ret; idle_loop_cchan: SET_MODE(M_CCHAN, M_CCHAN) test QOFF_CTLSTA, HS_MAILBOX_ACT jz hs_mailbox_empty; - mov LOCAL_HS_MAILBOX, HS_MAILBOX; or QOFF_CTLSTA, HS_MAILBOX_ACT; + mov LOCAL_HS_MAILBOX, HS_MAILBOX; hs_mailbox_empty: BEGIN_CRITICAL; test CCSCBCTL, CCARREN|CCSCBEN jz scbdma_idle; test CCSCBCTL, CCSCBDIR jnz fetch_new_scb_inprog; test CCSCBCTL, CCSCBDONE jz return; -END_CRITICAL; /* FALLTHROUGH */ scbdma_tohost_done: test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; /* * An SCB has been successfully uploaded to the host. * If the SCB was uploaded for some reason other than * bad SCSI status (currently only for underruns), we * queue the SCB for normal completion. Otherwise, we * wait until any select-out activity has halted, and - * then notify the host so that the transaction can be - * dealt with. + * then queue the completion. */ - test SCB_SCSI_STATUS, 0xff jnz scbdma_notify_host; and CCSCBCTL, ~(CCARREN|CCSCBEN); bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2; + cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL jne . + 2; + mvi COMPLETE_DMA_SCB_TAIL[1], SCB_LIST_NULL; + test SCB_SCSI_STATUS, 0xff jz scbdma_queue_completion; + bmov SCB_NEXT_COMPLETE, COMPLETE_ON_QFREEZE_HEAD, 2; + bmov COMPLETE_ON_QFREEZE_HEAD, SCBPTR, 2 ret; +scbdma_queue_completion: bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; -scbdma_notify_host: - SET_MODE(M_SCSI, M_SCSI) - test SCSISEQ0, ENSELO jnz return; - test SSTAT0, (SELDO|SELINGO) jnz return; - SET_MODE(M_CCHAN, M_CCHAN) - /* - * Remove SCB and notify host. - */ - and CCSCBCTL, ~(CCARREN|CCSCBEN); - bmov COMPLETE_DMA_SCB_HEAD, SCB_NEXT_COMPLETE, 2; - SET_SEQINTCODE(BAD_SCB_STATUS) - ret; fill_qoutfifo_dmadone: and CCSCBCTL, ~(CCARREN|CCSCBEN); call qoutfifo_updated; mvi COMPLETE_SCB_DMAINPROG_HEAD[1], SCB_LIST_NULL; bmov QOUTFIFO_NEXT_ADDR, SCBHADDR, 4; test QOFF_CTLSTA, SDSCB_ROLLOVR jz return; bmov QOUTFIFO_NEXT_ADDR, SHARED_DATA_ADDR, 4; xor QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID_TOGGLE ret; +END_CRITICAL; qoutfifo_updated: /* * If there are more commands waiting to be dma'ed * to the host, always coalesce. Otherwise honor the * host's wishes. */ cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL jne coalesce_by_count; test LOCAL_HS_MAILBOX, ENINT_COALESCE jz issue_cmdcmplt; /* * If we have relatively few commands outstanding, don't * bother waiting for another command to complete. */ test CMDS_PENDING[1], 0xFF jnz coalesce_by_count; /* Add -1 so that jnc means <= not just < */ add A, -1, INT_COALESCING_MINCMDS; add NONE, A, CMDS_PENDING; jnc issue_cmdcmplt; /* * If coalescing, only coalesce up to the limit * provided by the host driver. */ coalesce_by_count: mov A, INT_COALESCING_MAXCMDS; add NONE, A, INT_COALESCING_CMDCOUNT; jc issue_cmdcmplt;
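The coalesce_by_count test above relies on INT_COALESCING_MAXCMDS being downloaded in 2's complement form: adding the running command count to -maxcmds produces a carry exactly when the limit is reached, so a single "jc" suffices. The MINCMDS check uses the same trick with an extra -1 so that "jnc" means "less than or equal". An editorial sketch of the carry test in C:

#include <stdint.h>

/*
 * Sketch only: carry-out from an 8-bit add of the negated limit and
 * the running count, mirroring "add NONE, A, INT_COALESCING_CMDCOUNT;
 * jc issue_cmdcmplt" above.
 */
static int
coalesce_limit_reached(uint8_t cmdcount, uint8_t maxcmds)
{
	uint8_t neg_maxcmds = (uint8_t)-maxcmds;	/* downloaded form */

	return ((unsigned)neg_maxcmds + cmdcount > 0xFF);
}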
/* * If the timer is not currently active, * fire it up. */ test INTCTL, SWTMINTMASK jz return; bmov SWTIMER, INT_COALESCING_TIMER, 2; mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; or INTCTL, SWTMINTEN|SWTIMER_START; and INTCTL, ~SWTMINTMASK ret; issue_cmdcmplt: mvi INTSTAT, CMDCMPLT; clr INT_COALESCING_CMDCOUNT; or INTCTL, SWTMINTMASK ret; BEGIN_CRITICAL; fetch_new_scb_inprog: test CCSCBCTL, ARRDONE jz return; fetch_new_scb_done: and CCSCBCTL, ~(CCARREN|CCSCBEN); bmov REG0, SCBPTR, 2; clr A; add CMDS_PENDING, 1; adc CMDS_PENDING[1], A; if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { /* * "Short Luns" are not placed into outgoing LQ * packets in the correct byte order. Use a full * sized lun field instead and fill it with the * one byte of lun information we support. */ mov SCB_PKT_LUN[6], SCB_LUN; } /* * The FIFO use count field is shared with the * tag set by the host so that our SCB dma engine * knows the correct location to store the SCB. * Set it to zero before processing the SCB. */ clr SCB_FIFO_USE_COUNT; /* Update the next SCB address to download. */ bmov NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4; mvi SCB_NEXT[1], SCB_LIST_NULL; mvi SCB_NEXT2[1], SCB_LIST_NULL; /* Increment our position in the QINFIFO. */ mov NONE, SNSCB_QOFF; /* * SCBs that want to send messages are always * queued independently. This ensures that they * are at the head of the SCB list to select out * to a target and we will see the MK_MESSAGE flag. */ test SCB_CONTROL, MK_MESSAGE jnz first_new_target_scb; shr SINDEX, 3, SCB_SCSIID; and SINDEX, ~0x1; mvi SINDEX[1], (WAITING_SCB_TAILS >> 8); bmov DINDEX, SINDEX, 2; bmov SCBPTR, SINDIR, 2; bmov DINDIR, REG0, 2; cmp SCBPTR[1], SCB_LIST_NULL je first_new_target_scb; bmov SCB_NEXT, REG0, 2 ret; first_new_target_scb: cmp WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb; bmov SCBPTR, WAITING_TID_TAIL, 2; bmov SCB_NEXT2, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2 ret; first_new_scb: bmov WAITING_TID_HEAD, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2 ret; END_CRITICAL; scbdma_idle: /* * Give precedence to downloading new SCBs to execute * unless select-outs are currently frozen. */ test SEQ_FLAGS2, SELECTOUT_QFROZEN jnz . + 2; BEGIN_CRITICAL; test QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb; cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb; cmp COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return; /* FALLTHROUGH */
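The rewritten fill_qoutfifo_loop below streams eight bytes through CCSCBRAM per completed command instead of the old two-byte tag. The layout here is inferred from that byte stream and is an editorial sketch, not a copy of the driver's own definition:

#include <stdint.h>

/*
 * Sketch of one qoutfifo entry as the loop below emits it. The
 * rolling valid_tag (toggled on each qoutfifo wrap) lets the host
 * spot new entries without having to clear consumed ones.
 */
struct qoutfifo_entry_sketch {
	uint16_t scb_tag;	/* bmov CCSCBRAM, SCBPTR, 2 */
	uint8_t	 sg_status;	/* mov CCSCBRAM, SCB_SGPTR[0] */
	uint8_t	 pad[4];	/* bmov CCSCBRAM, ALLZEROS, 4 */
	uint8_t	 valid_tag;	/* mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG */
};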
fill_qoutfifo: /* * Keep track of the SCBs we are dmaing just * in case the DMA fails or is aborted. */ - mov A, QOUTFIFO_ENTRY_VALID_TAG; bmov COMPLETE_SCB_DMAINPROG_HEAD, COMPLETE_SCB_HEAD, 2; mvi CCSCBCTL, CCSCBRESET; bmov SCBHADDR, QOUTFIFO_NEXT_ADDR, 4; bmov SCBPTR, COMPLETE_SCB_HEAD, 2; fill_qoutfifo_loop: - mov CCSCBRAM, SCBPTR; - or CCSCBRAM, A, SCBPTR[1]; + bmov CCSCBRAM, SCBPTR, 2; + mov CCSCBRAM, SCB_SGPTR[0]; + bmov CCSCBRAM, ALLZEROS, 4; + mov CCSCBRAM, QOUTFIFO_ENTRY_VALID_TAG; mov NONE, SDSCB_QOFF; inc INT_COALESCING_CMDCOUNT; add CMDS_PENDING, -1; adc CMDS_PENDING[1], -1; cmp SCB_NEXT_COMPLETE[1], SCB_LIST_NULL je fill_qoutfifo_done; cmp CCSCBADDR, CCSCBADDR_MAX je fill_qoutfifo_done; test QOFF_CTLSTA, SDSCB_ROLLOVR jnz fill_qoutfifo_done; bmov SCBPTR, SCB_NEXT_COMPLETE, 2; jmp fill_qoutfifo_loop; fill_qoutfifo_done: mov SCBHCNT, CCSCBADDR; mvi CCSCBCTL, CCSCBEN|CCSCBRESET; bmov COMPLETE_SCB_HEAD, SCB_NEXT_COMPLETE, 2; mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL ret; fetch_new_scb: bmov SCBHADDR, NEXT_QUEUED_SCB_ADDR, 4; mvi CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET jmp dma_scb; dma_complete_scb: bmov SCBPTR, COMPLETE_DMA_SCB_HEAD, 2; bmov SCBHADDR, SCB_BUSADDR, 4; mvi CCARREN|CCSCBEN|CCSCBRESET jmp dma_scb; -END_CRITICAL; /* * Either post or fetch an SCB from host memory. The caller * is responsible for polling for transfer completion. * * Prerequisites: Mode == M_CCHAN * SINDEX contains CCSCBCTL flags * SCBHADDR set to Host SCB address * SCBPTR set to SCB src location on "push" operations */ SET_SRC_MODE M_CCHAN; SET_DST_MODE M_CCHAN; dma_scb: mvi SCBHCNT, SCB_TRANSFER_SIZE; mov CCSCBCTL, SINDEX ret; -BEGIN_CRITICAL; setjmp: - bmov LONGJMP_ADDR, STACK, 2 ret; + /* + * At least on the A, a return in the same + * instruction as the bmov results in a return + * to the caller, not to the new address at the + * top of the stack. Since we want the latter + * (we use setjmp to register a handler from an + * interrupt context but not invoke that handler + * until we return to our idle loop), use a + * separate ret instruction. + */ + bmov LONGJMP_ADDR, STACK, 2; + ret; setjmp_inline: bmov LONGJMP_ADDR, STACK, 2; longjmp: bmov STACK, LONGJMP_ADDR, 2 ret; END_CRITICAL; /*************************** Chip Bug Work Arounds ****************************/ /* * Must disable interrupts when setting the mode pointer * register as an interrupt occurring mid update will * fail to store the new mode value for restoration on * an iret. */ if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { set_mode_work_around: mvi SEQINTCTL, INTVEC1DSL; mov MODE_PTR, SINDEX; clr SEQINTCTL ret; - -toggle_dff_mode_work_around: - mvi SEQINTCTL, INTVEC1DSL; - xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); - clr SEQINTCTL ret; } if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { set_seqint_work_around: mov SEQINTCODE, SINDEX; mvi SEQINTCODE, NO_SEQINT ret; } /************************ Packetized LongJmp Routines *************************/ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; start_selection: BEGIN_CRITICAL; if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { /* * Razor #494 * Rev A hardware fails to update LAST/CURR/NEXTSCB * correctly after a packetized selection in several * situations: * * 1) If only one command existed in the queue, the * LAST/CURR/NEXTSCB are unchanged. * * 2) In a non QAS, protocol allowed phase change, * the queue is shifted 1 too far. LASTSCB is * the last SCB that was correctly processed. * * 3) In the QAS case, if the full list of commands * was successfully sent, NEXTSCB is NULL and neither * CURRSCB nor LASTSCB can be trusted.
We must * manually walk the list counting MAXCMDCNT elements * to find the last SCB that was sent correctly. * * To simplify the workaround for this bug in SELDO * handling, we initialize LASTSCB prior to enabling * selection so we can rely on it even for case #1 above. */ bmov LASTSCB, WAITING_TID_HEAD, 2; } bmov CURRSCB, WAITING_TID_HEAD, 2; bmov SCBPTR, WAITING_TID_HEAD, 2; shr SELOID, 4, SCB_SCSIID; /* * If we want to send a message to the device, ensure * we are selecting with atn regardless of our packetized * agreement. Since SPI4 only allows target reset or PPR * messages if this is a packetized connection, the change * to our negotiation table entry for this selection will * be cleared when the message is acted on. */ test SCB_CONTROL, MK_MESSAGE jz . + 3; mov NEGOADDR, SELOID; or NEGCONOPTS, ENAUTOATNO; or SCSISEQ0, ENSELO ret; END_CRITICAL; /* * Allocate a FIFO for a non-packetized transaction. * In RevA hardware, both FIFOs must be free before we * can allocate a FIFO for a non-packetized transaction. */ allocate_fifo_loop: /* * Do whatever work is required to free a FIFO. */ call idle_loop_service_fifos; SET_MODE(M_SCSI, M_SCSI) allocate_fifo: if ((ahd->bugs & AHD_NONPACKFIFO_BUG) != 0) { and A, FIFO0FREE|FIFO1FREE, DFFSTAT; cmp A, FIFO0FREE|FIFO1FREE jne allocate_fifo_loop; } else { test DFFSTAT, FIFO1FREE jnz allocate_fifo1; test DFFSTAT, FIFO0FREE jz allocate_fifo_loop; mvi DFFSTAT, B_CURRFIFO_0; SET_MODE(M_DFF0, M_DFF0) bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; } SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; allocate_fifo1: mvi DFFSTAT, CURRFIFO_1; SET_MODE(M_DFF1, M_DFF1) bmov SCBPTR, ALLOCFIFO_SCBPTR, 2 ret; /* * We have been reselected as an initiator * or selected as a target. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; select_in: + if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automatically during selections + * and re-selections. Make the LED status + * more useful by forcing it to be on from + * the point of selection until our idle + * loop determines that neither of our FIFOs + * are busy. This handles the non-packetized + * case nicely as we will not return to the + * idle loop until the busfree at the end of + * each transaction. + */ + or SBLKCTL, DIAGLEDEN|DIAGLEDON; + } if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { /* - * This exposes a window whereby a - * busfree just after a selection will - * be missed, but there is no other safe - * way to enable busfree detection if - * the busfreerev function is broken. + * Test to ensure that the bus has not + * already gone free prior to clearing + * any stale busfree status. This avoids + * a window whereby a busfree just after + * a selection could be missed. */ + test SCSISIGI, BSYI jz . + 2; mvi CLRSINT1,CLRBUSFREE; or SIMODE1, ENBUSFREE; } or SXFRCTL0, SPIOEN; and SAVED_SCSIID, SELID_MASK, SELID; and A, OID, IOWNID; or SAVED_SCSIID, A; mvi CLRSINT0, CLRSELDI; jmp ITloop;
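The allocate_fifo path above encodes a Rev A restriction: with AHD_NONPACKFIFO_BUG, a non-packetized transaction may only claim a FIFO when both are free, while later silicon can take whichever FIFO is idle. The predicate, as an editorial sketch in C:

/*
 * Sketch only: FIFO allocation rule for non-packetized transfers.
 */
static int
can_allocate_fifo(int fifo0_free, int fifo1_free, int nonpackfifo_bug)
{
	if (nonpackfifo_bug)
		return (fifo0_free && fifo1_free);	/* Rev A */
	return (fifo0_free || fifo1_free);
}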
/* * We have successfully selected out. * * Clear SELDO. * Dequeue all SCBs sent from the waiting queue * Requeue all SCBs *not* sent to the tail of the waiting queue * Take Razor #494 into account for above. * * In Packetized Mode: * Return to the idle loop. Our interrupt handler will take * care of any incoming L_Qs. * * In Non-Packetized Mode: * Continue to our normal state machine. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; select_out: BEGIN_CRITICAL; + if ((ahd->bugs & AHD_FAINT_LED_BUG) != 0) { + /* + * On Rev A. hardware, the busy LED is only + * turned on automatically during selections + * and re-selections. Make the LED status + * more useful by forcing it to be on from + * the point of re-selection until our idle + * loop determines that neither of our FIFOs + * are busy. This handles the non-packetized + * case nicely as we will not return to the + * idle loop until the busfree at the end of + * each transaction. + */ + or SBLKCTL, DIAGLEDEN|DIAGLEDON; + } /* Clear out all SCBs that have been successfully sent. */ if ((ahd->bugs & AHD_SENT_SCB_UPDATE_BUG) != 0) { /* * For packetized, the LQO manager clears ENSELO on * the assertion of SELDO. If we are non-packetized, * LASTSCB and CURRSCB are accurate. */ test SCSISEQ0, ENSELO jnz use_lastscb; /* * The update is correct for LQOSTAT1 errors. All * but LQOBUSFREE are handled by kernel interrupts. * If we see LQOBUSFREE, return to the idle loop. * Once we are out of the select_out critical section, * the kernel will cleanup the LQOBUSFREE and we will * eventually restart the selection if appropriate. */ test LQOSTAT1, LQOBUSFREE jnz idle_loop; /* * On a phase change outside of packet boundaries, * LASTSCB points to the currently active SCB context * on the bus. */ test LQOSTAT2, LQOPHACHGOUTPKT jnz use_lastscb; /* * If the hardware has traversed the whole list, NEXTSCB * will be NULL, CURRSCB and LASTSCB cannot be trusted, * but MAXCMDCNT is accurate. If we stop part way through * the list or only had one command to issue, NEXTSCB[1] is * not NULL and LASTSCB is the last command to go out. */ cmp NEXTSCB[1], SCB_LIST_NULL jne use_lastscb; /* * Brute force walk. */ bmov SCBPTR, WAITING_TID_HEAD, 2; mvi SEQINTCTL, INTVEC1DSL; mvi MODE_PTR, MK_MODE(M_CFG, M_CFG); mov A, MAXCMDCNT; mvi MODE_PTR, MK_MODE(M_SCSI, M_SCSI); clr SEQINTCTL; find_lastscb_loop: dec A; test A, 0xFF jz found_last_sent_scb; bmov SCBPTR, SCB_NEXT, 2; jmp find_lastscb_loop; use_lastscb: bmov SCBPTR, LASTSCB, 2; found_last_sent_scb: bmov CURRSCB, SCBPTR, 2; curscb_ww_done: } else { bmov SCBPTR, CURRSCB, 2; } /* * Requeue any SCBs not sent, to the tail of the waiting Q. */ cmp SCB_NEXT[1], SCB_LIST_NULL je select_out_list_done; /* * We know that neither the per-TID list nor the list of * TIDs is empty. Use this knowledge to our advantage. */ bmov REG0, SCB_NEXT, 2; bmov SCBPTR, WAITING_TID_TAIL, 2; bmov SCB_NEXT2, REG0, 2; bmov WAITING_TID_TAIL, REG0, 2; jmp select_out_inc_tid_q; select_out_list_done: /* * The whole list made it. Just clear our TID's tail pointer * unless we were queued independently due to our need to * send a message. */ test SCB_CONTROL, MK_MESSAGE jnz select_out_inc_tid_q; shr DINDEX, 3, SCB_SCSIID; or DINDEX, 1; /* Want only the second byte */ mvi DINDEX[1], ((WAITING_SCB_TAILS) >> 8); mvi DINDIR, SCB_LIST_NULL; select_out_inc_tid_q: bmov SCBPTR, WAITING_TID_HEAD, 2; bmov WAITING_TID_HEAD, SCB_NEXT2, 2; cmp WAITING_TID_HEAD[1], SCB_LIST_NULL jne . + 2; mvi WAITING_TID_TAIL[1], SCB_LIST_NULL; bmov SCBPTR, CURRSCB, 2; mvi CLRSINT0, CLRSELDO; test LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_phase; test LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_phase; /* * If this is a packetized connection, return to our * idle_loop and let our interrupt handler deal with * any connection setup/teardown issues. The only * exceptions are the case of MK_MESSAGE and task management * SCBs.
*/ if ((ahd->bugs & AHD_LQO_ATNO_BUG) != 0) { /* * In the A, the LQO manager transitions to LQOSTOP0 even if * we have selected out with ATN asserted and the target * REQs in a non-packet phase. */ test SCB_CONTROL, MK_MESSAGE jz select_out_no_message; test SCSISIGO, ATNO jnz select_out_non_packetized; select_out_no_message: } test LQOSTAT2, LQOSTOP0 jz select_out_non_packetized; test SCB_TASK_MANAGEMENT, 0xFF jz idle_loop; SET_SEQINTCODE(TASKMGMT_FUNC_COMPLETE) jmp idle_loop; select_out_non_packetized: /* Non packetized request. */ and SCSISEQ0, ~ENSELO; if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { /* - * This exposes a window whereby a - * busfree just after a selection will - * be missed, but there is no other safe - * way to enable busfree detection if - * the busfreerev function is broken. + * Test to ensure that the bus has not + * already gone free prior to clearing + * any stale busfree status. This avoids + * a window whereby a busfree just after + * a selection could be missed. */ + test SCSISIGI, BSYI jz . + 2; mvi CLRSINT1,CLRBUSFREE; or SIMODE1, ENBUSFREE; } mov SAVED_SCSIID, SCB_SCSIID; mov SAVED_LUN, SCB_LUN; mvi SEQ_FLAGS, NO_CDB_SENT; END_CRITICAL; or SXFRCTL0, SPIOEN; /* * As soon as we get a successful selection, the target * should go into the message out phase since we have ATN * asserted. */ mvi MSG_OUT, MSG_IDENTIFYFLAG; /* * Main loop for information transfer phases. Wait for the * target to assert REQ before checking MSG, C/D and I/O for * the bus phase. */ mesgin_phasemis: ITloop: call phase_lock; mov A, LASTPHASE; test A, ~P_DATAIN_DT jz p_data; cmp A,P_COMMAND je p_command; cmp A,P_MESGOUT je p_mesgout; cmp A,P_STATUS je p_status; cmp A,P_MESGIN je p_mesgin; SET_SEQINTCODE(BAD_PHASE) jmp ITloop; /* Try reading the bus again. */ /* * Command phase. Set up the DMA registers and let 'er rip. */ p_command: test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; SET_SEQINTCODE(PROTO_VIOLATION) p_command_okay: test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz p_command_allocate_fifo; /* * Command retry. Free our current FIFO and * re-allocate a FIFO so transfer state is * reset. */ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) p_command_allocate_fifo: bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; call allocate_fifo; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; add NONE, -17, SCB_CDB_LEN; jnc p_command_embedded; p_command_from_host: bmov HADDR[0], SCB_HOST_CDB_PTR, 9; mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); jmp p_command_xfer; p_command_embedded: bmov SHCNT[0], SCB_CDB_LEN, 1; bmov DFDAT, SCB_CDB_STORE, 16; mvi DFCNTRL, SCSIEN; p_command_xfer: and SEQ_FLAGS, ~NO_CDB_SENT; - test DFCNTRL, SCSIEN jnz .; + if ((ahd->features & AHD_FAST_CDB_DELIVERY) != 0) { + /* + * To speed up CDB delivery in Rev B, all CDB acks + * are "released" to the output sync as soon as the + * command phase starts. There is only one problem + * with this approach. If the target changes phase + * before all data are sent, we have left over acks + * that can go out on the bus in a data phase. Due + * to other chip constraints, this only happens if + * the target goes to data-in, but if the acks go + * out before we can test SDONE, we'll think that + * the transfer has completed successfully. Work + * around this by taking advantage of the 400ns or + * 800ns dead time between command phase and the REQ + * of the new phase. If the transfer has completed + * successfully, SCSIEN should fall *long* before we + * see a phase change.
We thus treat any phasemiss + * that occurs before SCSIEN falls as an incomplete + * transfer. + */ + test SSTAT1, PHASEMIS jnz p_command_xfer_failed; + test DFCNTRL, SCSIEN jnz . - 1; + } else { + test DFCNTRL, SCSIEN jnz .; + } /* * DMA Channel automatically disabled. * Don't allow a data phase if the command * was not fully transferred. */ test SSTAT2, SDONE jnz ITloop; +p_command_xfer_failed: or SEQ_FLAGS, NO_CDB_SENT; jmp ITloop; /* * Status phase. Wait for the data byte to appear, then read it * and store it into the SCB. */ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; p_status: test SEQ_FLAGS,NOT_IDENTIFIED jnz mesgin_proto_violation; p_status_okay: mov SCB_SCSI_STATUS, SCSIDAT; or SCB_CONTROL, STATUS_RCVD; jmp ITloop; /* * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full * identify message sequence and send it to the target. The host may * override this behavior by setting the MK_MESSAGE bit in the SCB * control byte. This will cause us to interrupt the host and allow * it to handle the message phase completely on its own. If the bit * associated with this target is set, we will also interrupt the host, * thereby allowing it to send a message on the next selection regardless * of the transaction being sent. * * If MSG_OUT is == HOST_MSG, also interrupt the host and take a message. * This is done to allow the host to send messages outside of an identify * sequence while protecting the sequencer from testing the MK_MESSAGE bit * on an SCB that might not be for the current nexus. (For example, a * BDR message in response to a bad reselection would leave us pointed to * an SCB that doesn't have anything to do with the current target). * * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, * bus device reset). * * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, * in case the target decides to put us in this phase for some strange * reason. */ p_mesgout_retry: /* Turn on ATN for the retry */ mvi SCSISIGO, ATNO; p_mesgout: mov SINDEX, MSG_OUT; cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; p_mesgout_identify: or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SCB_LUN; test SCB_CONTROL, DISCENB jnz . + 2; and SINDEX, ~DISCENB; /* * Send a tag message if TAG_ENB is set in the SCB control block. * Use SCB_NONPACKET_TAG as the tag value. */ p_mesgout_tag: test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; mov SCSIDAT, SINDEX; /* Send the identify message */ call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; and SCSIDAT,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; mov SCBPTR jmp p_mesgout_onebyte; /* * Interrupt the driver, and allow it to handle this message * phase and any required retries. */ p_mesgout_from_host: cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; jmp host_message_loop; p_mesgout_onebyte: mvi CLRSINT1, CLRATNO; mov SCSIDAT, SINDEX; /* * If the next bus phase after ATN drops is message out, it means * that the target is requesting that the last message(s) be resent. */ call phase_lock; cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; p_mesgout_done: mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ mov LAST_MSG, MSG_OUT; mvi MSG_OUT, MSG_NOOP; /* No message left */ jmp ITloop;
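p_mesgout_identify above builds the identify byte by or'ing the LUN with MSG_IDENTIFYFLAG and DISCENB, then stripping the disconnect-privilege bit when the SCB does not allow disconnection. An editorial sketch in C (MSG_IDENTIFYFLAG is 0x80 per scsi_message.h, and DISCENB is the 0x40 SCB_CONTROL bit defined earlier in this patch):

#include <stdint.h>

#define MSG_IDENTIFYFLAG	0x80
#define DISCENB			0x40	/* disconnect privilege */

/* Sketch only: the identify message byte sent by p_mesgout_identify. */
static uint8_t
build_identify_msg(uint8_t lun, int disconnect_ok)
{
	uint8_t msg = MSG_IDENTIFYFLAG | DISCENB | (lun & 0x3F);

	if (!disconnect_ok)
		msg &= ~DISCENB;
	return (msg);
}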
/* * Message in phase. Bytes are read using Automatic PIO mode. */ p_mesgin: /* read the 1st message byte */ mvi ACCUM call inb_first; test A,MSG_IDENTIFYFLAG jnz mesgin_identify; cmp A,MSG_DISCONNECT je mesgin_disconnect; cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; cmp ALLZEROS,A je mesgin_complete; cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; cmp A,MSG_NOOP je mesgin_done; /* * Pushed message loop to allow the kernel to * run its own message state engine. To avoid an * extra nop instruction after signaling the kernel, * we perform the phase_lock before checking to see * if we should exit the loop and skip the phase_lock * in the ITloop. Performing back to back phase_locks * shouldn't hurt, but why do it twice... */ host_message_loop: call phase_lock; /* Benign the first time through. */ SET_SEQINTCODE(HOST_MSG_LOOP) cmp RETURN_1, EXIT_MSG_LOOP je ITloop; cmp RETURN_1, CONT_MSG_LOOP_WRITE jne . + 3; mov SCSIDAT, RETURN_2; jmp host_message_loop; /* Must be CONT_MSG_LOOP_READ */ mov NONE, SCSIDAT; /* ACK Byte */ jmp host_message_loop; mesgin_ign_wide_residue: mov SAVED_MODE, MODE_PTR; SET_MODE(M_SCSI, M_SCSI) shr NEGOADDR, 4, SAVED_SCSIID; mov A, NEGCONOPTS; RESTORE_MODE(SAVED_MODE) test A, WIDEXFER jz mesgin_reject; /* Pull the residue byte */ mvi REG0 call inb_next; cmp REG0, 0x01 jne mesgin_reject; test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jnz mesgin_done; SET_SEQINTCODE(IGN_WIDE_RES) jmp mesgin_done; mesgin_proto_violation: SET_SEQINTCODE(PROTO_VIOLATION) jmp mesgin_done; mesgin_reject: mvi MSG_MESSAGE_REJECT call mk_mesg; mesgin_done: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ jmp ITloop; #define INDEX_DISC_LIST(scsiid, lun) \ and A, 0xC0, scsiid; \ or SCBPTR, A, lun; \ clr SCBPTR[1]; \ and SINDEX, 0x30, scsiid; \ shr SINDEX, 3; /* Multiply by 2 */ \ add SINDEX, (SCB_DISCONNECTED_LISTS & 0xFF); \ mvi SINDEX[1], ((SCB_DISCONNECTED_LISTS >> 8) & 0xFF) mesgin_identify: /* * Determine whether a target is using tagged or non-tagged * transactions by first looking at the transaction stored in * the per-device, disconnected array. If there is no untagged * transaction for this target, this must be a tagged transaction. */ and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); bmov DINDEX, SINDEX, 2; bmov REG0, SINDIR, 2; cmp REG0[1], SCB_LIST_NULL je snoop_tag; /* Untagged. Clear the busy table entry and setup the SCB. */ bmov DINDIR, ALLONES, 2; bmov SCBPTR, REG0, 2; jmp setup_SCB; /* * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. * If we get one, we use the tag returned to find the proper * SCB. After receiving the tag, look for the SCB at SCB locations tag and * tag + 256. */ snoop_tag: if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x80; } mov NONE, SCSIDAT; /* ACK Identify MSG */ call phase_lock; if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x1; } cmp LASTPHASE, P_MESGIN jne not_found_ITloop; if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x2; } cmp SCSIBUS, MSG_SIMPLE_Q_TAG jne not_found; get_tag: clr SCBPTR[1]; mvi SCBPTR call inb_next; /* tag value */ verify_scb: test SCB_CONTROL,DISCONNECTED jz verify_other_scb; mov A, SAVED_SCSIID; cmp SCB_SCSIID, A jne verify_other_scb; mov A, SAVED_LUN; cmp SCB_LUN, A je setup_SCB_disconnected; verify_other_scb: xor SCBPTR[1], 1; test SCBPTR[1], 0xFF jnz verify_scb; jmp not_found;
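INDEX_DISC_LIST above turns a target id and lun into a location in the per-device disconnected-transaction table, which is spread across the SCB_DISCONNECTED_LISTS region of multiple SCBs. An editorial sketch of the arithmetic in C:

#include <stdint.h>

/*
 * Sketch only: bits 5:4 of the scsiid pick one of four two-byte
 * entries within SCB_DISCONNECTED_LISTS ("shr SINDEX, 3" multiplies
 * by two), while bits 7:6 and the lun select which SCB's copy of
 * the region holds the entry.
 */
struct disc_list_index {
	uint16_t scbptr;	/* SCB holding the entry */
	uint16_t sindex;	/* offset of the entry */
};

static struct disc_list_index
index_disc_list(uint8_t scsiid, uint8_t lun, uint16_t disc_lists_base)
{
	struct disc_list_index idx;

	idx.scbptr = (uint16_t)((scsiid & 0xC0) | lun);	/* high byte clear */
	idx.sindex = disc_lists_base + ((scsiid & 0x30) >> 3);
	return (idx);
}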
/* * Ensure that the SCB the tag points to is for * an SCB transaction to the reconnecting target. */ setup_SCB: if ((ahd->flags & AHD_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x10; } test SCB_CONTROL,DISCONNECTED jz not_found; setup_SCB_disconnected: and SCB_CONTROL,~DISCONNECTED; clr SEQ_FLAGS; /* make note of IDENTIFY */ test SCB_SGPTR, SG_LIST_NULL jnz . + 3; bmov ALLOCFIFO_SCBPTR, SCBPTR, 2; call allocate_fifo; /* See if the host wants to send a message upon reconnection */ test SCB_CONTROL, MK_MESSAGE jz mesgin_done; mvi HOST_MSG call mk_mesg; jmp mesgin_done; not_found: SET_SEQINTCODE(NO_MATCH) jmp mesgin_done; not_found_ITloop: SET_SEQINTCODE(NO_MATCH) jmp ITloop; /* * We received a "command complete" message. Put the SCB on the complete * queue and trigger a completion interrupt via the idle loop. Before doing - * so, check to see if there - * is a residual or the status byte is something other than STATUS_GOOD (0). - * In either of these conditions, we upload the SCB back to the host so it can - * process this information. In the case of a non zero status byte, we - * additionally interrupt the kernel driver synchronously, allowing it to - * decide if sense should be retrieved. If the kernel driver wishes to request - * sense, it will fill the kernel SCB with a request sense command, requeue - * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting - * RETURN_1 to SEND_SENSE. + * so, check to see if there is a residual or the status byte is something + * other than STATUS_GOOD (0). In either of these conditions, we upload the + * SCB back to the host so it can process this information. */ mesgin_complete: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte. * Either way, the target should take us to message out phase * and then attempt to complete the command again. We should use a * critical section here to guard against a timeout triggering * for this command and setting ATN while we are still processing * the completion. test SCSISIGI, ATNI jnz mesgin_done; */ /* * If we are identified and have successfully sent the CDB, * any status will do. Optimize this fast path. */ test SCB_CONTROL, STATUS_RCVD jz mesgin_proto_violation; test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz complete_accepted; /* * If the target never sent an identify message but instead went * to mesgin to give an invalid message, let the host abort us. */ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; /* * If we received good status but never successfully sent the * cdb, abort the command. */ test SCB_SCSI_STATUS,0xff jnz complete_accepted; test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; complete_accepted: /* * See if we attempted to deliver a message but the target ignored us. */ test SCB_CONTROL, MK_MESSAGE jz complete_nomsg; SET_SEQINTCODE(MKMSG_FAILED) complete_nomsg: call queue_scb_completion; jmp await_busfree; +BEGIN_CRITICAL; freeze_queue: /* Cancel any pending select-out. */ test SSTAT0, SELDO|SELINGO jnz . + 2; and SCSISEQ0, ~ENSELO; mov ACCUM_SAVE, A; clr A; add QFREEZE_COUNT, 1; adc QFREEZE_COUNT[1], A; or SEQ_FLAGS2, SELECTOUT_QFROZEN; mov A, ACCUM_SAVE ret; +END_CRITICAL; /* * Complete the current FIFO's SCB if data for this same * SCB is not transferring in the other FIFO.
*/ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; pkt_complete_scb_if_fifos_idle: bmov ARG_1, SCBPTR, 2; mvi DFFSXFRCTL, CLRCHN; SET_MODE(M_SCSI, M_SCSI) bmov SCBPTR, ARG_1, 2; test SCB_FIFO_USE_COUNT, 0xFF jnz return; queue_scb_completion: test SCB_SCSI_STATUS,0xff jnz bad_status; /* * Check for residuals */ test SCB_SGPTR, SG_LIST_NULL jnz complete; /* No xfer */ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; complete: +BEGIN_CRITICAL; bmov SCB_NEXT_COMPLETE, COMPLETE_SCB_HEAD, 2; bmov COMPLETE_SCB_HEAD, SCBPTR, 2 ret; +END_CRITICAL; bad_status: cmp SCB_SCSI_STATUS, STATUS_PKT_SENSE je upload_scb; call freeze_queue; upload_scb: /* * Restore SCB TAG since we reuse this field * in the sequencer. We don't want to corrupt * it on the host. */ bmov SCB_TAG, SCBPTR, 2; - bmov SCB_NEXT_COMPLETE, COMPLETE_DMA_SCB_HEAD, 2; +BEGIN_CRITICAL; + or SCB_SGPTR, SG_STATUS_VALID; + mvi SCB_NEXT_COMPLETE[1], SCB_LIST_NULL; + cmp COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne add_dma_scb_tail; bmov COMPLETE_DMA_SCB_HEAD, SCBPTR, 2; - or SCB_SGPTR, SG_STATUS_VALID ret; + bmov COMPLETE_DMA_SCB_TAIL, SCBPTR, 2 ret; +add_dma_scb_tail: + bmov REG0, SCBPTR, 2; + bmov SCBPTR, COMPLETE_DMA_SCB_TAIL, 2; + bmov SCB_NEXT_COMPLETE, REG0, 2; + bmov COMPLETE_DMA_SCB_TAIL, REG0, 2 ret; +END_CRITICAL; /* * Is it a disconnect message? Set a flag in the SCB to remind us * and await the bus going free. If this is an untagged transaction * store the SCB id for it in our untagged target table for lookup on * a reselection. */ mesgin_disconnect: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte * or we want to abort this command. Either way, the target * should take us to message out phase and then attempt to * disconnect again. * XXX - Wait for more testing. test SCSISIGI, ATNI jnz mesgin_done; */ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jnz mesgin_proto_violation; or SCB_CONTROL,DISCONNECTED; test SCB_CONTROL, TAG_ENB jnz await_busfree; queue_disc_scb: bmov REG0, SCBPTR, 2; INDEX_DISC_LIST(SAVED_SCSIID, SAVED_LUN); bmov DINDEX, SINDEX, 2; bmov DINDIR, REG0, 2; bmov SCBPTR, REG0, 2; /* FALLTHROUGH */ await_busfree: and SIMODE1, ~ENBUSFREE; if ((ahd->bugs & AHD_BUSFREEREV_BUG) == 0) { /* * In the BUSFREEREV_BUG case, the * busfree status was cleared at the * beginning of the connection. */ mvi CLRSINT1,CLRBUSFREE; } mov NONE, SCSIDAT; /* Ack the last byte */ test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz await_busfree_not_m_dff; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; await_busfree_clrchn: mvi DFFSXFRCTL, CLRCHN; await_busfree_not_m_dff: - call clear_target_state; + /* clear target specific flags */ + mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; test SSTAT1,REQINIT|BUSFREE jz .; + /* + * We only set BUSFREE status once either a new + * phase has been detected or we are really + * BUSFREE. This allows the driver to know + * that we are active on the bus even though + * no identified transaction exists should a + * timeout occur while awaiting busfree. + */ + mvi LASTPHASE, P_BUSFREE; test SSTAT1, BUSFREE jnz idle_loop; SET_SEQINTCODE(MISSED_BUSFREE)
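upload_scb above now maintains COMPLETE_DMA_SCB_TAIL alongside the head pointer, turning the old head insertion into an O(1) tail insertion so SCBs are uploaded to the host in completion order. A host-side model of the list discipline, as an editorial sketch:

#include <stdint.h>

#define SCB_NULL	0xFFFFu	/* the sequencer tests only the high byte */

struct scb_model {
	uint16_t next_complete;	/* SCB_NEXT_COMPLETE */
};

/* Sketch only: append to the complete-DMA list as upload_scb does. */
static void
complete_dma_append(struct scb_model *scbs, uint16_t *head, uint16_t *tail,
    uint16_t new_scb)
{
	scbs[new_scb].next_complete = SCB_NULL;
	if (*head == SCB_NULL)
		*head = new_scb;	/* list was empty */
	else
		scbs[*tail].next_complete = new_scb;
	*tail = new_scb;
}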
/* * Save data pointers message: * Copying RAM values back to SCB, for Save Data Pointers message, but * only if we've actually been into a data phase to change them. This * protects against bogus data in scratch ram and the residual counts * since they are only initialized when we go into data_in or data_out. * Ack the message as soon as possible. */ SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; mesgin_sdptrs: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ test SEQ_FLAGS, DPHASE jz ITloop; call save_pointers; jmp ITloop; save_pointers: /* * If we are asked to save our position at the end of the * transfer, just mark us at the end rather than perform a * full save. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz save_pointers_full; or SCB_SGPTR, SG_LIST_NULL ret; save_pointers_full: /* * The SCB_DATAPTR becomes the current SHADDR. * All other information comes directly from our residual * state. */ bmov SCB_DATAPTR, SHADDR, 8; bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8 ret; /* * Restore pointers message? Data pointers are recopied from the * SCB anytime we enter a data phase for the first time, so all * we need to do is clear the DPHASE flag and let the data phase * code do the rest. We also reset/reallocate the FIFO to make * sure we have a clean start for the next data or command phase. */ mesgin_rdptrs: and SEQ_FLAGS, ~DPHASE; test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz msgin_rdptrs_get_fifo; mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) msgin_rdptrs_get_fifo: call allocate_fifo; jmp mesgin_done; -clear_target_state: - mvi LASTPHASE, P_BUSFREE; - /* clear target specific flags */ - mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret; - phase_lock: if ((ahd->bugs & AHD_EARLY_REQ_BUG) != 0) { /* * Don't ignore persistent REQ assertions just because * they were asserted within the bus settle delay window. * This allows us to tolerate devices like the GEM318 * that violate the SCSI spec. We are careful not to * count REQ while we are waiting for it to fall during * an async phase due to our asserted ACK. Each * sequencer instruction takes ~25ns, so the REQ must * last at least 100ns in order to be counted as a true * REQ. */ test SCSIPHASE, 0xFF jnz phase_locked; test SCSISIGI, ACKI jnz phase_lock; test SCSISIGI, REQI jz phase_lock; test SCSIPHASE, 0xFF jnz phase_locked; test SCSISIGI, ACKI jnz phase_lock; test SCSISIGI, REQI jz phase_lock; phase_locked: } else { test SCSIPHASE, 0xFF jz .; } test SSTAT1, SCSIPERR jnz phase_lock; phase_lock_latch_phase: and LASTPHASE, PHASE_MASK, SCSISIGI ret; /* * Functions to read data in Automatic PIO mode. * * An ACK is not sent on input from the target until SCSIDATL is read from. * So we wait until SCSIDATL is latched (the usual way), then read the data * byte directly off the bus using SCSIBUSL. When we have pulled the ATN * line, or we just want to acknowledge the byte, then we do a dummy read * from SCSIDATL. The SCSI spec guarantees that the target will hold the * data byte on the bus until we send our ACK. * * The assumption here is that these are called in a particular sequence, * and that REQ is already set when inb_first is called. inb_{first,next} * use the same calling convention as inb. */
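The Automatic PIO convention described above can be condensed into a small model: grab the byte from SCSIBUSL while the target holds it, then perform a dummy read of SCSIDATL, which is what actually drives our ACK. An editorial sketch, not driver code:

#include <stdint.h>

/* Sketch only: one byte of an Automatic PIO message-in transfer. */
static uint8_t
apio_read_then_ack(volatile uint8_t *scsibusl, volatile uint8_t *scsidatl)
{
	uint8_t byte;

	byte = *scsibusl;	/* target holds the byte until we ACK */
	(void)*scsidatl;	/* dummy read releases the ACK */
	return (byte);
}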
inb_next: mov NONE,SCSIDAT; /*dummy read from latch to ACK*/ inb_next_wait: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. */ test SCSIPHASE, 0xFF jz .; test SSTAT1, SCSIPERR jnz inb_next_wait; inb_next_check_phase: and LASTPHASE, PHASE_MASK, SCSISIGI; cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; inb_first: clr DINDEX[1]; mov DINDEX,SINDEX; mov DINDIR,SCSIBUS ret; /*read byte directly from bus*/ inb_last: mov NONE,SCSIDAT ret; /*dummy read from latch to ACK*/ mk_mesg: mvi SCSISIGO, ATNO; mov MSG_OUT,SINDEX ret; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; disable_ccsgen: test SG_STATE, FETCH_INPROG jz disable_ccsgen_fetch_done; clr CCSGCTL; disable_ccsgen_fetch_done: clr SG_STATE ret; service_fifo: /* * Do we have any prefetch left??? */ test SG_STATE, SEGS_AVAIL jnz idle_sg_avail; /* * Can this FIFO have access to the S/G cache yet? */ test CCSGCTL, SG_CACHE_AVAIL jz return; /* Did we just finish fetching segs? */ test CCSGCTL, CCSGDONE jnz idle_sgfetch_complete; /* Are we actively fetching segments? */ test CCSGCTL, CCSGENACK jnz return; /* + * Should the other FIFO get the S/G cache first? If + * both FIFOs have been allocated since we last checked + * any FIFO, it is important that we service a FIFO + * that is not actively on the bus first. This guarantees + * that a FIFO will be freed to handle snapshot requests for + * any FIFO that is still on the bus. Chips with RTI do not + * perform snapshots, so don't bother with this test there. + */ + if ((ahd->features & AHD_RTI) == 0) { + /* + * If we're not still receiving SCSI data, + * it is safe to allocate the S/G cache to + * this FIFO. + */ + test DFCNTRL, SCSIEN jz idle_sgfetch_start; + + /* + * Switch to the other FIFO. Non-RTI chips + * also have the "set mode" bug, so we must + * disable interrupts during the switch. + */ + mvi SEQINTCTL, INTVEC1DSL; + xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); + + /* + * If the other FIFO needs loading, then it + * must not have claimed the S/G cache yet + * (SG_CACHE_AVAIL would have been cleared in + * the original FIFO mode and we test this above). + * Return to the idle loop so we can process the + * FIFO not currently on the bus first. + */ + test SG_STATE, LOADING_NEEDED jz idle_sgfetch_okay; + clr SEQINTCTL ret; +idle_sgfetch_okay: + xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); + clr SEQINTCTL; + } + +idle_sgfetch_start: + /* * We fetch a "cacheline aligned" and sized amount of data * so we don't end up referencing a non-existent page. * Cacheline aligned is in quotes because the kernel will * set the prefetch amount to a reasonable level if the * cacheline size is unknown. */ bmov SGHADDR, SCB_RESIDUAL_SGPTR, 4; mvi SGHCNT, SG_PREFETCH_CNT; if ((ahd->bugs & AHD_REG_SLOW_SETTLE_BUG) != 0) { /* - * Need two instruction between "touches" of SGHADDR. + * Need two instructions between "touches" of SGHADDR. */ nop; } and SGHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; mvi CCSGCTL, CCSGEN|CCSGRESET; or SG_STATE, FETCH_INPROG ret; idle_sgfetch_complete: /* * Guard against SG_CACHE_AVAIL activating during sg fetch * request in the other FIFO. */ test SG_STATE, FETCH_INPROG jz return; clr CCSGCTL; and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; mvi SG_STATE, SEGS_AVAIL|LOADING_NEEDED;
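The S/G prefetch set up above always starts at the residual S/G pointer rounded down with the downloaded SG_PREFETCH_ALIGN_MASK and runs for SG_PREFETCH_CNT bytes, so the burst stays within one aligned block and cannot cross into a non-existent page; CCSGADDR is then pointed at the current element's offset inside the fetched block. The address computation, as an editorial sketch:

#include <stdint.h>

/*
 * Sketch only: mirror of "and SGHADDR[0], SG_PREFETCH_ALIGN_MASK,
 * SCB_RESIDUAL_SGPTR", which only rewrites the low address byte.
 */
static uint64_t
sg_prefetch_base(uint64_t resid_sgptr, uint8_t align_mask)
{
	uint64_t base = resid_sgptr & ~(uint64_t)0xFF;

	return (base | (resid_sgptr & align_mask));
}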
idle_sg_avail: /* Does the hardware have space for another SG entry? */ test DFSTATUS, PRELOAD_AVAIL jz return; /* * On the A, preloading a segment before HDMAENACK * comes true can clobber the shadow address of the * first segment in the S/G FIFO. Wait until it is * safe to proceed. */ if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) == 0) { test DFCNTRL, HDMAENACK jz return; } if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { bmov HADDR, CCSGRAM, 8; } else { bmov HADDR, CCSGRAM, 4; } bmov HCNT, CCSGRAM, 3; bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { and HADDR[4], SG_HIGH_ADDR_BITS, SCB_RESIDUAL_DATACNT[3]; } if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { /* Skip 4 bytes of pad. */ add CCSGADDR, 4; } sg_advance: clr A; /* add sizeof(struct scatter) */ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; adc SCB_RESIDUAL_SGPTR[1],A; adc SCB_RESIDUAL_SGPTR[2],A; adc SCB_RESIDUAL_SGPTR[3],A; mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 3; or SINDEX, LAST_SEG; clr SG_STATE; mov SG_CACHE_PRE, SINDEX; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN is never * modified by this operation. */ or DFCNTRL, PRELOADEN|HDMAEN|SCSIENWRDIS; } else { or DFCNTRL, PRELOADEN|HDMAEN; } /* * Do we have another segment in the cache? */ add NONE, SG_PREFETCH_CNT_LIMIT, CCSGADDR; jnc return; and SG_STATE, ~SEGS_AVAIL ret; /* * Initialize the DMA address and counter from the SCB. */ load_first_seg: bmov HADDR, SCB_DATAPTR, 11; and REG_ISR, ~SG_FULL_RESID, SCB_SGPTR[0]; test SCB_DATACNT[3], SG_LAST_SEG jz . + 2; or REG_ISR, LAST_SEG; mov SG_CACHE_PRE, REG_ISR; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); /* * Since we are entering a data phase, we will * rely on the SCB_RESID* fields. Initialize the * residual and clear the full residual flag. */ and SCB_SGPTR[0], ~SG_FULL_RESID; bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; /* If we need more S/G elements, tell the idle loop */ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz . + 2; mvi SG_STATE, LOADING_NEEDED ret; clr SG_STATE ret; p_data_handle_xfer: call setjmp; test SG_STATE, LOADING_NEEDED jnz service_fifo; p_data_clear_handler: or LONGJMP_ADDR[1], INVALID_ADDR ret; p_data: test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; SET_SEQINTCODE(PROTO_VIOLATION) p_data_allowed: test SEQ_FLAGS, DPHASE jz data_phase_initialize; /* * If we re-enter the data phase after going through another * phase, our transfer location has almost certainly been * corrupted by the intervening, non-data, transfers. Ask * the host driver to fix us up based on the transfer residual * unless we already know that we should be bitbucketing. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; SET_SEQINTCODE(PDATA_REINIT) jmp data_phase_inbounds; p_data_bitbucket: /* * Turn on `Bit Bucket' mode, wait until the target takes * us to another phase, and then notify the host. */ mov SAVED_MODE, MODE_PTR; test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz bitbucket_not_m_dff; /* * Ensure that any FIFO contents are cleared out and the * FIFO freed prior to starting the BITBUCKET. BITBUCKET * doesn't discard data already in the FIFO. */ mvi DFFSXFRCTL, RSTCHN|CLRSHCNT; SET_MODE(M_SCSI, M_SCSI) bitbucket_not_m_dff: or SXFRCTL1,BITBUCKET; /* Wait for non-data phase. */ test SCSIPHASE, ~DATA_PHASE_MASK jz .; and SXFRCTL1, ~BITBUCKET; RESTORE_MODE(SAVED_MODE) SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; SET_SEQINTCODE(DATA_OVERRUN) jmp ITloop; data_phase_initialize: test SCB_SGPTR[0], SG_LIST_NULL jnz p_data_bitbucket; call load_first_seg;
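load_first_seg above seeds the residual fields for a fresh data phase; the 5-byte bmov works because SCB_SGPTR sits directly after SCB_DATACNT in the SCB, so one copy initializes the residual flags byte and the residual S/G pointer together. A host-side model, as an editorial sketch under that layout assumption:

#include <stdint.h>
#include <string.h>

#define SG_FULL_RESID	0x02	/* in SCB_SGPTR[0] */
#define SG_LAST_SEG	0x80	/* in SCB_DATACNT[3] */

struct scb_resid_model {
	uint8_t datacnt[4];		/* SCB_DATACNT */
	uint8_t sgptr[4];		/* SCB_SGPTR, contiguous with it */
	uint8_t resid_datacnt[4];	/* SCB_RESIDUAL_DATACNT */
	uint8_t resid_sgptr[4];		/* SCB_RESIDUAL_SGPTR */
};

/* Sketch only: returns nonzero if more S/G elements must be loaded. */
static int
init_data_phase_residual(struct scb_resid_model *scb)
{
	scb->sgptr[0] &= ~SG_FULL_RESID;	/* transfer now underway */
	scb->resid_datacnt[3] = scb->datacnt[3];
	memcpy(scb->resid_sgptr, scb->sgptr, 4);
	return ((scb->resid_datacnt[3] & SG_LAST_SEG) == 0);
}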
*/ or SEQ_FLAGS, DPHASE; mov SAVED_MODE, MODE_PTR; test SG_STATE, LOADING_NEEDED jz data_group_dma_loop; call p_data_handle_xfer; data_group_dma_loop: /* * The transfer is complete if either the last segment * completes or the target changes phase. Both conditions * will clear SCSIEN. */ call idle_loop_service_fifos; call idle_loop_cchan; call idle_loop_gsfifo; RESTORE_MODE(SAVED_MODE) test DFCNTRL, SCSIEN jnz data_group_dma_loop; data_group_dmafinish: /* * The transfer has terminated either due to a phase * change, and/or the completion of the last segment. * We have two goals here. Do as much other work * as possible while the data fifo drains on a read * and respond as quickly as possible to the standard * messages (save data pointers/disconnect and command * complete) that usually follow a data phase. */ call calc_residual; /* * Go ahead and shut down the DMA engine now. */ test DFCNTRL, DIRECTION jnz data_phase_finish; data_group_fifoflush: if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } /* * We have enabled the auto-ack feature. This means * that the controller may have already transferred * some overrun bytes into the data FIFO and acked them * on the bus. The only way to detect this situation is * to wait for LAST_SEG_DONE to come true on a completed * transfer and then test to see if the data FIFO is * non-empty. We know there is more data yet to transfer * if SG_LIST_NULL is not yet set, thus there cannot be * an overrun. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_finish; test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; test DFSTATUS, FIFOEMP jnz data_phase_finish; /* Overrun */ jmp p_data; data_phase_finish: /* * If the target has left us in data phase, loop through * the dma code again. We will only loop if there is a * data overrun. */ if ((ahd->flags & AHD_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_done; } if ((ahd->flags & AHD_INITIATORROLE) != 0) { test SSTAT1, REQINIT jz .; test SCSIPHASE, DATA_PHASE_MASK jnz p_data; } data_phase_done: /* Kill off any pending prefetch */ call disable_ccsgen; or LONGJMP_ADDR[1], INVALID_ADDR; if ((ahd->flags & AHD_TARGETROLE) != 0) { test SEQ_FLAGS, DPHASE_PENDING jz ITloop; /* and SEQ_FLAGS, ~DPHASE_PENDING; * For data-in phases, wait for any pending acks from the * initiator before changing phase. We only need to * send Ignore Wide Residue messages for data-in phases. test DFCNTRL, DIRECTION jz target_ITloop; test SSTAT1, REQINIT jnz .; test SCB_TASK_ATTRIBUTE, SCB_XFERLEN_ODD jz target_ITloop; SET_MODE(M_SCSI, M_SCSI) test NEGCONOPTS, WIDEXFER jz target_ITloop; */ /* * Issue an Ignore Wide Residue Message. mvi P_MESGIN|BSYO call change_phase; mvi MSG_IGN_WIDE_RESIDUE call target_outb; mvi 1 call target_outb; jmp target_ITloop; */ } else { jmp ITloop; } /* * We assume that, even though data may still be * transferring to the host, that the SCSI side of * the DMA engine is now in a static state. This * allows us to update our notion of where we are * in this transfer. * * If, by chance, we stopped before being able * to fetch additional segments for this transfer, * yet the last S/G was completely exhausted, * call our idle loop until it is able to load * another segment. This will allow us to immediately * pickup on the next segment on the next data phase. * * If we happened to stop on the last segment, then * our residual information is still correct from * the idle loop and there is no need to perform * any fixups. 
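 * As a C sketch of the fixup case handled below (register names
 * again treated as plain variables):
 *
 *	if ((sg_cache_shadow & 0x80) && !(residual_sgptr & 0x80))
 *		residual_sgptr -= 0x100;	borrow into the upper bytes
 *	residual_sgptr = (residual_sgptr & ~0xFF)
 *		       | (sg_cache_shadow & SG_ADDR_MASK);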
*/ residual_before_last_seg: test MDFFSTAT, SHVALID jnz sgptr_fixup; /* * Can never happen from an interrupt as the packetized * hardware will only interrupt us once SHVALID or * LAST_SEG_DONE. */ call idle_loop_service_fifos; RESTORE_MODE(SAVED_MODE) /* FALLTHROUGH */ calc_residual: test SG_CACHE_SHADOW, LAST_SEG jz residual_before_last_seg; /* Record if we've consumed all S/G entries */ test MDFFSTAT, SHVALID jz . + 2; bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL ret; sgptr_fixup: /* * Fixup the residual next S/G pointer. The S/G preload * feature of the chip allows us to load two elements * in addition to the currently active element. We * store the bottom byte of the next S/G pointer in * the SG_CACHE_PTR register so we can restore the * correct value when the DMA completes. If the next * sg ptr value has advanced to the point where higher * bytes in the address have been affected, fix them * too. */ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; add SCB_RESIDUAL_SGPTR[1], -1; adc SCB_RESIDUAL_SGPTR[2], -1; adc SCB_RESIDUAL_SGPTR[3], -1; sgptr_fixup_done: and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; clr SCB_RESIDUAL_DATACNT[3]; /* We are not the last seg */ bmov SCB_RESIDUAL_DATACNT, SHCNT, 3 ret; export timer_isr: call issue_cmdcmplt; mvi CLRSEQINTSTAT, CLRSEQ_SWTMRTO; if ((ahd->bugs & AHD_SET_MODE_BUG) != 0) { /* * In H2A4, the mode pointer is not saved * for intvec2, but is restored on iret. * This can lead to the restoration of a * bogus mode ptr. Manually clear the * intmask bits and do a normal return * to compensate. */ and SEQINTCTL, ~(INTMASK2|INTMASK1) ret; } else { or SEQINTCTL, IRET ret; } export seq_isr: if ((ahd->features & AHD_RTI) == 0) { /* * On RevA Silicon, if the target returns us to data-out * after we have already trained for data-out, it is * possible for us to transition the free running clock to * data-valid before the required 100ns P1 setup time (8 P1 * assertions in fast-160 mode). This will only happen if * this L-Q is a continuation of a data transfer for which * we have already prefetched data into our FIFO (LQ/Data * followed by LQ/Data for the same write transaction). * This can cause some target implementations to miss the * first few data transfers on the bus. We detect this * situation by noticing that this is the first data transfer * after an LQ (LQIWORKONLQ true), that the data transfer is * a continuation of a transfer already setup in our FIFO * (SAVEPTRS interrupt), and that the transaction is a write * (DIRECTION set in DFCNTRL). The delay is performed by * disabling SCSIEN until we see the first REQ from the * target. * * First instruction in an ISR cannot be a branch on * Rev A. Snapshot LQISTAT2 so the status is not missed * and defer the test by one instruction. */ mov REG_ISR, LQISTAT2; test REG_ISR, LQIWORKONLQ jz main_isr; test SEQINTSRC, SAVEPTRS jz main_isr; test LONGJMP_ADDR[1], INVALID_ADDR jz saveptr_active_fifo; /* * Switch to the active FIFO after clearing the snapshot * savepointer in the current FIFO. We do this so that * a pending CTXTDONE or SAVEPTR is visible in the active * FIFO. This status is the only way we can detect if we - * have lost the race (e.g. host paused us) and our attepts + * have lost the race (e.g. host paused us) and our attempts * to disable the channel occurred after all REQs were * already seen and acked (REQINIT never comes true).
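 * Roughly, in C, the SCSIEN dance below amounts to:
 *
 *	dfcntrl &= ~SCSIEN;
 *	for (;;) {
 *		if (seqintsrc & (CTXTDONE|SAVEPTRS))
 *			break;			lost the race; just iret
 *		if (sstat1 & REQINIT) {
 *			dfcntrl |= SCSIEN;	won; re-enable the xfer
 *			break;
 *		}
 *	}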
*/ mvi DFFSXFRCTL, CLRCHN; xor MODE_PTR, MK_MODE(M_DFF1, M_DFF1); test DFCNTRL, DIRECTION jz interrupt_return; and DFCNTRL, ~SCSIEN; snapshot_wait_data_valid: - test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz snapshot_data_valid; + test SEQINTSRC, (CTXTDONE|SAVEPTRS) jnz interrupt_return; test SSTAT1, REQINIT jz snapshot_wait_data_valid; snapshot_data_valid: or DFCNTRL, SCSIEN; or SEQINTCTL, IRET ret; snapshot_saveptr: mvi DFFSXFRCTL, CLRCHN; or SEQINTCTL, IRET ret; main_isr: } test SEQINTSRC, CFG4DATA jnz cfg4data_intr; test SEQINTSRC, CFG4ISTAT jnz cfg4istat_intr; test SEQINTSRC, SAVEPTRS jnz saveptr_intr; test SEQINTSRC, CFG4ICMD jnz cfg4icmd_intr; SET_SEQINTCODE(INVALID_SEQINT) /* * There are two types of save pointers interrupts: * The first is a snapshot save pointers where the current FIFO is not * active and contains a snapshot of the current pointer information. * This happens between packets in a stream for a single L_Q. Since we * are not performing a pointer save, we can safely clear the channel * so it can be used for other transactions. On RTI capable controllers, * where snapshots can, and are, disabled, the code to handle this type * of snapshot is not active. * * The second case is a save pointers on an active FIFO which occurs * if the target changes to a new L_Q or busfrees/QASes and the transfer * has a residual. This should occur coincident with a ctxtdone. We * disable the interrupt and allow our active routine to handle the * save. */ saveptr_intr: if ((ahd->features & AHD_RTI) == 0) { test LONGJMP_ADDR[1], INVALID_ADDR jnz snapshot_saveptr; } saveptr_active_fifo: and SEQIMODE, ~ENSAVEPTRS; or SEQINTCTL, IRET ret; cfg4data_intr: test SCB_SGPTR[0], SG_LIST_NULL jnz pkt_handle_overrun_inc_use_count; call load_first_seg; call pkt_handle_xfer; inc SCB_FIFO_USE_COUNT; interrupt_return: or SEQINTCTL, IRET ret; cfg4istat_intr: call freeze_queue; add NONE, -13, SCB_CDB_LEN; jnc cfg4istat_have_sense_addr; test SCB_CDB_LEN, SCB_CDB_LEN_PTR jnz cfg4istat_have_sense_addr; /* * Host sets up address/count and enables transfer. */ SET_SEQINTCODE(CFG4ISTAT_INTR) jmp cfg4istat_setup_handler; cfg4istat_have_sense_addr: bmov HADDR, SCB_SENSE_BUSADDR, 4; mvi HCNT[1], (AHD_SENSE_BUFSIZE >> 8); mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, PRELOADEN|SCSIEN|HDMAEN; cfg4istat_setup_handler: /* * Status pkt is transferring to host. * Wait in idle loop for transfer to complete. * If a command completed before an attempted * task management function completed, notify the host. */ test SCB_TASK_MANAGEMENT, 0xFF jz cfg4istat_no_taskmgmt_func; SET_SEQINTCODE(TASKMGMT_CMD_CMPLT_OKAY) cfg4istat_no_taskmgmt_func: call pkt_handle_status; or SEQINTCTL, IRET ret; cfg4icmd_intr: /* * In the case of DMAing a CDB from the host, the normal * CDB buffer is formatted with an 8 byte address followed * by a 1 byte count. */ bmov HADDR[0], SCB_HOST_CDB_PTR, 9; mvi SG_CACHE_PRE, LAST_SEG; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN); call pkt_handle_cdb; or SEQINTCTL, IRET ret; /* * See if the target has gone on in this context creating an * overrun condition. For the write case, the hardware cannot * ack bytes until data are provided. So, if the target begins * another packet without changing contexts, implying we are * not sitting on a packet boundary, we are in an overrun * situation. For the read case, the hardware will continue to * ack bytes into the FIFO, and may even ack the last overrun packet * into the FIFO. If the FIFO should become non-empty, we are in * a read overrun case.
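 * In C terms, the check_overrun test defined below is roughly:
 *
 *	if ((mdffstat & DLZERO) == 0 || (dfstatus & FIFOEMP) == 0)
 *		goto pkt_handle_overrun;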
*/ #define check_overrun \ /* Not on a packet boundary. */ \ test MDFFSTAT, DLZERO jz pkt_handle_overrun; \ test DFSTATUS, FIFOEMP jz pkt_handle_overrun pkt_handle_xfer: test SG_STATE, LOADING_NEEDED jz pkt_last_seg; call setjmp; test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz pkt_service_fifo; /* * Defer handling of this NONPACKREQ until we * can be sure it pertains to this FIFO. SAVEPTRS * will not be asserted if the NONPACKREQ is for us, * so we must simulate it if shadow is valid. If * shadow is not valid, keep running this FIFO until we * have satisfied the transfer by loading segments and * waiting for either shadow valid or last_seg_done. */ test MDFFSTAT, SHVALID jnz pkt_saveptrs; pkt_service_fifo: test SG_STATE, LOADING_NEEDED jnz service_fifo; pkt_last_seg: call setjmp; test SEQINTSRC, SAVEPTRS jnz pkt_saveptrs; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_last_seg_done; test SCSIPHASE, ~DATA_PHASE_MASK jz . + 2; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz return; test MDFFSTAT, SHVALID jz return; /* FALLTHROUGH */ /* * Either a SAVEPTRS interrupt condition is pending for this FIFO * or we have a pending NONPACKREQ for this FIFO. We differentiate * between the two by capturing the state of the SAVEPTRS interrupt * prior to clearing this status and executing the common code for * these two cases. */ pkt_saveptrs: BEGIN_CRITICAL; if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } mov REG0, SEQINTSRC; call calc_residual; call save_pointers; mvi CLRSEQINTSRC, CLRSAVEPTRS; call disable_ccsgen; or SEQIMODE, ENSAVEPTRS; test DFCNTRL, DIRECTION jnz pkt_saveptrs_check_status; test DFSTATUS, FIFOEMP jnz pkt_saveptrs_check_status; /* * Keep a handler around for this FIFO until it drains * to the host to guarantee that we don't complete the * command to the host before the data arrives. */ pkt_saveptrs_wait_fifoemp: call setjmp; test DFSTATUS, FIFOEMP jz return; pkt_saveptrs_check_status: or LONGJMP_ADDR[1], INVALID_ADDR; test REG0, SAVEPTRS jz unexpected_nonpkt_phase; dec SCB_FIFO_USE_COUNT; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; -END_CRITICAL; /* * LAST_SEG_DONE status has been seen in the current FIFO. * This indicates that all of the allowed data for this * command has transferred across the SCSI and host buses. * Check for overrun and see if we can complete this command. */ pkt_last_seg_done: -BEGIN_CRITICAL; /* * Mark transfer as completed. */ or SCB_SGPTR, SG_LIST_NULL; /* * Wait for the current context to finish to verify that * no overrun condition has occurred. */ test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; call setjmp; pkt_wait_ctxt_done_loop: test SEQINTSRC, CTXTDONE jnz pkt_ctxt_done; /* * A sufficiently large overrun or a NONPACKREQ may * prevent CTXTDONE from ever asserting, so we must * poll for these statuses too. */ check_overrun; test SSTAT2, NONPACKREQ jz return; test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; /* FALLTHROUGH */ pkt_ctxt_done: check_overrun; or LONGJMP_ADDR[1], INVALID_ADDR; /* * If status has been received, it is safe to skip * the check to see if another FIFO is active because * LAST_SEG_DONE has been observed. However, we check * the FIFO anyway since it costs us only one extra * instruction to leverage common code to perform the * SCB completion.
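 * Conceptually, the gate below is (both_fifos_idle standing in
 * for the test performed by pkt_complete_scb_if_fifos_idle):
 *
 *	scb->fifo_use_count--;
 *	if ((scb->control & STATUS_RCVD) != 0 && both_fifos_idle())
 *		complete_scb(scb);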
*/ dec SCB_FIFO_USE_COUNT; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; END_CRITICAL; /* * Must wait until CDB xfer is over before issuing the * clear channel. */ pkt_handle_cdb: call setjmp; test SG_CACHE_SHADOW, LAST_SEG_DONE jz return; or LONGJMP_ADDR[1], INVALID_ADDR; mvi DFFSXFRCTL, CLRCHN ret; /* * Watch over the status transfer. Our host sense buffer is * large enough to take the maximum allowed status packet. * Nonetheless, we must still catch and report overruns to * the host. Additionally, properly catch unexpected non-packet * phases that are typically caused by CRC errors in status packet * transmission. */ pkt_handle_status: call setjmp; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; test SEQINTSRC, CTXTDONE jz pkt_status_check_nonpackreq; test SG_CACHE_SHADOW, LAST_SEG_DONE jnz pkt_status_check_overrun; pkt_status_IU_done: if ((ahd->bugs & AHD_AUTOFLUSH_BUG) != 0) { or DFCNTRL, FIFOFLUSH; } test DFSTATUS, FIFOEMP jz return; BEGIN_CRITICAL; or LONGJMP_ADDR[1], INVALID_ADDR; mvi SCB_SCSI_STATUS, STATUS_PKT_SENSE; or SCB_CONTROL, STATUS_RCVD; jmp pkt_complete_scb_if_fifos_idle; END_CRITICAL; pkt_status_check_overrun: /* * Status PKT overruns are unceremoniously recovered with a * bus reset. If we've overrun, let the host know so that * recovery can be performed. * * LAST_SEG_DONE has been observed. If either CTXTDONE or * a NONPACKREQ phase change has occurred and the FIFO is * empty, there is no overrun. */ test DFSTATUS, FIFOEMP jz pkt_status_report_overrun; test SEQINTSRC, CTXTDONE jz . + 2; test DFSTATUS, FIFOEMP jnz pkt_status_IU_done; test SCSIPHASE, ~DATA_PHASE_MASK jz return; test DFSTATUS, FIFOEMP jnz pkt_status_check_nonpackreq; pkt_status_report_overrun: SET_SEQINTCODE(STATUS_OVERRUN) /* SEQUENCER RESTARTED */ pkt_status_check_nonpackreq: /* * CTXTDONE may be held off if a NONPACKREQ is associated with * the current context. If a NONPACKREQ is observed, decide * if it is for the current context. If it is for the current * context, we must defer NONPACKREQ processing until all data * has transferred to the host. */ test SCSIPHASE, ~DATA_PHASE_MASK jz return; test SCSISIGO, ATNO jnz . + 2; test SSTAT2, NONPACKREQ jz return; test SEQINTSRC, CTXTDONE jnz pkt_status_IU_done; test DFSTATUS, FIFOEMP jz return; /* * The unexpected nonpkt phase handler assumes that any * data channel use will have a FIFO reference count. It * turns out that the status handler doesn't need a reference * count since the status received flag, and thus completion * processing, cannot be set until the handler is finished. * We increment the count here to make the nonpkt handler * happy. */ inc SCB_FIFO_USE_COUNT; /* FALLTHROUGH */ /* * Nonpackreq is a polled status. It can come true in three situations: * we have received an L_Q, we have sent one or more L_Qs, or there is no * L_Q context associated with this REQ (REQ occurs immediately after a * (re)selection). Routines that know the context responsible for this * nonpackreq call directly into unexpected_nonpkt_phase. In the case of the * top level idle loop, we exhaust all active contexts prior to determining that * we simply do not have the full I_T_L_Q for this phase. */ unexpected_nonpkt_phase_find_ctxt: /* * This nonpackreq is most likely associated with one of the tags * in a FIFO or an outgoing LQ. Only treat it as an I_T only * nonpackreq if we've cleared out the FIFOs and handled any * pending SELDO.
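 * The gating test that follows is, in C terms, approximately:
 *
 *	if ((dffstat & (FIFO1FREE|FIFO0FREE)) != (FIFO1FREE|FIFO0FREE)
 *	 || (sstat0 & SELDO) != 0)
 *		return;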
*/ SET_SRC_MODE M_SCSI; SET_DST_MODE M_SCSI; and A, FIFO1FREE|FIFO0FREE, DFFSTAT; cmp A, FIFO1FREE|FIFO0FREE jne return; test SSTAT0, SELDO jnz return; mvi SCBPTR[1], SCB_LIST_NULL; unexpected_nonpkt_phase: test MODE_PTR, ~(MK_MODE(M_DFF1, M_DFF1)) jnz unexpected_nonpkt_mode_cleared; SET_SRC_MODE M_DFF0; SET_DST_MODE M_DFF0; or LONGJMP_ADDR[1], INVALID_ADDR; dec SCB_FIFO_USE_COUNT; mvi DFFSXFRCTL, CLRCHN; unexpected_nonpkt_mode_cleared: mvi CLRSINT2, CLRNONPACKREQ; test SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase; SET_SEQINTCODE(ENTERING_NONPACK) jmp ITloop; illegal_phase: SET_SEQINTCODE(ILLEGAL_PHASE) jmp ITloop; /* * We have entered an overrun situation. If we have working * BITBUCKET, flip that on and let the hardware eat any overrun * data. Otherwise use an overrun buffer in the host to simulate * BITBUCKET. */ pkt_handle_overrun_inc_use_count: inc SCB_FIFO_USE_COUNT; pkt_handle_overrun: SET_SEQINTCODE(CFG4OVERRUN) call freeze_queue; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) == 0) { or DFFSXFRCTL, DFFBITBUCKET; SET_SRC_MODE M_DFF1; SET_DST_MODE M_DFF1; } else { call load_overrun_buf; mvi DFCNTRL, (HDMAEN|SCSIEN|PRELOADEN); } call setjmp; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { test DFSTATUS, PRELOAD_AVAIL jz overrun_load_done; call load_overrun_buf; or DFCNTRL, PRELOADEN; overrun_load_done: test SEQINTSRC, CTXTDONE jnz pkt_overrun_end; } else { test DFFSXFRCTL, DFFBITBUCKET jz pkt_overrun_end; } test SSTAT2, NONPACKREQ jz return; pkt_overrun_end: or SCB_RESIDUAL_SGPTR, SG_OVERRUN_RESID; test SEQINTSRC, CTXTDONE jz unexpected_nonpkt_phase; dec SCB_FIFO_USE_COUNT; or LONGJMP_ADDR[1], INVALID_ADDR; test SCB_CONTROL, STATUS_RCVD jnz pkt_complete_scb_if_fifos_idle; mvi DFFSXFRCTL, CLRCHN ret; if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { load_overrun_buf: /* * Load a dummy segment if preload space is available. */ mov HADDR[0], SHARED_DATA_ADDR; add HADDR[1], PKT_OVERRUN_BUFOFFSET, SHARED_DATA_ADDR[1]; mov ACCUM_SAVE, A; clr A; adc HADDR[2], A, SHARED_DATA_ADDR[2]; adc HADDR[3], A, SHARED_DATA_ADDR[3]; mov A, ACCUM_SAVE; bmov HADDR[4], ALLZEROS, 4; /* PKT_OVERRUN_BUFSIZE is a multiple of 256 */ clr HCNT[0]; mvi HCNT[1], ((PKT_OVERRUN_BUFSIZE >> 8) & 0xFF); clr HCNT[2] ret; } Index: stable/4/sys/dev/aic7xxx/aic79xx_inline.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx_inline.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx_inline.h (revision 125849) @@ -1,965 +1,979 @@ /* * Inline routines shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#50 $ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#56 $ * * $FreeBSD$ */ #ifndef _AIC79XX_INLINE_H_ #define _AIC79XX_INLINE_H_ /******************************** Debugging ***********************************/ static __inline char *ahd_name(struct ahd_softc *ahd); static __inline char * ahd_name(struct ahd_softc *ahd) { return (ahd->name); } /************************ Sequencer Execution Control *************************/ static __inline void ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, ahd_mode *src, ahd_mode *dst); static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst); static __inline void ahd_update_modes(struct ahd_softc *ahd); static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line); static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd); static __inline void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state); static __inline int ahd_is_paused(struct ahd_softc *ahd); static __inline void ahd_pause(struct ahd_softc *ahd); static __inline void ahd_unpause(struct ahd_softc *ahd); static __inline void ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { ahd->src_mode = src; ahd->dst_mode = dst; ahd->saved_src_mode = src; ahd->saved_dst_mode = dst; } static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { return ((src << SRC_MODE_SHIFT) | (dst << DST_MODE_SHIFT)); } static __inline void ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state, ahd_mode *src, ahd_mode *dst) { *src = (state & SRC_MODE) >> SRC_MODE_SHIFT; *dst = (state & DST_MODE) >> DST_MODE_SHIFT; } static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { if (ahd->src_mode == src && ahd->dst_mode == dst) return; #ifdef AHD_DEBUG if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) panic("Setting mode prior to saving it.\n"); if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printf("%s: Setting mode 0x%x\n", ahd_name(ahd), ahd_build_mode_state(ahd, src, dst)); #endif ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); ahd->src_mode = src; ahd->dst_mode = dst; } static __inline void ahd_update_modes(struct ahd_softc *ahd) { ahd_mode_state mode_ptr; ahd_mode src; ahd_mode dst; mode_ptr = 
ahd_inb(ahd, MODE_PTR); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printf("Reading mode 0x%x\n", mode_ptr); #endif ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); ahd_known_modes(ahd, src, dst); } static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line) { #ifdef AHD_DEBUG if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { panic("%s:%s:%d: Mode assertion failed.\n", ahd_name(ahd), file, line); } #endif } static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd) { if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) ahd_update_modes(ahd); return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); } static __inline void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) { ahd_mode src; ahd_mode dst; ahd_extract_mode_state(ahd, state, &src, &dst); ahd_set_modes(ahd, src, dst); } #define AHD_ASSERT_MODES(ahd, source, dest) \ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ static __inline int ahd_is_paused(struct ahd_softc *ahd) { return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ static __inline void ahd_pause(struct ahd_softc *ahd) { ahd_outb(ahd, HCNTRL, ahd->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahd_is_paused(ahd) == 0) ; } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ static __inline void ahd_unpause(struct ahd_softc *ahd) { /* * Automatically restore our modes to those saved * prior to the first change of the mode. 
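 * A typical caller of the save/restore helpers above therefore
 * looks like this (illustrative only):
 *
 *	ahd_mode_state saved;
 *
 *	saved = ahd_save_modes(ahd);
 *	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
 *	... touch CCHAN registers ...
 *	ahd_restore_modes(ahd, saved);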
*/ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) ahd_reset_cmds_pending(ahd); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); } if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) ahd_outb(ahd, HCNTRL, ahd->unpause); ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); } /*********************** Scatter Gather List Handling *************************/ static __inline void *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, bus_addr_t addr, bus_size_t len, int last); static __inline void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void * ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, bus_addr_t addr, bus_size_t len, int last) { scb->sg_count++; if (sizeof(bus_addr_t) > 4 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)sgptr; - sg->addr = ahd_htole64(addr); - sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); + sg->addr = aic_htole64(addr); + sg->len = aic_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } else { struct ahd_dma_seg *sg; sg = (struct ahd_dma_seg *)sgptr; - sg->addr = ahd_htole32(addr & 0xFFFFFFFF); - sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000) + sg->addr = aic_htole32(addr & 0xFFFFFFFF); + sg->len = aic_htole32(len | ((addr >> 8) & 0x7F000000) | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } } static __inline void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) { /* XXX Handle target mode SCBs. */ scb->crc_retry_count = 0; if ((scb->flags & SCB_PACKETIZED) != 0) { /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; } else { - if (ahd_get_transfer_length(scb) & 0x01) + if (aic_get_transfer_length(scb) & 0x01) scb->hscb->task_attribute = SCB_XFERLEN_ODD; else scb->hscb->task_attribute = 0; } if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = - ahd_htole32(scb->sense_busaddr); + aic_htole32(scb->sense_busaddr); } static __inline void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) { /* * Copy the first SG into the "current" data pointer area. */ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)scb->sg_list; scb->hscb->dataptr = sg->addr; scb->hscb->datacnt = sg->len; } else { struct ahd_dma_seg *sg; uint32_t *dataptr_words; sg = (struct ahd_dma_seg *)scb->sg_list; dataptr_words = (uint32_t*)&scb->hscb->dataptr; dataptr_words[0] = sg->addr; dataptr_words[1] = 0; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { uint64_t high_addr; - high_addr = ahd_le32toh(sg->len) & 0x7F000000; - scb->hscb->dataptr |= ahd_htole64(high_addr << 8); + high_addr = aic_le32toh(sg->len) & 0x7F000000; + scb->hscb->dataptr |= aic_htole64(high_addr << 8); } scb->hscb->datacnt = sg->len; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs.
*/ - scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID); + scb->hscb->sgptr = aic_htole32(scb->sg_list_busaddr|SG_FULL_RESID); } static __inline void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) { - scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); + scb->hscb->sgptr = aic_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } /************************** Memory mapping routines ***************************/ static __inline size_t ahd_sg_size(struct ahd_softc *ahd); static __inline void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr); static __inline uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg); static __inline void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op); static __inline void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op); static __inline void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op); static __inline uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index); static __inline size_t ahd_sg_size(struct ahd_softc *ahd) { if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) return (sizeof(struct ahd_dma64_seg)); return (sizeof(struct ahd_dma_seg)); } static __inline void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) { bus_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); return ((uint8_t *)scb->sg_list + sg_offset); } static __inline uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) { bus_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) - ahd_sg_size(ahd); return (scb->sg_list_busaddr + sg_offset); } static __inline void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) { - ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, + aic_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, scb->hscb_map->dmamap, /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, /*len*/sizeof(*scb->hscb), op); } static __inline void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) { if (scb->sg_count == 0) return; - ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat, + aic_dmamap_sync(ahd, ahd->scb_data.sg_dmat, scb->sg_map->dmamap, /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), /*len*/ahd_sg_size(ahd) * scb->sg_count, op); } static __inline void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) { - ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat, + aic_dmamap_sync(ahd, ahd->scb_data.sense_dmat, scb->sense_map->dmamap, /*offset*/scb->sense_busaddr, /*len*/AHD_SENSE_BUFSIZE, op); } static __inline uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) { return (((uint8_t *)&ahd->targetcmds[index]) - (uint8_t *)ahd->qoutfifo); } /*********************** Miscellaneous Support Functions ***********************/ static __inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb); static __inline struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate); static __inline uint16_t ahd_inw(struct ahd_softc *ahd, u_int port); static __inline void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value); static __inline uint32_t ahd_inl(struct ahd_softc *ahd, u_int port); static __inline void ahd_outl(struct ahd_softc *ahd, u_int port,
uint32_t value); static __inline uint64_t ahd_inq(struct ahd_softc *ahd, u_int port); static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value); static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd); static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr); static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd); static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value); static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset); static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset); static __inline uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset); static __inline uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset); static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb); static __inline uint8_t * ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb); static __inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb); static __inline void ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; - sgptr = ahd_le32toh(scb->hscb->sgptr); + sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_handle_scb_status(ahd, scb); else ahd_done(ahd, scb); } /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static __inline void ahd_update_residual(struct ahd_softc *ahd, struct scb *scb) { uint32_t sgptr; - sgptr = ahd_le32toh(scb->hscb->sgptr); + sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_STATUS_VALID) != 0) ahd_calc_residual(ahd, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ static __inline struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahd->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } #define AHD_COPY_COL_IDX(dst, src) \ do { \ dst->hscb->scsiid = src->hscb->scsiid; \ dst->hscb->lun = src->hscb->lun; \ } while (0) static __inline uint16_t ahd_inw(struct ahd_softc *ahd, u_int port) { + /* + * Read high byte first as some registers increment + * or have other side effects when the low byte is + * read. + */ return ((ahd_inb(ahd, port+1) << 8) | ahd_inb(ahd, port)); } static __inline void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) { + /* + * Write low byte first to accommodate registers + * such as PRGMCNT where the order matters.
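+ * For instance, an illustrative sequencer-address load
+ * (assuming PRGMCNT commits the new value on the
+ * high-byte write):
+ *
+ *	ahd_outw(ahd, PRGMCNT, new_pc);
+ *
+ * depends on this low-then-high ordering.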
+ */ ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); } static __inline uint32_t ahd_inl(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24)); } static __inline void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) { ahd_outb(ahd, port, (value) & 0xFF); ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); } static __inline uint64_t ahd_inq(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24) | (((uint64_t)ahd_inb(ahd, port+4)) << 32) | (((uint64_t)ahd_inb(ahd, port+5)) << 40) | (((uint64_t)ahd_inb(ahd, port+6)) << 48) | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); } static __inline void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) { ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); ahd_outb(ahd, port+2, (value >> 16) & 0xFF); ahd_outb(ahd, port+3, (value >> 24) & 0xFF); ahd_outb(ahd, port+4, (value >> 32) & 0xFF); ahd_outb(ahd, port+5, (value >> 40) & 0xFF); ahd_outb(ahd, port+6, (value >> 48) & 0xFF); ahd_outb(ahd, port+7, (value >> 56) & 0xFF); } static __inline u_int ahd_get_scbptr(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); } static __inline void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); ahd_outb(ahd, SCBPTR, scbptr & 0xFF); ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); } static __inline u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd) { return (ahd_inw_atomic(ahd, HNSCB_QOFF)); } static __inline void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outw_atomic(ahd, HNSCB_QOFF, value); } static __inline u_int ahd_get_hescb_qoff(struct ahd_softc *ahd) { return (ahd_inb(ahd, HESCB_QOFF)); } static __inline void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outb(ahd, HESCB_QOFF, value); } static __inline u_int ahd_get_snscb_qoff(struct ahd_softc *ahd) { u_int oldvalue; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); oldvalue = ahd_inw(ahd, SNSCB_QOFF); ahd_outw(ahd, SNSCB_QOFF, oldvalue); return (oldvalue); } static __inline void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outw(ahd, SNSCB_QOFF, value); } static __inline u_int ahd_get_sescb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SESCB_QOFF)); } static __inline void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SESCB_QOFF, value); } static __inline u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); } static __inline void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); } static __inline u_int ahd_inb_scbram(struct ahd_softc *ahd, 
u_int offset) { u_int value; /* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. * * Razor #528 */ value = ahd_inb(ahd, offset); if ((ahd->flags & AHD_PCIX_SCBRAM_RD_BUG) != 0) ahd_inb(ahd, MODE_PTR); return (value); } static __inline u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inb_scbram(ahd, offset) | (ahd_inb_scbram(ahd, offset+1) << 8)); } static __inline uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inw_scbram(ahd, offset) | (ahd_inw_scbram(ahd, offset+2) << 16)); } static __inline uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inl_scbram(ahd, offset) | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); } static __inline struct scb * ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) { struct scb* scb; if (tag >= AHD_SCB_MAX) return (NULL); scb = ahd->scb_data.scbindex[tag]; if (scb != NULL) ahd_sync_scb(ahd, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static __inline void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) { - struct hardware_scb *q_hscb; + struct hardware_scb *q_hscb; + struct map_node *q_hscb_map; uint32_t saved_hscb_busaddr; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahd->next_queued_hscb; + q_hscb_map = ahd->next_queued_hscb_map; saved_hscb_busaddr = q_hscb->hscb_busaddr; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); q_hscb->hscb_busaddr = saved_hscb_busaddr; q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; /* Now swap HSCB pointers. */ ahd->next_queued_hscb = scb->hscb; + ahd->next_queued_hscb_map = scb->hscb_map; scb->hscb = q_hscb; + scb->hscb_map = q_hscb_map; /* Now define the mapping from tag to SCB in the scbindex */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; } /* * Tell the sequencer about a new transaction to execute. */ static __inline void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) { ahd_swap_with_next_hscb(ahd, scb); if (SCBID_IS_NULL(SCB_GET_TAG(scb))) panic("Attempt to queue invalid SCB tag %x\n", SCB_GET_TAG(scb)); /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; if (scb->sg_count != 0) ahd_setup_data_scb(ahd, scb); else ahd_setup_noxfer_scb(ahd, scb); ahd_setup_scb_common(ahd, scb); /* * Make sure our data is consistent from the * perspective of the adapter. 
*/ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { uint64_t host_dataptr; - host_dataptr = ahd_le64toh(scb->hscb->dataptr); + host_dataptr = aic_le64toh(scb->hscb->dataptr); printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n", ahd_name(ahd), - SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr), + SCB_GET_TAG(scb), aic_le32toh(scb->hscb->hscb_busaddr), (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), (u_int)(host_dataptr & 0xFFFFFFFF), - ahd_le32toh(scb->hscb->datacnt)); + aic_le32toh(scb->hscb->datacnt)); } #endif /* Tell the adapter about the newly queued SCB */ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); } static __inline uint8_t * ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb) { return (scb->sense_data); } static __inline uint32_t ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb) { return (scb->sense_busaddr); } /************************** Interrupt Processing ******************************/ static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op); static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op); static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd); static __inline int ahd_intr(struct ahd_softc *ahd); static __inline void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) { - ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, - /*offset*/0, /*len*/AHC_SCB_MAX * sizeof(uint16_t), op); + aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + /*offset*/0, + /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); } static __inline void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) { #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { - ahd_dmamap_sync(ahd, ahd->shared_data_dmat, - ahd->shared_data_dmamap, + aic_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, 0), sizeof(struct target_cmd) * AHD_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHD_RUN_QOUTFIFO 0x1 #define AHD_RUN_TQINFIFO 0x2 static __inline u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) { u_int retval; retval = 0; - ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_dmamap, - /*offset*/ahd->qoutfifonext, /*len*/2, - BUS_DMASYNC_POSTREAD); - if ((ahd->qoutfifo[ahd->qoutfifonext] - & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag + aic_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, + /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), + /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); + if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag + == ahd->qoutfifonext_valid_tag) retval |= AHD_RUN_QOUTFIFO; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { - ahd_dmamap_sync(ahd, ahd->shared_data_dmat, - ahd->shared_data_dmamap, + aic_dmamap_sync(ahd, ahd->shared_data_dmat, + ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) retval |= AHD_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ static __inline int ahd_intr(struct ahd_softc *ahd) { u_int intstat; if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return.
This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 && (ahd_check_cmdcmpltqueues(ahd) != 0)) intstat = CMDCMPLT; else intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) return (0); if (intstat & CMDCMPLT) { ahd_outb(ahd, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. */ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { if (ahd_is_paused(ahd)) { /* * Potentially lost SEQINT. * If SEQINTCODE is non-zero, * simulate the SEQINT. */ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT) intstat |= SEQINT; } } else { ahd_flush_device_writes(ahd); } ahd_run_qoutfifo(ahd); ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++; ahd->cmdcmplt_total++; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) ahd_run_tqinfifo(ahd, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & HWERRINT) { ahd_handle_hwerrint(ahd); } else if ((intstat & (PCIINT|SPLTINT)) != 0) { ahd->bus_intr(ahd); } else { if ((intstat & SEQINT) != 0) ahd_handle_seqint(ahd, intstat); if ((intstat & SCSIINT) != 0) ahd_handle_scsiint(ahd, intstat); } return (1); } #endif /* _AIC79XX_INLINE_H_ */ Index: stable/4/sys/dev/aic7xxx/aic79xx_osm.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx_osm.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx_osm.c (revision 125849) @@ -1,2016 +1,1706 @@ /* - * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers + * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2001-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#27 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $ */ +#include +__FBSDID("$FreeBSD$"); + #include #include +#include + #include "opt_ddb.h" #ifdef DDB #include #endif #ifndef AHD_TMODE_ENABLE #define AHD_TMODE_ENABLE 0 #endif +#include + #define ccb_scb_ptr spriv_ptr0 #if UNUSED static void ahd_dump_targcmd(struct target_cmd *cmd); #endif static int ahd_modevent(module_t mod, int type, void *data); static void ahd_action(struct cam_sim *sim, union ccb *ccb); static void ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel, struct ccb_trans_settings *cts); static void ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel, struct ccb_trans_settings *cts); static void ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error); static void ahd_poll(struct cam_sim *sim); static void ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim, struct ccb_scsiio *csio, struct scb *scb); static void ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb); static int ahd_create_path(struct ahd_softc *ahd, char channel, u_int target, u_int lun, struct cam_path **path); -#if NOT_YET -static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb); -#endif - static int ahd_create_path(struct ahd_softc *ahd, char channel, u_int target, u_int lun, struct cam_path **path) { path_id_t path_id; - if (channel == 'B') - path_id = cam_sim_path(ahd->platform_data->sim_b); - else - path_id = cam_sim_path(ahd->platform_data->sim); - + path_id = cam_sim_path(ahd->platform_data->sim); return (xpt_create_path(path, /*periph*/NULL, path_id, target, lun)); } int ahd_map_int(struct ahd_softc *ahd) { int error; /* Hook up our interrupt handler */ error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq, INTR_TYPE_CAM, ahd_platform_intr, ahd, &ahd->platform_data->ih); if (error != 0) device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n", error); return (error); } /* * Attach all the sub-devices we can find */ int ahd_attach(struct ahd_softc *ahd) { char ahd_info[256]; struct ccb_setasync csa; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; long s; int count; count = 0; + devq = NULL; sim = NULL; + /* + * Create a thread to perform all recovery. + */ + if (ahd_spawn_recovery_thread(ahd) != 0) + goto fail; + ahd_controller_info(ahd, ahd_info); printf("%s\n", ahd_info); ahd_lock(ahd, &s); /* * Create the device queue for our SIM(s). 
*/ devq = cam_simq_alloc(AHD_MAX_QUEUE); if (devq == NULL) goto fail; /* * Construct our SIM entry */ sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd, device_get_unit(ahd->dev_softc), 1, /*XXX*/256, devq); if (sim == NULL) { cam_simq_free(devq); goto fail; } if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) { cam_sim_free(sim, /*free_devq*/TRUE); sim = NULL; goto fail; } if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, /*free_devq*/TRUE); sim = NULL; goto fail; } xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = ahd_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); count++; fail: ahd->platform_data->sim = sim; ahd->platform_data->path = path; if (count != 0) { /* We have to wait until after any system dumps... */ ahd->platform_data->eh = EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown, ahd, SHUTDOWN_PRI_DEFAULT); ahd_intr_enable(ahd, TRUE); } ahd_unlock(ahd, &s); return (count); } /* * Catch an interrupt from the adapter */ void ahd_platform_intr(void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc *)arg; ahd_intr(ahd); } /* * We have an SCB which has been processed by the * adapter; now we look to see how the operation * went. */ void ahd_done(struct ahd_softc *ahd, struct scb *scb) { union ccb *ccb; CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_done - scb %d\n", SCB_GET_TAG(scb))); ccb = scb->io_ctx; LIST_REMOVE(scb, pending_links); + if ((scb->flags & SCB_TIMEDOUT) != 0) + LIST_REMOVE(scb, timedout_links); - untimeout(ahd_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); + untimeout(ahd_platform_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op); bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap); } #ifdef AHD_TARGET_MODE if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct cam_path *ccb_path; /* * If we have finally disconnected, clean up our * pending device state. * XXX - There may be error states that cause us to * remain connected. */ ccb_path = ccb->ccb_h.path; if (ahd->pending_device != NULL && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { ahd->pending_device = NULL; } else { xpt_print_path(ccb->ccb_h.path); printf("Still disconnected\n"); ahd_freeze_ccb(ccb); } } - if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) + if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) ccb->ccb_h.status |= CAM_REQ_CMP; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ahd_free_scb(ahd, scb); xpt_done(ccb); return; } #endif /* * If the recovery SCB completes, we have to be * out of our timeout. */ if ((scb->flags & SCB_RECOVERY_SCB) != 0) { struct scb *list_scb; /* * We were able to complete the command successfully, * so reinstate the timeouts for all other pending * commands.
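 * (ccb_h.timeout is in milliseconds; the loop below re-arms each
 * handler with ticks = timeout * hz / 1000, so a 30000ms timeout
 * at hz = 100 becomes 3000 ticks.)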
*/ LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) { union ccb *ccb; uint64_t time; ccb = list_scb->io_ctx; if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) continue; time = ccb->ccb_h.timeout; time *= hz; time /= 1000; ccb->ccb_h.timeout_ch = - timeout(ahd_timeout, list_scb, time); + timeout(ahd_platform_timeout, list_scb, time); } - if (ahd_get_transaction_status(scb) == CAM_BDR_SENT - || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED) - ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); + if (aic_get_transaction_status(scb) == CAM_BDR_SENT + || aic_get_transaction_status(scb) == CAM_REQ_ABORTED) + aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); + ahd_print_path(ahd, scb); printf("no longer in timeout, status = %x\n", ccb->ccb_h.status); } /* Don't clobber any existing error state */ - if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) { + if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) { ccb->ccb_h.status |= CAM_REQ_CMP; } else if ((scb->flags & SCB_SENSE) != 0) { /* * We performed autosense retrieval. * * Zero any sense not transferred by the * device. The SCSI spec mandates that any * untransferred data should be assumed to be * zero. Complete the 'bounce' of sense information * through buffers accessible via bus-space by * copying it into the client's csio. */ memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); memcpy(&ccb->csio.sense_data, ahd_get_sense_buf(ahd, scb), /* XXX What size do we want to use??? */ sizeof(ccb->csio.sense_data) - ccb->csio.sense_resid); scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; } else if ((scb->flags & SCB_PKT_SENSE) != 0) { struct scsi_status_iu_header *siu; u_int sense_len; int i; /* * Copy only the sense data into the provided buffer. */ siu = (struct scsi_status_iu_header *)scb->sense_data; sense_len = MIN(scsi_4btoul(siu->sense_length), sizeof(ccb->csio.sense_data)); memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); memcpy(&ccb->csio.sense_data, ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu), sense_len); printf("Copied %d bytes of sense data offset %d:", sense_len, SIU_SENSE_OFFSET(siu)); for (i = 0; i < sense_len; i++) printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]); printf("\n"); scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ahd_free_scb(ahd, scb); xpt_done(ccb); } static void ahd_action(struct cam_sim *sim, union ccb *ccb) { struct ahd_softc *ahd; #ifdef AHD_TARGET_MODE struct ahd_tmode_lstate *lstate; #endif u_int target_id; u_int our_id; long s; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n")); ahd = (struct ahd_softc *)cam_sim_softc(sim); target_id = ccb->ccb_h.target_id; our_id = SIM_SCSI_ID(ahd, sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ #ifdef AHD_TARGET_MODE case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ { struct ahd_tmode_tstate *tstate; cam_status status; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { /* Response from the black hole device */ tstate = NULL; lstate = ahd->black_hole; } else { ccb->ccb_h.status = status; xpt_done(ccb); break; } } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { ahd_lock(ahd, &s); SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0) ahd_run_tqinfifo(ahd, /*paused*/FALSE); ahd_unlock(ahd, &s);
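/*
 * The ATIO queued above stays on lstate->accept_tios until the
 * sequencer delivers a matching command; when inbound target
 * commands had been blocked for want of ATIOs, the
 * ahd_run_tqinfifo() call above restarts their delivery.
 */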
break; } /* * The target_id represents the target we attempt to * select. In target mode, this is the initiator of * the original command. */ our_id = target_id; target_id = ccb->csio.init_id; /* FALLTHROUGH */ } #endif case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct scb *scb; struct hardware_scb *hscb; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int col_idx; if ((ahd->flags & AHD_INITIATORROLE) == 0 && (ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_RESET_DEV)) { ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } /* * get an scb to use. */ ahd_lock(ahd, &s); tinfo = ahd_fetch_transinfo(ahd, 'A', our_id, target_id, &tstate); if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { col_idx = AHD_NEVER_COL_IDX; } else { col_idx = AHD_BUILD_COL_IDX(target_id, ccb->ccb_h.target_lun); } if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) { xpt_freeze_simq(sim, /*count*/1); ahd->flags |= AHD_RESOURCE_SHORTAGE; ahd_unlock(ahd, &s); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } ahd_unlock(ahd, &s); hscb = scb->hscb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, ("start scb(%p)\n", scb)); scb->io_ctx = ccb; /* * So we can find the SCB when an abort is requested */ ccb->ccb_h.ccb_scb_ptr = scb; /* * Put all the arguments for the xfer in the scb */ hscb->control = 0; hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id); hscb->lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { hscb->cdb_len = 0; scb->flags |= SCB_DEVICE_RESET; hscb->control |= MK_MESSAGE; hscb->task_management = SIU_TASKMGMT_LUN_RESET; ahd_execute_scb(scb, NULL, 0, 0); } else { #ifdef AHD_TARGET_MODE if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct target_data *tdata; tdata = &hscb->shared_data.tdata; if (ahd->pending_device == lstate) scb->flags |= SCB_TARGET_IMMEDIATE; hscb->control |= TARGET_SCB; tdata->target_phases = 0; if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { tdata->target_phases |= SPHASE_PENDING; tdata->scsi_status = ccb->csio.scsi_status; } if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) tdata->target_phases |= NO_DISCONNECT; tdata->initiator_tag = ahd_htole16(ccb->csio.tag_id); } #endif hscb->task_management = 0; if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) hscb->control |= ccb->csio.tag_action; ahd_setup_data(ahd, sim, &ccb->csio, scb); } break; } #ifdef AHD_TARGET_MODE case XPT_NOTIFY_ACK: case XPT_IMMED_NOTIFY: { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; cam_status status; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; xpt_done(ccb); break; } SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; ahd_send_lstate_events(ahd, lstate); break; } case XPT_EN_LUN: /* Enable LUN as a target */ ahd_handle_en_lun(ahd, sim, ccb); xpt_done(ccb); break; #endif case XPT_ABORT: /* Abort the specified CCB */ { ahd_abort_ccb(ahd, sim, ccb); break; } case XPT_SET_TRAN_SETTINGS: { ahd_lock(ahd, &s); ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim), SIM_CHANNEL(ahd, sim), &ccb->cts); ahd_unlock(ahd, &s); xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { ahd_lock(ahd, &s); ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim), 
SIM_CHANNEL(ahd, sim), &ccb->cts); ahd_unlock(ahd, &s); xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { - struct ccb_calc_geometry *ccg; - uint32_t size_mb; - uint32_t secs_per_cylinder; - int extended; - - ccg = &ccb->ccg; - size_mb = ccg->volume_size - / ((1024L * 1024L) / ccg->block_size); - extended = ahd->flags & AHD_EXTENDED_TRANS_A; - - if (size_mb > 1024 && extended) { - ccg->heads = 255; - ccg->secs_per_track = 63; - } else { - ccg->heads = 64; - ccg->secs_per_track = 32; - } - secs_per_cylinder = ccg->heads * ccg->secs_per_track; - ccg->cylinders = ccg->volume_size / secs_per_cylinder; - ccb->ccb_h.status = CAM_REQ_CMP; + aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int found; ahd_lock(ahd, &s); found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim), /*initiate reset*/TRUE); ahd_unlock(ahd, &s); if (bootverbose) { xpt_print_path(SIM_PATH(ahd, sim)); printf("SCSI bus reset delivered. " "%d SCBs aborted.\n", found); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((ahd->features & AHD_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; if ((ahd->features & AHD_TARGETMODE) != 0) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7; cpi->max_lun = AHD_NUM_LUNS - 1; cpi->initiator_id = ahd->our_id; if ((ahd->flags & AHD_RESET_BUS_A) == 0) { cpi->hba_misc |= PIM_NOBUSRESET; } cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); #ifdef AHD_NEW_TRAN_SETTINGS cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; cpi->transport_version = 4; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; #endif cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } } static void ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel, struct ccb_trans_settings *cts) { #ifdef AHD_NEW_TRAN_SETTINGS struct ahd_devinfo devinfo; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; uint16_t *discenable; uint16_t *tagenable; u_int update_type; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), cts->ccb_h.target_id, cts->ccb_h.target_lun, SIM_CHANNEL(ahd, sim), ROLE_UNKNOWN); tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { update_type |= AHD_TRANS_GOAL; discenable = &tstate->discenable; tagenable = &tstate->tagenable; tinfo->curr.protocol_version = cts->protocol_version; tinfo->curr.transport_version = cts->transport_version; tinfo->goal.protocol_version = cts->protocol_version; tinfo->goal.transport_version = 
cts->transport_version; } else if (cts->type == CTS_TYPE_USER_SETTINGS) { update_type |= AHD_TRANS_USER; discenable = &ahd->user_discenable; tagenable = &ahd->user_tagenable; tinfo->user.protocol_version = cts->protocol_version; tinfo->user.transport_version = cts->transport_version; } else { cts->ccb_h.status = CAM_REQ_INVALID; return; } if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *discenable |= devinfo.target_mask; else *discenable &= ~devinfo.target_mask; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *tagenable |= devinfo.target_mask; else *tagenable &= ~devinfo.target_mask; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { ahd_validate_width(ahd, /*tinfo limit*/NULL, &spi->bus_width, ROLE_UNKNOWN); ahd_set_width(ahd, &devinfo, spi->bus_width, update_type, /*paused*/FALSE); } if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) { if (update_type == AHD_TRANS_USER) spi->ppr_options = tinfo->user.ppr_options; else spi->ppr_options = tinfo->goal.ppr_options; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { if (update_type == AHD_TRANS_USER) spi->sync_offset = tinfo->user.offset; else spi->sync_offset = tinfo->goal.offset; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (update_type == AHD_TRANS_USER) spi->sync_period = tinfo->user.period; else spi->sync_period = tinfo->goal.period; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { u_int maxsync; maxsync = AHD_SYNCRATE_MAX; if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; if ((*discenable & devinfo.target_mask) == 0) spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ; ahd_find_syncrate(ahd, &spi->sync_period, &spi->ppr_options, maxsync); ahd_validate_offset(ahd, /*tinfo limit*/NULL, spi->sync_period, &spi->sync_offset, spi->bus_width, ROLE_UNKNOWN); /* We use a period of 0 to represent async */ if (spi->sync_offset == 0) { spi->sync_period = 0; spi->ppr_options = 0; } ahd_set_syncrate(ahd, &devinfo, spi->sync_period, spi->sync_offset, spi->ppr_options, update_type, /*paused*/FALSE); } cts->ccb_h.status = CAM_REQ_CMP; #else struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; uint16_t *discenable; uint16_t *tagenable; u_int update_type; ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), cts->ccb_h.target_id, cts->ccb_h.target_lun, SIM_CHANNEL(ahd, sim), ROLE_UNKNOWN); tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); update_type = 0; if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { update_type |= AHD_TRANS_GOAL; discenable = &tstate->discenable; tagenable = &tstate->tagenable; } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { update_type |= AHD_TRANS_USER; discenable = &ahd->user_discenable; tagenable = &ahd->user_tagenable; } else { cts->ccb_h.status = CAM_REQ_INVALID; return; } if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) *discenable |= devinfo.target_mask; else *discenable &= ~devinfo.target_mask; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) *tagenable |= devinfo.target_mask; else *tagenable &= ~devinfo.target_mask; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { ahd_validate_width(ahd, /*tinfo limit*/NULL, &cts->bus_width, ROLE_UNKNOWN); ahd_set_width(ahd, &devinfo, cts->bus_width, update_type, /*paused*/FALSE); } if ((cts->valid & 
CCB_TRANS_SYNC_OFFSET_VALID) == 0) { if (update_type == AHD_TRANS_USER) cts->sync_offset = tinfo->user.offset; else cts->sync_offset = tinfo->goal.offset; } if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { if (update_type == AHD_TRANS_USER) cts->sync_period = tinfo->user.period; else cts->sync_period = tinfo->goal.period; } if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0) || ((cts->valid & CCB_TRANS_TQ_VALID) != 0) || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) { u_int ppr_options; u_int maxsync; maxsync = AHD_SYNCRATE_MAX; ppr_options = 0; if (cts->sync_period <= AHD_SYNCRATE_DT && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) { ppr_options = tinfo->user.ppr_options | MSG_EXT_PPR_DT_REQ; } if ((*tagenable & devinfo.target_mask) == 0 || (*discenable & devinfo.target_mask) == 0) ppr_options &= ~MSG_EXT_PPR_IU_REQ; ahd_find_syncrate(ahd, &cts->sync_period, &ppr_options, maxsync); ahd_validate_offset(ahd, /*tinfo limit*/NULL, cts->sync_period, &cts->sync_offset, MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN); /* We use a period of 0 to represent async */ if (cts->sync_offset == 0) { cts->sync_period = 0; ppr_options = 0; } if (ppr_options != 0 && tinfo->user.transport_version >= 3) { tinfo->goal.transport_version = tinfo->user.transport_version; tinfo->curr.transport_version = tinfo->user.transport_version; } ahd_set_syncrate(ahd, &devinfo, cts->sync_period, cts->sync_offset, ppr_options, update_type, /*paused*/FALSE); } cts->ccb_h.status = CAM_REQ_CMP; #endif } static void ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel, struct ccb_trans_settings *cts) { #ifdef AHD_NEW_TRAN_SETTINGS struct ahd_devinfo devinfo; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; ahd_compile_devinfo(&devinfo, our_id, cts->ccb_h.target_id, cts->ccb_h.target_lun, channel, ROLE_UNKNOWN); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if (cts->type == CTS_TYPE_CURRENT_SETTINGS) tinfo = &targ_info->curr; else tinfo = &targ_info->user; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (cts->type == CTS_TYPE_USER_SETTINGS) { if ((ahd->user_discenable & devinfo.target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ahd->user_tagenable & devinfo.target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { if ((tstate->discenable & devinfo.target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((tstate->tagenable & devinfo.target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } cts->protocol_version = tinfo->protocol_version; cts->transport_version = tinfo->transport_version; spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->ppr_options = tinfo->ppr_options; cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else { scsi->valid = 0; } cts->ccb_h.status = CAM_REQ_CMP; #else struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; struct ahd_transinfo *tinfo; ahd_compile_devinfo(&devinfo, our_id, cts->ccb_h.target_id, 
cts->ccb_h.target_lun, channel, ROLE_UNKNOWN); targ_info = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) tinfo = &targ_info->curr; else tinfo = &targ_info->user; cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { if ((ahd->user_discenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((ahd->user_tagenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; } else { if ((tstate->discenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((tstate->tagenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; } cts->sync_period = tinfo->period; cts->sync_offset = tinfo->offset; cts->bus_width = tinfo->width; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; cts->ccb_h.status = CAM_REQ_CMP; #endif } static void ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ahd_softc *ahd; struct cam_sim *sim; sim = (struct cam_sim *)callback_arg; ahd = (struct ahd_softc *)cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: { struct ahd_devinfo devinfo; long s; ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim), xpt_path_target_id(path), xpt_path_lun_id(path), SIM_CHANNEL(ahd, sim), ROLE_UNKNOWN); /* * Revert to async/narrow transfers * for the next device. */ ahd_lock(ahd, &s); ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE); ahd_unlock(ahd, &s); break; } default: break; } } static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error) { struct scb *scb; union ccb *ccb; struct ahd_softc *ahd; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int mask; u_long s; scb = (struct scb *)arg; ccb = scb->io_ctx; ahd = scb->ahd_softc; if (error != 0) { if (error == EFBIG) - ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG); + aic_set_transaction_status(scb, CAM_REQ_TOO_BIG); else - ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR); + aic_set_transaction_status(scb, CAM_REQ_CMP_ERR); if (nsegments != 0) bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap); ahd_lock(ahd, &s); ahd_free_scb(ahd, scb); ahd_unlock(ahd, &s); xpt_done(ccb); return; } scb->sg_count = 0; if (nsegments != 0) { void *sg; bus_dmasync_op_t op; u_int i; /* Copy the segments into our SG list */ for (i = nsegments, sg = scb->sg_list; i > 0; i--) { sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr, dm_segs->ds_len, /*last*/i == 1); dm_segs++; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op); if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct target_data *tdata; tdata = &scb->hscb->shared_data.tdata; tdata->target_phases |= DPHASE_PENDING; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) tdata->data_phase = P_DATAOUT; else tdata->data_phase = P_DATAIN; } } ahd_lock(ahd, &s); /* * Last time we need to check if this SCB needs to * be aborted. 
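 * (ahd_execute_scb() runs as the bus_dmamap_load() callback, so the
 * CCB may have been aborted while the mapping was deferred; hence
 * this recheck of the transaction status under the lock.)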
*/ - if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) { + if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) { if (nsegments != 0) bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap); ahd_free_scb(ahd, scb); ahd_unlock(ahd, &s); xpt_done(ccb); return; } tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid), SCSIID_OUR_ID(scb->hscb->scsiid), SCSIID_TARGET(ahd, scb->hscb->scsiid), &tstate); mask = SCB_GET_TARGET_MASK(ahd, scb); if ((tstate->discenable & mask) != 0 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) scb->hscb->control |= DISCENB; if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { scb->flags |= SCB_PACKETIZED; if (scb->hscb->task_management != 0) scb->hscb->control &= ~MK_MESSAGE; } if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0 && (tinfo->goal.width != 0 || tinfo->goal.period != 0 || tinfo->goal.ppr_options != 0)) { scb->flags |= SCB_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } else if ((tstate->auto_negotiate & mask) != 0) { scb->flags |= SCB_AUTO_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { uint64_t time; if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) ccb->ccb_h.timeout = 5 * 1000; time = ccb->ccb_h.timeout; time *= hz; time /= 1000; ccb->ccb_h.timeout_ch = - timeout(ahd_timeout, (caddr_t)scb, time); + timeout(ahd_platform_timeout, (caddr_t)scb, time); } if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) { /* Define a mapping from our tag to the SCB. */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; ahd_pause(ahd); ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG); ahd_unpause(ahd); } else { ahd_queue_scb(ahd, scb); } ahd_unlock(ahd, &s); } static void ahd_poll(struct cam_sim *sim) { ahd_intr(cam_sim_softc(sim)); } static void ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim, struct ccb_scsiio *csio, struct scb *scb) { struct hardware_scb *hscb; struct ccb_hdr *ccb_h; hscb = scb->hscb; ccb_h = &csio->ccb_h; csio->resid = 0; csio->sense_resid = 0; if (ccb_h->func_code == XPT_SCSI_IO) { hscb->cdb_len = csio->cdb_len; if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { if (hscb->cdb_len > MAX_CDB_LEN && (ccb_h->flags & CAM_CDB_PHYS) == 0) { u_long s; /* * Should CAM start to support CDB sizes * greater than 16 bytes, we could use * the sense buffer to store the CDB. 
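 * Until then, a virtual CDB longer than MAX_CDB_LEN is failed below
 * with CAM_REQ_INVALID, while a physical CDB pointer is handed to
 * the controller through the cdb_from_host fields.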
*/ - ahd_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_REQ_INVALID); ahd_lock(ahd, &s); ahd_free_scb(ahd, scb); ahd_unlock(ahd, &s); xpt_done((union ccb *)csio); return; } if ((ccb_h->flags & CAM_CDB_PHYS) != 0) { hscb->shared_data.idata.cdb_from_host.cdbptr = - ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr); + aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr); hscb->shared_data.idata.cdb_from_host.cdblen = csio->cdb_len; hscb->cdb_len |= SCB_CDB_LEN_PTR; } else { memcpy(hscb->shared_data.idata.cdb, csio->cdb_io.cdb_ptr, hscb->cdb_len); } } else { if (hscb->cdb_len > MAX_CDB_LEN) { u_long s; - ahd_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_REQ_INVALID); ahd_lock(ahd, &s); ahd_free_scb(ahd, scb); ahd_unlock(ahd, &s); xpt_done((union ccb *)csio); return; } memcpy(hscb->shared_data.idata.cdb, csio->cdb_io.cdb_bytes, hscb->cdb_len); } } /* Only use S/G if there is a transfer */ if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { /* We've been given a pointer to a single buffer */ if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load(ahd->buffer_dmat, scb->dmamap, csio->data_ptr, csio->dxfer_len, ahd_execute_scb, scb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ xpt_freeze_simq(sim, /*count*/1); scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE) panic("ahd_setup_data - Transfer size " "larger than can device max"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; ahd_execute_scb(scb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccb_h->flags & CAM_DATA_PHYS) != 0) panic("ahd_setup_data - Physical segment " "pointers unsupported"); if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) panic("ahd_setup_data - Virtual segment " "addresses unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *)csio->data_ptr; ahd_execute_scb(scb, segs, csio->sglist_cnt, 0); } } else { ahd_execute_scb(scb, NULL, 0, 0); } } -#if NOT_YET static void -ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb) { - - if ((scb->flags & SCB_RECOVERY_SCB) == 0) { - struct scb *list_scb; - - scb->flags |= SCB_RECOVERY_SCB; - - /* - * Take all queued, but not sent SCBs out of the equation. - * Also ensure that no new CCBs are queued to us while we - * try to fix this problem. - */ - if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { - xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1); - scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; - } - - /* - * Go through all of our pending SCBs and remove - * any scheduled timeouts for them. We will reschedule - * them after we've successfully fixed this problem. 
- */ - LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) { - union ccb *ccb; - - ccb = list_scb->io_ctx; - untimeout(ahd_timeout, list_scb, ccb->ccb_h.timeout_ch); - } - } -} -#endif - -void -ahd_timeout(void *arg) -{ - struct scb *scb; - struct ahd_softc *ahd; - ahd_mode_state saved_modes; - long s; - int target; - int lun; - char channel; - -#if NOT_YET - int i; - int found; - u_int last_phase; -#endif - - scb = (struct scb *)arg; - ahd = (struct ahd_softc *)scb->ahd_softc; - - ahd_lock(ahd, &s); - - ahd_pause_and_flushwork(ahd); - - saved_modes = ahd_save_modes(ahd); -#if 0 - ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); - ahd_outb(ahd, SCSISIGO, ACKO); - printf("set ACK\n"); - ahd_outb(ahd, SCSISIGO, 0); - printf("clearing Ack\n"); - ahd_restore_modes(ahd, saved_modes); -#endif - if ((scb->flags & SCB_ACTIVE) == 0) { - /* Previous timeout took care of me already */ - printf("%s: Timedout SCB already complete. " - "Interrupts may not be functioning.\n", ahd_name(ahd)); - ahd_unpause(ahd); - ahd_unlock(ahd, &s); - return; - } - - target = SCB_GET_TARGET(ahd, scb); - channel = SCB_GET_CHANNEL(ahd, scb); - lun = SCB_GET_LUN(scb); - - ahd_print_path(ahd, scb); - printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb)); - ahd_dump_card_state(ahd); - ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim), - /*initiate reset*/TRUE); - ahd_unlock(ahd, &s); - return; -#if NOT_YET - last_phase = ahd_inb(ahd, LASTPHASE); - if (scb->sg_count > 0) { - for (i = 0; i < scb->sg_count; i++) { - printf("sg[%d] - Addr 0x%x : Length %d\n", - i, - ((struct ahd_dma_seg *)scb->sg_list)[i].addr, - ((struct ahd_dma_seg *)scb->sg_list)[i].len - & AHD_SG_LEN_MASK); - } - } - if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { - /* - * Been down this road before. - * Do a full bus reset. - */ -bus_reset: - ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT); - found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE); - printf("%s: Issued Channel %c Bus Reset. " - "%d SCBs aborted\n", ahd_name(ahd), channel, found); - } else { - /* - * If we are a target, transition to bus free and report - * the timeout. - * - * The target/initiator that is holding up the bus may not - * be the same as the one that triggered this timeout - * (different commands have different timeout lengths). - * If the bus is idle and we are actiing as the initiator - * for this request, queue a BDR message to the timed out - * target. Otherwise, if the timed out transaction is - * active: - * Initiator transaction: - * Stuff the message buffer with a BDR message and assert - * ATN in the hopes that the target will let go of the bus - * and go to the mesgout phase. If this fails, we'll - * get another timeout 2 seconds later which will attempt - * a bus reset. - * - * Target transaction: - * Transition to BUS FREE and report the error. - * It's good to be the target! - */ - u_int active_scb_index; - u_int saved_scbptr; - - saved_scbptr = ahd_get_scbptr(ahd); - active_scb_index = saved_scbptr; - - if (last_phase != P_BUSFREE - && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0 - && (active_scb_index < ahd->scb_data.numscbs)) { - struct scb *active_scb; - - /* - * If the active SCB is not us, assume that - * the active SCB has a longer timeout than - * the timedout SCB, and wait for the active - * SCB to timeout. - */ - active_scb = ahd_lookup_scb(ahd, active_scb_index); - if (active_scb != scb) { - struct ccb_hdr *ccbh; - uint64_t newtimeout; - - ahd_print_path(ahd, scb); - printf("Other SCB Timeout%s", - (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 - ? 
" again\n" : "\n"); - scb->flags |= SCB_OTHERTCL_TIMEOUT; - newtimeout = - MAX(active_scb->io_ctx->ccb_h.timeout, - scb->io_ctx->ccb_h.timeout); - newtimeout *= hz; - newtimeout /= 1000; - ccbh = &scb->io_ctx->ccb_h; - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahd_timeout, scb, newtimeout); - ahd_unpause(ahd); - ahd_unlock(ahd, &s); - return; - } - - /* It's us */ - if ((scb->hscb->control & TARGET_SCB) != 0) { - - /* - * Send back any queued up transactions - * and properly record the error condition. - */ - ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), - SCB_GET_CHANNEL(ahd, scb), - SCB_GET_LUN(scb), - SCB_GET_TAG(scb), - ROLE_TARGET, - CAM_CMD_TIMEOUT); - - /* Will clear us from the bus */ - ahd_restart(ahd); - ahd_unlock(ahd, &s); - return; - } - - ahd_set_recoveryscb(ahd, active_scb); - ahd_outb(ahd, MSG_OUT, HOST_MSG); - ahd_outb(ahd, SCSISIGO, last_phase|ATNO); - ahd_print_path(ahd, active_scb); - printf("BDR message in message buffer\n"); - active_scb->flags |= SCB_DEVICE_RESET; - active_scb->io_ctx->ccb_h.timeout_ch = - timeout(ahd_timeout, (caddr_t)active_scb, 2 * hz); - ahd_unpause(ahd); - } else { - int disconnected; - - /* XXX Shouldn't panic. Just punt instead? */ - if ((scb->hscb->control & TARGET_SCB) != 0) - panic("Timed-out target SCB but bus idle"); - - if (last_phase != P_BUSFREE - && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) { - /* XXX What happened to the SCB? */ - /* Hung target selection. Goto busfree */ - printf("%s: Hung target selection\n", - ahd_name(ahd)); - ahd_restart(ahd); - ahd_unlock(ahd, &s); - return; - } - - if (ahd_search_qinfifo(ahd, target, channel, lun, - SCB_GET_TAG(scb), ROLE_INITIATOR, - /*status*/0, SEARCH_COUNT) > 0) { - disconnected = FALSE; - } else { - disconnected = TRUE; - } - - if (disconnected) { - - ahd_set_recoveryscb(ahd, scb); - /* - * Actually re-queue this SCB in an attempt - * to select the device before it reconnects. - * In either case (selection or reselection), - * we will now issue a target reset to the - * timed-out device. - * - * Set the MK_MESSAGE control bit indicating - * that we desire to send a message. We - * also set the disconnected flag since - * in the paging case there is no guarantee - * that our SCB control byte matches the - * version on the card. We don't want the - * sequencer to abort the command thinking - * an unsolicited reselection occurred. - */ - scb->hscb->control |= MK_MESSAGE|DISCONNECTED; - scb->flags |= SCB_DEVICE_RESET; - - /* - * The sequencer will never re-reference the - * in-core SCB. To make sure we are notified - * during reslection, set the MK_MESSAGE flag - * in the card's copy of the SCB. - */ - ahd_set_scbptr(ahd, SCB_GET_TAG(scb)); - ahd_outb(ahd, SCB_CONTROL, - ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE); - - /* - * Clear out any entries in the QINFIFO first - * so we are the next SCB for this target - * to run. - */ - ahd_search_qinfifo(ahd, - SCB_GET_TARGET(ahd, scb), - channel, SCB_GET_LUN(scb), - SCB_LIST_NULL, - ROLE_INITIATOR, - CAM_REQUEUE_REQ, - SEARCH_COMPLETE); - ahd_print_path(ahd, scb); - printf("Queuing a BDR SCB\n"); - ahd_qinfifo_requeue_tail(ahd, scb); - ahd_set_scbptr(ahd, saved_scbptr); - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahd_timeout, (caddr_t)scb, 2 * hz); - ahd_unpause(ahd); - } else { - /* Go "immediatly" to the bus reset */ - /* This shouldn't happen */ - ahd_set_recoveryscb(ahd, scb); - ahd_print_path(ahd, scb); - printf("SCB %d: Immediate reset. 
" - "Flags = 0x%x\n", SCB_GET_TAG(scb), - scb->flags); - goto bus_reset; - } - } - } - ahd_unlock(ahd, &s); -#endif -} - -static void ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) { union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; switch (abort_ccb->ccb_h.func_code) { #ifdef AHD_TARGET_MODE case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_hdr_slist *list; cam_status status; status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; break; } if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) list = &lstate->accept_tios; else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) list = &lstate->immed_notifies; else list = NULL; if (list != NULL) { struct ccb_hdr *curelm; int found; curelm = SLIST_FIRST(list); found = 0; if (curelm == &abort_ccb->ccb_h) { found = 1; SLIST_REMOVE_HEAD(list, sim_links.sle); } else { while(curelm != NULL) { struct ccb_hdr *nextelm; nextelm = SLIST_NEXT(curelm, sim_links.sle); if (nextelm == &abort_ccb->ccb_h) { found = 1; SLIST_NEXT(curelm, sim_links.sle) = SLIST_NEXT(nextelm, sim_links.sle); break; } curelm = nextelm; } } if (found) { abort_ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(abort_ccb); ccb->ccb_h.status = CAM_REQ_CMP; } else { xpt_print_path(abort_ccb->ccb_h.path); printf("Not found\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } break; } /* FALLTHROUGH */ } #endif case XPT_SCSI_IO: /* XXX Fully implement the hard ones */ ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } void ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun, ac_code code, void *opt_arg) { struct ccb_trans_settings cts; struct cam_path *path; void *arg; int error; arg = NULL; error = ahd_create_path(ahd, channel, target, lun, &path); if (error != CAM_REQ_CMP) return; switch (code) { case AC_TRANSFER_NEG: { #ifdef AHD_NEW_TRAN_SETTINGS struct ccb_trans_settings_scsi *scsi; cts.type = CTS_TYPE_CURRENT_SETTINGS; scsi = &cts.proto_specific.scsi; #else cts.flags = CCB_TRANS_CURRENT_SETTINGS; #endif cts.ccb_h.path = path; cts.ccb_h.target_id = target; cts.ccb_h.target_lun = lun; ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts); arg = &cts; #ifdef AHD_NEW_TRAN_SETTINGS scsi->valid &= ~CTS_SCSI_VALID_TQ; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; #else cts.valid &= ~CCB_TRANS_TQ_VALID; cts.flags &= ~CCB_TRANS_TAG_ENB; #endif if (opt_arg == NULL) break; if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED) #ifdef AHD_NEW_TRAN_SETTINGS scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; #else cts.flags |= CCB_TRANS_TAG_ENB; cts.valid |= CCB_TRANS_TQ_VALID; #endif break; } case AC_SENT_BDR: case AC_BUS_RESET: break; default: panic("ahd_send_async: Unexpected async event"); } xpt_async(code, path, arg); xpt_free_path(path); } void ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, int enable) { } int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg) { ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ahd->platform_data == NULL) return (ENOMEM); return (0); } void ahd_platform_free(struct ahd_softc *ahd) { struct ahd_platform_data *pdata; pdata = ahd->platform_data; if (pdata != NULL) { if (pdata->regs[0] != NULL) bus_release_resource(ahd->dev_softc, pdata->regs_res_type[0], pdata->regs_res_id[0], 
pdata->regs[0]); if (pdata->regs[1] != NULL) bus_release_resource(ahd->dev_softc, pdata->regs_res_type[1], pdata->regs_res_id[1], pdata->regs[1]); if (pdata->irq != NULL) bus_release_resource(ahd->dev_softc, pdata->irq_res_type, 0, pdata->irq); - if (pdata->sim_b != NULL) { - xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL); - xpt_free_path(pdata->path_b); - xpt_bus_deregister(cam_sim_path(pdata->sim_b)); - cam_sim_free(pdata->sim_b, /*free_devq*/TRUE); - } if (pdata->sim != NULL) { xpt_async(AC_LOST_DEVICE, pdata->path, NULL); xpt_free_path(pdata->path); xpt_bus_deregister(cam_sim_path(pdata->sim)); cam_sim_free(pdata->sim, /*free_devq*/TRUE); } if (pdata->eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh); free(ahd->platform_data, M_DEVBUF); } } int ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd) { /* We don't sort softcs under FreeBSD so report equal always */ return (0); } int ahd_detach(device_t dev) { struct ahd_softc *ahd; u_long l; u_long s; ahd_list_lock(&l); device_printf(dev, "detaching device\n"); ahd = device_get_softc(dev); ahd = ahd_find_softc(ahd); if (ahd == NULL) { device_printf(dev, "aic7xxx already detached\n"); ahd_list_unlock(&l); return (ENOENT); } + TAILQ_REMOVE(&ahd_tailq, ahd, links); + ahd_list_unlock(&l); ahd_lock(ahd, &s); ahd_intr_enable(ahd, FALSE); bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih); ahd_unlock(ahd, &s); ahd_free(ahd); - ahd_list_unlock(&l); return (0); } #if UNUSED static void ahd_dump_targcmd(struct target_cmd *cmd) { uint8_t *byte; uint8_t *last_byte; int i; byte = &cmd->initiator_channel; /* Debugging info for received commands */ last_byte = &cmd[1].initiator_channel; i = 0; while (byte < last_byte) { if (i == 0) printf("\t"); printf("%#x", *byte++); i++; if (i == 8) { printf("\n"); i = 0; } else { printf(", "); } } } #endif static int ahd_modevent(module_t mod, int type, void *data) { /* XXX Deal with busy status on unload. */ return 0; } static moduledata_t ahd_mod = { "ahd", ahd_modevent, NULL }; /********************************** DDB Hooks *********************************/ #ifdef DDB static struct ahd_softc *ahd_ddb_softc; static int ahd_ddb_paused; static int ahd_ddb_paused_on_entry; DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit) { struct ahd_softc *list_ahd; ahd_ddb_softc = NULL; TAILQ_FOREACH(list_ahd, &ahd_tailq, links) { if (list_ahd->unit == addr) ahd_ddb_softc = list_ahd; } if (ahd_ddb_softc == NULL) db_error("No matching softc found!\n"); } DB_COMMAND(ahd_pause, ahd_ddb_pause) { if (ahd_ddb_softc == NULL) { db_error("Must set unit with ahd_set_unit first!\n"); return; } if (ahd_ddb_paused == 0) { ahd_ddb_paused++; if (ahd_is_paused(ahd_ddb_softc)) { ahd_ddb_paused_on_entry++; return; } ahd_pause(ahd_ddb_softc); } } DB_COMMAND(ahd_unpause, ahd_ddb_unpause) { if (ahd_ddb_softc == NULL) { db_error("Must set unit with ahd_set_unit first!\n"); return; } if (ahd_ddb_paused != 0) { ahd_ddb_paused = 0; if (ahd_ddb_paused_on_entry) return; ahd_unpause(ahd_ddb_softc); } else if (ahd_ddb_paused_on_entry != 0) { /* Two unpauses to clear a paused on entry. 
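 * (ahd_ddb_paused_on_entry records that the sequencer was already
 * paused when the DDB ahd_pause command ran, so the first
 * ahd_unpause only dropped the DDB reference; this second unpause
 * forces the sequencer to be released as well.)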
*/ ahd_ddb_paused_on_entry = 0; ahd_unpause(ahd_ddb_softc); } } DB_COMMAND(ahd_in, ahd_ddb_in) { int c; int size; if (ahd_ddb_softc == NULL) { db_error("Must set unit with ahd_set_unit first!\n"); return; } if (have_addr == 0) return; size = 1; while ((c = *modif++) != '\0') { switch (c) { case 'b': size = 1; break; case 'w': size = 2; break; case 'l': size = 4; break; } } if (count <= 0) count = 1; while (--count >= 0) { db_printf("%04lx (M)%x: \t", (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR)); switch (size) { case 1: db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr)); break; case 2: db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr)); break; case 4: db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr)); break; } } } DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL) { db_expr_t old_value; db_expr_t new_value; int size; if (ahd_ddb_softc == NULL) { db_error("Must set unit with ahd_set_unit first!\n"); return; } switch (modif[0]) { case '\0': case 'b': size = 1; break; case 'h': size = 2; break; case 'l': size = 4; break; default: db_error("Unknown size\n"); return; } while (db_expression(&new_value)) { switch (size) { default: case 1: old_value = ahd_inb(ahd_ddb_softc, addr); ahd_outb(ahd_ddb_softc, addr, new_value); break; case 2: old_value = ahd_inw(ahd_ddb_softc, addr); ahd_outw(ahd_ddb_softc, addr, new_value); break; case 4: old_value = ahd_inl(ahd_ddb_softc, addr); ahd_outl(ahd_ddb_softc, addr, new_value); break; } db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx", (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR), (u_long)old_value, (u_long)new_value); addr += size; } db_skip_to_eol(); } #endif DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(ahd, cam, 1, 1, 1); MODULE_VERSION(ahd, 1); Index: stable/4/sys/dev/aic7xxx/aic79xx_osm.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx_osm.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx_osm.h (revision 125849) @@ -1,607 +1,322 @@ /* * FreeBSD platform specific driver option settings, data structures, * function declarations and includes. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2001-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.h#20 $ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.h#23 $ * * $FreeBSD$ */ #ifndef _AIC79XX_FREEBSD_H_ #define _AIC79XX_FREEBSD_H_ #include /* for config options */ #include #include #include /* For device_t */ #if __FreeBSD_version >= 500000 #include #endif #include #include #include #include -#define AHD_PCI_CONFIG 1 +#define AIC_PCI_CONFIG 1 #include #include #include #include #include #include #include +#if __FreeBSD_version >= 500000 +#include +#include +#else #include #include +#endif #include #include #include #include #include #include #include #include #ifdef CAM_NEW_TRAN_CODE #define AHD_NEW_TRAN_SETTINGS #endif /* CAM_NEW_TRAN_CODE */ /****************************** Platform Macros *******************************/ #define SIM_IS_SCSIBUS_B(ahd, sim) \ (0) #define SIM_CHANNEL(ahd, sim) \ ('A') #define SIM_SCSI_ID(ahd, sim) \ (ahd->our_id) #define SIM_PATH(ahd, sim) \ (ahd->platform_data->path) #define BUILD_SCSIID(ahd, sim, target_id, our_id) \ ((((target_id) << TID_SHIFT) & TID) | (our_id)) #define SCB_GET_SIM(ahd, scb) \ ((ahd)->platform_data->sim) #ifndef offsetof #define offsetof(type, member) ((size_t)(&((type *)0)->member)) #endif -/************************* Forward Declarations *******************************/ -typedef device_t ahd_dev_softc_t; -typedef union ccb *ahd_io_ctx_t; -/***************************** Bus Space/DMA **********************************/ -#define ahd_dma_tag_create(ahd, parent_tag, alignment, boundary, \ - lowaddr, highaddr, filter, filterarg, \ - maxsize, nsegments, maxsegsz, flags, \ - dma_tagp) \ - bus_dma_tag_create(parent_tag, alignment, boundary, \ - lowaddr, highaddr, filter, filterarg, \ - maxsize, nsegments, maxsegsz, flags, \ - dma_tagp) - -#define ahd_dma_tag_destroy(ahd, tag) \ - bus_dma_tag_destroy(tag) - -#define ahd_dmamem_alloc(ahd, dmat, vaddr, flags, mapp) \ - bus_dmamem_alloc(dmat, vaddr, flags, mapp) - -#define ahd_dmamem_free(ahd, dmat, vaddr, map) \ - bus_dmamem_free(dmat, vaddr, map) - -#define ahd_dmamap_create(ahd, tag, flags, mapp) \ - bus_dmamap_create(tag, flags, mapp) - -#define ahd_dmamap_destroy(ahd, tag, map) \ - bus_dmamap_destroy(tag, map) - -#define ahd_dmamap_load(ahd, dmat, map, addr, buflen, callback, \ - callback_arg, flags) \ - bus_dmamap_load(dmat, map, addr, buflen, callback, callback_arg, flags) - -#define ahd_dmamap_unload(ahd, tag, map) \ - bus_dmamap_unload(tag, map) - -/* XXX Need to update Bus DMA for partial map syncs */ -#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op) \ - bus_dmamap_sync(dma_tag, dmamap, op) - /************************ Tunable Driver Parameters **************************/ /* * The number of dma segments supported. The sequencer can handle any number * of physically contiguous S/G entrys. To reduce the driver's memory * consumption, we limit the number supported to be sufficient to handle * the largest mapping supported by the kernel, MAXPHYS. Assuming the * transfer is as fragmented as possible and unaligned, this turns out to * be the number of paged sized transfers in MAXPHYS plus an extra element * to handle any unaligned residual. The sequencer fetches SG elements * in cacheline sized chucks, so make the number per-transaction an even * multiple of 16 which should align us on even the largest of cacheline * boundaries. 
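 * For example, with the usual 4.x i386 values of a 4KB page size
 * and a 128KB MAXPHYS, btoc(MAXPHYS) is 32, so the definition below
 * works out to AHD_NSEG = roundup(32 + 1, 16) = 48 segments.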
*/ #define AHD_NSEG (roundup(btoc(MAXPHYS) + 1, 16)) /* This driver supports target mode */ #if NOT_YET #define AHD_TARGET_MODE 1 #endif /************************** Softc/SCB Platform Data ***************************/ struct ahd_platform_data { /* * Hooks into the XPT. */ struct cam_sim *sim; - struct cam_sim *sim_b; struct cam_path *path; - struct cam_path *path_b; int regs_res_type[2]; int regs_res_id[2]; int irq_res_type; struct resource *regs[2]; struct resource *irq; void *ih; eventhandler_tag eh; + struct proc *recovery_thread; }; struct scb_platform_data { }; -/********************************* Byte Order *********************************/ -#if __FreeBSD_version >= 500000 -#define ahd_htobe16(x) htobe16(x) -#define ahd_htobe32(x) htobe32(x) -#define ahd_htobe64(x) htobe64(x) -#define ahd_htole16(x) htole16(x) -#define ahd_htole32(x) htole32(x) -#define ahd_htole64(x) htole64(x) - -#define ahd_be16toh(x) be16toh(x) -#define ahd_be32toh(x) be32toh(x) -#define ahd_be64toh(x) be64toh(x) -#define ahd_le16toh(x) le16toh(x) -#define ahd_le32toh(x) le32toh(x) -#define ahd_le64toh(x) le64toh(x) -#else -#define ahd_htobe16(x) (x) -#define ahd_htobe32(x) (x) -#define ahd_htobe64(x) (x) -#define ahd_htole16(x) (x) -#define ahd_htole32(x) (x) -#define ahd_htole64(x) (x) - -#define ahd_be16toh(x) (x) -#define ahd_be32toh(x) (x) -#define ahd_be64toh(x) (x) -#define ahd_le16toh(x) (x) -#define ahd_le32toh(x) (x) -#define ahd_le64toh(x) (x) -#endif - -/************************** Timer DataStructures ******************************/ -typedef struct callout ahd_timer_t; - /***************************** Core Includes **********************************/ #if AHD_REG_PRETTY_PRINT #define AIC_DEBUG_REGISTERS 1 #else #define AIC_DEBUG_REGISTERS 0 #endif -#include +#define AIC_CORE_INCLUDE +#define AIC_LIB_PREFIX ahd +#define AIC_CONST_PREFIX AHD +#include -/***************************** Timer Facilities *******************************/ -timeout_t ahd_timeout; -#if __FreeBSD_version >= 500000 -#define ahd_timer_init(timer) callout_init(timer, /*mpsafe*/0) -#else -#define ahd_timer_init callout_init -#endif -#define ahd_timer_stop callout_stop - -static __inline void -ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg) -{ - callout_reset(timer, (usec * hz)/1000000, func, arg); -} - -static __inline void -ahd_scb_timer_reset(struct scb *scb, u_int usec) -{ - untimeout(ahd_timeout, (caddr_t)scb, scb->io_ctx->ccb_h.timeout_ch); - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahd_timeout, scb, (usec * hz)/1000000); -} - /*************************** Device Access ************************************/ #define ahd_inb(ahd, port) \ bus_space_read_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF) #define ahd_outb(ahd, port, value) \ bus_space_write_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF, value) #define ahd_inw_atomic(ahd, port) \ - ahd_le16toh(bus_space_read_2((ahd)->tags[(port) >> 8], \ + aic_le16toh(bus_space_read_2((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], (port) & 0xFF)) #define ahd_outw_atomic(ahd, port, value) \ bus_space_write_2((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ - (port & 0xFF), ahd_htole16(value)) + (port & 0xFF), aic_htole16(value)) #define ahd_outsb(ahd, port, valp, count) \ bus_space_write_multi_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ (port & 0xFF), valp, count) #define ahd_insb(ahd, port, valp, count) \ bus_space_read_multi_1((ahd)->tags[(port) >> 8], \ (ahd)->bshs[(port) >> 8], \ 
(port & 0xFF), valp, count) static __inline void ahd_flush_device_writes(struct ahd_softc *); static __inline void ahd_flush_device_writes(struct ahd_softc *ahd) { /* XXX Is this sufficient for all architectures??? */ ahd_inb(ahd, INTSTAT); } /**************************** Locking Primitives ******************************/ /* Lock protecting internal data structures */ static __inline void ahd_lockinit(struct ahd_softc *); static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags); static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags); /* Lock held during command compeletion to the upper layer */ static __inline void ahd_done_lockinit(struct ahd_softc *); static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags); static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags); /* Lock held during ahd_list manipulation and ahd softc frees */ static __inline void ahd_list_lockinit(void); static __inline void ahd_list_lock(unsigned long *flags); static __inline void ahd_list_unlock(unsigned long *flags); static __inline void ahd_lockinit(struct ahd_softc *ahd) { } static __inline void ahd_lock(struct ahd_softc *ahd, unsigned long *flags) { *flags = splcam(); } static __inline void ahd_unlock(struct ahd_softc *ahd, unsigned long *flags) { splx(*flags); } /* Lock held during command compeletion to the upper layer */ static __inline void ahd_done_lockinit(struct ahd_softc *ahd) { } static __inline void ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags) { } static __inline void ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags) { } /* Lock held during ahd_list manipulation and ahd softc frees */ static __inline void ahd_list_lockinit(void) { } static __inline void ahd_list_lock(unsigned long *flags) { } static __inline void ahd_list_unlock(unsigned long *flags) { } -/****************************** OS Primitives *********************************/ -#define ahd_delay DELAY +/********************************** PCI ***************************************/ +int ahd_pci_map_registers(struct ahd_softc *ahd); +int ahd_pci_map_int(struct ahd_softc *ahd); + /************************** Transaction Operations ****************************/ -static __inline void ahd_set_transaction_status(struct scb *, uint32_t); -static __inline void ahd_set_scsi_status(struct scb *, uint32_t); -static __inline uint32_t ahd_get_transaction_status(struct scb *); -static __inline uint32_t ahd_get_scsi_status(struct scb *); -static __inline void ahd_set_transaction_tag(struct scb *, int, u_int); -static __inline u_long ahd_get_transfer_length(struct scb *); -static __inline int ahd_get_transfer_dir(struct scb *); -static __inline void ahd_set_residual(struct scb *, u_long); -static __inline void ahd_set_sense_residual(struct scb *, u_long); -static __inline u_long ahd_get_residual(struct scb *); -static __inline int ahd_perform_autosense(struct scb *); -static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc*, struct scb*); -static __inline void ahd_freeze_simq(struct ahd_softc *); -static __inline void ahd_release_simq(struct ahd_softc *); -static __inline void ahd_freeze_ccb(union ccb *ccb); -static __inline void ahd_freeze_scb(struct scb *scb); -static __inline void ahd_platform_freeze_devq(struct ahd_softc *, struct scb *); -static __inline int ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, - char channel, int lun, u_int tag, - role_t role, uint32_t status); +static __inline void aic_freeze_simq(struct aic_softc*); +static 
__inline void aic_release_simq(struct aic_softc*); -static __inline -void ahd_set_transaction_status(struct scb *scb, uint32_t status) -{ - scb->io_ctx->ccb_h.status &= ~CAM_STATUS_MASK; - scb->io_ctx->ccb_h.status |= status; -} - -static __inline -void ahd_set_scsi_status(struct scb *scb, uint32_t status) -{ - scb->io_ctx->csio.scsi_status = status; -} - -static __inline -uint32_t ahd_get_transaction_status(struct scb *scb) -{ - return (scb->io_ctx->ccb_h.status & CAM_STATUS_MASK); -} - -static __inline -uint32_t ahd_get_scsi_status(struct scb *scb) -{ - return (scb->io_ctx->csio.scsi_status); -} - -static __inline -void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type) -{ - scb->io_ctx->csio.tag_action = type; - if (enabled) - scb->io_ctx->ccb_h.flags |= CAM_TAG_ACTION_VALID; - else - scb->io_ctx->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; -} - -static __inline -u_long ahd_get_transfer_length(struct scb *scb) -{ - return (scb->io_ctx->csio.dxfer_len); -} - -static __inline -int ahd_get_transfer_dir(struct scb *scb) -{ - return (scb->io_ctx->ccb_h.flags & CAM_DIR_MASK); -} - -static __inline -void ahd_set_residual(struct scb *scb, u_long resid) -{ - scb->io_ctx->csio.resid = resid; -} - -static __inline -void ahd_set_sense_residual(struct scb *scb, u_long resid) -{ - scb->io_ctx->csio.sense_resid = resid; -} - -static __inline -u_long ahd_get_residual(struct scb *scb) -{ - return (scb->io_ctx->csio.resid); -} - -static __inline -int ahd_perform_autosense(struct scb *scb) -{ - return (!(scb->io_ctx->ccb_h.flags & CAM_DIS_AUTOSENSE)); -} - -static __inline uint32_t -ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb) -{ - return (sizeof(struct scsi_sense_data)); -} - static __inline void -ahd_freeze_simq(struct ahd_softc *ahd) +aic_freeze_simq(struct aic_softc *aic) { - xpt_freeze_simq(ahd->platform_data->sim, /*count*/1); + xpt_freeze_simq(aic->platform_data->sim, /*count*/1); } static __inline void -ahd_release_simq(struct ahd_softc *ahd) +aic_release_simq(struct aic_softc *aic) { - xpt_release_simq(ahd->platform_data->sim, /*run queue*/TRUE); + xpt_release_simq(aic->platform_data->sim, /*run queue*/TRUE); } - -static __inline void -ahd_freeze_ccb(union ccb *ccb) -{ - if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { - ccb->ccb_h.status |= CAM_DEV_QFRZN; - xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); - } -} - -static __inline void -ahd_freeze_scb(struct scb *scb) -{ - ahd_freeze_ccb(scb->io_ctx); -} - -static __inline void -ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb) -{ - /* Nothing to do here for FreeBSD */ -} - -static __inline int -ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, - char channel, int lun, u_int tag, - role_t role, uint32_t status) -{ - /* Nothing to do here for FreeBSD */ - return (0); -} - -static __inline void -ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb) -{ - /* What do we do to generically handle driver resource shortages??? 
*/ - if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0 - && scb->io_ctx != NULL - && (scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { - scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; - ahd->flags &= ~AHD_RESOURCE_SHORTAGE; - } - scb->io_ctx = NULL; -} - -/********************************** PCI ***************************************/ -#ifdef AHD_PCI_CONFIG -static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci, - int reg, int width); -static __inline void ahd_pci_write_config(ahd_dev_softc_t pci, - int reg, uint32_t value, - int width); -static __inline int ahd_get_pci_function(ahd_dev_softc_t); -static __inline int ahd_get_pci_slot(ahd_dev_softc_t); -static __inline int ahd_get_pci_bus(ahd_dev_softc_t); - -int ahd_pci_map_registers(struct ahd_softc *ahd); -int ahd_pci_map_int(struct ahd_softc *ahd); - -static __inline uint32_t -ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width) -{ - return (pci_read_config(pci, reg, width)); -} - -static __inline void -ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width) -{ - pci_write_config(pci, reg, value, width); -} - -static __inline int -ahd_get_pci_function(ahd_dev_softc_t pci) -{ - return (pci_get_function(pci)); -} - -static __inline int -ahd_get_pci_slot(ahd_dev_softc_t pci) -{ - return (pci_get_slot(pci)); -} - -static __inline int -ahd_get_pci_bus(ahd_dev_softc_t pci) -{ - return (pci_get_bus(pci)); -} - -typedef enum -{ - AHD_POWER_STATE_D0, - AHD_POWER_STATE_D1, - AHD_POWER_STATE_D2, - AHD_POWER_STATE_D3 -} ahd_power_state; - -void ahd_power_state_change(struct ahd_softc *ahd, - ahd_power_state new_state); -#endif -/******************************** VL/EISA *************************************/ -int aic7770_map_registers(struct ahd_softc *ahd); -int aic7770_map_int(struct ahd_softc *ahd, int irq); - /********************************* Debug **************************************/ static __inline void ahd_print_path(struct ahd_softc *, struct scb *); static __inline void ahd_platform_dump_card_state(struct ahd_softc *ahd); static __inline void ahd_print_path(struct ahd_softc *ahd, struct scb *scb) { xpt_print_path(scb->io_ctx->ccb_h.path); } static __inline void ahd_platform_dump_card_state(struct ahd_softc *ahd) { /* Nothing to do here for FreeBSD */ } /**************************** Transfer Settings *******************************/ void ahd_notify_xfer_settings_change(struct ahd_softc *, struct ahd_devinfo *); void ahd_platform_set_tags(struct ahd_softc *, struct ahd_devinfo *, int /*enable*/); /************************* Initialization/Teardown ****************************/ int ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg); void ahd_platform_free(struct ahd_softc *ahd); int ahd_map_int(struct ahd_softc *ahd); int ahd_attach(struct ahd_softc *); int ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd); int ahd_detach(device_t); #define ahd_platform_init(arg) /****************************** Interrupts ************************************/ void ahd_platform_intr(void *); static __inline void ahd_platform_flushwork(struct ahd_softc *ahd); static __inline void ahd_platform_flushwork(struct ahd_softc *ahd) { } /************************ Misc Function Declarations **************************/ void ahd_done(struct ahd_softc *ahd, struct scb *scb); void ahd_send_async(struct ahd_softc *, char /*channel*/, u_int /*target*/, u_int /*lun*/, ac_code, void *arg); #endif /* _AIC79XX_FREEBSD_H_ */ Index: stable/4/sys/dev/aic7xxx/aic79xx_pci.c 
=================================================================== --- stable/4/sys/dev/aic7xxx/aic79xx_pci.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic79xx_pci.c (revision 125849) @@ -1,1008 +1,1021 @@ /* * Product specific probe and attach routines for: * aic7901 and aic7902 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* - * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#75 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#86 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #else +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); #include <dev/aic7xxx/aic79xx_osm.h> #include <dev/aic7xxx/aic79xx_inline.h> #endif static __inline uint64_t ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull -#define ID_ALL_IROC_MASK 0xFFFFFF7FFFFFFFFFull +#define ID_ALL_IROC_MASK 0xFF7FFFFFFFFFFFFFull #define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull #define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull -#define ID_9005_GENERIC_IROC_MASK 0xFFF0FF7F00000000ull +#define ID_9005_GENERIC_IROC_MASK 0xFF70FFFF00000000ull #define ID_AIC7901 0x800F9005FFFF9005ull #define ID_AHA_29320A 0x8000900500609005ull #define ID_AHA_29320ALP 0x8017900500449005ull #define ID_AIC7901A 0x801E9005FFFF9005ull -#define ID_AHA_29320 0x8012900500429005ull -#define ID_AHA_29320B 0x8013900500439005ull #define ID_AHA_29320LP 0x8014900500449005ull #define ID_AIC7902 0x801F9005FFFF9005ull #define ID_AIC7902_B 0x801D9005FFFF9005ull #define ID_AHA_39320 0x8010900500409005ull +#define ID_AHA_29320 0x8012900500429005ull +#define ID_AHA_29320B 0x8013900500439005ull #define ID_AHA_39320_B 0x8015900500409005ull #define ID_AHA_39320A 0x8016900500409005ull #define ID_AHA_39320D 0x8011900500419005ull #define ID_AHA_39320D_B 0x801C900500419005ull #define ID_AHA_39320D_HP 0x8011900500AC0E11ull #define ID_AHA_39320D_B_HP 0x801C900500AC0E11ull #define ID_AIC7902_PCI_REV_A4 0x3 #define ID_AIC7902_PCI_REV_B0 0x10 #define SUBID_HP 0x0E11 +#define DEVID_9005_HOSTRAID(id) ((id) & 0x80) + #define DEVID_9005_TYPE(id) ((id) & 0xF) #define DEVID_9005_TYPE_HBA 0x0 /* Standard Card */ #define DEVID_9005_TYPE_HBA_2EXT 0x1 /* 2 External Ports */ -#define DEVID_9005_TYPE_IROC 0x8 /* Raid(0,1,10) Card */ #define DEVID_9005_TYPE_MB 0xF /* On Motherboard */ #define DEVID_9005_MFUNC(id) ((id) & 0x10) #define DEVID_9005_PACKETIZED(id) ((id) & 0x8000) #define SUBID_9005_TYPE(id) ((id) & 0xF) #define SUBID_9005_TYPE_HBA 0x0 /* Standard Card */ #define SUBID_9005_TYPE_MB 0xF /* On Motherboard */ #define SUBID_9005_AUTOTERM(id) (((id) & 0x10) == 0) #define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20) #define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6) #define SUBID_9005_SEEPTYPE_NONE 0x0 #define SUBID_9005_SEEPTYPE_4K 0x1 static ahd_device_setup_t ahd_aic7901_setup; static ahd_device_setup_t ahd_aic7901A_setup; static ahd_device_setup_t ahd_aic7902_setup; +static ahd_device_setup_t ahd_aic790X_setup; struct ahd_pci_identity ahd_pci_ident_table [] = { /* aic7901 based controllers */ { ID_AHA_29320A, ID_ALL_MASK, "Adaptec 29320A Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AHA_29320ALP, ID_ALL_MASK, "Adaptec 29320ALP Ultra320 SCSI adapter", ahd_aic7901_setup }, /* aic7901A based controllers */ { + ID_AHA_29320LP, + ID_ALL_MASK, + "Adaptec 29320LP Ultra320 SCSI adapter", + ahd_aic7901A_setup + }, + /* aic7902 based controllers */ + { ID_AHA_29320, ID_ALL_MASK, "Adaptec 29320 Ultra320 SCSI adapter", - ahd_aic7901A_setup + ahd_aic7902_setup }, { ID_AHA_29320B, ID_ALL_MASK, "Adaptec 29320B Ultra320 SCSI adapter", - ahd_aic7901A_setup + ahd_aic7902_setup }, { - ID_AHA_29320LP, - ID_ALL_MASK, - "Adaptec 29320LP Ultra320 SCSI adapter", - ahd_aic7901A_setup - }, - /* aic7902 based controllers */ - { ID_AHA_39320, ID_ALL_MASK, "Adaptec 39320 Ultra320
SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320_B, ID_ALL_MASK, "Adaptec 39320 Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320A, ID_ALL_MASK, "Adaptec 39320A Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B, ID_ALL_MASK, "Adaptec 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, { ID_AHA_39320D_B_HP, ID_ALL_MASK, "Adaptec (HP OEM) 39320D Ultra320 SCSI adapter", ahd_aic7902_setup }, - { - ID_AHA_29320, - ID_ALL_MASK, - "Adaptec 29320 Ultra320 SCSI adapter", - ahd_aic7902_setup - }, - { - ID_AHA_29320B, - ID_ALL_MASK, - "Adaptec 29320B Ultra320 SCSI adapter", - ahd_aic7902_setup - }, /* Generic chip probes for devices we don't know 'exactly' */ { - ID_AIC7901 & ID_DEV_VENDOR_MASK, + ID_AIC7901 & ID_9005_GENERIC_MASK, ID_DEV_VENDOR_MASK, "Adaptec AIC7901 Ultra320 SCSI adapter", ahd_aic7901_setup }, { ID_AIC7901A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec AIC7901A Ultra320 SCSI adapter", ahd_aic7901A_setup }, { ID_AIC7902 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec AIC7902 Ultra320 SCSI adapter", ahd_aic7902_setup } }; const u_int ahd_num_pci_devs = NUM_ELEMENTS(ahd_pci_ident_table); #define DEVCONFIG 0x40 #define PCIXINITPAT 0x0000E000ul #define PCIXINIT_PCI33_66 0x0000E000ul #define PCIXINIT_PCIX50_66 0x0000C000ul #define PCIXINIT_PCIX66_100 0x0000A000ul #define PCIXINIT_PCIX100_133 0x00008000ul #define PCI_BUS_MODES_INDEX(devconfig) \ (((devconfig) & PCIXINITPAT) >> 13) static const char *pci_bus_modes[] = { "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI bus mode unknown", "PCI-X 101-133Mhz", "PCI-X 67-100Mhz", "PCI-X 50-66Mhz", "PCI 33 or 66Mhz" }; #define TESTMODE 0x00000800ul #define IRDY_RST 0x00000200ul #define FRAME_RST 0x00000100ul #define PCI64BIT 0x00000080ul #define MRDCEN 0x00000040ul #define ENDIANSEL 0x00000020ul #define MIXQWENDIANEN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define QWENDIANSEL 0x00000001ul #define DEVCONFIG1 0x44 #define PREQDIS 0x01 #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x000000fful #define LATTIME 0x0000ff00ul static int ahd_check_extport(struct ahd_softc *ahd); static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control); static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat); struct ahd_pci_identity * -ahd_find_pci_device(ahd_dev_softc_t pci) +ahd_find_pci_device(aic_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; struct ahd_pci_identity *entry; u_int i; - vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); - device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); - subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); - subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); + vendor = aic_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); + device = aic_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); + subvendor = aic_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); + subdevice = aic_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); full_id = ahd_compose_id(device, vendor, subdevice, subvendor); + /* + * If we are configured to attach to HostRAID + * controllers, mask out the IROC/HostRAID bit + * in the + */ + if (ahd_attach_to_HostRAID_controllers) + full_id &= ID_ALL_IROC_MASK; + for (i = 0; i < ahd_num_pci_devs; 
i++) { entry = &ahd_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. */ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) { struct scb_data *shared_scb_data; u_long l; u_int command; uint32_t devconfig; + uint16_t device; uint16_t subvendor; int error; shared_scb_data = NULL; ahd->description = entry->name; /* + * Record if this is a HostRAID board. + */ + device = aic_pci_read_config(ahd->dev_softc, + PCIR_DEVICE, /*bytes*/2); + if (DEVID_9005_HOSTRAID(device)) + ahd->flags |= AHD_HOSTRAID_BOARD; + + /* * Record if this is an HP board. */ - subvendor = ahd_pci_read_config(ahd->dev_softc, + subvendor = aic_pci_read_config(ahd->dev_softc, PCIR_SUBVEND_0, /*bytes*/2); if (subvendor == SUBID_HP) ahd->flags |= AHD_HP_BOARD; error = entry->setup(ahd); if (error != 0) return (error); - devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) { ahd->chip |= AHD_PCI; /* Disable PCIX workarounds when running in PCI mode. */ ahd->bugs &= ~AHD_PCIX_BUG_MASK; } else { ahd->chip |= AHD_PCIX; } ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)]; - ahd_power_state_change(ahd, AHD_POWER_STATE_D0); + aic_power_state_change(ahd, AIC_POWER_STATE_D0); error = ahd_pci_map_registers(ahd); if (error != 0) return (error); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). */ if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) { uint32_t devconfig; if (bootverbose) printf("%s: Enabling 39Bit Addressing\n", ahd_name(ahd)); - devconfig = ahd_pci_read_config(ahd->dev_softc, + devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig |= DACEN; - ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, + aic_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Ensure busmastering is enabled */ - command = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + command = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2); + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, /*bytes*/2); error = ahd_softc_init(ahd); if (error != 0) return (error); ahd->bus_intr = ahd_pci_intr; error = ahd_reset(ahd, /*reinit*/FALSE); if (error != 0) return (ENXIO); ahd->pci_cachesize = - ahd_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, + aic_pci_read_config(ahd->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahd->pci_cachesize *= 4; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* See if we have a SEEPROM and perform auto-term */ error = ahd_check_extport(ahd); if (error != 0) return (error); /* Core initialization */ error = ahd_init(ahd); if (error != 0) return (error); /* * Allow interrupts now that we are completely setup. */ error = ahd_pci_map_int(ahd); if (error != 0) return (error); ahd_list_lock(&l); /* * Link this softc in with all other ahd instances. */ ahd_softc_insert(ahd); ahd_list_unlock(&l); return (0); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. 
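The masked-match loop above compares a composed 64-bit probe ID against each table entry. A standalone arithmetic check of that layout, using the 29320A values from the table above (device 0x8000, vendor 0x9005, subdevice 0x0060, subvendor 0x9005); this mirrors ahd_compose_id() but is not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define ID_ALL_MASK	0xFFFFFFFFFFFFFFFFull
#define ID_AHA_29320A	0x8000900500609005ull

static uint64_t
compose_id(unsigned device, unsigned vendor, unsigned subdevice,
    unsigned subvendor)
{
	/* subvendor in bits 0-15, subdevice 16-31, vendor 32-47, device 48-63 */
	return ((uint64_t)subvendor
	      | ((uint64_t)subdevice << 16)
	      | ((uint64_t)vendor << 32)
	      | ((uint64_t)device << 48));
}

int
main(void)
{
	uint64_t full_id;

	full_id = compose_id(0x8000, 0x9005, 0x0060, 0x9005);
	/* An entry matches when its full_id equals the masked probe ID. */
	printf("29320A match: %d\n",
	    ID_AHA_29320A == (full_id & ID_ALL_MASK));
	return (0);
}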
*/ int ahd_pci_test_register_access(struct ahd_softc *ahd) { uint32_t cmd; u_int targpcistat; u_int pci_status1; int error; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ - cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, + cmd = aic_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2); + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahd_inb(ahd, HCNTRL); if (hcntrl == 0xFF) goto fail; /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support * either, so look for data corruption and/or flaged * PCI errors. First pause without causing another * chip reset. */ hcntrl &= ~CHIPRST; ahd_outb(ahd, HCNTRL, hcntrl|PAUSE); while (ahd_is_paused(ahd) == 0) ; /* Clear any PCI errors that occurred before our driver attached. */ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); ahd_outb(ahd, TARGPCISTAT, targpcistat); - pci_status1 = ahd_pci_read_config(ahd->dev_softc, + pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); - ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_outb(ahd, SEQCTL0, PERRORDIS); ahd_outl(ahd, SRAM_BASE, 0x5aa555aa); if (ahd_inl(ahd, SRAM_BASE) != 0x5aa555aa) goto fail; if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { u_int targpcistat; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); if ((targpcistat & STA) != 0) goto fail; } error = 0; fail: if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); targpcistat = ahd_inb(ahd, TARGPCISTAT); /* Silently clear any latched errors. */ ahd_outb(ahd, TARGPCISTAT, targpcistat); - pci_status1 = ahd_pci_read_config(ahd->dev_softc, + pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); - ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_outb(ahd, CLRINT, CLRPCIINT); } ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS); - ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); + aic_pci_write_config(ahd->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection contrls. */ static int ahd_check_extport(struct ahd_softc *ahd) { struct vpd_config vpd; struct seeprom_config *sc; u_int adapter_control; int have_seeprom; int error; sc = ahd->seep_config; have_seeprom = ahd_acquire_seeprom(ahd); if (have_seeprom) { u_int start_addr; /* * Fetch VPD for this function and parse it. 
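The SEEPROM and VPD reads that follow address the part in 16-bit words: one seeprom_config image per channel, with the VPD block placed after two config images. A sketch of that offset math, assuming purely illustrative 64-byte config and 32-byte VPD sizes (the real sizes come from the struct definitions):

#include <stdio.h>

#define SC_BYTES	64	/* assumed sizeof(struct seeprom_config) */
#define VPD_BYTES	32	/* assumed sizeof(struct vpd_config) */

int
main(void)
{
	int channel;

	for (channel = 'A'; channel <= 'B'; channel++) {
		/* Config block: one seeprom_config image per channel. */
		unsigned cfg_words = (SC_BYTES / 2) * (channel - 'A');
		/* VPD lives after two config images, again per channel. */
		unsigned vpd_words =
		    (2 * SC_BYTES + VPD_BYTES * (channel - 'A')) / 2;

		printf("channel %c: config at word %u, VPD at word %u\n",
		    channel, cfg_words, vpd_words);
	}
	return (0);
}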
*/ if (bootverbose) printf("%s: Reading VPD from SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = ((2 * sizeof(*sc)) + (sizeof(vpd) * (ahd->channel - 'A'))) / 2; error = ahd_read_seeprom(ahd, (uint16_t *)&vpd, start_addr, sizeof(vpd)/2, /*bytestream*/TRUE); if (error == 0) error = ahd_parse_vpddata(ahd, &vpd); if (bootverbose) printf("%s: VPD parsing %s\n", ahd_name(ahd), error == 0 ? "successful" : "failed"); if (bootverbose) printf("%s: Reading SEEPROM...", ahd_name(ahd)); /* Address is always in units of 16bit words */ start_addr = (sizeof(*sc) / 2) * (ahd->channel - 'A'); error = ahd_read_seeprom(ahd, (uint16_t *)sc, start_addr, sizeof(*sc)/2, /*bytestream*/FALSE); if (error != 0) { printf("Unable to read SEEPROM\n"); have_seeprom = 0; } else { have_seeprom = ahd_verify_cksum(sc); if (bootverbose) { if (have_seeprom == 0) printf ("checksum error\n"); else printf ("done.\n"); } } ahd_release_seeprom(ahd); } if (!have_seeprom) { u_int nvram_scb; /* * Pull scratch ram settings and treat them as * if they are the contents of an seeprom if * the 'ADPT', 'BIOS', or 'ASPI' signature is found * in SCB 0xFF. We manually compose the data as 16bit * values to avoid endian issues. */ ahd_set_scbptr(ahd, 0xFF); nvram_scb = ahd_inb_scbram(ahd, SCB_BASE + NVRAM_SCB_OFFSET); if (nvram_scb != 0xFF && ((ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'D' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'T') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'B' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'I' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'O' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'S') || (ahd_inb_scbram(ahd, SCB_BASE + 0) == 'A' && ahd_inb_scbram(ahd, SCB_BASE + 1) == 'S' && ahd_inb_scbram(ahd, SCB_BASE + 2) == 'P' && ahd_inb_scbram(ahd, SCB_BASE + 3) == 'I'))) { uint16_t *sc_data; int i; ahd_set_scbptr(ahd, nvram_scb); sc_data = (uint16_t *)sc; for (i = 0; i < 64; i += 2) *sc_data++ = ahd_inw_scbram(ahd, SCB_BASE+i); have_seeprom = ahd_verify_cksum(sc); if (have_seeprom) ahd->flags |= AHD_SCB_CONFIG_USED; } } #if AHD_DEBUG if (have_seeprom != 0 && (ahd_debug & AHD_DUMP_SEEPROM) != 0) { uint16_t *sc_data; int i; printf("%s: Seeprom Contents:", ahd_name(ahd)); sc_data = (uint16_t *)sc; for (i = 0; i < (sizeof(*sc)); i += 2) printf("\n\t0x%.4x", sc_data[i]); printf("\n"); } #endif if (!have_seeprom) { if (bootverbose) printf("%s: No SEEPROM available.\n", ahd_name(ahd)); ahd->flags |= AHD_USEDEFAULTS; error = ahd_default_config(ahd); adapter_control = CFAUTOTERM|CFSEAUTOTERM; free(ahd->seep_config, M_DEVBUF); ahd->seep_config = NULL; } else { error = ahd_parse_cfgdata(ahd, sc); adapter_control = sc->adapter_control; } if (error != 0) return (error); ahd_configure_termination(ahd, adapter_control); return (0); } static void ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control) { int error; u_int sxfrctl1; uint8_t termctl; uint32_t devconfig; - devconfig = ahd_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig = aic_pci_read_config(ahd->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((ahd->flags & AHD_STPWLEVEL_A) != 0) devconfig |= STPWLEVEL; if (bootverbose) printf("%s: STPWLEVEL is %s\n", ahd_name(ahd), (devconfig & STPWLEVEL) ? "on" : "off"); - ahd_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + aic_pci_write_config(ahd->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Make sure current sensing is off. 
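The scratch-RAM fallback above accepts the saved configuration only when SCB 0xFF carries one of the 'ADPT', 'BIOS', or 'ASPI' markers. A compact user-space equivalent of that four-byte signature test (sig_at is a hypothetical buffer standing in for the ahc_inb_scbram() reads):

#include <stdio.h>
#include <string.h>

static const char *known_sigs[] = { "ADPT", "BIOS", "ASPI" };

static int
sig_matches(const unsigned char *sig_at)
{
	size_t i;

	for (i = 0; i < sizeof(known_sigs) / sizeof(known_sigs[0]); i++)
		if (memcmp(sig_at, known_sigs[i], 4) == 0)
			return (1);
	return (0);
}

int
main(void)
{
	unsigned char scb_base[4] = { 'A', 'D', 'P', 'T' };

	printf("signature found: %d\n", sig_matches(scb_base));
	return (0);
}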
*/ if ((ahd->flags & AHD_CURRENT_SENSING) != 0) { (void)ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); } /* * Read to sense. Write to set. */ error = ahd_read_flexport(ahd, FLXADDR_TERMCTL, &termctl); if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual Primary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH); if ((adapter_control & CFSTERM) != 0) termctl |= FLX_TERMCTL_ENPRILOW; if ((adapter_control & CFWSTERM) != 0) termctl |= FLX_TERMCTL_ENPRIHIGH; } else if (error != 0) { printf("%s: Primary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl = FLX_TERMCTL_ENPRILOW|FLX_TERMCTL_ENPRIHIGH; } if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual Secondary Termination\n", ahd_name(ahd)); termctl &= ~(FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH); if ((adapter_control & CFSELOWTERM) != 0) termctl |= FLX_TERMCTL_ENSECLOW; if ((adapter_control & CFSEHIGHTERM) != 0) termctl |= FLX_TERMCTL_ENSECHIGH; } else if (error != 0) { printf("%s: Secondary Auto-Term Sensing failed! " "Using Defaults.\n", ahd_name(ahd)); termctl |= FLX_TERMCTL_ENSECLOW|FLX_TERMCTL_ENSECHIGH; } /* * Now set the termination based on what we found. */ sxfrctl1 = ahd_inb(ahd, SXFRCTL1) & ~STPWEN; + ahd->flags &= ~AHD_TERM_ENB_A; if ((termctl & FLX_TERMCTL_ENPRILOW) != 0) { ahd->flags |= AHD_TERM_ENB_A; sxfrctl1 |= STPWEN; } /* Must set the latch once in order to be effective. */ ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN); ahd_outb(ahd, SXFRCTL1, sxfrctl1); error = ahd_write_flexport(ahd, FLXADDR_TERMCTL, termctl); if (error != 0) { printf("%s: Unable to set termination settings!\n", ahd_name(ahd)); } else if (bootverbose) { printf("%s: Primary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRIHIGH) ? "En" : "Dis"); printf("%s: Primary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENPRILOW) ? "En" : "Dis"); printf("%s: Secondary High byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECHIGH) ? "En" : "Dis"); printf("%s: Secondary Low byte termination %sabled\n", ahd_name(ahd), (termctl & FLX_TERMCTL_ENSECLOW) ? 
"En" : "Dis"); } return; } #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static const char *split_status_source[] = { "DFF0", "DFF1", "OVLY", "CMC", }; static const char *pci_status_source[] = { "DFF0", "DFF1", "SG", "CMC", "OVLY", "NONE", "MSI", "TARG" }; static const char *split_status_strings[] = { "%s: Received split response in %s.\n", "%s: Received split completion error message in %s\n", "%s: Receive overrun in %s\n", "%s: Count not complete in %s\n", "%s: Split completion data bucket in %s\n", "%s: Split completion address error in %s\n", "%s: Split completion byte count error in %s\n", "%s: Signaled Target-abort to early terminate a split in %s\n" }; static const char *pci_status_strings[] = { "%s: Data Parity Error has been reported via PERR# in %s\n", "%s: Target initial wait state error in %s\n", "%s: Split completion read data parity error in %s\n", "%s: Split completion address attribute parity error in %s\n", "%s: Received a Target Abort in %s\n", "%s: Received a Master Abort in %s\n", "%s: Signal System Error Detected in %s\n", "%s: Address or Write Phase Parity Error Detected in %s.\n" }; void ahd_pci_intr(struct ahd_softc *ahd) { uint8_t pci_status[8]; ahd_mode_state saved_modes; u_int pci_status1; u_int intstat; u_int i; u_int reg; intstat = ahd_inb(ahd, INTSTAT); if ((intstat & SPLTINT) != 0) ahd_pci_split_intr(ahd, intstat); if ((intstat & PCIINT) == 0) return; printf("%s: PCI error Interrupt\n", ahd_name(ahd)); saved_modes = ahd_save_modes(ahd); ahd_dump_card_state(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); for (i = 0, reg = DF0PCISTAT; i < 8; i++, reg++) { if (i == 5) continue; pci_status[i] = ahd_inb(ahd, reg); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, reg, pci_status[i]); } for (i = 0; i < 8; i++) { u_int bit; if (i == 5) continue; for (bit = 0; bit < 8; bit++) { if ((pci_status[i] & (0x1 << bit)) != 0) { static const char *s; s = pci_status_strings[bit]; if (i == 7/*TARG*/ && bit == 3) s = "%s: Signaled Target Abort\n"; printf(s, ahd_name(ahd), pci_status_source[i]); } } } - pci_status1 = ahd_pci_read_config(ahd->dev_softc, + pci_status1 = aic_pci_read_config(ahd->dev_softc, PCIR_STATUS + 1, /*bytes*/1); - ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1, pci_status1, /*bytes*/1); ahd_restore_modes(ahd, saved_modes); ahd_outb(ahd, CLRINT, CLRPCIINT); ahd_unpause(ahd); } static void ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) { uint8_t split_status[4]; uint8_t split_status1[4]; uint8_t sg_split_status[2]; uint8_t sg_split_status1[2]; ahd_mode_state saved_modes; u_int i; uint16_t pcix_status; /* * Check for splits in all modes. Modes 0 and 1 * additionally have SG engine splits to look at. */ - pcix_status = ahd_pci_read_config(ahd->dev_softc, PCIXR_STATUS, + pcix_status = aic_pci_read_config(ahd->dev_softc, PCIXR_STATUS, /*bytes*/2); printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n", ahd_name(ahd), pcix_status); saved_modes = ahd_save_modes(ahd); for (i = 0; i < 4; i++) { ahd_set_modes(ahd, i, i); split_status[i] = ahd_inb(ahd, DCHSPLTSTAT0); split_status1[i] = ahd_inb(ahd, DCHSPLTSTAT1); /* Clear latched errors. So our interrupt deasserts. */ ahd_outb(ahd, DCHSPLTSTAT0, split_status[i]); ahd_outb(ahd, DCHSPLTSTAT1, split_status1[i]); if (i > 1) continue; sg_split_status[i] = ahd_inb(ahd, SGSPLTSTAT0); sg_split_status1[i] = ahd_inb(ahd, SGSPLTSTAT1); /* Clear latched errors. 
So our interrupt deasserts. */ ahd_outb(ahd, SGSPLTSTAT0, sg_split_status[i]); ahd_outb(ahd, SGSPLTSTAT1, sg_split_status1[i]); } for (i = 0; i < 4; i++) { u_int bit; for (bit = 0; bit < 8; bit++) { if ((split_status[i] & (0x1 << bit)) != 0) { static const char *s; s = split_status_strings[bit]; printf(s, ahd_name(ahd), split_status_source[i]); } if (i > 1) continue; if ((sg_split_status[i] & (0x1 << bit)) != 0) { static const char *s; s = split_status_strings[bit]; printf(s, ahd_name(ahd), "SG"); } } } /* * Clear PCI-X status bits. */ - ahd_pci_write_config(ahd->dev_softc, PCIXR_STATUS, + aic_pci_write_config(ahd->dev_softc, PCIXR_STATUS, pcix_status, /*bytes*/2); ahd_outb(ahd, CLRINT, CLRSPLTINT); ahd_restore_modes(ahd, saved_modes); } static int ahd_aic7901_setup(struct ahd_softc *ahd) { - int error; - error = ahd_aic7902_setup(ahd); - if (error != 0) - return (error); ahd->chip = AHD_AIC7901; - return (0); + ahd->features = AHD_AIC7901_FE; + return (ahd_aic790X_setup(ahd)); } static int ahd_aic7901A_setup(struct ahd_softc *ahd) { - int error; - error = ahd_aic7902_setup(ahd); - if (error != 0) - return (error); ahd->chip = AHD_AIC7901A; - return (0); + ahd->features = AHD_AIC7901A_FE; + return (ahd_aic790X_setup(ahd)); } static int ahd_aic7902_setup(struct ahd_softc *ahd) { - ahd_dev_softc_t pci; + ahd->chip = AHD_AIC7902; + ahd->features = AHD_AIC7902_FE; + return (ahd_aic790X_setup(ahd)); +} + +static int +ahd_aic790X_setup(struct ahd_softc *ahd) +{ + aic_dev_softc_t pci; u_int rev; pci = ahd->dev_softc; - rev = ahd_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev < ID_AIC7902_PCI_REV_A4) { printf("%s: Unable to attach to unsupported chip revision %d\n", ahd_name(ahd), rev); - ahd_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); + aic_pci_write_config(pci, PCIR_COMMAND, 0, /*bytes*/2); return (ENXIO); } - ahd->channel = ahd_get_pci_function(pci) + 'A'; - ahd->chip = AHD_AIC7902; - ahd->features = AHD_AIC7902_FE; + ahd->channel = aic_get_pci_function(pci) + 'A'; if (rev < ID_AIC7902_PCI_REV_B0) { /* * Enable A series workarounds. */ ahd->bugs |= AHD_SENT_SCB_UPDATE_BUG|AHD_ABORT_LQI_BUG | AHD_PKT_BITBUCKET_BUG|AHD_LONG_SETIMO_BUG | AHD_NLQICRC_DELAYED_BUG|AHD_SCSIRST_BUG | AHD_LQO_ATNO_BUG|AHD_AUTOFLUSH_BUG | AHD_CLRLQO_AUTOCLR_BUG|AHD_PCIX_MMAPIO_BUG | AHD_PCIX_CHIPRST_BUG|AHD_PCIX_SCBRAM_RD_BUG | AHD_PKTIZED_STATUS_BUG|AHD_PKT_LUN_BUG | AHD_MDFF_WSCBPTR_BUG|AHD_REG_SLOW_SETTLE_BUG | AHD_SET_MODE_BUG|AHD_BUSFREEREV_BUG | AHD_NONPACKFIFO_BUG|AHD_PACED_NEGTABLE_BUG | AHD_FAINT_LED_BUG; /* * IO Cell paramter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); if ((ahd->flags & AHD_HP_BOARD) == 0) AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA); } else { u_int devconfig1; ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS - | AHD_NEW_DFCNTRL_OPTS; - ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_ABORT_LQI_BUG - | AHD_INTCOLLISION_BUG|AHD_EARLY_REQ_BUG; + | AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY; + ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; /* + * Some issues have been resolved in the 7901B. + */ + if ((ahd->features & AHD_MULTI_FUNC) != 0) + ahd->bugs |= AHD_INTCOLLISION_BUG|AHD_ABORT_LQI_BUG; + + /* * IO Cell paramter setup. */ AHD_SET_PRECOMP(ahd, AHD_PRECOMP_CUTBACK_29); AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVB); AHD_SET_AMPLITUDE(ahd, AHD_AMPLITUDE_DEF); /* * Set the PREQDIS bit for H2B which disables some workaround * that doesn't work on regular PCI busses. 
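The PREQDIS update just below uses the standard config-space idiom: read, modify, write, then read the register back so the posted write is known to have reached the device. A user-space model of that idiom over a fake config space (not the real pci_read_config()/pci_write_config() API):

#include <stdint.h>
#include <stdio.h>

#define DEVCONFIG1	0x44
#define PREQDIS		0x01

static uint8_t cfg_space[256];	/* stand-in for PCI config space */

static uint8_t
cfg_read(int reg)
{
	return (cfg_space[reg]);
}

static void
cfg_write(int reg, uint8_t val)
{
	cfg_space[reg] = val;
}

int
main(void)
{
	uint8_t devconfig1;

	devconfig1 = cfg_read(DEVCONFIG1);		/* read */
	cfg_write(DEVCONFIG1, devconfig1 | PREQDIS);	/* modify/write */
	devconfig1 = cfg_read(DEVCONFIG1);		/* read back to flush */
	printf("PREQDIS set: %d\n", (devconfig1 & PREQDIS) != 0);
	return (0);
}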
* XXX - Find out exactly what this does from the hardware * folks! */ - devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); - ahd_pci_write_config(pci, DEVCONFIG1, + devconfig1 = aic_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + aic_pci_write_config(pci, DEVCONFIG1, devconfig1|PREQDIS, /*bytes*/1); - devconfig1 = ahd_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); + devconfig1 = aic_pci_read_config(pci, DEVCONFIG1, /*bytes*/1); } return (0); } Index: stable/4/sys/dev/aic7xxx/aic7xxx.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx.c (revision 125849) @@ -1,7452 +1,7774 @@ /* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2002 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#148 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aicasm/aicasm_insformat.h" #else +#include +__FBSDID("$FreeBSD$"); #include #include #include #endif /****************************** Softc Data ************************************/ struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq); /***************************** Lookup Tables **********************************/ char *ahc_chip_names[] = { "NONE", "aic7770", "aic7850", "aic7855", "aic7859", "aic7860", "aic7870", "aic7880", "aic7895", "aic7895C", "aic7890/91", "aic7896/97", "aic7892", "aic7899" }; static const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names); /* * Hardware error codes. 
*/ struct ahc_hard_error_entry { uint8_t errno; char *errmesg; }; static struct ahc_hard_error_entry ahc_hard_errors[] = { { ILLHADDR, "Illegal Host Access" }, { ILLSADDR, "Illegal Sequencer Address referenced" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { PCIERRSTAT, "PCI Error detected" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = NUM_ELEMENTS(ahc_hard_errors); static struct ahc_phase_table_entry ahc_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to iterate over real phases, so * exclude the last element from the count. */ static const u_int num_phases = NUM_ELEMENTS(ahc_phase_table) - 1; /* * Valid SCSIRATE values. (p. 3-17) * Provides a mapping of transfer periods in ns to the proper value to * stick in the scsixfer reg. */ static struct ahc_syncrate ahc_syncrates[] = { /* ultra2 fast/ultra period rate */ { 0x42, 0x000, 9, "80.0" }, { 0x03, 0x000, 10, "40.0" }, { 0x04, 0x000, 11, "33.0" }, { 0x05, 0x100, 12, "20.0" }, { 0x06, 0x110, 15, "16.0" }, { 0x07, 0x120, 18, "13.4" }, { 0x08, 0x000, 25, "10.0" }, { 0x19, 0x010, 31, "8.0" }, { 0x1a, 0x020, 37, "6.67" }, { 0x1b, 0x030, 43, "5.7" }, { 0x1c, 0x040, 50, "5.0" }, { 0x00, 0x050, 56, "4.4" }, { 0x00, 0x060, 62, "4.0" }, { 0x00, 0x070, 68, "3.6" }, { 0x00, 0x000, 0, NULL } }; /* Our Sequencer Program */ #include "aic7xxx_seq.h" /**************************** Function Declarations ***************************/ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static struct ahc_tmode_tstate* ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel); #ifdef AHC_TARGET_MODE static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force); #endif static struct ahc_syncrate* ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahc_update_pending_scbs(struct ahc_softc *ahc); static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_assert_atn(struct ahc_softc *ahc); static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset); static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width); static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahc_clear_msg_state(struct ahc_softc *ahc); static void ahc_handle_proto_violation(struct ahc_softc *ahc); static void ahc_handle_message_phase(struct ahc_softc
*ahc); typedef enum { AHCMSG_1B, AHCMSG_2B, AHCMSG_EXT } ahc_msgtype; static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full); static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo); static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc); static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level); #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb); #endif static bus_dmamap_callback_t ahc_dmamap_cb; static void ahc_build_free_scb_list(struct ahc_softc *ahc); static int ahc_init_scbdata(struct ahc_softc *ahc); static void ahc_fini_scbdata(struct ahc_softc *ahc); static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb); static int ahc_qinfifo_count(struct ahc_softc *ahc); static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr); static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc); static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev); static void ahc_reset_current_bus(struct ahc_softc *ahc); #ifdef AHC_DUMP_SEQ static void ahc_dumpseq(struct ahc_softc *ahc); #endif static int ahc_loadseq(struct ahc_softc *ahc); static int ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, u_int start_instr, u_int *skip_addr); static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts); #ifdef AHC_TARGET_MODE static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask); static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd); #endif /************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero */ void ahc_restart(struct ahc_softc *ahc) { ahc_pause(ahc); /* No more pending messages. */ ahc_clear_msg_state(ahc); ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); ahc_outb(ahc, LASTPHASE, P_BUSFREE); ahc_outb(ahc, SAVED_SCSIID, 0xFF); ahc_outb(ahc, SAVED_LUN, 0xFF); /* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not. */ ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); /* Always allow reselection */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Ensure that no DMA operations are in progress */ ahc_outb(ahc, CCSCBCNT, 0); ahc_outb(ahc, CCSGCTL, 0); ahc_outb(ahc, CCSCBCTL, 0); } /* * If we were in the process of DMA'ing SCB data into * an SCB, replace that SCB on the free list. This prevents * an SCB leak. 
*/ if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) { ahc_add_curscb_to_free_list(ahc); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); } ahc_outb(ahc, MWI_RESIDUAL, 0); ahc_outb(ahc, SEQCTL, ahc->seqctl); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); ahc_unpause(ahc); } /************************* Input/Output Queues ********************************/ void ahc_run_qoutfifo(struct ahc_softc *ahc) { struct scb *scb; u_int scb_index; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { scb_index = ahc->qoutfifo[ahc->qoutfifonext]; if ((ahc->qoutfifonext & 0x03) == 0x03) { u_int modnext; /* * Clear 32bits of QOUTFIFO at a time * so that we don't clobber an incoming * byte DMA to the array on architectures * that only support 32bit load and store * operations. */ modnext = ahc->qoutfifonext & ~0x3; *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/modnext, /*len*/4, BUS_DMASYNC_PREREAD); } ahc->qoutfifonext++; scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("%s: WARNING no command for scb %d " "(cmdcmplt)\nQOUTPOS = %d\n", ahc_name(ahc), scb_index, (ahc->qoutfifonext - 1) & 0xFF); continue; } /* * Save off the residual * if there is one. */ ahc_update_residual(ahc, scb); ahc_done(ahc, scb); } } void ahc_run_untagged_queues(struct ahc_softc *ahc) { int i; for (i = 0; i < 16; i++) ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); } void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) { struct scb *scb; if (ahc->untagged_queue_lock != 0) return; if ((scb = TAILQ_FIRST(queue)) != NULL && (scb->flags & SCB_ACTIVE) == 0) { scb->flags |= SCB_ACTIVE; ahc_queue_scb(ahc, scb); } } /************************* Interrupt Handling *********************************/ void ahc_handle_brkadrint(struct ahc_softc *ahc) { /* * We upset the sequencer :-( * Lookup the error message */ int i; int error; error = ahc_inb(ahc, ERROR); for (i = 0; error != 1 && i < num_errors; i++) error >>= 1; printf("%s: brkadrint, %s at seqaddr = 0x%x\n", ahc_name(ahc), ahc_hard_errors[i].errmesg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); ahc_dump_card_state(ahc); /* Tell everyone that this HBA is no longer available */ ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_NO_HBA); /* Disable all interrupt sources by resetting the controller */ ahc_shutdown(ahc); } void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) { struct scb *scb; struct ahc_devinfo devinfo; ahc_fetch_devinfo(ahc, &devinfo); /* * Clear the upper byte that holds SEQINT status * codes and clear the SEQINT bit. We will unpause * the sequencer, if appropriate, after servicing * the request. */ ahc_outb(ahc, CLRINT, CLRSEQINT); switch (intstat & SEQINT_MASK) { case BAD_STATUS: { u_int scb_index; struct hardware_scb *hscb; /* * Set the default return value to 0 (don't * send sense). The sense code will change * this if needed. */ ahc_outb(ahc, RETURN_1, 0); /* * The sequencer will notify us when a command * has an error that would be of interest to * the kernel. This allows us to leave the sequencer * running in the common case of command completes * without error. The sequencer will already have * dma'd the SCB back up to us, so we can reference * the in kernel copy directly. 
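ahc_run_qoutfifo() above retires one completion tag per byte, but re-arms the FIFO only in aligned 32-bit chunks so the host never issues byte stores into a word the chip may be DMA'ing into. A user-space model of that index math (the driver performs the clear with a single 32-bit store; memset here is a stand-in):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCB_LIST_NULL	0xFF

static uint8_t qoutfifo[256];

int
main(void)
{
	uint8_t next = 0;

	memset(qoutfifo, SCB_LIST_NULL, sizeof(qoutfifo));
	qoutfifo[0] = 1; qoutfifo[1] = 2; qoutfifo[2] = 3; qoutfifo[3] = 4;

	while (qoutfifo[next] != SCB_LIST_NULL) {
		uint8_t tag = qoutfifo[next];

		if ((next & 0x03) == 0x03) {
			/* Re-arm a whole aligned 32-bit word at once. */
			unsigned modnext = next & ~0x03u;
			memset(&qoutfifo[modnext], SCB_LIST_NULL, 4);
		}
		next++;	/* 8-bit index wraps at 256, like the driver's */
		printf("completed tag %u\n", (unsigned)tag);
	}
	return (0);
}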
scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); printf("ahc_intr - referenced scb " "not valid during seqint 0x%x scb(%d)\n", intstat, scb_index); ahc_dump_card_state(ahc); panic("for safety"); goto unpause; } hscb = scb->hscb; /* Don't want to clobber the original sense code */ if ((scb->flags & SCB_SENSE) != 0) { /* * Clear the SCB_SENSE Flag and have * the sequencer do a normal command * complete. */ scb->flags &= ~SCB_SENSE; - ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); break; } - ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); + aic_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR); /* Freeze the queue until the client sees the error. */ ahc_freeze_devq(ahc, scb); - ahc_freeze_scb(scb); - ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status); + aic_freeze_scb(scb); + aic_set_scsi_status(scb, hscb->shared_data.status.scsi_status); switch (hscb->shared_data.status.scsi_status) { case SCSI_STATUS_OK: printf("%s: Interrupted for status of 0???\n", ahc_name(ahc)); break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: { struct ahc_dma_seg *sg; struct scsi_sense *sc; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printf("SCB %d: requests Check Status\n", scb->hscb->tag); } #endif - if (ahc_perform_autosense(scb) == 0) + if (aic_perform_autosense(scb) == 0) break; targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo = &targ_info->curr; sg = scb->sg_list; sc = (struct scsi_sense *)(&hscb->shared_data.cdb); /* * Save off the residual if there is one. */ ahc_update_residual(ahc, scb); #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_SENSE) { ahc_print_path(ahc, scb); printf("Sending Sense\n"); } #endif sg->addr = ahc_get_sense_bufaddr(ahc, scb); - sg->len = ahc_get_sense_bufsize(ahc, scb); + sg->len = aic_get_sense_bufsize(ahc, scb); sg->len |= AHC_DMA_LAST_SEG; /* Fixup byte order */ - sg->addr = ahc_htole32(sg->addr); - sg->len = ahc_htole32(sg->len); + sg->addr = aic_htole32(sg->addr); + sg->len = aic_htole32(sg->len); sc->opcode = REQUEST_SENSE; sc->byte2 = 0; if (tinfo->protocol_version <= SCSI_REV_2 && SCB_GET_LUN(scb) < 8) sc->byte2 = SCB_GET_LUN(scb) << 5; sc->unused[0] = 0; sc->unused[1] = 0; sc->length = sg->len; sc->control = 0; /* * We can't allow the target to disconnect. * This will be an untagged transaction and * having the target disconnect will make this * transaction indistinguishable from outstanding * tagged transactions. */ hscb->control = 0; /* * This request sense could be because the * device lost power or in some other * way has lost our transfer negotiations. * Renegotiate if appropriate. Unit attention * errors will be reported before any data * phases occur.
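The autosense path above hand-builds a 6-byte REQUEST SENSE CDB, placing the LUN in bits 5-7 of byte 2 only for SCSI-2 and older targets. A standalone sketch of that CDB layout (scsi_sense_cdb is a hypothetical local type, not the CAM structure):

#include <stdint.h>
#include <stdio.h>

#define REQUEST_SENSE	0x03

struct scsi_sense_cdb {
	uint8_t opcode;
	uint8_t byte2;		/* LUN << 5 for SCSI-2 and older */
	uint8_t unused[2];
	uint8_t length;		/* allocation length */
	uint8_t control;
};

static void
build_request_sense(struct scsi_sense_cdb *sc, unsigned lun,
    unsigned scsi_rev, uint8_t alloc_len)
{
	sc->opcode = REQUEST_SENSE;
	sc->byte2 = 0;
	if (scsi_rev <= 2 && lun < 8)
		sc->byte2 = lun << 5;
	sc->unused[0] = 0;
	sc->unused[1] = 0;
	sc->length = alloc_len;
	sc->control = 0;
}

int
main(void)
{
	struct scsi_sense_cdb sc;

	build_request_sense(&sc, 2, 2, 252);
	printf("opcode %#x byte2 %#x len %u\n", (unsigned)sc.opcode,
	    (unsigned)sc.byte2, (unsigned)sc.length);
	return (0);
}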
*/ - if (ahc_get_residual(scb) - == ahc_get_transfer_length(scb)) { + if (aic_get_residual(scb) + == aic_get_transfer_length(scb)) { ahc_update_neg_request(ahc, &devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } if (tstate->auto_negotiate & devinfo.target_mask) { hscb->control |= MK_MESSAGE; scb->flags &= ~SCB_NEGOTIATE; scb->flags |= SCB_AUTO_NEGOTIATE; } hscb->cdb_len = sizeof(*sc); hscb->dataptr = sg->addr; hscb->datacnt = sg->len; hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID; - hscb->sgptr = ahc_htole32(hscb->sgptr); + hscb->sgptr = aic_htole32(hscb->sgptr); scb->sg_count = 1; scb->flags |= SCB_SENSE; ahc_qinfifo_requeue_tail(ahc, scb); ahc_outb(ahc, RETURN_1, SEND_SENSE); /* * Ensure we have enough time to actually * retrieve the sense. */ - ahc_scb_timer_reset(scb, 5 * 1000000); + aic_scb_timer_reset(scb, 5 * 1000000); break; } default: break; } break; } case NO_MATCH: { /* Ensure we don't leave the selection hardware on */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); printf("%s:%c:%d: no active SCB for reconnecting " "target - issuing BUS DEVICE RESET\n", ahc_name(ahc), devinfo.channel, devinfo.target); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX)); printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0)); printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL)); ahc_dump_card_state(ahc); ahc->msgout_buf[0] = MSG_BUS_DEV_RESET; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; ahc_outb(ahc, MSG_OUT, HOST_MSG); ahc_assert_atn(ahc); break; } case SEND_REJECT: { u_int rejbyte = ahc_inb(ahc, ACCUM); printf("%s:%c:%d: Warning - unknown message received from " "target (0x%x). Rejecting\n", ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte); break; } case PROTO_VIOLATION: { ahc_handle_proto_violation(ahc); break; } case IGN_WIDE_RES: ahc_handle_ign_wide_residue(ahc, &devinfo); break; case PDATA_REINIT: ahc_reinitialize_dataptrs(ahc); break; case BAD_PHASE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printf("%s:%c:%d: unknown scsi bus phase %x, " "lastphase = 0x%x. Attempting to continue\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); break; } case MISSED_BUSFREE: { u_int lastphase; lastphase = ahc_inb(ahc, LASTPHASE); printf("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n", ahc_name(ahc), devinfo.channel, devinfo.target, lastphase, ahc_inb(ahc, SCSISIGI)); ahc_restart(ahc); return; } case HOST_MSG_LOOP: { /* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is * transfered so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop. 
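The single-byte message staging used by the NO_MATCH handler above (msgout_buf/msgout_len/msgout_index) is a simple producer/consumer: the message loop hands the chip one byte per REQ/ACK cycle until the index reaches the staged length. A minimal model of that state (0x0C is the SCSI BUS DEVICE RESET message):

#include <stdio.h>

#define MSG_BUS_DEV_RESET	0x0C

struct msg_state {
	unsigned char buf[8];
	unsigned len;
	unsigned index;
};

static void
stage_bus_device_reset(struct msg_state *ms)
{
	ms->buf[0] = MSG_BUS_DEV_RESET;
	ms->len = 1;
	ms->index = 0;
}

int
main(void)
{
	struct msg_state ms;

	stage_bus_device_reset(&ms);
	while (ms.index < ms.len)	/* one byte per REQ/ACK cycle */
		printf("msg-out byte: %#x\n", (unsigned)ms.buf[ms.index++]);
	return (0);
}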
*/ if (ahc->msg_type == MSG_TYPE_NONE) { struct scb *scb; u_int scb_index; u_int bus_phase; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN && bus_phase != P_MESGOUT) { printf("ahc_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message. */ ahc_clear_intstat(ahc); ahc_restart(ahc); return; } scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (devinfo.role == ROLE_INITIATOR) { if (scb == NULL) panic("HOST_MSG_LOOP with " "invalid SCB %x\n", scb_index); if (bus_phase == P_MESGOUT) ahc_setup_initiator_msgout(ahc, &devinfo, scb); else { ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; } } #ifdef AHC_TARGET_MODE else { if (bus_phase == P_MESGOUT) { ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc->msgin_index = 0; } else ahc_setup_target_msgin(ahc, &devinfo, scb); } #endif } ahc_handle_message_phase(ahc); break; } case PERR_DETECTED: { /* * If we've cleared the parity error interrupt * but the sequencer still believes that SCSIPERR * is true, it must be that the parity error is * for the currently presented byte on the bus, * and we are not in a phase (data-in) where we will * eventually ack this byte. Ack the byte and * throw it away in the hope that the target will * take us to message out to deliver the appropriate * error message. */ if ((intstat & SCSIINT) == 0 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) { if ((ahc->features & AHC_DT) == 0) { u_int curphase; /* * The hardware will only let you ack bytes * if the expected phase in SCSISIGO matches * the current phase. Make sure this is * currently the case. */ curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; ahc_outb(ahc, LASTPHASE, curphase); ahc_outb(ahc, SCSISIGO, curphase); } if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) { int wait; /* * In a data phase. Faster to bitbucket * the data than to individually ack each * byte. This is also the only strategy * that will work with AUTOACK enabled. */ ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) | BITBUCKET); wait = 5000; while (--wait != 0) { if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) != 0) break; - ahc_delay(100); + aic_delay(100); } ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); if (wait == 0) { struct scb *scb; u_int scb_index; ahc_print_devinfo(ahc, &devinfo); printf("Unable to clear parity error. " "Resetting bus.\n"); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL) - ahc_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_UNCOR_PARITY); ahc_reset_channel(ahc, devinfo.channel, /*init reset*/TRUE); } } else { ahc_inb(ahc, SCSIDATL); } } break; } case DATA_OVERRUN: { /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was. */ u_int scbindex = ahc_inb(ahc, SCB_TAG); u_int lastphase = ahc_inb(ahc, LASTPHASE); u_int i; scb = ahc_lookup_scb(ahc, scbindex); for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } ahc_print_path(ahc, scb); printf("data overrun detected %s." " Tag == 0x%x.\n", ahc_phase_table[i].phasemsg, scb->hscb->tag); ahc_print_path(ahc, scb); printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n", ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? 
"Have" : "Haven't", - ahc_get_transfer_length(scb), scb->sg_count); + aic_get_transfer_length(scb), scb->sg_count); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printf("sg[%d] - Addr 0x%x%x : Length %d\n", i, - (ahc_le32toh(scb->sg_list[i].len) >> 24 + (aic_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), - ahc_le32toh(scb->sg_list[i].addr), - ahc_le32toh(scb->sg_list[i].len) + aic_le32toh(scb->sg_list[i].addr), + aic_le32toh(scb->sg_list[i].len) & AHC_SG_LEN_MASK); } } /* * Set this and it will take effect when the * target does a command complete. */ ahc_freeze_devq(ahc, scb); if ((scb->flags & SCB_SENSE) == 0) { - ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR); + aic_set_transaction_status(scb, CAM_DATA_RUN_ERR); } else { scb->flags &= ~SCB_SENSE; - ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); + aic_set_transaction_status(scb, CAM_AUTOSENSE_FAIL); } - ahc_freeze_scb(scb); + aic_freeze_scb(scb); if ((ahc->features & AHC_ULTRA2) != 0) { /* * Clear the channel in case we return * to data phase later. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN); } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; /* Ensure HHADDR is 0 for future DMA operations. */ dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, 0); ahc_outb(ahc, DSCOMMAND1, dscommand1); } break; } case MKMSG_FAILED: { u_int scbindex; printf("%s:%c:%d:%d: Attempt to issue message failed\n", ahc_name(ahc), devinfo.channel, devinfo.target, devinfo.lun); scbindex = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbindex); if (scb != NULL && (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), scb->hscb->tag, ROLE_INITIATOR, /*status*/0, SEARCH_REMOVE); break; } case NO_FREE_SCB: { printf("%s: No free or disconnected SCBs\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("for safety"); break; } case SCB_MISMATCH: { u_int scbptr; scbptr = ahc_inb(ahc, SCBPTR); printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n", scbptr, ahc_inb(ahc, ARG_1), ahc->scb_data->hscbs[scbptr].tag); ahc_dump_card_state(ahc); panic("for saftey"); break; } case OUT_OF_RANGE: { printf("%s: BTT calculation out of range\n", ahc_name(ahc)); printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, " "ARG_1 == 0x%x ACCUM = 0x%x\n", ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN), ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM)); printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, " "SINDEX == 0x%x\n, A == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR), ahc_index_busy_tcl(ahc, BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN))), ahc_inb(ahc, SINDEX), ahc_inb(ahc, ACCUM)); printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, " "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n", ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG), ahc_inb(ahc, SCB_CONTROL)); printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI)); ahc_dump_card_state(ahc); panic("for safety"); break; } default: printf("ahc_intr: seqint, " "intstat == 0x%x, scsisigi = 0x%x\n", intstat, ahc_inb(ahc, SCSISIGI)); break; } unpause: /* * The sequencer is paused immediately on * a SEQINT, so we should restart it when * we're done. 
*/ ahc_unpause(ahc); } void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) { u_int scb_index; u_int status0; u_int status; struct scb *scb; char cur_channel; char intr_channel; if ((ahc->features & AHC_TWIN) != 0 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0)) cur_channel = 'B'; else cur_channel = 'A'; intr_channel = cur_channel; if ((ahc->features & AHC_ULTRA2) != 0) status0 = ahc_inb(ahc, SSTAT0) & IOERR; else status0 = 0; status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); if (status == 0 && status0 == 0) { if ((ahc->features & AHC_TWIN) != 0) { /* Try the other channel */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR); intr_channel = (cur_channel == 'A') ? 'B' : 'A'; } if (status == 0) { printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc)); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); return; } } /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb != NULL && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; if ((ahc->features & AHC_ULTRA2) != 0 && (status0 & IOERR) != 0) { int now_lvd; now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40; printf("%s: Transceiver State Has Changed to %s mode\n", ahc_name(ahc), now_lvd ? "LVD" : "SE"); ahc_outb(ahc, CLRSINT0, CLRIOERR); /* * When transitioning to SE mode, the reset line * glitches, triggering an arbitration bug in some * Ultra2 controllers. This bug is cleared when we * assert the reset line. Since a reset glitch has * already occurred with this transition and a * transceiver state change is handled just like * a bus reset anyway, asserting the reset line * ourselves is safe. */ ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/now_lvd == 0); } else if ((status & SCSIRSTI) != 0) { printf("%s: Someone reset channel %c\n", ahc_name(ahc), intr_channel); if (intr_channel != cur_channel) ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB); ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE); } else if ((status & SCSIPERR) != 0) { /* * Determine the bus phase and queue an appropriate message. * SCSIPERR is latched true as soon as a parity error * occurs. If the sequencer acked the transfer that * caused the parity error and the currently presented * transfer on the bus has correct parity, SCSIPERR will * be cleared by CLRSCSIPERR. Use this to determine if * we should look at the last phase the sequencer recorded, * or the current phase presented on the bus. */ struct ahc_devinfo devinfo; u_int mesg_out; u_int curphase; u_int errorphase; u_int lastphase; u_int scsirate; u_int i; u_int sstat2; int silent; lastphase = ahc_inb(ahc, LASTPHASE); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; sstat2 = ahc_inb(ahc, SSTAT2); ahc_outb(ahc, CLRSINT1, CLRSCSIPERR); /* * For all phases save DATA, the sequencer won't * automatically ack a byte that has a parity error * in it. So the only way that the current phase * could be 'data-in' is if the parity error is for * an already acked byte in the data phase. During * synchronous data-in transfers, we may actually * ack bytes before latching the current phase in * LASTPHASE, leading to the discrepancy between * curphase and lastphase. 
*/ if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0 || curphase == P_DATAIN || curphase == P_DATAIN_DT) errorphase = curphase; else errorphase = lastphase; for (i = 0; i < num_phases; i++) { if (errorphase == ahc_phase_table[i].phase) break; } mesg_out = ahc_phase_table[i].mesg_out; silent = FALSE; if (scb != NULL) { if (SCB_IS_SILENT(scb)) silent = TRUE; else ahc_print_path(ahc, scb); scb->flags |= SCB_TRANSMISSION_ERROR; } else printf("%s:%c:%d: ", ahc_name(ahc), intr_channel, SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID))); scsirate = ahc_inb(ahc, SCSIRATE); if (silent == FALSE) { printf("parity error detected %s. " "SEQADDR(0x%x) SCSIRATE(0x%x)\n", ahc_phase_table[i].phasemsg, ahc_inw(ahc, SEQADDR0), scsirate); if ((ahc->features & AHC_DT) != 0) { if ((sstat2 & CRCVALERR) != 0) printf("\tCRC Value Mismatch\n"); if ((sstat2 & CRCENDERR) != 0) printf("\tNo terminal CRC packet " "received\n"); if ((sstat2 & CRCREQERR) != 0) printf("\tIllegal CRC packet " "request\n"); if ((sstat2 & DUAL_EDGE_ERR) != 0) printf("\tUnexpected %sDT Data Phase\n", (scsirate & SINGLE_EDGE) ? "" : "non-"); } } if ((ahc->features & AHC_DT) != 0 && (sstat2 & DUAL_EDGE_ERR) != 0) { /* * This error applies regardless of * data direction, so ignore the value * in the phase table. */ mesg_out = MSG_INITIATOR_DET_ERR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOOP. */ if (mesg_out != MSG_NOOP) { if (ahc->msg_type != MSG_TYPE_NONE) ahc->send_msg_perror = TRUE; else ahc_outb(ahc, MSG_OUT, mesg_out); } /* * Force a renegotiation with this target just in * case we are out of sync for some external reason * unknown (or unreported) by the target. */ ahc_fetch_devinfo(ahc, &devinfo); ahc_force_renegotiation(ahc, &devinfo); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_unpause(ahc); } else if ((status & SELTO) != 0) { u_int scbptr; /* Stop the selection */ ahc_outb(ahc, SCSISEQ, 0); /* No more pending messages */ ahc_clear_msg_state(ahc); /* Clear interrupt state */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR); /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy * LED does. SELINGO is only cleared by a successful * selection, so we must manually clear it to ensure * the LED turns off just in case no future successful * selections occur (e.g. no devices on the bus). */ ahc_outb(ahc, CLRSINT0, CLRSELINGO); scbptr = ahc_inb(ahc, WAITING_SCBH); ahc_outb(ahc, SCBPTR, scbptr); scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("%s: ahc_intr - referenced scb not " "valid during SELTO scb(%d, %d)\n", ahc_name(ahc), scbptr, scb_index); ahc_dump_card_state(ahc); } else { struct ahc_devinfo devinfo; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_SELTO) != 0) { ahc_print_path(ahc, scb); printf("Saw Selection Timeout for SCB 0x%x\n", scb_index); } #endif /* * Force a renegotiation with this target just in * case the cable was pulled and will later be * re-attached. The target may forget its negotiation * settings with us should it attempt to reselect * during the interruption. The target will not issue * a unit attention in this case, so we must always * renegotiate.
*/ ahc_scb_devinfo(ahc, &devinfo, scb); ahc_force_renegotiation(ahc, &devinfo); - ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); + aic_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahc_freeze_devq(ahc, scb); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else if ((status & BUSFREE) != 0 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) { struct ahc_devinfo devinfo; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; char channel; int printerror; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); /* * Disable busfree interrupts and clear the busfree * interrupt status. We do this here so that several * bus transactions occur prior to clearing the SCSIINT * latch. It can take a bit for the clearing to take effect. */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR); /* * Look at what phase we were last in. * If its message out, chances are pretty good * that the busfree was in response to one of * our abort requests. */ lastphase = ahc_inb(ahc, LASTPHASE); saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); saved_lun = ahc_inb(ahc, SAVED_LUN); target = SCSIID_TARGET(ahc, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); channel = SCSIID_CHANNEL(ahc, saved_scsiid); ahc_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, channel, ROLE_INITIATOR); printerror = 1; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE) || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) { if (ahc->msgout_buf[ahc->msgout_index - 1] == MSG_ABORT_TAG) tag = scb->hscb->tag; ahc_print_path(ahc, scb); printf("SCB %d - Abort%s Completed.\n", scb->hscb->tag, tag == SCB_LIST_NULL ? "" : " Tag"); ahc_abort_scbs(ahc, target, channel, saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahc_match_scb(ahc, scb, target, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) { - ahc_set_transaction_status(scb, CAM_REQ_CMP); + aic_set_transaction_status(scb, CAM_REQ_CMP); } #endif ahc_compile_devinfo(&devinfo, initiator_role_id, target, CAM_LUN_WILDCARD, channel, ROLE_INITIATOR); ahc_handle_devreset(ahc, &devinfo, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, FALSE)) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; /* * PPR Rejected. Try non-ppr negotiation * and retry command. */ tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, FALSE)) { /* * Negotiation Rejected. Go-narrow and * retry command. 
*/ ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, FALSE)) { /* * Negotiation Rejected. Go-async and * retry command. */ ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_qinfifo_requeue_tail(ahc, scb); printerror = 0; } } if (printerror != 0) { u_int i; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = scb->hscb->tag; else tag = SCB_LIST_NULL; ahc_print_path(ahc, scb); ahc_abort_scbs(ahc, target, channel, SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printf("%s: ", ahc_name(ahc)); } for (i = 0; i < num_phases; i++) { if (lastphase == ahc_phase_table[i].phase) break; } if (lastphase != P_BUSFREE) { /* * Renegotiate with this device at the * next opportunity just in case this busfree * is due to a negotiation mismatch with the * device. */ ahc_force_renegotiation(ahc, &devinfo); } printf("Unexpected busfree %s\n" "SEQADDR == 0x%x\n", ahc_phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); } ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_restart(ahc); } else { printf("%s: Missing case in ahc_handle_scsiint. status = %x\n", ahc_name(ahc), status); ahc_outb(ahc, CLRINT, CLRSCSIINT); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. */ static void ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; targ_info = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahc_update_neg_request(ahc, devinfo, tstate, targ_info, AHC_NEG_IF_NON_ASYNC); } #define AHC_MAX_STEPS 2000 void ahc_clear_critical_section(struct ahc_softc *ahc) { int stepping; int steps; u_int simode0; u_int simode1; if (ahc->num_critical_sections == 0) return; stepping = FALSE; steps = 0; simode0 = 0; simode1 = 0; for (;;) { struct cs *cs; u_int seqaddr; u_int i; seqaddr = ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8); /* * Seqaddr represents the next instruction to execute, * so we are really executing the instruction just * before it. */ if (seqaddr != 0) seqaddr -= 1; cs = ahc->critical_sections; for (i = 0; i < ahc->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahc->num_critical_sections) break; if (steps > AHC_MAX_STEPS) { printf("%s: Infinite loop in critical section\n", ahc_name(ahc)); ahc_dump_card_state(ahc); panic("critical section loop"); } steps++; if (stepping == FALSE) { /* * Disable all interrupt sources so that the * sequencer will not be stuck by a pausing * interrupt condition while we attempt to * leave a critical section. */ simode0 = ahc_inb(ahc, SIMODE0); ahc_outb(ahc, SIMODE0, 0); simode1 = ahc_inb(ahc, SIMODE1); if ((ahc->features & AHC_DT) != 0) /* * On DT class controllers, we * use the enhanced busfree logic. * Unfortunately we cannot re-enable * busfree detection within the * current connection, so we must * leave it on while single stepping.
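 *
 * (Because ENBUSFREE is left on, a busfree glitch can still pause
 * us mid-step; the DT branch of the stepping loop therefore clears
 * the latched state on every iteration before unpausing again:
 *
 *	ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
 *	ahc_outb(ahc, CLRINT, CLRSCSIINT);
 *
 * This is a restatement of the code below, not new behavior.)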
*/ ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE); else ahc_outb(ahc, SIMODE1, 0); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP); stepping = TRUE; } if ((ahc->features & AHC_DT) != 0) { ahc_outb(ahc, CLRSINT1, CLRBUSFREE); ahc_outb(ahc, CLRINT, CLRSCSIINT); } ahc_outb(ahc, HCNTRL, ahc->unpause); while (!ahc_is_paused(ahc)) - ahc_delay(200); + aic_delay(200); } if (stepping) { ahc_outb(ahc, SIMODE0, simode0); ahc_outb(ahc, SIMODE1, simode1); ahc_outb(ahc, SEQCTL, ahc->seqctl); } } /* * Clear any pending interrupt status. */ void ahc_clear_intstat(struct ahc_softc *ahc) { /* Clear any interrupt conditions this may have caused */ ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG| CLRREQINIT); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO); ahc_flush_device_writes(ahc); ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_flush_device_writes(ahc); } /**************************** Debugging Routines ******************************/ #ifdef AHC_DEBUG uint32_t ahc_debug = AHC_DEBUG_OPTS; #endif void ahc_print_scb(struct scb *scb) { int i; struct hardware_scb *hscb = scb->hscb; printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printf("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.cdb); i++) printf("%#02x", hscb->shared_data.cdb[i]); printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n", - ahc_le32toh(hscb->dataptr), - ahc_le32toh(hscb->datacnt), - ahc_le32toh(hscb->sgptr), + aic_le32toh(hscb->dataptr), + aic_le32toh(hscb->datacnt), + aic_le32toh(hscb->sgptr), hscb->tag); if (scb->sg_count > 0) { for (i = 0; i < scb->sg_count; i++) { printf("sg[%d] - Addr 0x%x%x : Length %d\n", i, - (ahc_le32toh(scb->sg_list[i].len) >> 24 + (aic_le32toh(scb->sg_list[i].len) >> 24 & SG_HIGH_ADDR_BITS), - ahc_le32toh(scb->sg_list[i].addr), - ahc_le32toh(scb->sg_list[i].len)); + aic_le32toh(scb->sg_list[i].addr), + aic_le32toh(scb->sg_list[i].len)); } } } /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static struct ahc_tmode_tstate * ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel) { struct ahc_tmode_tstate *master_tstate; struct ahc_tmode_tstate *tstate; int i; master_tstate = ahc->enabled_targets[ahc->our_id]; if (channel == 'B') { scsi_id += 8; master_tstate = ahc->enabled_targets[ahc->our_id_b + 8]; } if (ahc->enabled_targets[scsi_id] != NULL && ahc->enabled_targets[scsi_id] != master_tstate) panic("%s: ahc_alloc_tstate - Target already allocated", ahc_name(ahc)); tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. 
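 *
 * (Zero-filling is what implements the async/narrow reset: a period
 * factor of 0 reads as asynchronous throughout the negotiation code,
 * and a width code of 0 is MSG_EXT_WDTR_BUS_8_BIT, i.e.
 * 8 * (1 << 0) = 8 bit transfers.)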
*/ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); tstate->ultraenb = 0; for (i = 0; i < AHC_NUM_TARGETS; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahc->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHC_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force) { struct ahc_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (((channel == 'B' && scsi_id == ahc->our_id_b) || (channel == 'A' && scsi_id == ahc->our_id)) && force == FALSE) return; if (channel == 'B') scsi_id += 8; tstate = ahc->enabled_targets[scsi_id]; if (tstate != NULL) free(tstate, M_DEVBUF); ahc->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest syncrate to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. */ struct ahc_syncrate * ahc_devlimited_syncrate(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *period, u_int *ppr_options, role_t role) { struct ahc_transinfo *transinfo; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) { if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) { maxsync = AHC_SYNCRATE_DT; } else { maxsync = AHC_SYNCRATE_ULTRA; /* Can't do DT on an SE bus */ *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } } else if ((ahc->features & AHC_ULTRA) != 0) { maxsync = AHC_SYNCRATE_ULTRA; } else { maxsync = AHC_SYNCRATE_FAST; } /* * Never allow a value higher than our current goal * period otherwise we may allow a target initiated * negotiation to go above the limit as set by the * user. In the case of an initiator initiated * sync negotiation, we limit based on the user * setting. This allows the system to still accept * incoming negotiations even if target initiated * negotiation is not performed. */ if (role == ROLE_TARGET) transinfo = &tinfo->user; else transinfo = &tinfo->goal; *ppr_options &= transinfo->ppr_options; if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) { maxsync = MAX(maxsync, AHC_SYNCRATE_ULTRA2); *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } if (transinfo->period == 0) { *period = 0; *ppr_options = 0; return (NULL); } *period = MAX(*period, transinfo->period); return (ahc_find_syncrate(ahc, period, ppr_options, maxsync)); } /* * Look up the valid period to SCSIRATE conversion in our table. * Return the period and offset that should be sent to the target * if this was the beginning of an SDTR. */ struct ahc_syncrate * ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync) { struct ahc_syncrate *syncrate; if ((ahc->features & AHC_DT) == 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; /* Skip all DT only entries if DT is not available */ if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && maxsync < AHC_SYNCRATE_ULTRA2) maxsync = AHC_SYNCRATE_ULTRA2; for (syncrate = &ahc_syncrates[maxsync]; syncrate->rate != NULL; syncrate++) { /* * The Ultra2 table doesn't go as low * as for the Fast/Ultra cards. 
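 *
 * (Typical use, mirroring the negotiation paths in this file; the
 * starting factor below is purely illustrative:
 *
 *	u_int period = 25;
 *	u_int ppr_options = 0;
 *	struct ahc_syncrate *rate;
 *
 *	rate = ahc_find_syncrate(ahc, &period, &ppr_options, maxsync);
 *
 * On return, period may have been adjusted to a factor the
 * controller supports and rate points at the table entry to use;
 * for async, period is forced to 0 and rate is NULL.)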
*/ if ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0)) break; if (*period <= syncrate->period) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. */ if (syncrate == &ahc_syncrates[maxsync]) *period = syncrate->period; /* * At some speeds, we only support * ST transfers. */ if ((syncrate->sxfr_u2 & ST_SXFR) != 0) *ppr_options &= ~MSG_EXT_PPR_DT_REQ; break; } } if ((*period == 0) || (syncrate->rate == NULL) || ((ahc->features & AHC_ULTRA2) != 0 && (syncrate->sxfr_u2 == 0))) { /* Use asynchronous transfers. */ *period = 0; syncrate = NULL; *ppr_options &= ~MSG_EXT_PPR_DT_REQ; } return (syncrate); } /* * Convert from an entry in our syncrate table to the SCSI equivalent * sync "period" factor. */ u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync) { struct ahc_syncrate *syncrate; if ((ahc->features & AHC_ULTRA2) != 0) scsirate &= SXFR_ULTRA2; else scsirate &= SXFR; syncrate = &ahc_syncrates[maxsync]; while (syncrate->rate != NULL) { if ((ahc->features & AHC_ULTRA2) != 0) { if (syncrate->sxfr_u2 == 0) break; else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2)) return (syncrate->period); } else if (scsirate == (syncrate->sxfr & SXFR)) { return (syncrate->period); } syncrate++; } return (0); /* async */ } /* * Truncate the given synchronous offset to a value the * current adapter type and syncrate are capable of. */ void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role) { u_int maxoffset; /* Limit offset to what we can do */ if (syncrate == NULL) { maxoffset = 0; } else if ((ahc->features & AHC_ULTRA2) != 0) { maxoffset = MAX_OFFSET_ULTRA2; } else { if (wide) maxoffset = MAX_OFFSET_16BIT; else maxoffset = MAX_OFFSET_8BIT; } *offset = MIN(*offset, maxoffset); if (tinfo != NULL) { if (role == ROLE_TARGET) *offset = MIN(*offset, tinfo->user.offset); else *offset = MIN(*offset, tinfo->goal.offset); } } /* * Truncate the given transfer width parameter to a value the * current adapter type is capable of. */ void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahc->features & AHC_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = MIN(tinfo->user.width, *bus_width); else *bus_width = MIN(tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets with which the controller should * negotiate at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_tmode_tstate *tstate, struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHC_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently.
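 *
 * For reference, the three negotiation types used in this file are
 * AHC_NEG_TO_GOAL (the set_syncrate/set_width paths), AHC_NEG_ALWAYS
 * (the WDTR response path), and AHC_NEG_IF_NON_ASYNC, as in
 * ahc_force_renegotiation() above:
 *
 *	ahc_update_neg_request(ahc, devinfo, tstate, targ_info,
 *			       AHC_NEG_IF_NON_ASYNC);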
*/ if ((ahc->features & AHC_WIDE) != 0) tinfo->curr.width = AHC_WIDTH_UNKNOWN; tinfo->curr.period = AHC_PERIOD_UNKNOWN; tinfo->curr.offset = AHC_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHC_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int old_period; u_int old_offset; u_int old_ppr; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; if (syncrate == NULL) { period = 0; offset = 0; } tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; tinfo->user.ppr_options = ppr_options; } if ((type & AHC_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; tinfo->goal.ppr_options = ppr_options; } old_period = tinfo->curr.period; old_offset = tinfo->curr.offset; old_ppr = tinfo->curr.ppr_options; if ((type & AHC_TRANS_CUR) != 0 && (old_period != period || old_offset != offset || old_ppr != ppr_options)) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; if ((ahc->features & AHC_ULTRA2) != 0) { scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC); if (syncrate != NULL) { scsirate |= syncrate->sxfr_u2; if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) scsirate |= ENABLE_CRC; else scsirate |= SINGLE_EDGE; } } else { scsirate &= ~(SXFR|SOFS); /* * Ensure Ultra mode is set properly for * this target. */ tstate->ultraenb &= ~devinfo->target_mask; if (syncrate != NULL) { if (syncrate->sxfr & ULTRA_SXFR) { tstate->ultraenb |= devinfo->target_mask; } scsirate |= syncrate->sxfr & SXFR; scsirate |= offset & SOFS; } if (active) { u_int sxfrctl0; sxfrctl0 = ahc_inb(ahc, SXFRCTL0); sxfrctl0 &= ~FAST20; if (tstate->ultraenb & devinfo->target_mask) sxfrctl0 |= FAST20; ahc_outb(ahc, SXFRCTL0, sxfrctl0); } } if (active) { ahc_outb(ahc, SCSIRATE, scsirate); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIOFFSET, offset); } tinfo->scsirate = scsirate; tinfo->curr.period = period; tinfo->curr.offset = offset; tinfo->curr.ppr_options = ppr_options; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { if (offset != 0) { printf("%s: target %d synchronous at %sMHz%s, " "offset = 0x%x\n", ahc_name(ahc), devinfo->target, syncrate->rate, (ppr_options & MSG_EXT_PPR_DT_REQ) ? 
" DT" : "", offset); } else { printf("%s: target %d using " "asynchronous transfers\n", ahc_name(ahc), devinfo->target); } } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the user/goal/curr tables of wide negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. In the case of an * active update, the specified target is currently talking to us on * the bus, so the transfer parameter update must take effect * immediately. */ void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE; update_needed = 0; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHC_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHC_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) { u_int scsirate; update_needed++; scsirate = tinfo->scsirate; scsirate &= ~WIDEXFER; if (width == MSG_EXT_WDTR_BUS_16_BIT) scsirate |= WIDEXFER; tinfo->scsirate = scsirate; if (active) ahc_outb(ahc, SCSIRATE, scsirate); tinfo->curr.width = width; ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL); if (bootverbose) { printf("%s: target %d using %dbit transfers\n", ahc_name(ahc), devinfo->target, 8 * (0x01 << width)); } } update_needed += ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_TO_GOAL); if (update_needed) ahc_update_pending_scbs(ahc); } /* * Update the current state of tagged queuing for a given target. */ void ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, ahc_queue_alg alg) { ahc_platform_set_tags(ahc, devinfo, alg); ahc_send_async(ahc, devinfo->channel, devinfo->target, devinfo->lun, AC_TRANSFER_NEG, &alg); } /* * When the transfer settings for a connection change, update any * in-transit SCBs to contain the new data so the hardware will * be set correctly during future (re)selections. */ static void ahc_update_pending_scbs(struct ahc_softc *ahc) { struct scb *pending_scb; int pending_scb_count; int i; int paused; u_int saved_scbptr; /* * Traverse the pending SCB list and ensure that all of the * SCBs there have the proper settings. 
*/ pending_scb_count = 0; LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) { struct ahc_devinfo devinfo; struct hardware_scb *pending_hscb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; ahc_scb_devinfo(ahc, &devinfo, pending_scb); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); pending_hscb = pending_scb->hscb; pending_hscb->control &= ~ULTRAENB; if ((tstate->ultraenb & devinfo.target_mask) != 0) pending_hscb->control |= ULTRAENB; pending_hscb->scsirate = tinfo->scsirate; pending_hscb->scsioffset = tinfo->curr.offset; if ((tstate->auto_negotiate & devinfo.target_mask) == 0 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) { pending_scb->flags &= ~SCB_AUTO_NEGOTIATE; pending_hscb->control &= ~MK_MESSAGE; } ahc_sync_scb(ahc, pending_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); pending_scb_count++; } if (pending_scb_count == 0) return; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); /* Ensure that the hscbs down on the card match the new information */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { struct hardware_scb *pending_hscb; u_int control; u_int scb_tag; ahc_outb(ahc, SCBPTR, i); scb_tag = ahc_inb(ahc, SCB_TAG); pending_scb = ahc_lookup_scb(ahc, scb_tag); if (pending_scb == NULL) continue; pending_hscb = pending_scb->hscb; control = ahc_inb(ahc, SCB_CONTROL); control &= ~(ULTRAENB|MK_MESSAGE); control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE); ahc_outb(ahc, SCB_CONTROL, control); ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate); ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset); } ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } /**************************** Pathing Information *****************************/ static void ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int saved_scsiid; role_t role; int our_id; if (ahc_inb(ahc, SSTAT0) & TARGET) role = ROLE_TARGET; else role = ROLE_INITIATOR; if (role == ROLE_TARGET && (ahc->features & AHC_MULTI_TID) != 0 && (ahc_inb(ahc, SEQ_FLAGS) & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) { /* We were selected, so pull our id from TARGIDIN */ our_id = ahc_inb(ahc, TARGIDIN) & OID; } else if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); ahc_compile_devinfo(devinfo, our_id, SCSIID_TARGET(ahc, saved_scsiid), ahc_inb(ahc, SAVED_LUN), SCSIID_CHANNEL(ahc, saved_scsiid), role); } struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase) { struct ahc_phase_table_entry *entry; struct ahc_phase_table_entry *last_entry; /* * num_phases doesn't include the default entry which * will be returned if the phase doesn't match. 
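 *
 * Because a match (or the default entry) is always returned,
 * callers may chain the lookup without a NULL check, as the
 * message loop code does:
 *
 *	printf(" PHASEMIS %s\n",
 *	       ahc_lookup_phase_entry(bus_phase)->phasemsg);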
*/ last_entry = &ahc_phase_table[num_phases]; for (entry = ahc_phase_table; entry < last_entry; entry++) { if (phase == entry->phase) break; } return (entry); } void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role) { devinfo->our_scsiid = our_id; devinfo->target = target; devinfo->lun = lun; devinfo->target_offset = target; devinfo->channel = channel; devinfo->role = role; if (channel == 'B') devinfo->target_offset += 8; devinfo->target_mask = (0x01 << devinfo->target_offset); } void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } static void ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { role_t role; int our_id; our_id = SCSIID_OUR_ID(scb->hscb->scsiid); role = ROLE_INITIATOR; if ((scb->flags & SCB_TARGET_SCB) != 0) role = ROLE_TARGET; ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb), SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role); } /************************ Message Phase Processing ****************************/ static void ahc_assert_atn(struct ahc_softc *ahc) { u_int scsisigo; scsisigo = ATNO; if ((ahc->features & AHC_DT) == 0) scsisigo |= ahc_inb(ahc, SCSISIGI); ahc_outb(ahc, SCSISIGO, scsisigo); } /* * When an initiator transaction with the MK_MESSAGE flag either reconnects * or enters the initial message out phase, we are interrupted. Fill our * outgoing message buffer with the appropriate message and begin handling * the message phase(s) manually. */ static void ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahc->msgout_index = 0; ahc->msgout_len = 0; if ((scb->flags & SCB_DEVICE_RESET) == 0 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) { u_int identify_msg; identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb); if ((scb->hscb->control & DISCENB) != 0) identify_msg |= MSG_IDENTIFY_DISCFLAG; ahc->msgout_buf[ahc->msgout_index++] = identify_msg; ahc->msgout_len++; if ((scb->hscb->control & TAG_ENB) != 0) { ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE); ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag; ahc->msgout_len += 2; } } if (scb->flags & SCB_DEVICE_RESET) { ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET; ahc->msgout_len++; ahc_print_path(ahc, scb); printf("Bus Device Reset Message Sent\n"); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & SCB_ABORT) != 0) { if ((scb->hscb->control & TAG_ENB) != 0) ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG; else ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT; ahc->msgout_len++; ahc_print_path(ahc, scb); printf("Abort%s Message Sent\n", (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : ""); /* * Clear our selection hardware in advance of * the busfree. We may have an entry in the waiting * Q for this target, and we don't want to go about * selecting while we handle the busfree and blow it * away.
*/ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) { ahc_build_transfer_msg(ahc, devinfo); } else { printf("ahc_intr: AWAITING_MSG for an SCB that " "does not have a waiting message\n"); printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid, devinfo->target_mask); panic("SCB = %d, SCB Control = %x, MSG_OUT = %x " "SCB flags = %x", scb->hscb->tag, scb->hscb->control, ahc_inb(ahc, MSG_OUT), scb->flags); } /* * Clear the MK_MESSAGE flag from the SCB so we aren't * asked to send this message again. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE); scb->hscb->control &= ~MK_MESSAGE; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } /* * Build an appropriate transfer negotiation message for the * currently active target. */ static void ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * We need to initiate transfer negotiations. * If our current and goal settings are identical, * we want to renegotiate due to a check condition. */ struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; struct ahc_syncrate *rate; int dowide; int dosync; int doppr; u_int period; u_int ppr_options; u_int offset; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* * Filter our period based on the current connection. * If we can't perform DT transfers on this segment (not in LVD * mode for instance), then our decision to issue a PPR message * may change. */ period = tinfo->goal.period; offset = tinfo->goal.offset; ppr_options = tinfo->goal.ppr_options; /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) ppr_options = 0; rate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); dowide = tinfo->curr.width != tinfo->goal.width; dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; /* * Only use PPR if we have options that need it, even if the device * claims to support it. There might be an expander in the way * that doesn't. */ doppr = ppr_options != 0; if (!dowide && !dosync && !doppr) { dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT; dosync = tinfo->goal.offset != 0; } if (!dowide && !dosync && !doppr) { /* * Force async with a WDTR message if we have a wide bus, * or just issue an SDTR with a 0 offset. */ if ((ahc->features & AHC_WIDE) != 0) dowide = 1; else dosync = 1; if (bootverbose) { ahc_print_devinfo(ahc, devinfo); printf("Ensuring async\n"); } } /* Target initiated PPR is not allowed in the SCSI spec */ if (devinfo->role == ROLE_TARGET) doppr = 0; /* * Both the PPR message and SDTR message require the * goal syncrate to be limited to what the target device * is capable of handling (based on whether an LVD->SE * expander is on the bus), so combine these two cases. * Regardless, guarantee that if we are using WDTR and SDTR * messages that WDTR comes first. */ if (doppr || (dosync && !dowide)) { offset = tinfo->goal.offset; ahc_validate_offset(ahc, tinfo, rate, &offset, doppr ? tinfo->goal.width : tinfo->curr.width, devinfo->role); if (doppr) { ahc_construct_ppr(ahc, devinfo, period, offset, tinfo->goal.width, ppr_options); } else { ahc_construct_sdtr(ahc, devinfo, period, offset); } } else { ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width); } } /* * Build a synchronous negotiation message in our message * buffer based on the input parameters. 
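 *
 * On the wire this is the five byte extended message
 *
 *	{ MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR, period, offset }
 *
 * which is exactly the sequence appended to msgout_buf below.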
*/ static void ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR; ahc->msgout_buf[ahc->msgout_index++] = period; ahc->msgout_buf[ahc->msgout_index++] = offset; ahc->msgout_len += 5; if (bootverbose) { printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, period, offset); } } /* * Build a wide negotiation message in our message * buffer based on the input parameters. */ static void ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int bus_width) { ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR; ahc->msgout_buf[ahc->msgout_index++] = bus_width; ahc->msgout_len += 4; if (bootverbose) { printf("(%s:%c:%d:%d): Sending WDTR %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width); } } /* * Build a parallel protocol request message in our message * buffer based on the input parameters. */ static void ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options) { if (offset == 0) period = AHC_ASYNC_XFER_PERIOD; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN; ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR; ahc->msgout_buf[ahc->msgout_index++] = period; ahc->msgout_buf[ahc->msgout_index++] = 0; ahc->msgout_buf[ahc->msgout_index++] = offset; ahc->msgout_buf[ahc->msgout_index++] = bus_width; ahc->msgout_buf[ahc->msgout_index++] = ppr_options; ahc->msgout_len += 8; if (bootverbose) { printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, " "offset %x, ppr_options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, bus_width, period, offset, ppr_options); } } /* * Clear any active message state. */ static void ahc_clear_msg_state(struct ahc_softc *ahc) { ahc->msgout_len = 0; ahc->msgin_index = 0; ahc->msg_type = MSG_TYPE_NONE; if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) { /* * The target didn't care to respond to our * message request, so clear ATN. */ ahc_outb(ahc, CLRSINT1, CLRATNO); } ahc_outb(ahc, MSG_OUT, MSG_NOOP); ahc_outb(ahc, SEQ_FLAGS2, ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING); } static void ahc_handle_proto_violation(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahc_fetch_devinfo(ahc, &devinfo); scbid = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scbid); seq_flags = ahc_inb(ahc, SEQ_FLAGS); curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; lastphase = ahc_inb(ahc, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahc_print_devinfo(ahc, &devinfo); printf("Target did not send an IDENTIFY message. " "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. 
*/ ahc_print_devinfo(ahc, &devinfo); printf("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { - ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL); + aic_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahc_print_path(ahc, scb); printf("No or incomplete CDB sent to device.\n"); } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahc_print_path(ahc, scb); printf("Completed command without status.\n"); } else { ahc_print_path(ahc, scb); printf("Unknown protocol violation.\n"); ahc_dump_card_state(ahc); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data/command * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahc_reset_channel(ahc, 'A', TRUE); printf("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahc_name(ahc), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_assert_atn(ahc); ahc_outb(ahc, MSG_OUT, HOST_MSG); if (scb == NULL) { ahc_print_devinfo(ahc, &devinfo); ahc->msgout_buf[0] = MSG_ABORT_TASK; ahc->msgout_len = 1; ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahc_print_path(ahc, scb); scb->flags |= SCB_ABORT; } printf("Protocol violation %s. Attempting to abort.\n", ahc_lookup_phase_entry(curphase)->phasemsg); } } /* * Manual message loop handler. */ static void ahc_handle_message_phase(struct ahc_softc *ahc) { struct ahc_devinfo devinfo; u_int bus_phase; int end_session; ahc_fetch_devinfo(ahc, &devinfo); end_session = FALSE; bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; reswitch: switch (ahc->msg_type) { case MSG_TYPE_INITIATOR_MSGOUT: { int lastbyte; int phasemis; int msgdone; if (ahc->msgout_len == 0) panic("HOST_MSG_LOOP interrupt with no active message"); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("INITIATOR_MSG_OUT"); } #endif phasemis = bus_phase != P_MESGOUT; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif if (bus_phase == P_MESGIN) { /* * Change gears and see if * this messages is of interest to * us or should be passed back to * the sequencer. */ ahc_outb(ahc, CLRSINT1, CLRATNO); ahc->send_msg_perror = FALSE; ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN; ahc->msgin_index = 0; goto reswitch; } end_session = TRUE; break; } if (ahc->send_msg_perror) { ahc_outb(ahc, CLRSINT1, CLRATNO); ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->send_msg_perror); #endif ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { /* * The target has requested a retry. * Re-assert ATN, reset our message index to * 0, and try again. 
*/ ahc->msgout_index = 0; ahc_assert_atn(ahc); } lastbyte = ahc->msgout_index == (ahc->msgout_len - 1); if (lastbyte) { /* Last byte is signified by dropping ATN */ ahc_outb(ahc, CLRSINT1, CLRATNO); } /* * Clear our interrupt status and present * the next byte on the bus. */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgout_buf[ahc->msgout_index]); #endif ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_INITIATOR_MSGIN: { int phasemis; int message_done; #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("INITIATOR_MSG_IN"); } #endif phasemis = bus_phase != P_MESGIN; if (phasemis) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { printf(" PHASEMIS %s\n", ahc_lookup_phase_entry(bus_phase) ->phasemsg); } #endif ahc->msgin_index = 0; if (bus_phase == P_MESGOUT && (ahc->send_msg_perror == TRUE || (ahc->msgout_len != 0 && ahc->msgout_index == 0))) { ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT; goto reswitch; } end_session = TRUE; break; } /* Pull the byte in without acking it */ ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) printf(" byte 0x%x\n", ahc->msgin_buf[ahc->msgin_index]); #endif message_done = ahc_parse_msg(ahc, &devinfo); if (message_done) { /* * Clear our incoming message buffer in case there * is another message following this one. */ ahc->msgin_index = 0; /* * If this message elicited a response, * assert ATN so the target takes us to the * message out phase. */ if (ahc->msgout_len != 0) { #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) { ahc_print_devinfo(ahc, &devinfo); printf("Asserting ATN for response\n"); } #endif ahc_assert_atn(ahc); } } else ahc->msgin_index++; if (message_done == MSGLOOP_TERMINATED) { end_session = TRUE; } else { /* Ack the byte */ ahc_outb(ahc, CLRSINT1, CLRREQINIT); ahc_inb(ahc, SCSIDATL); } break; } case MSG_TYPE_TARGET_MSGIN: { int msgdone; int msgout_request; if (ahc->msgout_len == 0) panic("Target MSGIN with no active message"); /* * If we interrupted a mesgout session, the initiator * will not know this until our first REQ. So, we * only honor mesgout requests after we've sent our * first byte. */ if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0 && ahc->msgout_index > 0) msgout_request = TRUE; else msgout_request = FALSE; if (msgout_request) { /* * Change gears and see if * this message is of interest to * us or should be passed back to * the sequencer. */ ahc->msg_type = MSG_TYPE_TARGET_MSGOUT; ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO); ahc->msgin_index = 0; /* Dummy read to REQ for first byte */ ahc_inb(ahc, SCSIDATL); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); break; } msgdone = ahc->msgout_index == ahc->msgout_len; if (msgdone) { ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); end_session = TRUE; break; } /* * Present the next byte on the bus. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]); break; } case MSG_TYPE_TARGET_MSGOUT: { int lastbyte; int msgdone; /* * The initiator signals that this is * the last byte by dropping ATN. */ lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0; /* * Read the latched byte, but turn off SPIOEN first * so that we don't inadvertently cause a REQ for the * next byte.
*/ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN); ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL); msgdone = ahc_parse_msg(ahc, &devinfo); if (msgdone == MSGLOOP_TERMINATED) { /* * The message is *really* done in that it caused * us to go to bus free. The sequencer has already * been reset at this point, so pull the ejection * handle. */ return; } ahc->msgin_index++; /* * XXX Read spec about initiator dropping ATN too soon * and use msgdone to detect it. */ if (msgdone == MSGLOOP_MSGCOMPLETE) { ahc->msgin_index = 0; /* * If this message elicited a response, transition * to the Message in phase and send it. */ if (ahc->msgout_len != 0) { ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO); ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); ahc->msg_type = MSG_TYPE_TARGET_MSGIN; ahc->msgin_index = 0; break; } } if (lastbyte) end_session = TRUE; else { /* Ask for the next byte. */ ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN); } break; } default: panic("Unknown REQINIT message type"); } if (end_session) { ahc_clear_msg_state(ahc); ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP); } else ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP); } /* * See if we sent a particular extended message to the target. * If "full" is true, return true only if the target saw the full * message. If "full" is false, return true if the target saw at * least the first byte of the message. */ static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full) { int found; u_int index; found = FALSE; index = 0; while (index < ahc->msgout_len) { if (ahc->msgout_buf[index] == MSG_EXTENDED) { u_int end_index; end_index = index + 1 + ahc->msgout_buf[index + 1]; if (ahc->msgout_buf[index+2] == msgval && type == AHCMSG_EXT) { if (full) { if (ahc->msgout_index > end_index) found = TRUE; } else if (ahc->msgout_index > index) found = TRUE; } index = end_index; } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { /* Skip tag type and tag id or residue param */ index += 2; } else { /* Single byte message */ if (type == AHCMSG_1B && ahc->msgout_buf[index] == msgval && ahc->msgout_index > index) found = TRUE; index++; } if (found) break; } return (found); } /* * Wait for a complete incoming message, parse it, and respond accordingly. */ static int ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int reject; int done; int response; u_int targ_scsirate; done = MSGLOOP_IN_PROG; response = FALSE; reject = FALSE; tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); targ_scsirate = tinfo->scsirate; /* * Parse as much of the message as is available, * rejecting it if we don't support it. When * the entire message is available and has been * handled, return MSGLOOP_MSGCOMPLETE, indicating * that we have parsed an entire message. * * In the case of extended messages, we accept the length * byte outright and perform more checking once we know the * extended message type. */ switch (ahc->msgin_buf[0]) { case MSG_DISCONNECT: case MSG_SAVEDATAPOINTER: case MSG_CMDCOMPLETE: case MSG_RESTOREPOINTERS: case MSG_IGN_WIDE_RESIDUE: /* * End our message loop as these are messages * the sequencer handles on its own.
*/ done = MSGLOOP_TERMINATED; break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); /* FALLTHROUGH */ case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; case MSG_EXTENDED: { /* Wait for enough of the message to begin validation */ if (ahc->msgin_index < 2) break; switch (ahc->msgin_buf[2]) { case MSG_EXT_SDTR: { struct ahc_syncrate *syncrate; u_int period; u_int ppr_options; u_int offset; u_int saved_offset; if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { reject = TRUE; break; } /* * Wait until we have both args before validating * and acting on this message. * * Add one to MSG_EXT_SDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) break; period = ahc->msgin_buf[3]; ppr_options = 0; saved_offset = offset = ahc->msgin_buf[4]; syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, targ_scsirate & WIDEXFER, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received " "SDTR period %x, offset %x\n\t" "Filtered to period %x, offset %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, ahc->msgin_buf[3], saved_offset, period, offset); } ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * See if we initiated Sync Negotiation * and didn't have to fall down to async * transfers. */ if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) { /* We started it */ if (saved_offset != offset) { /* Went too low - force async */ reject = TRUE; } } else { /* * Send our own SDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_sdtr(ahc, devinfo, period, offset); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_WDTR: { u_int bus_width; u_int saved_width; u_int sending_reply; sending_reply = FALSE; if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { reject = TRUE; break; } /* * Wait until we have our arg before validating * and acting on this message. * * Add one to MSG_EXT_WDTR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) break; bus_width = ahc->msgin_buf[3]; saved_width = bus_width; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); if (bootverbose) { printf("(%s:%c:%d:%d): Received WDTR " "%x filtered to %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, bus_width); } if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) { /* * Don't send a WDTR back to the * target, since we asked first. * If the width went higher than our * request, reject it. */ if (saved_width > bus_width) { reject = TRUE; printf("(%s:%c:%d:%d): requested %dBit " "transfers. 
Rejecting...\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, 8 * (0x01 << bus_width)); bus_width = 0; } } else { /* * Send our own WDTR in reply */ if (bootverbose && devinfo->role == ROLE_INITIATOR) { printf("(%s:%c:%d:%d): Target " "Initiated WDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_wdtr(ahc, devinfo, bus_width); ahc->msgout_index = 0; response = TRUE; sending_reply = TRUE; } /* * After a wide message, we are async, but * some devices don't seem to honor this portion * of the spec. Force a renegotiation of the * sync component of our transfer agreement even * if our goal is async. By updating our width * after forcing the negotiation, we avoid * renegotiating for width. */ ahc_update_neg_request(ahc, devinfo, tstate, tinfo, AHC_NEG_ALWAYS); ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); if (sending_reply == FALSE && reject == FALSE) { /* * We will always have an SDTR to send. */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = TRUE; } done = MSGLOOP_MSGCOMPLETE; break; } case MSG_EXT_PPR: { struct ahc_syncrate *syncrate; u_int period; u_int offset; u_int bus_width; u_int ppr_options; u_int saved_width; u_int saved_offset; u_int saved_ppr_options; if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { reject = TRUE; break; } /* * Wait until we have all args before validating * and acting on this message. * * Add one to MSG_EXT_PPR_LEN to account for * the extended message preamble. */ if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) break; period = ahc->msgin_buf[3]; offset = ahc->msgin_buf[5]; bus_width = ahc->msgin_buf[6]; saved_width = bus_width; ppr_options = ahc->msgin_buf[7]; /* * According to the spec, a DT only * period factor with no DT option * set implies async. */ if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 && period == 9) offset = 0; saved_ppr_options = ppr_options; saved_offset = offset; /* * Mask out any options we don't support * on any controller. Transfer options are * only available if we are negotiating wide. */ ppr_options &= MSG_EXT_PPR_DT_REQ; if (bus_width == 0) ppr_options = 0; ahc_validate_width(ahc, tinfo, &bus_width, devinfo->role); syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period, &ppr_options, devinfo->role); ahc_validate_offset(ahc, tinfo, syncrate, &offset, bus_width, devinfo->role); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) { /* * If we are unable to do any of the * requested options (we went too low), * then we'll have to reject the message. 
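 *
 * (Hypothetical example: the target answers our PPR with an offset
 * that ahc_validate_offset() then clamps, so saved_offset != offset
 * and the whole agreement is rejected and forced back to
 * async/narrow below.)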
*/ if (saved_width > bus_width || saved_offset != offset || saved_ppr_options != ppr_options) { reject = TRUE; period = 0; offset = 0; bus_width = 0; ppr_options = 0; syncrate = NULL; } } else { if (devinfo->role != ROLE_TARGET) printf("(%s:%c:%d:%d): Target " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); else printf("(%s:%c:%d:%d): Initiator " "Initiated PPR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_construct_ppr(ahc, devinfo, period, offset, bus_width, ppr_options); ahc->msgout_index = 0; response = TRUE; } if (bootverbose) { printf("(%s:%c:%d:%d): Received PPR width %x, " "period %x, offset %x,options %x\n" "\tFiltered to width %x, period %x, " "offset %x, options %x\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, saved_width, ahc->msgin_buf[3], saved_offset, saved_ppr_options, bus_width, period, offset, ppr_options); } ahc_set_width(ahc, devinfo, bus_width, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, syncrate, period, offset, ppr_options, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); done = MSGLOOP_MSGCOMPLETE; break; } default: /* Unknown extended message. Reject it. */ reject = TRUE; break; } break; } #ifdef AHC_TARGET_MODE case MSG_BUS_DEV_RESET: ahc_handle_devreset(ahc, devinfo, CAM_BDR_SENT, "Bus Device Reset Received", /*verbose_level*/0); ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; case MSG_ABORT_TAG: case MSG_ABORT: case MSG_CLEAR_QUEUE: { int tag; /* Target mode messages */ if (devinfo->role != ROLE_TARGET) { reject = TRUE; break; } tag = SCB_LIST_NULL; if (ahc->msgin_buf[0] == MSG_ABORT_TAG) tag = ahc_inb(ahc, INITIATOR_TAG); ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, devinfo->lun, tag, ROLE_TARGET, CAM_REQ_ABORTED); tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[devinfo->lun]; if (lstate != NULL) { ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, ahc->msgin_buf[0], /*arg*/tag); ahc_send_lstate_events(ahc, lstate); } } ahc_restart(ahc); done = MSGLOOP_TERMINATED; break; } #endif case MSG_TERM_IO_PROC: default: reject = TRUE; break; } if (reject) { /* * Setup to reject the message. */ ahc->msgout_index = 0; ahc->msgout_len = 1; ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; done = MSGLOOP_MSGCOMPLETE; response = TRUE; } if (done != MSGLOOP_IN_PROG && !response) /* Clear the outgoing message buffer */ ahc->msgout_len = 0; return (done); } /* * Process a message reject message. */ static int ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { /* * What we care about here is if we had an * outstanding SDTR or WDTR message for this * target. If we did, this is a signal that * the target is refusing negotiation. */ struct scb *scb; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int scb_index; u_int last_msg; int response = 0; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); /* Might be necessary */ last_msg = ahc_inb(ahc, LAST_MSG); if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) { /* * Target does not support the PPR message. * Attempt to negotiate SPI-2 style. */ if (bootverbose) { printf("(%s:%c:%d:%d): PPR Rejected. 
" "Trying WDTR/SDTR\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } tinfo->goal.ppr_options = 0; tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) { /* note 8bit xfers */ printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " "8bit transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); /* * No need to clear the sync rate. If the target * did not accept the command, our syncrate is * unaffected. If the target started the negotiation, * but rejected our response, we already cleared the * sync rate before sending our WDTR. */ if (tinfo->goal.offset != tinfo->curr.offset) { /* Start the sync negotiation */ ahc->msgout_index = 0; ahc->msgout_len = 0; ahc_build_transfer_msg(ahc, devinfo); ahc->msgout_index = 0; response = 1; } } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) { /* note asynch xfers and clear flag */ ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, /*paused*/TRUE); printf("(%s:%c:%d:%d): refuses synchronous negotiation. " "Using asynchronous transfers\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) { int tag_type; int mask; tag_type = (scb->hscb->control & MSG_SIMPLE_TASK); if (tag_type == MSG_SIMPLE_TASK) { printf("(%s:%c:%d:%d): refuses tagged commands. " "Performing non-tagged I/O\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun); ahc_set_tags(ahc, devinfo, AHC_QUEUE_NONE); mask = ~0x23; } else { printf("(%s:%c:%d:%d): refuses %s tagged commands. " "Performing simple queue tagged I/O only\n", ahc_name(ahc), devinfo->channel, devinfo->target, devinfo->lun, tag_type == MSG_ORDERED_TASK ? "ordered" : "head of queue"); ahc_set_tags(ahc, devinfo, AHC_QUEUE_BASIC); mask = ~0x03; } /* * Resend the identify for this CCB as the target * may believe that the selection is invalid otherwise. */ ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & mask); scb->hscb->control &= mask; - ahc_set_transaction_tag(scb, /*enabled*/FALSE, + aic_set_transaction_tag(scb, /*enabled*/FALSE, /*type*/MSG_SIMPLE_TASK); ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); ahc_assert_atn(ahc); /* * This transaction is now at the head of * the untagged queue for this target. */ if ((ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; untagged_q = &(ahc->untagged_queues[devinfo->target_offset]); TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; } ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), scb->hscb->tag); /* * Requeue all tagged commands for this target * currently in our posession so they can be * converted to untagged commands. */ ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), SCB_GET_CHANNEL(ahc, scb), SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQUEUE_REQ, SEARCH_COMPLETE); } else { /* * Otherwise, we ignore it. */ printf("%s:%c:%d: Message reject for %x -- ignored\n", ahc_name(ahc), devinfo->channel, devinfo->target, last_msg); } return (response); } /* * Process an ingnore wide residue message. 
*/ static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) { u_int scb_index; struct scb *scb; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); /* * XXX Actually check data direction in the sequencer? * Perhaps add datadir to some spare bits in the hscb? */ if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 - || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { + || aic_get_transfer_dir(scb) != CAM_DIR_IN) { /* * Ignore the message if we haven't * seen an appropriate data phase yet. */ } else { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. Otherwise, subtract a byte * and update the residual count accordingly. */ uint32_t sgptr; sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); if ((sgptr & SG_LIST_NULL) != 0 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) { /* * If the residual occurred on the last * transfer and the transfer request was * expected to end on an odd count, do * nothing. */ } else { struct ahc_dma_seg *sg; uint32_t data_cnt; uint32_t data_addr; uint32_t sglen; /* Pull in all of the sgptr */ sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR); data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT); if ((sgptr & SG_LIST_NULL) != 0) { /* * The residual data count is not updated * for the command run to completion case. * Explicitly zero the count. */ data_cnt &= ~AHC_SG_LEN_MASK; } data_addr = ahc_inl(ahc, SHADDR); data_cnt += 1; data_addr -= 1; sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* * The residual sg ptr points to the next S/G * to load so we must go back one. */ sg--; - sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; + sglen = aic_le32toh(sg->len) & AHC_SG_LEN_MASK; if (sg != scb->sg_list && sglen < (data_cnt & AHC_SG_LEN_MASK)) { sg--; - sglen = ahc_le32toh(sg->len); + sglen = aic_le32toh(sg->len); /* * Preserve High Address and SG_LIST bits * while setting the count to 1. */ data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK)); - data_addr = ahc_le32toh(sg->addr) + data_addr = aic_le32toh(sg->addr) + (sglen & AHC_SG_LEN_MASK) - 1; /* * Increment sg so it points to the * "next" sg. */ sg++; sgptr = ahc_sg_virt_to_bus(scb, sg); } ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr); ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt); /* * Toggle the "oddness" of the transfer length * to handle this mid-transfer ignore wide * residue. This ensures that the oddness is * correct for subsequent data transfers. */ ahc_outb(ahc, SCB_LUN, ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD); } } } /* * Reinitialize the data pointers for the active transfer * based on its current residual. 
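 *
 * The restart address falls out of simple arithmetic. With
 * illustrative numbers only: if the current S/G element maps
 * 0x2000 bytes at bus address 0x100000 and 0x800 bytes remain,
 * the transfer resumes at 0x100000 + 0x2000 - 0x800 = 0x101800
 * with a count of 0x800.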
*/ static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc) { struct scb *scb; struct ahc_dma_seg *sg; u_int scb_index; uint32_t sgptr; uint32_t resid; uint32_t dataptr; scb_index = ahc_inb(ahc, SCB_TAG); scb = ahc_lookup_scb(ahc, scb_index); sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; sg = ahc_sg_bus_to_virt(scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8) | ahc_inb(ahc, SCB_RESIDUAL_DATACNT); - dataptr = ahc_le32toh(sg->addr) - + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK) + dataptr = aic_le32toh(sg->addr) + + (aic_le32toh(sg->len) & AHC_SG_LEN_MASK) - resid; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { u_int dscommand1; dscommand1 = ahc_inb(ahc, DSCOMMAND1); ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0); ahc_outb(ahc, HADDR, - (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); + (aic_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS); ahc_outb(ahc, DSCOMMAND1, dscommand1); } ahc_outb(ahc, HADDR + 3, dataptr >> 24); ahc_outb(ahc, HADDR + 2, dataptr >> 16); ahc_outb(ahc, HADDR + 1, dataptr >> 8); ahc_outb(ahc, HADDR, dataptr); ahc_outb(ahc, HCNT + 2, resid >> 16); ahc_outb(ahc, HCNT + 1, resid >> 8); ahc_outb(ahc, HCNT, resid); if ((ahc->features & AHC_ULTRA2) == 0) { ahc_outb(ahc, STCNT + 2, resid >> 16); ahc_outb(ahc, STCNT + 1, resid >> 8); ahc_outb(ahc, STCNT, resid); } } /* * Handle the effects of issuing a bus device reset message. */ static void ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, cam_status status, char *message, int verbose_level) { #ifdef AHC_TARGET_MODE struct ahc_tmode_tstate* tstate; u_int lun; #endif int found; found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, status); #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mode peripheral * drivers affected by this action. */ tstate = ahc->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); ahc_send_async(ahc, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); if (message != NULL && (verbose_level <= bootverbose)) printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), message, devinfo->channel, devinfo->target, found); } #ifdef AHC_TARGET_MODE static void ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly.
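 *
 * In other words, a message constructor should append at
 * msgout_buf[ahc->msgout_len] and advance msgout_len by the
 * size it added, so that, for example, an SDTR can be queued
 * directly behind a WDTR without clobbering it.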
*/ ahc->msgout_index = 0; ahc->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahc_build_transfer_msg(ahc, devinfo); else panic("ahc_intr: AWAITING target message with no message"); ahc->msgout_index = 0; ahc->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ /* * Allocate a controller structure for a new device * and perform initial initialization. */ struct ahc_softc * ahc_alloc(void *platform_arg, char *name) { struct ahc_softc *ahc; int i; #ifndef __FreeBSD__ ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); if (!ahc) { printf("aic7xxx: cannot malloc softc!\n"); free(name, M_DEVBUF); return NULL; } #else ahc = device_get_softc((device_t)platform_arg); #endif memset(ahc, 0, sizeof(*ahc)); ahc->seep_config = malloc(sizeof(*ahc->seep_config), M_DEVBUF, M_NOWAIT); if (ahc->seep_config == NULL) { #ifndef __FreeBSD__ free(ahc, M_DEVBUF); #endif free(name, M_DEVBUF); return (NULL); } LIST_INIT(&ahc->pending_scbs); /* We don't know our unit number until the OSM sets it */ ahc->name = name; ahc->unit = -1; ahc->description = NULL; ahc->channel = 'A'; ahc->channel_b = 'B'; ahc->chip = AHC_NONE; ahc->features = AHC_FENONE; ahc->bugs = AHC_BUGNONE; ahc->flags = AHC_FNONE; /* * Default to all error reporting enabled with the * sequencer operating at its fastest speed. * The bus attach code may modify this. */ ahc->seqctl = FASTMODE; for (i = 0; i < AHC_NUM_TARGETS; i++) TAILQ_INIT(&ahc->untagged_queues[i]); if (ahc_platform_alloc(ahc, platform_arg) != 0) { ahc_free(ahc); ahc = NULL; } return (ahc); } int ahc_softc_init(struct ahc_softc *ahc) { /* The IRQMS bit is only valid on VL and EISA chips */ if ((ahc->chip & AHC_PCI) == 0) ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS; else ahc->unpause = 0; ahc->pause = ahc->unpause | PAUSE; /* XXX The shared scb data stuff should be deprecated */ if (ahc->scb_data == NULL) { ahc->scb_data = malloc(sizeof(*ahc->scb_data), M_DEVBUF, M_NOWAIT); if (ahc->scb_data == NULL) return (ENOMEM); memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); } return (0); } void ahc_softc_insert(struct ahc_softc *ahc) { struct ahc_softc *list_ahc; -#if AHC_PCI_CONFIG > 0 +#if AIC_PCI_CONFIG > 0 /* * Second Function PCI devices need to inherit some * settings from function 0. */ if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI && (ahc->features & AHC_MULTI_FUNC) != 0) { TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { - ahc_dev_softc_t list_pci; - ahc_dev_softc_t pci; + aic_dev_softc_t list_pci; + aic_dev_softc_t pci; list_pci = list_ahc->dev_softc; pci = ahc->dev_softc; - if (ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) - && ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci)) { + if (aic_get_pci_slot(list_pci) == aic_get_pci_slot(pci) + && aic_get_pci_bus(list_pci) == aic_get_pci_bus(pci)) { struct ahc_softc *master; struct ahc_softc *slave; - if (ahc_get_pci_function(list_pci) == 0) { + if (aic_get_pci_function(list_pci) == 0) { master = list_ahc; slave = ahc; } else { master = ahc; slave = list_ahc; } slave->flags &= ~AHC_BIOS_ENABLED; slave->flags |= master->flags & AHC_BIOS_ENABLED; slave->flags &= ~AHC_PRIMARY_CHANNEL; slave->flags |= master->flags & AHC_PRIMARY_CHANNEL; break; } } } #endif /* * Insertion sort into our list of softcs.
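 *
 * ahc_softc_comp() supplies the ordering; we walk the tailq
 * until it reports that the new softc sorts before the current
 * element and insert there, falling through to a tail insert
 * when we run off the end of the list.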
*/ list_ahc = TAILQ_FIRST(&ahc_tailq); while (list_ahc != NULL && ahc_softc_comp(ahc, list_ahc) <= 0) list_ahc = TAILQ_NEXT(list_ahc, links); if (list_ahc != NULL) TAILQ_INSERT_BEFORE(list_ahc, ahc, links); else TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); ahc->init_level++; } /* * Verify that the passed in softc pointer is for a * controller that is still configured. */ struct ahc_softc * ahc_find_softc(struct ahc_softc *ahc) { struct ahc_softc *list_ahc; TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { if (list_ahc == ahc) return (ahc); } return (NULL); } void ahc_set_unit(struct ahc_softc *ahc, int unit) { ahc->unit = unit; } void ahc_set_name(struct ahc_softc *ahc, char *name) { if (ahc->name != NULL) free(ahc->name, M_DEVBUF); ahc->name = name; } void ahc_free(struct ahc_softc *ahc) { int i; + ahc_terminate_recovery_thread(ahc); switch (ahc->init_level) { default: case 5: ahc_shutdown(ahc); - TAILQ_REMOVE(&ahc_tailq, ahc, links); /* FALLTHROUGH */ case 4: - ahc_dmamap_unload(ahc, ahc->shared_data_dmat, + aic_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 3: - ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, + aic_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); - ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, + aic_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); /* FALLTHROUGH */ case 2: - ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); + aic_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: #ifndef __linux__ - ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); + aic_dma_tag_destroy(ahc, ahc->buffer_dmat); #endif break; case 0: break; } #ifndef __linux__ - ahc_dma_tag_destroy(ahc, ahc->parent_dmat); + aic_dma_tag_destroy(ahc, ahc->parent_dmat); #endif ahc_platform_free(ahc); ahc_fini_scbdata(ahc); for (i = 0; i < AHC_NUM_TARGETS; i++) { struct ahc_tmode_tstate *tstate; tstate = ahc->enabled_targets[i]; if (tstate != NULL) { #ifdef AHC_TARGET_MODE int j; for (j = 0; j < AHC_NUM_LUNS; j++) { struct ahc_tmode_lstate *lstate; lstate = tstate->enabled_luns[j]; if (lstate != NULL) { xpt_free_path(lstate->path); free(lstate, M_DEVBUF); } } #endif free(tstate, M_DEVBUF); } } #ifdef AHC_TARGET_MODE if (ahc->black_hole != NULL) { xpt_free_path(ahc->black_hole->path); free(ahc->black_hole, M_DEVBUF); } #endif if (ahc->name != NULL) free(ahc->name, M_DEVBUF); if (ahc->seep_config != NULL) free(ahc->seep_config, M_DEVBUF); #ifndef __FreeBSD__ free(ahc, M_DEVBUF); #endif return; } void ahc_shutdown(void *arg) { struct ahc_softc *ahc; int i; ahc = (struct ahc_softc *)arg; /* This will reset most registers to 0, but not all */ ahc_reset(ahc, /*reinit*/FALSE); ahc_outb(ahc, SCSISEQ, 0); ahc_outb(ahc, SXFRCTL0, 0); ahc_outb(ahc, DSPCISTATUS, 0); for (i = TARG_SCSIRATE; i < SCSICONF; i++) ahc_outb(ahc, i, 0); } /* * Reset the controller and record some information about it * that is only available just after a reset. If "reinit" is * non-zero, this reset occurred after initial configuration * and the caller requests that the chip be fully reinitialized * to a runnable state. Chip interrupts are *not* enabled after * a reinitialization. The caller must enable interrupts via * ahc_intr_enable(). */ int ahc_reset(struct ahc_softc *ahc, int reinit) { u_int sblkctl; u_int sxfrctl1_a, sxfrctl1_b; int error; int wait; /* * Preserve the value of the SXFRCTL1 register for all channels. * It contains settings that affect termination and we don't want * to disturb the integrity of the bus.
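 *
 * Concretely, this means sampling SXFRCTL1 (on both buses for
 * TWIN capable aic7770 parts) before asserting CHIPRST, then
 * writing the saved values back once the chip acknowledges the
 * reset, so the termination configuration survives.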
*/ ahc_pause(ahc); - if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) { - /* - * The chip has not been initialized since - * PCI/EISA/VLB bus reset. Don't trust - * "left over BIOS data". - */ - ahc->flags |= AHC_NO_BIOS_INIT; - } sxfrctl1_b = 0; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { u_int sblkctl; /* * Save channel B's settings in case this chip * is set up for TWIN channel operation. */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); /* * Ensure that the reset has finished. We delay 1000us * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. */ wait = 1000; do { - ahc_delay(1000); + aic_delay(1000); } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); if (wait == 0) { printf("%s: WARNING - Failed chip reset! " "Trying to initialize anyway.\n", ahc_name(ahc)); } ahc_outb(ahc, HCNTRL, ahc->pause); /* Determine channel configuration */ sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); /* No Twin Channel PCI cards */ if ((ahc->chip & AHC_PCI) != 0) sblkctl &= ~SELBUSB; switch (sblkctl) { case 0: /* Single Narrow Channel */ break; case 2: /* Wide Channel */ ahc->features |= AHC_WIDE; break; case 8: /* Twin Channel */ ahc->features |= AHC_TWIN; break; default: printf(" Unsupported adapter type. Ignoring\n"); return(-1); } /* * Reload sxfrctl1. * * We must always initialize STPWEN to 1 before we * restore the saved values. STPWEN is initialized * to a tri-state condition which can only be cleared * by turning it on. */ if ((ahc->features & AHC_TWIN) != 0) { u_int sblkctl; sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); } ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); error = 0; if (reinit != 0) /* * If a recovery action has forced a chip reset, * re-initialize the chip to our liking. */ error = ahc->bus_chip_init(ahc); #ifdef AHC_DUMP_SEQ else ahc_dumpseq(ahc); #endif return (error); } /* * Determine the number of SCBs available on the controller */ int ahc_probe_scbs(struct ahc_softc *ahc) { int i; for (i = 0; i < AHC_SCB_MAX; i++) { ahc_outb(ahc, SCBPTR, i); ahc_outb(ahc, SCB_BASE, i); if (ahc_inb(ahc, SCB_BASE) != i) break; ahc_outb(ahc, SCBPTR, 0); if (ahc_inb(ahc, SCB_BASE) != 0) break; } return (i); } static void ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static void ahc_build_free_scb_list(struct ahc_softc *ahc) { int scbsize; int i; scbsize = 32; if ((ahc->flags & AHC_LSCBS_ENABLED) != 0) scbsize = 64; for (i = 0; i < ahc->scb_data->maxhscbs; i++) { int j; ahc_outb(ahc, SCBPTR, i); /* * Touch all SCB bytes to avoid parity errors * should one of our debugging routines read * an otherwise uninitialized byte. */ for (j = 0; j < scbsize; j++) ahc_outb(ahc, SCB_BASE+j, 0xFF); /* Clear the control byte. */ ahc_outb(ahc, SCB_CONTROL, 0); /* Set the next pointer */ if ((ahc->flags & AHC_PAGESCBS) != 0) ahc_outb(ahc, SCB_NEXT, i+1); else ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); /* Make the tag number, SCSIID, and lun invalid */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); ahc_outb(ahc, SCB_SCSIID, 0xFF); ahc_outb(ahc, SCB_LUN, 0xFF); } if ((ahc->flags & AHC_PAGESCBS) != 0) { /* SCB 0 heads the free list.
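 *
 * The loop above linked each hardware SCB to its successor, so
 * with paging enabled the free list is simply the chain
 * 0 -> 1 -> ... -> (maxhscbs - 1), terminated just below with
 * SCB_LIST_NULL.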
*/ ahc_outb(ahc, FREE_SCBH, 0); } else { /* No free list. */ ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL); } /* Make sure that the last SCB terminates the free list */ ahc_outb(ahc, SCBPTR, i-1); ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); } static int ahc_init_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; SLIST_INIT(&scb_data->free_scbs); SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ scb_data->scbarray = (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, M_DEVBUF, M_NOWAIT); if (scb_data->scbarray == NULL) return (ENOMEM); memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahc_probe_scbs(ahc); if (ahc->scb_data->maxhscbs == 0) { printf("%s: No SCB space found\n", ahc_name(ahc)); return (ENXIO); } /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ - if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocation for our hscbs */ - if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, + if (aic_dmamem_alloc(ahc, scb_data->hscb_dmat, (void **)&scb_data->hscbs, BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ - ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, + aic_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, scb_data->hscbs, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb), ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our sense buffers */ - if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Allocate them */ - if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, + if (aic_dmamem_alloc(ahc, scb_data->sense_dmat, (void **)&scb_data->sense, BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { goto error_exit; } scb_data->init_level++; /* And permanently map them */ - ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, + aic_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, scb_data->sense, AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data), ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); scb_data->init_level++; /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ - if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, + if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ memset(scb_data->hscbs, 0, AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb)); ahc_alloc_scbs(ahc); if (scb_data->numscbs == 0) { printf("%s: ahc_init_scbdata - " "Unable to allocate initial scbs\n", ahc_name(ahc)); goto error_exit; } /* * Reserve the next queued SCB. */ ahc->next_queued_scb = ahc_get_scb(ahc); /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static void ahc_fini_scbdata(struct ahc_softc *ahc) { struct scb_data *scb_data; scb_data = ahc->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); - ahc_dmamap_unload(ahc, scb_data->sg_dmat, + aic_dmamap_unload(ahc, scb_data->sg_dmat, sg_map->sg_dmamap); - ahc_dmamem_free(ahc, scb_data->sg_dmat, + aic_dmamem_free(ahc, scb_data->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } - ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); + aic_dma_tag_destroy(ahc, scb_data->sg_dmat); } case 6: - ahc_dmamap_unload(ahc, scb_data->sense_dmat, + aic_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 5: - ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, + aic_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); - ahc_dmamap_destroy(ahc, scb_data->sense_dmat, + aic_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); case 4: - ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); + aic_dma_tag_destroy(ahc, scb_data->sense_dmat); case 3: - ahc_dmamap_unload(ahc, scb_data->hscb_dmat, + aic_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 2: - ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, + aic_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); - ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, + aic_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); case 1: - ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); + aic_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; case 0: break; } if (scb_data->scbarray != NULL) free(scb_data->scbarray, M_DEVBUF); } void ahc_alloc_scbs(struct ahc_softc *ahc) { struct scb_data *scb_data; struct scb *next_scb; struct sg_map_node *sg_map; bus_addr_t physaddr; struct ahc_dma_seg *segs; int newcount; int i; scb_data = ahc->scb_data; if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC) /* Can't allocate any more */ return; next_scb = &scb_data->scbarray[scb_data->numscbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of SCBs */ - if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, + if (aic_dmamem_alloc(ahc, scb_data->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); - ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, + aic_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE,
ahc_dmamap_cb, &sg_map->sg_physaddr, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); newcount = MIN(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; #ifndef __linux__ int error; #endif pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), M_DEVBUF, M_NOWAIT); if (pdata == NULL) break; next_scb->platform_data = pdata; next_scb->sg_map = sg_map; next_scb->sg_list = segs; /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. */ next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->ahc_softc = ahc; - next_scb->flags = SCB_FREE; + next_scb->flags = SCB_FLAG_NONE; #ifndef __linux__ - error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, + error = aic_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) break; #endif next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb->tag = ahc->scb_data->numscbs; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, next_scb, links.sle); segs += AHC_NSEG; physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); next_scb++; ahc->scb_data->numscbs++; } } void ahc_controller_info(struct ahc_softc *ahc, char *buf) { int len; len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); buf += len; if ((ahc->features & AHC_TWIN) != 0) len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " "B SCSI Id=%d, primary %c, ", ahc->our_id, ahc->our_id_b, (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A'); else { const char *speed; const char *type; speed = ""; if ((ahc->features & AHC_ULTRA) != 0) { speed = "Ultra "; } else if ((ahc->features & AHC_DT) != 0) { speed = "Ultra160 "; } else if ((ahc->features & AHC_ULTRA2) != 0) { speed = "Ultra2 "; } if ((ahc->features & AHC_WIDE) != 0) { type = "Wide"; } else { type = "Single"; } len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ", speed, type, ahc->channel, ahc->our_id); } buf += len; if ((ahc->flags & AHC_PAGESCBS) != 0) sprintf(buf, "%d/%d SCBs", ahc->scb_data->maxhscbs, AHC_MAX_QUEUE); else sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); } int ahc_chip_init(struct ahc_softc *ahc) { int term; int error; u_int i; u_int scsi_conf; u_int scsiseq_template; uint32_t physaddr; ahc_outb(ahc, SEQ_FLAGS, 0); ahc_outb(ahc, SEQ_FLAGS2, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ if (ahc->features & AHC_TWIN) { /* * Setup Channel B first. */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0; ahc_outb(ahc, SCSIID, ahc->our_id_b); scsi_conf = ahc_inb(ahc, SCSICONF + 1); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime_b|ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* Select Channel A */ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); } term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? 
STPWEN : 0; if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); else ahc_outb(ahc, SCSIID, ahc->our_id); scsi_conf = ahc_inb(ahc, SCSICONF); ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) |term|ahc->seltime |ENSTIMER|ACTNEGEN); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR); ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); /* There are no untagged SCBs active yet. */ for (i = 0; i < 16; i++) { ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0)); if ((ahc->flags & AHC_SCB_BTT) != 0) { int lun; /* * The SCB based BTT allows an entry per * target and lun pair. */ for (lun = 1; lun < AHC_NUM_LUNS; lun++) ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun)); } } /* All of our queues are empty */ for (i = 0; i < 256; i++) ahc->qoutfifo[i] = SCB_LIST_NULL; ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD); for (i = 0; i < 256; i++) ahc->qinfifo[i] = SCB_LIST_NULL; if ((ahc->features & AHC_MULTI_TID) != 0) { ahc_outb(ahc, TARGID, 0); ahc_outb(ahc, TARGID + 1, 0); } /* * Tell the sequencer where it can find our arrays in memory. */ physaddr = ahc->scb_data->hscb_busaddr; ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); physaddr = ahc->shared_data_busaddr; ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); /* * Initialize the group code to command length table. * This overrides the values in TARG_SCSIRATE, so only * set up the table after we have processed that information. */ ahc_outb(ahc, CMDSIZE_TABLE, 5); ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); if ((ahc->features & AHC_HS_MAILBOX) != 0) ahc_outb(ahc, HS_MAILBOX, 0); /* Tell the sequencer of our initial queue positions */ if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->tqinfifonext = 1; ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); } ahc->qinfifonext = 0; ahc->qoutfifonext = 0; if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext); ahc_outb(ahc, SDSCB_QOFF, 0); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); ahc_outb(ahc, QINPOS, ahc->qinfifonext); ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext); } /* We don't have any waiting selections */ ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); /* Our disconnection list is empty too */ ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); /* Message out buffer starts empty */ ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* * Set up the allowed SCSI Sequences based on operational mode. * If we are a target, we'll enable select in operations once * we've had a lun enabled. */ scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; if ((ahc->flags & AHC_INITIATORROLE) != 0) scsiseq_template |= ENRSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); /* Initialize our list of free SCBs. */ ahc_build_free_scb_list(ahc); /* * Tell the sequencer which SCB will be the next one it receives.
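 *
 * NEXT_QUEUED_SCB is the kernel's half of the qinfifo handshake:
 * it always names the reserved next_queued_scb, letting the
 * sequencer detect a mid-fetch change and retry its DMA, as the
 * qinfifo search code later in this file relies upon.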
*/ ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); /* * Load the Sequencer program and Enable the adapter * in "fast" mode. */ if (bootverbose) printf("%s: Downloading Sequencer Program...", ahc_name(ahc)); error = ahc_loadseq(ahc); if (error != 0) return (error); if ((ahc->features & AHC_ULTRA2) != 0) { int wait; /* * Wait for up to 500ms for our transceivers * to settle. If the adapter does not have * a cable attached, the transceivers may * never settle, so don't complain if we * fail here. */ for (wait = 5000; (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) - ahc_delay(100); + aic_delay(100); } ahc_restart(ahc); return (0); } /* * Start the board, ready for normal operation */ int ahc_init(struct ahc_softc *ahc) { int max_targ; + int error; u_int i; u_int scsi_conf; u_int ultraenb; u_int discenable; u_int tagenable; size_t driver_data_size; #ifdef AHC_DEBUG if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0) ahc->flags |= AHC_SEQUENCER_DEBUG; #endif #ifdef AHC_PRINT_SRAM printf("Scratch Ram:"); for (i = 0x20; i < 0x5f; i++) { if (((i % 8) == 0) && (i != 0)) { printf ("\n "); } printf (" 0x%x", ahc_inb(ahc, i)); } if ((ahc->features & AHC_MORE_SRAM) != 0) { for (i = 0x70; i < 0x7f; i++) { if (((i % 8) == 0) && (i != 0)) { printf ("\n "); } printf (" 0x%x", ahc_inb(ahc, i)); } } printf ("\n"); /* * Reading uninitialized scratch ram may * generate parity errors. */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); #endif max_targ = 15; /* * Assume we have a board at this stage and it has been reset. */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) ahc->our_id = ahc->our_id_b = 7; /* * Default to allowing initiator operations. */ ahc->flags |= AHC_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) ahc->features &= ~AHC_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ - if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING ? (bus_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHC_NSEG, /*maxsegsz*/AHC_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahc->buffer_dmat) != 0) { return (ENOMEM); } #endif ahc->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qinfifo and qoutfifo. * The qinfifo and qoutfifo are composed of 256 1 byte elements. * When providing for the target mode role, we must additionally * provide space for the incoming target command fifo and an extra * byte to deal with a dma bug in some chip versions. 
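 *
 * Illustrative sizing: the two fifos always cost 2 * 256 = 512
 * bytes; a target mode build then adds room for AHC_TMODE_CMDS
 * incoming command descriptors plus the single guard byte
 * reserved for the WideOdd DMA bug workaround.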
*/ driver_data_size = 2 * 256 * sizeof(uint8_t); if ((ahc->features & AHC_TARGETMODE) != 0) driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) + /*DMA WideOdd Bug Buffer*/1; - if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, + if (aic_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahc->shared_data_dmat) != 0) { return (ENOMEM); } ahc->init_level++; /* Allocation of driver data */ - if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, + if (aic_dmamem_alloc(ahc, ahc->shared_data_dmat, (void **)&ahc->qoutfifo, BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { return (ENOMEM); } ahc->init_level++; /* And permanently map it in */ - ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + aic_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, &ahc->shared_data_busaddr, /*flags*/0); if ((ahc->features & AHC_TARGETMODE) != 0) { ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS]; ahc->dma_bug_buf = ahc->shared_data_busaddr + driver_data_size - 1; /* All target command blocks start out invalid. */ for (i = 0; i < AHC_TMODE_CMDS; i++) ahc->targetcmds[i].cmd_valid = 0; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD); ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; } ahc->qinfifo = &ahc->qoutfifo[256]; ahc->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahc->scb_data->maxhscbs == 0) if (ahc_init_scbdata(ahc) != 0) return (ENOMEM); /* * Allocate a tstate to house information for our * initiator presence on the bus as well as the user * data for any target mode initiator. */ if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { printf("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } if ((ahc->features & AHC_TWIN) != 0) { if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { printf("%s: unable to allocate ahc_tmode_tstate. " "Failing attach\n", ahc_name(ahc)); return (ENOMEM); } } + /* + * Fire up a recovery thread for this controller. + */ + error = ahc_spawn_recovery_thread(ahc); + if (error != 0) + return (error); + if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) { ahc->flags |= AHC_PAGESCBS; } else { ahc->flags &= ~AHC_PAGESCBS; } #ifdef AHC_DEBUG if (ahc_debug & AHC_SHOW_MISC) { printf("%s: hardware scb %u bytes; kernel scb %u bytes; " "ahc_dma %u bytes\n", ahc_name(ahc), (u_int)sizeof(struct hardware_scb), (u_int)sizeof(struct scb), (u_int)sizeof(struct ahc_dma_seg)); } #endif /* AHC_DEBUG */ /* * Look at the information that board initialization or * the board bios has left us. */ if (ahc->features & AHC_TWIN) { scsi_conf = ahc_inb(ahc, SCSICONF + 1); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_B; } scsi_conf = ahc_inb(ahc, SCSICONF); if ((scsi_conf & RESET_SCSI) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) ahc->flags |= AHC_RESET_BUS_A; ultraenb = 0; tagenable = ALL_TARGETS_MASK; /* Grab the disconnection disable table and invert it for our needs */ if ((ahc->flags & AHC_USEDEFAULTS) != 0) { printf("%s: Host Adapter Bios disabled. 
Using default SCSI " "device parameters\n", ahc_name(ahc)); ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| AHC_TERM_ENB_A|AHC_TERM_ENB_B; discenable = ALL_TARGETS_MASK; if ((ahc->features & AHC_ULTRA) != 0) ultraenb = ALL_TARGETS_MASK; } else { discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) | ahc_inb(ahc, DISC_DSB)); if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) | ahc_inb(ahc, ULTRA_ENB); } if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) max_targ = 7; for (i = 0; i <= max_targ; i++) { struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int our_id; u_int target_id; char channel; channel = 'A'; our_id = ahc->our_id; target_id = i; if (i > 7 && (ahc->features & AHC_TWIN) != 0) { channel = 'B'; our_id = ahc->our_id_b; target_id = i % 8; } tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); /* Default to async narrow across the board */ memset(tinfo, 0, sizeof(*tinfo)); if (ahc->flags & AHC_USEDEFAULTS) { if ((ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; /* * These will be truncated when we determine the * connection type we have with the target. */ tinfo->user.period = ahc_syncrates->period; tinfo->user.offset = MAX_OFFSET; } else { u_int scsirate; uint16_t mask; /* Take the settings leftover in scratch RAM. */ scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); mask = (0x01 << i); if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; u_int maxsync; if ((scsirate & SOFS) == 0x0F) { /* * Haven't negotiated yet, * so the format is different. */ scsirate = (scsirate & SXFR) >> 4 | (ultraenb & mask) ? 0x08 : 0x0 | (scsirate & WIDEXFER); offset = MAX_OFFSET_ULTRA2; } else offset = ahc_inb(ahc, TARG_OFFSET + i); if ((scsirate & ~WIDEXFER) == 0 && offset != 0) /* Set to the lowest sync rate, 5MHz */ scsirate |= 0x1c; maxsync = AHC_SYNCRATE_ULTRA2; if ((ahc->features & AHC_DT) != 0) maxsync = AHC_SYNCRATE_DT; tinfo->user.period = ahc_find_period(ahc, scsirate, maxsync); if (offset == 0) tinfo->user.period = 0; else tinfo->user.offset = MAX_OFFSET; if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ && (ahc->features & AHC_DT) != 0) tinfo->user.ppr_options = MSG_EXT_PPR_DT_REQ; } else if ((scsirate & SOFS) != 0) { if ((scsirate & SXFR) == 0x40 && (ultraenb & mask) != 0) { /* Treat 10MHz as a non-ultra speed */ scsirate &= ~SXFR; ultraenb &= ~mask; } tinfo->user.period = ahc_find_period(ahc, scsirate, (ultraenb & mask) ? 
AHC_SYNCRATE_ULTRA : AHC_SYNCRATE_FAST); if (tinfo->user.period != 0) tinfo->user.offset = MAX_OFFSET; } if (tinfo->user.period == 0) tinfo->user.offset = 0; if ((scsirate & WIDEXFER) != 0 && (ahc->features & AHC_WIDE) != 0) tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; tinfo->user.protocol_version = 4; if ((ahc->features & AHC_DT) != 0) tinfo->user.transport_version = 3; else tinfo->user.transport_version = 2; tinfo->goal.protocol_version = 2; tinfo->goal.transport_version = 2; tinfo->curr.protocol_version = 2; tinfo->curr.transport_version = 2; } tstate->ultraenb = 0; } ahc->user_discenable = discenable; ahc->user_tagenable = tagenable; return (ahc->bus_chip_init(ahc)); } void ahc_intr_enable(struct ahc_softc *ahc, int enable) { u_int hcntrl; hcntrl = ahc_inb(ahc, HCNTRL); hcntrl &= ~INTEN; ahc->pause &= ~INTEN; ahc->unpause &= ~INTEN; if (enable) { hcntrl |= INTEN; ahc->pause |= INTEN; ahc->unpause |= INTEN; } ahc_outb(ahc, HCNTRL, hcntrl); } /* * Ensure that the card is paused in a location * outside of all critical sections and that all * pending work is completed prior to returning. * This routine should only be called from outside * an interrupt context. */ void ahc_pause_and_flushwork(struct ahc_softc *ahc) { int intstat; int maxloops; int paused; maxloops = 1000; ahc->flags |= AHC_ALL_INTERRUPTS; paused = FALSE; do { - if (paused) + if (paused) { ahc_unpause(ahc); + /* + * Give the sequencer some time to service + * any active selections. + */ + aic_delay(200); + } ahc_intr(ahc); ahc_pause(ahc); paused = TRUE; ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_clear_critical_section(ahc); intstat = ahc_inb(ahc, INTSTAT); } while (--maxloops && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) && ((intstat & INT_PEND) != 0 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0)); if (maxloops == 0) { printf("Infinite interrupt loop, INTSTAT = %x", ahc_inb(ahc, INTSTAT)); } ahc_platform_flushwork(ahc); ahc->flags &= ~AHC_ALL_INTERRUPTS; } int ahc_suspend(struct ahc_softc *ahc) { ahc_pause_and_flushwork(ahc); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ahc_unpause(ahc); return (EBUSY); } #ifdef AHC_TARGET_MODE /* * XXX What about ATIOs that have not yet been serviced? * Perhaps we should just refuse to be suspended if we * are acting in a target role. */ if (ahc->pending_device != NULL) { ahc_unpause(ahc); return (EBUSY); } #endif ahc_shutdown(ahc); return (0); } int ahc_resume(struct ahc_softc *ahc) { ahc_reset(ahc, /*reinit*/TRUE); ahc_intr_enable(ahc, TRUE); ahc_restart(ahc); return (0); } /************************** Busy Target Table *********************************/ /* * Return the untagged transaction id for a given target/channel lun. * Optionally, clear the entry. 
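 *
 * Two layouts exist: with AHC_SCB_BTT the table lives in the
 * per-lun hardware SCBs and is reached through SCBPTR, while
 * other controllers keep one BUSY_TARGETS scratch RAM entry per
 * target that is shared by all luns.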
*/ u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int scbid; u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl)); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset); } return (scbid); } void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL); } } void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid) { u_int target_offset; if ((ahc->flags & AHC_SCB_BTT) != 0) { u_int saved_scbptr; saved_scbptr = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, TCL_LUN(tcl)); ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid); ahc_outb(ahc, SCBPTR, saved_scbptr); } else { target_offset = TCL_TARGET_OFFSET(tcl); ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid); } } /************************** SCB and SCB queue management **********************/ int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role) { int targ = SCB_GET_TARGET(ahc, scb); char chan = SCB_GET_CHANNEL(ahc, scb); int slun = SCB_GET_LUN(scb); int match; match = ((chan == channel) || (channel == ALL_CHANNELS)); if (match != 0) match = ((targ == target) || (target == CAM_TARGET_WILDCARD)); if (match != 0) match = ((lun == slun) || (lun == CAM_LUN_WILDCARD)); if (match != 0) { #ifdef AHC_TARGET_MODE int group; group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code); if (role == ROLE_INITIATOR) { match = (group != XPT_FC_GROUP_TMODE) && ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); } else if (role == ROLE_TARGET) { match = (group == XPT_FC_GROUP_TMODE) && ((tag == scb->io_ctx->csio.tag_id) || (tag == SCB_LIST_NULL)); } #else /* !AHC_TARGET_MODE */ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); #endif /* AHC_TARGET_MODE */ } return match; } void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb) { int target; char channel; int lun; target = SCB_GET_TARGET(ahc, scb); lun = SCB_GET_LUN(scb); channel = SCB_GET_CHANNEL(ahc, scb); ahc_search_qinfifo(ahc, target, channel, lun, /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN, CAM_REQUEUE_REQ, SEARCH_COMPLETE); ahc_platform_freeze_devq(ahc, scb); } void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb) { struct scb *prev_scb; prev_scb = NULL; if (ahc_qinfifo_count(ahc) != 0) { u_int prev_tag; uint8_t prev_pos; prev_pos = ahc->qinfifonext - 1; prev_tag = ahc->qinfifo[prev_pos]; prev_scb = ahc_lookup_scb(ahc, prev_tag); } ahc_qinfifo_requeue(ahc, prev_scb, scb); if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } } static void ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb, struct scb *scb) { if (prev_scb == NULL) { ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); } else { prev_scb->hscb->next = scb->hscb->tag; ahc_sync_scb(ahc, prev_scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; scb->hscb->next = ahc->next_queued_scb->hscb->tag; 
ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } static int ahc_qinfifo_count(struct ahc_softc *ahc) { uint8_t qinpos; uint8_t diff; if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); diff = ahc->qinfifonext - qinpos; return (diff); } int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action) { struct scb *scb; struct scb *prev_scb; uint8_t qinstart; uint8_t qinpos; uint8_t qintail; uint8_t next; uint8_t prev; uint8_t curscbptr; int found; int have_qregs; qintail = ahc->qinfifonext; have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0; if (have_qregs) { qinstart = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinstart); } else qinstart = ahc_inb(ahc, QINPOS); qinpos = qinstart; found = 0; prev_scb = NULL; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } /* * Start with an empty queue. Entries that are not chosen * for removal will be re-added to the queue as we go. */ ahc->qinfifonext = qinpos; ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); while (qinpos != qintail) { scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); if (scb == NULL) { printf("qinpos = %d, SCB index = %d\n", qinpos, ahc->qinfifo[qinpos]); panic("Loop 1\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; - ostat = ahc_get_transaction_status(scb); + ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) - ahc_set_transaction_status(scb, status); - cstat = ahc_get_transaction_status(scb); + aic_set_transaction_status(scb, status); + cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) - ahc_freeze_scb(scb); + aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) printf("Inactive SCB in qinfifo\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: break; case SEARCH_COUNT: ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; break; } } else { ahc_qinfifo_requeue(ahc, prev_scb, scb); prev_scb = scb; } qinpos++; } if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); } if (action != SEARCH_COUNT && (found != 0) && (qinstart != ahc->qinfifonext)) { /* * The sequencer may be in the process of dmaing * down the SCB at the beginning of the queue. * This could be problematic if either the first, * or the second SCB is removed from the queue * (the first SCB includes a pointer to the "next" * SCB to dma). If we have removed any entries, swap * the first element in the queue with the next HSCB * so the sequencer will notice that NEXT_QUEUED_SCB * has changed during its dma attempt and will retry * the DMA. */ scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]); if (scb == NULL) { printf("found = %d, qinstart = %d, qinfifionext = %d\n", found, qinstart, ahc->qinfifonext); panic("First/Second Qinfifo fixup\n"); } /* * ahc_swap_with_next_hscb forces our next pointer to * point to the reserved SCB for future commands. Save * and restore our original next pointer to maintain * queue integrity. 
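 *
 * The swap exchanges hscb buffers between the removed head and
 * the reserved next_queued_scb; only the "next" link needs
 * manual repair, which is what the save/restore bracketing
 * ahc_swap_with_next_hscb() below accomplishes.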
*/ next = scb->hscb->next; ahc->scb_data->scbindex[scb->hscb->tag] = NULL; ahc_swap_with_next_hscb(ahc, scb); scb->hscb->next = next; ahc->qinfifo[qinstart] = scb->hscb->tag; /* Tell the card about the new head of the qinfifo. */ ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag); /* Fixup the tail "next" pointer. */ qintail = ahc->qinfifonext - 1; scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]); scb->hscb->next = ahc->next_queued_scb->hscb->tag; } /* * Search waiting for selection list. */ curscbptr = ahc_inb(ahc, SCBPTR); next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ prev = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { uint8_t scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printf("Waiting List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } scb = ahc_lookup_scb(ahc, scb_index); if (scb == NULL) { printf("scb_index = %d, next = %d\n", scb_index, next); panic("Waiting List traversal\n"); } if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, role)) { /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; - ostat = ahc_get_transaction_status(scb); + ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) - ahc_set_transaction_status(scb, + aic_set_transaction_status(scb, status); - cstat = ahc_get_transaction_status(scb); + cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) - ahc_freeze_scb(scb); + aic_freeze_scb(scb); if ((scb->flags & SCB_ACTIVE) == 0) - printf("Inactive SCB in Waiting List\n"); + printf("Inactive SCB in Wait List\n"); ahc_done(ahc, scb); /* FALLTHROUGH */ } case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; case SEARCH_COUNT: prev = next; next = ahc_inb(ahc, SCB_NEXT); break; } } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } ahc_outb(ahc, SCBPTR, curscbptr); - found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target, + found += ahc_search_untagged_queues(ahc, /*aic_io_ctx_t*/NULL, target, channel, lun, status, action); if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int -ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx, +ahc_search_untagged_queues(struct ahc_softc *ahc, aic_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action) { struct scb *scb; int maxtarget; int found; int i; if (action == SEARCH_COMPLETE) { /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); } found = 0; i = 0; if ((ahc->flags & AHC_SCB_BTT) == 0) { maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } } else { maxtarget = 0; } for (; i < maxtarget; i++) { struct scb_tailq *untagged_q; struct scb *next_scb; untagged_q = &(ahc->untagged_queues[i]); next_scb = TAILQ_FIRST(untagged_q); while (next_scb != NULL) { scb = next_scb; next_scb = TAILQ_NEXT(scb, links.tqe); /* * The head of the list may be the currently * active untagged command for a device. * We're only searching for commands that * have not been started. A transaction * marked active but still in the qinfifo * is removed by the qinfifo scanning code * above. 
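 *
 * Put another way, SCB_ACTIVE here means the command may
 * already own the bus, so skipping it keeps this scan limited
 * to transactions that have not yet started.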
*/ if ((scb->flags & SCB_ACTIVE) != 0) continue; if (ahc_match_scb(ahc, scb, target, channel, lun, SCB_LIST_NULL, ROLE_INITIATOR) == 0 || (ctx != NULL && ctx != scb->io_ctx)) continue; /* * We found an scb that needs to be acted on. */ found++; switch (action) { case SEARCH_COMPLETE: { cam_status ostat; cam_status cstat; - ostat = ahc_get_transaction_status(scb); + ostat = aic_get_transaction_status(scb); if (ostat == CAM_REQ_INPROG) - ahc_set_transaction_status(scb, status); - cstat = ahc_get_transaction_status(scb); + aic_set_transaction_status(scb, status); + cstat = aic_get_transaction_status(scb); if (cstat != CAM_REQ_CMP) - ahc_freeze_scb(scb); - if ((scb->flags & SCB_ACTIVE) == 0) - printf("Inactive SCB in untaggedQ\n"); + aic_freeze_scb(scb); ahc_done(ahc, scb); break; } case SEARCH_REMOVE: scb->flags &= ~SCB_UNTAGGEDQ; TAILQ_REMOVE(untagged_q, scb, links.tqe); break; case SEARCH_COUNT: break; } } } if (action == SEARCH_COMPLETE) ahc_release_untagged_queues(ahc); return (found); } int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state) { struct scb *scbp; u_int next; u_int prev; u_int count; u_int active_scb; count = 0; next = ahc_inb(ahc, DISCONNECTED_SCBH); prev = SCB_LIST_NULL; if (save_state) { /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); } else /* Silence compiler */ active_scb = SCB_LIST_NULL; while (next != SCB_LIST_NULL) { u_int scb_index; ahc_outb(ahc, SCBPTR, next); scb_index = ahc_inb(ahc, SCB_TAG); if (scb_index >= ahc->scb_data->numscbs) { printf("Disconnected List inconsistency. " "SCB index == %d, yet numscbs == %d.", scb_index, ahc->scb_data->numscbs); ahc_dump_card_state(ahc); panic("for safety"); } if (next == prev) { panic("Disconnected List Loop. " "cur SCBPTR == %x, prev SCBPTR == %x.", next, prev); } scbp = ahc_lookup_scb(ahc, scb_index); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, ROLE_INITIATOR)) { count++; if (remove) { next = ahc_rem_scb_from_disc_list(ahc, prev, next); } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } if (stop_on_first) break; } else { prev = next; next = ahc_inb(ahc, SCB_NEXT); } } if (save_state) ahc_outb(ahc, SCBPTR, active_scb); return (count); } /* * Remove an SCB from the on chip list of disconnected transactions. * This is empty/unused if we are not performing SCB paging. */ static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr) { u_int next; ahc_outb(ahc, SCBPTR, scbptr); next = ahc_inb(ahc, SCB_NEXT); ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); if (prev != SCB_LIST_NULL) { ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } else ahc_outb(ahc, DISCONNECTED_SCBH, next); return (next); } /* * Add the SCB as selected by SCBPTR onto the on chip list of * free hardware SCBs. This list is empty/unused if we are not * performing SCB paging. */ static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc) { /* * Invalidate the tag so that our abort * routines don't think it's active. */ ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); if ((ahc->flags & AHC_PAGESCBS) != 0) { ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH)); ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR)); } } /* * Manipulate the waiting for selection list and return the * scb that follows the one that we remove. */ static u_int ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev) { u_int curscb, next; /* * Select the SCB we want to abort and * pull the next pointer out of it. 
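 * The waiting list is a singly linked list threaded through the on-chip SCB_NEXT fields, so removal below is a standard unlink: the predecessor's SCB_NEXT (or WAITING_SCBH when removing the head) is rewritten to skip the departing entry.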
*/ curscb = ahc_inb(ahc, SCBPTR); ahc_outb(ahc, SCBPTR, scbpos); next = ahc_inb(ahc, SCB_NEXT); /* Clear the necessary fields */ ahc_outb(ahc, SCB_CONTROL, 0); ahc_add_curscb_to_free_list(ahc); /* update the waiting list */ if (prev == SCB_LIST_NULL) { /* First in the list */ ahc_outb(ahc, WAITING_SCBH, next); /* * Ensure we aren't attempting to perform * selection for this entry. */ ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO)); } else { /* * Select the scb that pointed to us * and update its next pointer. */ ahc_outb(ahc, SCBPTR, prev); ahc_outb(ahc, SCB_NEXT, next); } /* * Point us back at the original scb position. */ ahc_outb(ahc, SCBPTR, curscb); return next; } /******************************** Error Handling ******************************/ /* * Abort all SCBs that match the given description (target/channel/lun/tag), * setting their status to the passed in status if the status has not already * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer * is paused before it is called. */ int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status) { struct scb *scbp; struct scb *scbp_next; u_int active_scb; int i, j; int maxtarget; int minlun; int maxlun; int found; /* * Don't attempt to run any queued untagged transactions * until we are done with the abort process. */ ahc_freeze_untagged_queues(ahc); /* restore this when we're done */ active_scb = ahc_inb(ahc, SCBPTR); found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL, role, CAM_REQUEUE_REQ, SEARCH_COMPLETE); /* * Clean out the busy target table for any untagged commands. */ i = 0; maxtarget = 16; if (target != CAM_TARGET_WILDCARD) { i = target; if (channel == 'B') i += 8; maxtarget = i + 1; } if (lun == CAM_LUN_WILDCARD) { /* * Unless we are using an SCB based * busy targets table, there is only * one table entry for all luns of * a target. */ minlun = 0; maxlun = 1; if ((ahc->flags & AHC_SCB_BTT) != 0) maxlun = AHC_NUM_LUNS; } else { minlun = lun; maxlun = lun + 1; } if (role != ROLE_TARGET) { for (;i < maxtarget; i++) { for (j = minlun;j < maxlun; j++) { u_int scbid; u_int tcl; tcl = BUILD_TCL(i << 4, j); scbid = ahc_index_busy_tcl(ahc, tcl); scbp = ahc_lookup_scb(ahc, scbid); if (scbp == NULL || ahc_match_scb(ahc, scbp, target, channel, lun, tag, role) == 0) continue; ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j)); } } /* * Go through the disconnected list and remove any entries we * have queued for completion, 0'ing their control byte too. * We save the active SCB and restore it ourselves, so there * is no reason for this search to restore it too. */ ahc_search_disc_list(ahc, target, channel, lun, tag, /*stop_on_first*/FALSE, /*remove*/TRUE, /*save_state*/FALSE); } /* * Go through the hardware SCB array looking for commands that * were active but not on any list. In some cases, these remnants * might not still have mappings in the scbindex array (e.g. unexpected * bus free with the same scb queued for an abort). Don't hold this * against them. */ for (i = 0; i < ahc->scb_data->maxhscbs; i++) { u_int scbid; ahc_outb(ahc, SCBPTR, i); scbid = ahc_inb(ahc, SCB_TAG); scbp = ahc_lookup_scb(ahc, scbid); if ((scbp == NULL && scbid != SCB_LIST_NULL) || (scbp != NULL && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))) ahc_add_curscb_to_free_list(ahc); } /* * Go through the pending CCB list and look for * commands for this target that are still active. 
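 * (The pending list is the driver's own record of every uncompleted transaction, so it also catches SCBs that the qinfifo, waiting list, and disconnected list scans above could not see.)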
* These are other tagged commands that were * disconnected when the reset occurred. */ scbp_next = LIST_FIRST(&ahc->pending_scbs); while (scbp_next != NULL) { scbp = scbp_next; scbp_next = LIST_NEXT(scbp, pending_links); if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) { cam_status ostat; - ostat = ahc_get_transaction_status(scbp); + ostat = aic_get_transaction_status(scbp); if (ostat == CAM_REQ_INPROG) - ahc_set_transaction_status(scbp, status); - if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP) - ahc_freeze_scb(scbp); + aic_set_transaction_status(scbp, status); + if (aic_get_transaction_status(scbp) != CAM_REQ_CMP) + aic_freeze_scb(scbp); if ((scbp->flags & SCB_ACTIVE) == 0) printf("Inactive SCB on pending list\n"); ahc_done(ahc, scbp); found++; } } ahc_outb(ahc, SCBPTR, active_scb); ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status); ahc_release_untagged_queues(ahc); return found; } static void ahc_reset_current_bus(struct ahc_softc *ahc) { uint8_t scsiseq; ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST); scsiseq = ahc_inb(ahc, SCSISEQ); ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO); ahc_flush_device_writes(ahc); - ahc_delay(AHC_BUSRESET_DELAY); + aic_delay(AHC_BUSRESET_DELAY); /* Turn off the bus reset */ ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO); ahc_clear_intstat(ahc); /* Re-enable reset interrupts */ ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST); } int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset) { struct ahc_devinfo devinfo; u_int initiator, target, max_scsiid; u_int sblkctl; u_int scsiseq; u_int simode1; int found; int restart_needed; char cur_channel; ahc->pending_device = NULL; ahc_compile_devinfo(&devinfo, CAM_TARGET_WILDCARD, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_pause(ahc); /* Make sure the sequencer is in a safe location. */ ahc_clear_critical_section(ahc); /* * Run our command complete fifos to ensure that we perform * completion processing on any commands that 'completed' * before the reset occurred. */ ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE /* * XXX - In Twin mode, the tqinfifo may have commands * for an unaffected channel in it. However, if * we have run out of ATIO resources to drain that * queue, we may not get them all out here. Further, * the blocked transactions for the reset channel * should just be killed off, irrespective of whether * we are blocked on ATIO resources. Write a routine * to compact the tqinfifo appropriately. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { ahc_run_tqinfifo(ahc, /*paused*/TRUE); } #endif /* * Reset the bus if we are initiating this reset */ sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = 'A'; if ((ahc->features & AHC_TWIN) != 0 && ((sblkctl & SELBUSB) != 0)) cur_channel = 'B'; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); if (cur_channel != channel) { /* Case 1: Command for another bus is active * Stealthily reset the other bus without * upsetting the current bus. */ ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode.
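 * In the initiator-only case it is safe to wait, since ahc_reset_current_bus() masks ENSCSIRST around its own reset pulse and re-enables it once the interrupt status has been cleared.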
*/ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); ahc_outb(ahc, SBLKCTL, sblkctl); restart_needed = FALSE; } else { /* Case 2: A command from this bus is active or we're idle */ simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST); #ifdef AHC_TARGET_MODE /* * Bus resets clear ENSELI, so we cannot * defer re-enabling bus reset interrupts * if we are in target mode. */ if ((ahc->flags & AHC_TARGETROLE) != 0) simode1 |= ENSCSIRST; #endif ahc_outb(ahc, SIMODE1, simode1); if (initiate_reset) ahc_reset_current_bus(ahc); ahc_clear_intstat(ahc); ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP)); restart_needed = TRUE; } /* * Clean up all the state information for the * pending transactions on this bus. */ found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; #ifdef AHC_TARGET_MODE /* * Send an immediate notify ccb to all target mode peripheral * drivers affected by this action. */ for (target = 0; target <= max_scsiid; target++) { struct ahc_tmode_tstate* tstate; u_int lun; tstate = ahc->enabled_targets[target]; if (tstate == NULL) continue; for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct ahc_tmode_lstate* lstate; lstate = tstate->enabled_luns[lun]; if (lstate == NULL) continue; ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD, EVENT_TYPE_BUS_RESET, /*arg*/0); ahc_send_lstate_events(ahc, lstate); } } #endif /* Notify the XPT that a bus reset occurred */ ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD, AC_BUS_RESET, NULL); /* * Revert to async/narrow transfers until we renegotiate. */ for (target = 0; target <= max_scsiid; target++) { if (ahc->enabled_targets[target] == NULL) continue; for (initiator = 0; initiator <= max_scsiid; initiator++) { struct ahc_devinfo devinfo; ahc_compile_devinfo(&devinfo, target, initiator, CAM_LUN_WILDCARD, channel, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_CUR, /*paused*/TRUE); ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_CUR, /*paused*/TRUE); } } if (restart_needed) ahc_restart(ahc); else ahc_unpause(ahc); return found; } /***************************** Residual Processing ****************************/ /* * Calculate the residual for a just completed SCB. */ void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; struct status_pkt *spkt; uint32_t sgptr; uint32_t resid_sgptr; uint32_t resid; /* * 5 cases. * 1) No residual. * SG_RESID_VALID clear in sgptr. * 2) Transferless command * 3) Never performed any transfers. * sgptr has SG_FULL_RESID set. * 4) No residual but target did not * save data pointers after the * last transfer, so sgptr was * never updated. * 5) We have a partial residual. * Use residual_sgptr to determine * where we are.
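 * Illustrative example of case 5, assuming a transfer built from 4KB segments: if the target stopped after 1KB of the third segment, residual_datacnt holds the 3KB left in that segment and the loop below adds the full length of every later segment to form the total residual.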
*/ hscb = scb->hscb; - sgptr = ahc_le32toh(hscb->sgptr); + sgptr = aic_le32toh(hscb->sgptr); if ((sgptr & SG_RESID_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_RESID_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; spkt = &hscb->shared_data.status; - resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr); + resid_sgptr = aic_le32toh(spkt->residual_sg_ptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ - resid = ahc_get_transfer_length(scb); + resid = aic_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); } else { struct ahc_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ - resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; + resid = aic_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK; sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ - while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { + while ((aic_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) { sg++; - resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK; + resid += aic_le32toh(sg->len) & AHC_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) - ahc_set_residual(scb, resid); + aic_set_residual(scb, resid); else - ahc_set_sense_residual(scb, resid); + aic_set_sense_residual(scb, resid); #ifdef AHC_DEBUG if ((ahc_debug & AHC_SHOW_MISC) != 0) { ahc_print_path(ahc, scb); printf("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? "Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHC_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahc_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printf("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
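 * The event buffer is a small ring: event_r_idx chases event_w_idx modulo AHC_TMODE_EVENT_BUFFER_SIZE, and delivering each event consumes one queued immediate notify CCB.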
*/ void ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate) { struct ccb_hdr *ccbh; struct ccb_immed_notify *inot; while (lstate->event_r_idx != lstate->event_w_idx && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) { struct ahc_tmode_event *event; event = &lstate->event_buffer[lstate->event_r_idx]; SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle); inot = (struct ccb_immed_notify *)ccbh; switch (event->event_type) { case EVENT_TYPE_BUS_RESET: ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN; break; default: ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; inot->message_args[0] = event->event_type; inot->message_args[1] = event->event_arg; break; } inot->initiator_id = event->initiator_id; inot->sense_len = 0; xpt_done((union ccb *)inot); lstate->event_r_idx++; if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; } } #endif /******************** Sequencer Program Patching/Download *********************/ #ifdef AHC_DUMP_SEQ void ahc_dumpseq(struct ahc_softc* ahc) { int i; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < ahc->instruction_ram_size; i++) { uint8_t ins_bytes[4]; ahc_insb(ahc, SEQRAM, ins_bytes, 4); printf("0x%08x\n", ins_bytes[0] << 24 | ins_bytes[1] << 16 | ins_bytes[2] << 8 | ins_bytes[3]); } } #endif static int ahc_loadseq(struct ahc_softc *ahc) { struct cs cs_table[num_critical_sections]; u_int begin_set[num_critical_sections]; u_int end_set[num_critical_sections]; struct patch *cur_patch; u_int cs_count; u_int cur_cs; u_int i; u_int skip_addr; u_int sg_prefetch_cnt; int downloaded; uint8_t download_consts[7]; /* * Start out with 0 critical sections * that apply to this firmware load. */ cs_count = 0; cur_cs = 0; memset(begin_set, 0, sizeof(begin_set)); memset(end_set, 0, sizeof(end_set)); /* Setup downloadable constant table */ download_consts[QOUTFIFO_OFFSET] = 0; if (ahc->targetcmds != NULL) download_consts[QOUTFIFO_OFFSET] += 32; download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1; download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1; download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1); sg_prefetch_cnt = ahc->pci_cachesize; if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg))) sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg); download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt; download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1); download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1); cur_patch = patches; downloaded = 0; skip_addr = 0; ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM); ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR1, 0); for (i = 0; i < sizeof(seqprog)/4; i++) { if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) { /* * Don't download this instruction as it * is in a patch that was removed. */ continue; } if (downloaded == ahc->instruction_ram_size) { /* * We're about to exceed the instruction * storage capacity for this chip. Fail * the load. */ printf("\n%s: Program too large for instruction memory " "size of %d!\n", ahc_name(ahc), ahc->instruction_ram_size); return (ENOMEM); } /* * Move through the CS table until we find a CS * that might apply to this instruction. 
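 * Critical section boundaries are recorded in source program addresses, but patched-out instructions shift everything that follows, so the table entries below are filled in with the 'downloaded' counter rather than with i.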
*/ for (; cur_cs < num_critical_sections; cur_cs++) { if (critical_sections[cur_cs].end <= i) { if (begin_set[cs_count] == TRUE && end_set[cs_count] == FALSE) { cs_table[cs_count].end = downloaded; end_set[cs_count] = TRUE; cs_count++; } continue; } if (critical_sections[cur_cs].begin <= i && begin_set[cs_count] == FALSE) { cs_table[cs_count].begin = downloaded; begin_set[cs_count] = TRUE; } break; } ahc_download_instr(ahc, i, download_consts); downloaded++; } ahc->num_critical_sections = cs_count; if (cs_count != 0) { cs_count *= sizeof(struct cs); ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT); if (ahc->critical_sections == NULL) panic("ahc_loadseq: Could not malloc"); memcpy(ahc->critical_sections, cs_table, cs_count); } ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE); if (bootverbose) { printf(" %d instructions downloaded\n", downloaded); printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n", ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags); } return (0); } static int ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch, u_int start_instr, u_int *skip_addr) { struct patch *cur_patch; struct patch *last_patch; u_int num_patches; num_patches = sizeof(patches)/sizeof(struct patch); last_patch = &patches[num_patches]; cur_patch = *start_patch; while (cur_patch < last_patch && start_instr == cur_patch->begin) { if (cur_patch->patch_func(ahc) == 0) { /* Start rejecting code */ *skip_addr = start_instr + cur_patch->skip_instr; cur_patch += cur_patch->skip_patch; } else { /* Accepted this patch. Advance to the next * one and wait for our instruction pointer to * hit this point. */ cur_patch++; } } *start_patch = cur_patch; if (start_instr < *skip_addr) /* Still skipping */ return (0); return (1); } static void ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) { union ins_formats instr; struct ins_format1 *fmt1_ins; struct ins_format3 *fmt3_ins; u_int opcode; /* * The firmware is always compiled into a little endian format. */ - instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); + instr.integer = aic_le32toh(*(uint32_t*)&seqprog[instrptr * 4]); fmt1_ins = &instr.format1; fmt3_ins = NULL; /* Pull the opcode */ opcode = instr.format1.opcode; switch (opcode) { case AIC_OP_JMP: case AIC_OP_JC: case AIC_OP_JNC: case AIC_OP_CALL: case AIC_OP_JNE: case AIC_OP_JNZ: case AIC_OP_JE: case AIC_OP_JZ: { struct patch *cur_patch; int address_offset; u_int address; u_int skip_addr; u_int i; fmt3_ins = &instr.format3; address_offset = 0; address = fmt3_ins->address; cur_patch = patches; skip_addr = 0; for (i = 0; i < address;) { ahc_check_patch(ahc, &cur_patch, i, &skip_addr); if (skip_addr > i) { int end_addr; end_addr = MIN(address, skip_addr); address_offset += end_addr - i; i = skip_addr; } else { i++; } } address -= address_offset; fmt3_ins->address = address; /* FALLTHROUGH */ } case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: case AIC_OP_ADD: case AIC_OP_ADC: case AIC_OP_BMOV: if (fmt1_ins->parity != 0) { fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; if ((ahc->features & AHC_CMD_CHAN) == 0 && opcode == AIC_OP_BMOV) { /* * Block move was added at the same time * as the command channel. Verify that * this is only a move of a single element * and convert the BMOV to a MOV * (AND with an immediate of FF).
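 * ANDing a value with an immediate of 0xFF copies it unchanged, so the rewritten instruction behaves exactly like a single-element move on sequencers that predate the command channel.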
*/ if (fmt1_ins->immediate != 1) panic("%s: BMOV not supported\n", ahc_name(ahc)); fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } /* FALLTHROUGH */ case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; /* Calculate odd parity for the instruction */ for (i = 0, count = 0; i < 31; i++) { uint32_t mask; mask = 0x01 << i; if ((instr.integer & mask) != 0) count++; } if ((count & 0x01) == 0) instr.format1.parity = 1; } else { /* Compress the instruction for older sequencers */ if (fmt3_ins != NULL) { instr.integer = fmt3_ins->immediate | (fmt3_ins->source << 8) | (fmt3_ins->address << 16) | (fmt3_ins->opcode << 25); } else { instr.integer = fmt1_ins->immediate | (fmt1_ins->source << 8) | (fmt1_ins->destination << 16) | (fmt1_ins->ret << 24) | (fmt1_ins->opcode << 25); } } /* The sequencer is a little endian cpu */ - instr.integer = ahc_htole32(instr.integer); + instr.integer = aic_htole32(instr.integer); ahc_outsb(ahc, SEQRAM, instr.bytes, 4); break; default: panic("Unknown opcode encountered in seq program"); break; } } int ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point) { int printed; u_int printed_mask; if (cur_column != NULL && *cur_column >= wrap_point) { printf("\n"); *cur_column = 0; } printed = printf("%s[0x%x]", name, value); if (table == NULL) { printed += printf(" "); *cur_column += printed; return (printed); } printed_mask = 0; while (printed_mask != 0xFF) { int entry; for (entry = 0; entry < num_entries; entry++) { if (((value & table[entry].mask) != table[entry].value) || ((printed_mask & table[entry].mask) == table[entry].mask)) continue; printed += printf("%s%s", printed_mask == 0 ? ":(" : "|", table[entry].name); printed_mask |= table[entry].mask; break; } if (entry >= num_entries) break; } if (printed_mask != 0) printed += printf(") "); else printed += printf(" "); if (cur_column != NULL) *cur_column += printed; return (printed); } void ahc_dump_card_state(struct ahc_softc *ahc) { struct scb *scb; struct scb_tailq *untagged_q; u_int cur_col; int paused; int target; int maxtarget; int i; uint8_t last_phase; uint8_t qinpos; uint8_t qintail; uint8_t qoutpos; uint8_t scb_index; uint8_t saved_scbptr; if (ahc_is_paused(ahc)) { paused = 1; } else { paused = 0; ahc_pause(ahc); } saved_scbptr = ahc_inb(ahc, SCBPTR); last_phase = ahc_inb(ahc, LASTPHASE); printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n" "%s: Dumping Card State %s, at SEQADDR 0x%x\n", ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg, ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (paused) printf("Card was paused\n"); printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n", ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX), ahc_inb(ahc, ARG_2)); printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT), ahc_inb(ahc, SCBPTR)); cur_col = 0; if ((ahc->features & AHC_DT) != 0) ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50); ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50); ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50); ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50); ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50); ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50); ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50); ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50); ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50); ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), 
&cur_col, 50); ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50); ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50); ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50); ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50); ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50); ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50); ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50); ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50); ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50); if (cur_col != 0) printf("\n"); printf("STACK:"); for (i = 0; i < STACK_SIZE; i++) printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8)); printf("\nSCB count = %d\n", ahc->scb_data->numscbs); printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag); printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB)); /* QINFIFO */ printf("QINFIFO entries: "); if ((ahc->features & AHC_QUEUE_REGS) != 0) { qinpos = ahc_inb(ahc, SNSCB_QOFF); ahc_outb(ahc, SNSCB_QOFF, qinpos); } else qinpos = ahc_inb(ahc, QINPOS); qintail = ahc->qinfifonext; while (qinpos != qintail) { printf("%d ", ahc->qinfifo[qinpos]); qinpos++; } printf("\n"); printf("Waiting Queue entries: "); scb_index = ahc_inb(ahc, WAITING_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); printf("Disconnected Queue entries: "); scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD); printf("QOUTFIFO entries: "); qoutpos = ahc->qoutfifonext; i = 0; while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { printf("%d ", ahc->qoutfifo[qoutpos]); qoutpos++; } printf("\n"); printf("Sequencer Free SCB List: "); scb_index = ahc_inb(ahc, FREE_SCBH); i = 0; while (scb_index != SCB_LIST_NULL && i++ < 256) { ahc_outb(ahc, SCBPTR, scb_index); printf("%d ", scb_index); scb_index = ahc_inb(ahc, SCB_NEXT); } printf("\n"); printf("Sequencer SCB Info: "); for (i = 0; i < ahc->scb_data->maxhscbs; i++) { ahc_outb(ahc, SCBPTR, i); cur_col = printf("\n%3d ", i); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60); ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); } printf("\n"); printf("Pending list: "); i = 0; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { if (i++ > 256) break; cur_col = printf("\n%3d ", scb->hscb->tag); ahc_scb_control_print(scb->hscb->control, &cur_col, 60); ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60); ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60); if ((ahc->flags & AHC_PAGESCBS) == 0) { ahc_outb(ahc, SCBPTR, scb->hscb->tag); printf("("); ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60); ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60); printf(")"); } } printf("\n"); printf("Kernel Free SCB list: "); i = 0; SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { if (i++ > 256) break; printf("%d ", scb->hscb->tag); } printf("\n"); maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; for (target = 0; target <= maxtarget; target++) { untagged_q = &ahc->untagged_queues[target]; if (TAILQ_FIRST(untagged_q) == NULL) continue; printf("Untagged Q(%d): ", target); i = 0; TAILQ_FOREACH(scb, untagged_q, links.tqe) { if (i++ > 256) break; printf("%d ", scb->hscb->tag); } printf("\n"); } ahc_platform_dump_card_state(ahc); printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); ahc_outb(ahc, SCBPTR, saved_scbptr); if (paused == 0) ahc_unpause(ahc); } +/*************************** Timeout Handling *********************************/ +void +ahc_timeout(struct scb *scb) +{ + struct ahc_softc *ahc; + + ahc = scb->ahc_softc; + if ((scb->flags & SCB_ACTIVE) != 0) { + if ((scb->flags & SCB_TIMEDOUT) == 0) { + LIST_INSERT_HEAD(&ahc->timedout_scbs, scb, + timedout_links); + scb->flags |= SCB_TIMEDOUT; + } + ahc_wakeup_recovery_thread(ahc); + } +} + +/* + * ahc_recover_commands determines if any of the commands that have currently + * timedout are the root cause for this timeout. Innocent commands are given + * a new timeout while we wait for the command executing on the bus to timeout. + * This routine is invoked from a thread context so we are allowed to sleep. + * Our lock is not held on entry. + */ +void +ahc_recover_commands(struct ahc_softc *ahc) +{ + struct scb *scb; + long s; + int found; + int restart_needed; + u_int last_phase; + + ahc_lock(ahc, &s); + + /* + * Pause the controller and manually flush any + * commands that have just completed but that our + * interrupt handler has yet to see. + */ + ahc_pause_and_flushwork(ahc); + + if (LIST_EMPTY(&ahc->timedout_scbs) != 0) { + /* + * The timedout commands have already + * completed. This typically means + * that either the timeout value was on + * the hairy edge of what the device + * requires or - more likely - interrupts + * are not happening. + */ + printf("%s: Timedout SCBs already complete. " + "Interrupts may not be functioning.\n", ahc_name(ahc)); + ahc_unpause(ahc); + ahc_unlock(ahc, &s); + return; + } + + restart_needed = 0; + printf("%s: Recovery Initiated\n", ahc_name(ahc)); + ahc_dump_card_state(ahc); + + last_phase = ahc_inb(ahc, LASTPHASE); + while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) { + u_int active_scb_index; + u_int saved_scbptr; + int target; + int lun; + int i; + char channel; + + target = SCB_GET_TARGET(ahc, scb); + channel = SCB_GET_CHANNEL(ahc, scb); + lun = SCB_GET_LUN(scb); + + ahc_print_path(ahc, scb); + printf("SCB 0x%x - timed out\n", scb->hscb->tag); + if (scb->sg_count > 0) { + for (i = 0; i < scb->sg_count; i++) { + printf("sg[%d] - Addr 0x%x : Length %d\n", + i, + scb->sg_list[i].addr, + scb->sg_list[i].len & AHC_SG_LEN_MASK); + } + } + if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { + /* + * Been down this road before. + * Do a full bus reset. + */ + aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); +bus_reset: + found = ahc_reset_channel(ahc, channel, + /*Initiate Reset*/TRUE); + printf("%s: Issued Channel %c Bus Reset. " + "%d SCBs aborted\n", ahc_name(ahc), channel, + found); + continue; + } + + /* + * Remove the command from the timedout list in + * preparation for requeuing it. + */ + LIST_REMOVE(scb, timedout_links); + scb->flags &= ~SCB_TIMEDOUT; + + /* + * If we are a target, transition to bus free and report + * the timeout. + * + * The target/initiator that is holding up the bus may not + * be the same as the one that triggered this timeout + * (different commands have different timeout lengths).
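+ * The checks below therefore inspect the hardware state (SEQ_FLAGS, the active SCB_TAG, and SSTAT0/SSTAT1) before deciding how drastic a recovery action is required.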
+ * If the bus is idle and we are acting as the initiator + * for this request, queue a BDR message to the timed out + * target. Otherwise, if the timed out transaction is + * active: + * Initiator transaction: + * Stuff the message buffer with a BDR message and assert + * ATN in the hopes that the target will let go of the bus + * and go to the mesgout phase. If this fails, we'll + * get another timeout 2 seconds later which will attempt + * a bus reset. + * + * Target transaction: + * Transition to BUS FREE and report the error. + * It's good to be the target! + */ + saved_scbptr = ahc_inb(ahc, SCBPTR); + active_scb_index = ahc_inb(ahc, SCB_TAG); + + if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0 + && (active_scb_index < ahc->scb_data->numscbs)) { + struct scb *active_scb; + + /* + * If the active SCB is not us, assume that + * the active SCB has a longer timeout than + * the timedout SCB, and wait for the active + * SCB to timeout. + */ + active_scb = ahc_lookup_scb(ahc, active_scb_index); + if (active_scb != scb) { + u_int newtimeout; + + ahc_print_path(ahc, scb); + printf("Other SCB Timeout%s", + (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 + ? " again\n" : "\n"); + scb->flags |= SCB_OTHERTCL_TIMEOUT; + newtimeout = + MAX(aic_get_timeout(active_scb), + aic_get_timeout(scb)); + aic_scb_timer_reset(scb, newtimeout); + continue; + } + + /* It's us */ + if ((scb->flags & SCB_TARGET_SCB) != 0) { + + /* + * Send back any queued up transactions + * and properly record the error condition. + */ + ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), + SCB_GET_CHANNEL(ahc, scb), + SCB_GET_LUN(scb), + scb->hscb->tag, + ROLE_TARGET, + CAM_CMD_TIMEOUT); + + /* Will clear us from the bus */ + restart_needed = 1; + break; + } + + ahc_set_recoveryscb(ahc, active_scb); + ahc_outb(ahc, MSG_OUT, HOST_MSG); + ahc_outb(ahc, SCSISIGO, last_phase|ATNO); + ahc_print_path(ahc, active_scb); + printf("BDR message in message buffer\n"); + active_scb->flags |= SCB_DEVICE_RESET; + aic_scb_timer_reset(scb, 2 * 1000000); + } else if (last_phase != P_BUSFREE + && (ahc_inb(ahc, SSTAT1) & REQINIT) == 0) { + /* + * SCB is not identified, there + * is no pending REQ, and the sequencer + * has not seen a busfree. Looks like + * a stuck connection waiting to + * go busfree. Reset the bus. + */ + printf("%s: Connection stuck awaiting busfree or " + "Identify Msg.\n", ahc_name(ahc)); + goto bus_reset; + } else { + int disconnected; + + if (last_phase != P_BUSFREE + && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { + /* Hung target selection. Goto busfree */ + printf("%s: Hung target selection\n", + ahc_name(ahc)); + restart_needed = 1; + break; + } + + /* XXX Shouldn't panic. Just punt instead? */ + if ((scb->flags & SCB_TARGET_SCB) != 0) + panic("Timed-out target SCB but bus idle"); + + if (ahc_search_qinfifo(ahc, target, channel, lun, + scb->hscb->tag, ROLE_INITIATOR, + /*status*/0, SEARCH_COUNT) > 0) { + disconnected = FALSE; + } else { + disconnected = TRUE; + } + + if (disconnected) { + + ahc_set_recoveryscb(ahc, scb); + /* + * Actually re-queue this SCB in an attempt + * to select the device before it reconnects. + * In either case (selection or reselection), + * we will now issue a target reset to the + * timed-out device. + * + * Set the MK_MESSAGE control bit indicating + * that we desire to send a message. We + * also set the disconnected flag since + * in the paging case there is no guarantee + * that our SCB control byte matches the + * version on the card.
We don't want the + * sequencer to abort the command thinking + * an unsolicited reselection occurred. + */ + scb->hscb->control |= MK_MESSAGE|DISCONNECTED; + scb->flags |= SCB_DEVICE_RESET; + + /* + * Remove any cached copy of this SCB in the + * disconnected list in preparation for the + * queuing of our abort SCB. We use the + * same element in the SCB, SCB_NEXT, for + * both the qinfifo and the disconnected list. + */ + ahc_search_disc_list(ahc, target, channel, + lun, scb->hscb->tag, + /*stop_on_first*/TRUE, + /*remove*/TRUE, + /*save_state*/FALSE); + + /* + * In the non-paging case, the sequencer will + * never re-reference the in-core SCB. + * To make sure we are notified during + * reselection, set the MK_MESSAGE flag in + * the card's copy of the SCB. + */ + if ((ahc->flags & AHC_PAGESCBS) == 0) { + ahc_outb(ahc, SCBPTR, scb->hscb->tag); + ahc_outb(ahc, SCB_CONTROL, + ahc_inb(ahc, SCB_CONTROL) + | MK_MESSAGE); + } + + /* + * Clear out any entries in the QINFIFO first + * so we are the next SCB for this target + * to run. + */ + ahc_search_qinfifo(ahc, + SCB_GET_TARGET(ahc, scb), + channel, SCB_GET_LUN(scb), + SCB_LIST_NULL, + ROLE_INITIATOR, + CAM_REQUEUE_REQ, + SEARCH_COMPLETE); + ahc_print_path(ahc, scb); + printf("Queuing a BDR SCB\n"); + ahc_qinfifo_requeue_tail(ahc, scb); + ahc_outb(ahc, SCBPTR, saved_scbptr); + aic_scb_timer_reset(scb, 2 * 1000000); + } else { + /* Go "immediately" to the bus reset */ + /* This shouldn't happen */ + ahc_set_recoveryscb(ahc, scb); + ahc_print_path(ahc, scb); + printf("SCB %d: Immediate reset. " + "Flags = 0x%x\n", scb->hscb->tag, + scb->flags); + goto bus_reset; + } + } + break; + } + + /* + * Any remaining SCBs were not the "culprit", so remove + * them from the timeout list. The timer for these commands + * will be reset once the recovery SCB completes. + */ + while ((scb = LIST_FIRST(&ahc->timedout_scbs)) != NULL) { + + LIST_REMOVE(scb, timedout_links); + scb->flags &= ~SCB_TIMEDOUT; + } + + if (restart_needed) + ahc_restart(ahc); + else + ahc_unpause(ahc); + ahc_unlock(ahc, &s); +} + /************************* Target Mode ****************************************/ #ifdef AHC_TARGET_MODE cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure) { if ((ahc->features & AHC_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahc->black_hole; } else { u_int max_id; max_id = (ahc->features & AHC_WIDE) ?
15 : 7; if (ccb->ccb_h.target_id > max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_long s; u_int target; u_int lun; u_int target_mask; u_int our_id; int error; char channel; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if (cam_sim_bus(sim) == 0) our_id = ahc->our_id; else our_id = ahc->our_id_b; if (ccb->ccb_h.target_id != our_id) { /* * our_id represents our initiator ID, or * the ID of the first target to have an * enabled lun in target mode. There are * two cases that may preclude enabling a * target id other than our_id. * * o our_id is for an active initiator role. * Since the hardware does not support * reselections to the initiator role at * anything other than our_id, and our_id * is used by the hardware to indicate the * ID to use for both select-out and * reselect-out operations, the only target * ID we can support in this mode is our_id. * * o The MULTARGID feature is not available and * a previous target mode ID has been enabled. */ if ((ahc->features & AHC_MULTIROLE) != 0) { if ((ahc->features & AHC_MULTI_TID) != 0 && (ahc->flags & AHC_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahc->flags & AHC_INITIATORROLE) != 0 || ahc->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. */ status = CAM_TID_INVALID; } } else if ((ahc->features & AHC_MULTI_TID) == 0 && ahc->enabled_luns > 0) { status = CAM_TID_INVALID; } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahc->flags & AHC_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; ahc_flag saved_flags; printf("Configuring Target Mode\n"); ahc_lock(ahc, &s); if (LIST_FIRST(&ahc->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahc_unlock(ahc, &s); return; } saved_flags = ahc->flags; ahc->flags |= AHC_TARGETROLE; if ((ahc->features & AHC_MULTIROLE) == 0) ahc->flags &= ~AHC_INITIATORROLE; ahc_pause(ahc); error = ahc_loadseq(ahc); if (error != 0) { /* * Restore original configuration and notify * the caller that we cannot support target mode. * Since the adapter started out in this * configuration, the firmware load will succeed, * so there is no point in checking ahc_loadseq's * return value. 
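 * Restoring saved_flags before the reload matters: the firmware patches selected by ahc_loadseq() depend on the role bits in ahc->flags, so the reload rebuilds the original program.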
*/ ahc->flags = saved_flags; (void)ahc_loadseq(ahc); ahc_restart(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; } ahc_restart(ahc); ahc_unlock(ahc, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahc, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printf("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printf("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. */ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahc_alloc_tstate(ahc, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { free(lstate, M_DEVBUF); xpt_print_path(ccb->ccb_h.path); printf("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahc_lock(ahc, &s); ahc_pause(ahc); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahc->enabled_luns++; if ((ahc->features & AHC_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask |= target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahc, sim); our_id = SIM_SCSI_ID(ahc, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahc_inb(ahc, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahc->features & AHC_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; if (channel == 'A') ahc->our_id = target; else ahc->our_id_b = target; if (swap) ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); ahc_outb(ahc, SCSIID, target); if (swap) ahc_outb(ahc, SBLKCTL, sblkctl); } } } else ahc->black_hole = lstate; /* Allow select-in operations */ if (ahc->black_hole != NULL && ahc->enabled_luns > 0) { scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq |= ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); } ahc_unpause(ahc); ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printf("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahc_lock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printf("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahc_unlock(ahc, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printf("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printf("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahc_unlock(ahc, &s); return; } xpt_print_path(ccb->ccb_h.path); printf("Target mode disabled\n"); xpt_free_path(lstate->path); free(lstate, M_DEVBUF); ahc_pause(ahc); /* Can we clean up the target too? */ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahc->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahc_free_tstate(ahc, target, channel, /*force*/FALSE); if (ahc->features & AHC_MULTI_TID) { u_int targid_mask; targid_mask = ahc_inb(ahc, TARGID) | (ahc_inb(ahc, TARGID + 1) << 8); targid_mask &= ~target_mask; ahc_outb(ahc, TARGID, targid_mask); ahc_outb(ahc, TARGID+1, (targid_mask >> 8)); ahc_update_scsiid(ahc, targid_mask); } } } else { ahc->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahc->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq; scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq); scsiseq = ahc_inb(ahc, SCSISEQ); scsiseq &= ~ENSELI; ahc_outb(ahc, SCSISEQ, scsiseq); if ((ahc->features & AHC_MULTIROLE) == 0) { printf("Configuring Initiator Mode\n"); ahc->flags &= ~AHC_TARGETROLE; ahc->flags |= AHC_INITIATORROLE; /* * Returning to a configuration that * fit previously will always succeed. */ (void)ahc_loadseq(ahc); ahc_restart(ahc); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahc_unpause(ahc); ahc_unlock(ahc, &s); } } static void ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask) { u_int scsiid_mask; u_int scsiid; if ((ahc->features & AHC_MULTI_TID) == 0) panic("ahc_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. 
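 * For example, if targid_mask is 0x0050 (IDs 4 and 6 enabled) and the current OID is 2, the code below moves OID to ffs(0x0050) - 1 = 4, the lowest enabled target ID.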
*/ if ((ahc->features & AHC_ULTRA2) != 0) scsiid = ahc_inb(ahc, SCSIID_ULTRA2); else scsiid = ahc_inb(ahc, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahc->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, SCSIID_ULTRA2, scsiid); else ahc_outb(ahc, SCSIID, scsiid); } void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused) { struct target_cmd *cmd; /* * If the card supports auto-access pause, * we can access the card directly regardless * of whether it is paused or not. */ if ((ahc->features & AHC_AUTOPAUSE) != 0) paused = TRUE; ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD); while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahc_handle_target_cmd(ahc, cmd) != 0) break; cmd->cmd_valid = 0; - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahc->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { if ((ahc->features & AHC_HS_MAILBOX) != 0) { u_int hs_mailbox; hs_mailbox = ahc_inb(ahc, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS; ahc_outb(ahc, HS_MAILBOX, hs_mailbox); } else { if (!paused) ahc_pause(ahc); ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext & HOST_TQINPOS); if (!paused) ahc_unpause(ahc); } } } } static int ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd) { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahc, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahc->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. */ if (lstate == NULL) lstate = ahc->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahc->flags |= AHC_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ if (bootverbose) printf("%s: ATIOs exhausted\n", ahc_name(ahc)); return (1); } else ahc->flags &= ~AHC_TQINFIFO_BLOCKED; #if 0 printf("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahc->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahc->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. 
Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printf("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #if 0 printf("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahc->pending_device); #endif ahc->pending_device = lstate; - ahc_freeze_ccb((union ccb *)atio); + aic_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif Index: stable/4/sys/dev/aic7xxx/aic7xxx.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx.h (revision 125849) @@ -1,1352 +1,1369 @@ /* * Core definitions and data structures shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* - * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#79 $ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_H_ #define _AIC7XXX_H_ /* Register Definitions */ #include "aic7xxx_reg.h" /************************* Forward Declarations *******************************/ struct ahc_platform_data; struct scb_platform_data; struct seeprom_descriptor; /****************************** Useful Macros *********************************/ #ifndef MAX #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #endif #ifndef MIN #define MIN(a,b) (((a) < (b)) ? (a) : (b)) #endif #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define ALL_CHANNELS '\0' #define ALL_TARGETS_MASK 0xFFFF #define INITIATOR_WILDCARD (~0) #define SCSIID_TARGET(ahc, scsiid) \ (((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \ >> TID_SHIFT) #define SCSIID_OUR_ID(scsiid) \ ((scsiid) & OID) #define SCSIID_CHANNEL(ahc, scsiid) \ ((((ahc)->features & AHC_TWIN) != 0) \ ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \ : 'A') #define SCB_IS_SCSIBUS_B(ahc, scb) \ (SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B') #define SCB_GET_OUR_ID(scb) \ SCSIID_OUR_ID((scb)->hscb->scsiid) #define SCB_GET_TARGET(ahc, scb) \ SCSIID_TARGET((ahc), (scb)->hscb->scsiid) #define SCB_GET_CHANNEL(ahc, scb) \ SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) #define SCB_GET_LUN(scb) \ ((scb)->hscb->lun & LID) #define SCB_GET_TARGET_OFFSET(ahc, scb) \ (SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0)) #define SCB_GET_TARGET_MASK(ahc, scb) \ (0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb))) #ifdef AHC_DEBUG #define SCB_IS_SILENT(scb) \ ((ahc_debug & AHC_SHOW_MASKED_ERRORS) == 0 \ && (((scb)->flags & SCB_SILENT) != 0)) #else #define SCB_IS_SILENT(scb) \ (((scb)->flags & SCB_SILENT) != 0) #endif #define TCL_TARGET_OFFSET(tcl) \ ((((tcl) >> 4) & TID) >> 4) #define TCL_LUN(tcl) \ (tcl & (AHC_NUM_LUNS - 1)) #define BUILD_TCL(scsiid, lun) \ ((lun) | (((scsiid) & TID) << 4)) #ifndef AHC_TARGET_MODE #undef AHC_TMODE_ENABLE #define AHC_TMODE_ENABLE 0 #endif /**************************** Driver Constants ********************************/ /* * The maximum number of supported targets. */ #define AHC_NUM_TARGETS 16 /* * The maximum number of supported luns. * The identify message only supports 64 luns in SPI3. * You can have 2^64 luns when information unit transfers are enabled, * but it is doubtful this driver will ever support IUTs. */ #define AHC_NUM_LUNS 64 /* * The maximum transfer per S/G segment. */ #define AHC_MAXTRANSFER_SIZE 0x00ffffff /* limited by 24bit counter */ /* * The maximum amount of SCB storage in hardware on a controller. * This value represents an upper bound. Controllers vary in the number * they actually support. */ #define AHC_SCB_MAX 255 /* * The maximum number of concurrent transactions supported per driver instance. * Sequencer Control Blocks (SCBs) store per-transaction information. Although * the space for SCBs on the host adapter varies by model, the driver will * page the SCBs between host and controller memory as needed. We are limited * to 253 because: * 1) The 8bit nature of the RISC engine holds us to an 8bit value. * 2) We reserve one value, 255, to represent the invalid element. * 3) Our input queue scheme requires one SCB to always be reserved * in advance of queuing any SCBs. This takes us down to 254. * 4) To handle our output queue correctly on machines that only * support 32bit stores, we must clear the array 4 bytes at a * time. 
To avoid colliding with a DMA write from the sequencer, * we must be sure that 4 slots are empty when we write to clear * the queue. This reduces us to 253 SCBs: 1 that just completed * and the known three additional empty slots in the queue that * precede it. */ #define AHC_MAX_QUEUE 253 /* * The maximum amount of SCB storage we allocate in host memory. This * number should reflect the 1 additional SCB we require to handle our * qinfifo mechanism. */ #define AHC_SCB_MAX_ALLOC (AHC_MAX_QUEUE+1) /* * Ring Buffer of incoming target commands. * We allocate 256 to simplify the logic in the sequencer * by using the natural wrap point of an 8bit counter. */ #define AHC_TMODE_CMDS 256 /* Reset line assertion time in us */ #define AHC_BUSRESET_DELAY 25 /******************* Chip Characteristics/Operating Settings *****************/ /* * Chip Type * The chip order is from least sophisticated to most sophisticated. */ typedef enum { AHC_NONE = 0x0000, AHC_CHIPID_MASK = 0x00FF, AHC_AIC7770 = 0x0001, AHC_AIC7850 = 0x0002, AHC_AIC7855 = 0x0003, AHC_AIC7859 = 0x0004, AHC_AIC7860 = 0x0005, AHC_AIC7870 = 0x0006, AHC_AIC7880 = 0x0007, AHC_AIC7895 = 0x0008, AHC_AIC7895C = 0x0009, AHC_AIC7890 = 0x000a, AHC_AIC7896 = 0x000b, AHC_AIC7892 = 0x000c, AHC_AIC7899 = 0x000d, AHC_VL = 0x0100, /* Bus type VL */ AHC_EISA = 0x0200, /* Bus type EISA */ AHC_PCI = 0x0400, /* Bus type PCI */ AHC_BUS_MASK = 0x0F00 } ahc_chip; /* * Features available in each chip type. */ typedef enum { AHC_FENONE = 0x00000, AHC_ULTRA = 0x00001, /* Supports 20MHz Transfers */ AHC_ULTRA2 = 0x00002, /* Supports 40MHz Transfers */ AHC_WIDE = 0x00004, /* Wide Channel */ AHC_TWIN = 0x00008, /* Twin Channel */ AHC_MORE_SRAM = 0x00010, /* 80 bytes instead of 64 */ AHC_CMD_CHAN = 0x00020, /* Has a Command DMA Channel */ AHC_QUEUE_REGS = 0x00040, /* Has Queue management registers */ AHC_SG_PRELOAD = 0x00080, /* Can perform auto-SG preload */ AHC_SPIOCAP = 0x00100, /* Has a Serial Port I/O Cap Register */ AHC_MULTI_TID = 0x00200, /* Has bitmask of TIDs for select-in */ AHC_HS_MAILBOX = 0x00400, /* Has HS_MAILBOX register */ AHC_DT = 0x00800, /* Double Transition transfers */ AHC_NEW_TERMCTL = 0x01000, /* Newer termination scheme */ AHC_MULTI_FUNC = 0x02000, /* Multi-Function Twin Channel Device */ AHC_LARGE_SCBS = 0x04000, /* 64byte SCBs */ AHC_AUTORATE = 0x08000, /* Automatic update of SCSIRATE/OFFSET*/ AHC_AUTOPAUSE = 0x10000, /* Automatic pause on register access */ AHC_TARGETMODE = 0x20000, /* Has tested target mode support */ AHC_MULTIROLE = 0x40000, /* Space for two roles at a time */ AHC_REMOVABLE = 0x80000, /* Hot-Swap supported */ AHC_AIC7770_FE = AHC_FENONE, /* * The real 7850 does not support Ultra modes, but there are * several cards that use the generic 7850 PCI ID even though * they are using an Ultra capable chip (7859/7860). We start * out with the AHC_ULTRA feature set and then check the DEVSTATUS * register to determine if the capability is really present. */ AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, AHC_AIC7860_FE = AHC_AIC7850_FE, - AHC_AIC7870_FE = AHC_TARGETMODE, + AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE, AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, /* * Although we have space for both the initiator and * target roles on ULTRA2 chips, we currently disable * the initiator role to allow multi-scsi-id target mode * configurations. We can only respond on the same SCSI * ID as our initiator role if we allow initiator operation. 
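Since each chip's feature set above is just an OR of ahc_feature bits, capability tests in the driver reduce to simple bit masks. Below is a minimal, self-contained sketch of such a test; the #defines copy the enum values used here (note that this revision folds AHC_AUTOPAUSE into AHC_AIC7870_FE and everything derived from it), and main() is purely illustrative:

    #include <stdio.h>

    #define AHC_ULTRA       0x00001 /* Supports 20MHz Transfers */
    #define AHC_WIDE        0x00004 /* Wide Channel */
    #define AHC_AUTOPAUSE   0x10000 /* Automatic pause on register access */
    #define AHC_TARGETMODE  0x20000 /* Has tested target mode support */
    #define AHC_AIC7870_FE  (AHC_TARGETMODE|AHC_AUTOPAUSE)
    #define AHC_AIC7880_FE  (AHC_AIC7870_FE|AHC_ULTRA)

    int
    main(void)
    {
            unsigned int features = AHC_AIC7880_FE;

            if ((features & AHC_ULTRA) != 0)
                    printf("aic7880: Ultra (20MHz) rates available\n");
            if ((features & AHC_WIDE) == 0)
                    printf("aic7880: narrow (8bit) bus only\n");
            return (0);
    }

(The aic7880 feature set above does not include AHC_WIDE; wide support is reported per board elsewhere, so treat the second test as illustrative only.)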
* At some point, we should add a configuration knob to * allow both roles to be loaded. */ AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2 |AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_MULTI_TID |AHC_HS_MAILBOX|AHC_NEW_TERMCTL|AHC_LARGE_SCBS |AHC_TARGETMODE, AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_DT|AHC_AUTORATE|AHC_AUTOPAUSE, AHC_AIC7895_FE = AHC_AIC7880_FE|AHC_MORE_SRAM|AHC_AUTOPAUSE |AHC_CMD_CHAN|AHC_MULTI_FUNC|AHC_LARGE_SCBS, AHC_AIC7895C_FE = AHC_AIC7895_FE|AHC_MULTI_TID, AHC_AIC7896_FE = AHC_AIC7890_FE|AHC_MULTI_FUNC, AHC_AIC7899_FE = AHC_AIC7892_FE|AHC_MULTI_FUNC } ahc_feature; /* * Bugs in the silicon that we work around in software. */ typedef enum { AHC_BUGNONE = 0x00, /* * On all chips prior to the U2 product line, * the WIDEODD S/G segment feature does not * work during scsi->HostBus transfers. */ AHC_TMODE_WIDEODD_BUG = 0x01, /* * On the aic7890/91 Rev 0 chips, the autoflush * feature does not work. A manual flush of * the DMA FIFO is required. */ AHC_AUTOFLUSH_BUG = 0x02, /* * On many chips, cacheline streaming does not work. */ AHC_CACHETHEN_BUG = 0x04, /* * On the aic7896/97 chips, cacheline * streaming must be enabled. */ AHC_CACHETHEN_DIS_BUG = 0x08, /* * PCI 2.1 Retry failure on non-empty data fifo. */ AHC_PCI_2_1_RETRY_BUG = 0x10, /* * Controller does not handle cacheline residuals * properly on S/G segments if PCI MWI instructions * are allowed. */ AHC_PCI_MWI_BUG = 0x20, /* * An SCB upload using the SCB channel's * auto array entry copy feature may * corrupt data. This appears to only * occur on 66MHz systems. */ AHC_SCBCHAN_UPLOAD_BUG = 0x40 } ahc_bug; /* * Configuration specific settings. * The driver determines these settings by probing the * chip/controller's configuration. */ typedef enum { AHC_FNONE = 0x000, AHC_PRIMARY_CHANNEL = 0x003, /* * The channel that should * be probed first. */ AHC_USEDEFAULTS = 0x004, /* * For cards without an seeprom * or a BIOS to initialize the chip's * SRAM, we use the default target * settings. */ AHC_SEQUENCER_DEBUG = 0x008, AHC_SHARED_SRAM = 0x010, AHC_LARGE_SEEPROM = 0x020, /* Uses C56_66 not C46 */ AHC_RESET_BUS_A = 0x040, AHC_RESET_BUS_B = 0x080, AHC_EXTENDED_TRANS_A = 0x100, AHC_EXTENDED_TRANS_B = 0x200, AHC_TERM_ENB_A = 0x400, AHC_TERM_ENB_B = 0x800, AHC_INITIATORROLE = 0x1000, /* * Allow initiator operations on * this controller. */ AHC_TARGETROLE = 0x2000, /* * Allow target operations on this * controller. */ AHC_NEWEEPROM_FMT = 0x4000, AHC_RESOURCE_SHORTAGE = 0x8000, AHC_TQINFIFO_BLOCKED = 0x10000, /* Blocked waiting for ATIOs */ AHC_INT50_SPEEDFLEX = 0x20000, /* * Internal 50pin connector * sits behind an aic3860 */ AHC_SCB_BTT = 0x40000, /* * The busy targets table is * stored in SCB space rather * than SRAM. */ AHC_BIOS_ENABLED = 0x80000, AHC_ALL_INTERRUPTS = 0x100000, AHC_PAGESCBS = 0x400000, /* Enable SCB paging */ AHC_EDGE_INTERRUPT = 0x800000, /* Device uses edge triggered ints */ AHC_39BIT_ADDRESSING = 0x1000000, /* Use 39 bit addressing scheme. */ AHC_LSCBS_ENABLED = 0x2000000, /* 64Byte SCBs enabled */ AHC_SCB_CONFIG_USED = 0x4000000, /* No SEEPROM but SCB2 had info. */ AHC_NO_BIOS_INIT = 0x8000000, /* No BIOS left over settings. */ AHC_DISABLE_PCI_PERR = 0x10000000, - AHC_HAS_TERM_LOGIC = 0x20000000 + AHC_HAS_TERM_LOGIC = 0x20000000, + AHC_SHUTDOWN_RECOVERY = 0x40000000 /* Terminate recovery thread. */ } ahc_flag; /************************* Hardware SCB Definition ***************************/ /* * The driver keeps up to MAX_SCB scb structures per card in memory. 
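Before the SCB definitions, a quick, self-contained check of the TCL ("target/channel/lun") packing macros from the top of this header may help: BUILD_TCL() places the target in bits 8-11 and the lun in bits 0-5, and the accessor macros invert that. The macro bodies are copied verbatim; TID (0xf0) is the target ID mask from the SCSIID register definition in aic7xxx.reg, and the test values are arbitrary:

    #include <assert.h>
    #include <stdint.h>

    #define TID                     0xf0    /* SCSIID target ID mask */
    #define AHC_NUM_LUNS            64

    #define TCL_TARGET_OFFSET(tcl)  ((((tcl) >> 4) & TID) >> 4)
    #define TCL_LUN(tcl)            (tcl & (AHC_NUM_LUNS - 1))
    #define BUILD_TCL(scsiid, lun)  ((lun) | (((scsiid) & TID) << 4))

    int
    main(void)
    {
            uint8_t scsiid = 0x57;          /* target 5 in the high nibble */
            unsigned int tcl = BUILD_TCL(scsiid, 3);

            assert(TCL_TARGET_OFFSET(tcl) == 5);
            assert(TCL_LUN(tcl) == 3);
            return (0);
    }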
The SCB * consists of a "hardware SCB" mirroring the fields available on the card * and additional information the kernel stores for each transaction. * * To minimize space utilization, a portion of the hardware scb stores * different data during different portions of a SCSI transaction. * As initialized by the host driver for the initiator role, this area * contains the SCSI cdb (or a pointer to the cdb) to be executed. After * the cdb has been presented to the target, this area serves to store * residual transfer information and the SCSI status byte. * For the target role, the contents of this area do not change, but * still serve a different purpose than for the initiator role. See * struct target_data for details. */ /* * Status information embedded in the shared portion of * an SCB after passing the cdb to the target. The kernel * driver will only read this data for transactions that * complete abnormally (non-zero status byte). */ struct status_pkt { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sg_ptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* Standard SCSI status byte */ }; /* * Target mode version of the shared data SCB segment. */ struct target_data { uint32_t residual_datacnt; /* Residual in the current S/G seg */ uint32_t residual_sg_ptr; /* The next S/G for this transfer */ uint8_t scsi_status; /* SCSI status to give to initiator */ uint8_t target_phases; /* Bitmap of phases to execute */ uint8_t data_phase; /* Data-In or Data-Out */ uint8_t initiator_tag; /* Initiator's transaction tag */ }; +#define MAX_CDB_LEN 16 struct hardware_scb { /*0*/ union { /* * If the cdb is 12 bytes or less, we embed it directly * in the SCB. For longer cdbs, we embed the address * of the cdb payload as seen by the chip and a DMA * is used to pull it in. */ uint8_t cdb[12]; uint32_t cdb_ptr; struct status_pkt status; struct target_data tdata; } shared_data; /* * A word about residuals. * The scb is presented to the sequencer with the dataptr and datacnt * fields initialized to the contents of the first S/G element to * transfer. The sgptr field is initialized to the bus address for * the S/G element that follows the first in the in core S/G array * or'ed with the SG_FULL_RESID flag. Sgptr may point to an invalid * S/G entry for this transfer (single S/G element transfer with the * first element's address and length preloaded in the dataptr/datacnt * fields). If no transfer is to occur, sgptr is set to SG_LIST_NULL. * The SG_FULL_RESID flag ensures that the residual will be correctly * noted even if no data transfers occur. Once the data phase is entered, * the residual sgptr and datacnt are loaded from the sgptr and the * datacnt fields. After each S/G element's dataptr and length are * loaded into the hardware, the residual sgptr is advanced. After * each S/G element is expired, its datacnt field is checked to see * if the LAST_SEG flag is set. If so, SG_LIST_NULL is set in the * residual sg ptr and the transfer is considered complete. If the * sequencer determines that there is a residual in the transfer, it * will set the SG_RESID_VALID flag in sgptr and dma the scb back into * host memory. To summarize: * * Sequencer: * o A residual has occurred if SG_FULL_RESID is set in sgptr, * or residual_sgptr does not have SG_LIST_NULL set. * * o We are transferring the last segment if residual_datacnt has * the SG_LAST_SEG flag set. * * Host: * o A residual has occurred if a completed scb has the * SG_RESID_VALID flag set.
* * o residual_sgptr and sgptr refer to the "next" sg entry * and so may point beyond the last valid sg entry for the * transfer. */ /*12*/ uint32_t dataptr; /*16*/ uint32_t datacnt; /* * Byte 3 (numbered from 0) of * the datacnt is really the * 4th byte in that data address. */ /*20*/ uint32_t sgptr; #define SG_PTR_MASK 0xFFFFFFF8 /*24*/ uint8_t control; /* See SCB_CONTROL in aic7xxx.reg for details */ /*25*/ uint8_t scsiid; /* what to load in the SCSIID register */ /*26*/ uint8_t lun; /*27*/ uint8_t tag; /* * Index into our kernel SCB array. * Also used as the tag for tagged I/O */ /*28*/ uint8_t cdb_len; /*29*/ uint8_t scsirate; /* Value for SCSIRATE register */ /*30*/ uint8_t scsioffset; /* Value for SCSIOFFSET register */ /*31*/ uint8_t next; /* * Used for threading SCBs in the * "Waiting for Selection" and * "Disconnected SCB" lists down * in the sequencer. */ /*32*/ uint8_t cdb32[32]; /* * CDB storage for cdbs of size * 13->32. We store them here * because hardware scbs are * allocated from DMA safe * memory so we are guaranteed * the controller can access * this data. */ }; /************************ Kernel SCB Definitions ******************************/ /* * Some fields of the SCB are OS dependent. Here we collect the * definitions for elements that all OS platforms need to include * in their SCB definition. */ /* * Definition of a scatter/gather element as transferred to the controller. * The aic7xxx chips only support a 24bit length. We use the top byte of * the length to store additional address bits and a flag to indicate * that a given segment terminates the transfer. This gives us an * addressable range of 512GB on machines with 64bit PCI or with chips * that can support dual address cycles on 32bit PCI busses. */ struct ahc_dma_seg { uint32_t addr; uint32_t len; #define AHC_DMA_LAST_SEG 0x80000000 #define AHC_SG_HIGH_ADDR_MASK 0x7F000000 #define AHC_SG_LEN_MASK 0x00FFFFFF }; struct sg_map_node { bus_dmamap_t sg_dmamap; bus_addr_t sg_physaddr; struct ahc_dma_seg* sg_vaddr; SLIST_ENTRY(sg_map_node) links; }; /* * The current state of this SCB. */ typedef enum { - SCB_FREE = 0x0000, + SCB_FLAG_NONE = 0x0000, SCB_OTHERTCL_TIMEOUT = 0x0002,/* * Another device was active * during the first timeout for * this SCB so we gave ourselves * an additional timeout period * in case it was hogging the * bus. */ SCB_DEVICE_RESET = 0x0004, SCB_SENSE = 0x0008, SCB_CDB32_PTR = 0x0010, SCB_RECOVERY_SCB = 0x0020, SCB_AUTO_NEGOTIATE = 0x0040,/* Negotiate to achieve goal. */ SCB_NEGOTIATE = 0x0080,/* Negotiation forced for command. */ SCB_ABORT = 0x0100, SCB_UNTAGGEDQ = 0x0200, SCB_ACTIVE = 0x0400, SCB_TARGET_IMMEDIATE = 0x0800, SCB_TRANSMISSION_ERROR = 0x1000,/* * We detected a parity or CRC * error that has affected the * payload of the command. This * flag is checked when normal * status is returned to catch * the case of a target not * responding to our attempt * to report the error. */ SCB_TARGET_SCB = 0x2000, - SCB_SILENT = 0x4000 /* + SCB_SILENT = 0x4000,/* * Be quiet about transmission type * errors. They are expected and we * don't want to upset the user. This * flag is typically used during DV. */ + SCB_TIMEDOUT = 0x8000 /* + * SCB has timed out and is on the + * timedout list.
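As the comment above describes, the top byte of len does triple duty: the terminator flag plus address bits 32-38 (which is where the 512GB, i.e. 2^39, range comes from). A minimal sketch of packing one segment under that assumption; ahc_fill_seg() is a hypothetical helper, not a driver function, and endian conversion of the descriptor is omitted:

    #include <stdint.h>

    struct ahc_dma_seg {
            uint32_t addr;
            uint32_t len;
    };
    #define AHC_DMA_LAST_SEG        0x80000000
    #define AHC_SG_HIGH_ADDR_MASK   0x7F000000
    #define AHC_SG_LEN_MASK         0x00FFFFFF

    static void
    ahc_fill_seg(struct ahc_dma_seg *sg, uint64_t busaddr, uint32_t len, int last)
    {
            sg->addr = (uint32_t)busaddr;           /* low 32 address bits */
            sg->len = (len & AHC_SG_LEN_MASK)       /* 24bit byte count */
                    | (uint32_t)((busaddr >> 8) & AHC_SG_HIGH_ADDR_MASK) /* bits 32-38 */
                    | (last ? AHC_DMA_LAST_SEG : 0); /* terminates the transfer */
    }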
+ */ } scb_flag; struct scb { struct hardware_scb *hscb; union { SLIST_ENTRY(scb) sle; TAILQ_ENTRY(scb) tqe; } links; LIST_ENTRY(scb) pending_links; - ahc_io_ctx_t io_ctx; + LIST_ENTRY(scb) timedout_links; + aic_io_ctx_t io_ctx; struct ahc_softc *ahc_softc; scb_flag flags; #ifndef __linux__ bus_dmamap_t dmamap; #endif struct scb_platform_data *platform_data; struct sg_map_node *sg_map; struct ahc_dma_seg *sg_list; bus_addr_t sg_list_phys; u_int sg_count;/* How full ahc_dma_seg is */ }; struct scb_data { SLIST_HEAD(, scb) free_scbs; /* * Pool of SCBs ready to be assigned * commands to execute. */ struct scb *scbindex[256]; /* * Mapping from tag to SCB. * As tag identifiers are an * 8bit value, we provide space * for all possible tag values. * Any lookups to entries at or * above AHC_SCB_MAX_ALLOC will * always fail. */ struct hardware_scb *hscbs; /* Array of hardware SCBs */ struct scb *scbarray; /* Array of kernel SCBs */ struct scsi_sense_data *sense; /* Per SCB sense data */ /* * "Bus" addresses of our data structures. */ bus_dma_tag_t hscb_dmat; /* dmat for our hardware SCB array */ bus_dmamap_t hscb_dmamap; bus_addr_t hscb_busaddr; bus_dma_tag_t sense_dmat; bus_dmamap_t sense_dmamap; bus_addr_t sense_busaddr; bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ SLIST_HEAD(, sg_map_node) sg_maps; uint8_t numscbs; uint8_t maxhscbs; /* Number of SCBs on the card */ uint8_t init_level; /* * How far we've initialized * this structure. */ }; /************************ Target Mode Definitions *****************************/ /* * Connection descriptor for select-in requests in target mode. */ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ uint8_t identify; /* Identify message */ uint8_t bytes[22]; /* * Bytes contains any additional message * bytes terminated by 0xFF. The remainder * is the cdb to execute. */ uint8_t cmd_valid; /* * When a command is complete, the firmware * will set cmd_valid to all bits set. * After the host has seen the command, * the bits are cleared. This allows us * to just peek at host memory to determine * if more work is complete. cmd_valid is on * an 8 byte boundary to simplify setting * it on aic7880 hardware which only has * limited direct access to the DMA FIFO. */ uint8_t pad[7]; }; /* * Number of events we can buffer up if we run out * of immediate notify ccbs. */ #define AHC_TMODE_EVENT_BUFFER_SIZE 8 struct ahc_tmode_event { uint8_t initiator_id; uint8_t event_type; /* MSG type or EVENT_TYPE_BUS_RESET */ #define EVENT_TYPE_BUS_RESET 0xFF uint8_t event_arg; }; /* * Per enabled lun target mode state. * As this state is directly influenced by the host OS'es target mode * environment, we let the OS module define it. Forward declare the * structure here so we can store arrays of them, etc. in OS neutral * data structures.
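The cmd_valid handshake above implies a very simple host-side drain loop: the firmware sets the byte non-zero when a slot is filled, the host clears it when done, and the uint8_t ring index wraps naturally at AHC_TMODE_CMDS (256). A sketch of the idea behind ahc_run_tqinfifo() (declared later in this header); ahc_handle_target_cmd() is a hypothetical consumer, and the real loop also has to tell the sequencer how far the host has read:

    while (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) {
            struct target_cmd *cmd;

            cmd = &ahc->targetcmds[ahc->tqinfifonext];
            ahc_handle_target_cmd(ahc, cmd);    /* hypothetical consumer */
            cmd->cmd_valid = 0;                 /* return the slot to the firmware */
            ahc->tqinfifonext++;                /* uint8_t: wraps at AHC_TMODE_CMDS */
    }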
*/ #ifdef AHC_TARGET_MODE struct ahc_tmode_lstate { struct cam_path *path; struct ccb_hdr_slist accept_tios; struct ccb_hdr_slist immed_notifies; struct ahc_tmode_event event_buffer[AHC_TMODE_EVENT_BUFFER_SIZE]; uint8_t event_r_idx; uint8_t event_w_idx; }; #else struct ahc_tmode_lstate; #endif /******************** Transfer Negotiation Datastructures *********************/ #define AHC_TRANS_CUR 0x01 /* Modify current negotiation status */ #define AHC_TRANS_ACTIVE 0x03 /* Assume this target is on the bus */ #define AHC_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define AHC_TRANS_USER 0x08 /* Modify user negotiation settings */ #define AHC_WIDTH_UNKNOWN 0xFF #define AHC_PERIOD_UNKNOWN 0xFF #define AHC_OFFSET_UNKNOWN 0xFF #define AHC_PPR_OPTS_UNKNOWN 0xFF /* * Transfer Negotiation Information. */ struct ahc_transinfo { uint8_t protocol_version; /* SCSI Revision level */ uint8_t transport_version; /* SPI Revision level */ uint8_t width; /* Bus width */ uint8_t period; /* Sync rate factor */ uint8_t offset; /* Sync offset */ uint8_t ppr_options; /* Parallel Protocol Request options */ }; /* * Per-initiator current, goal and user transfer negotiation information. */ struct ahc_initiator_tinfo { uint8_t scsirate; /* Computed value for SCSIRATE reg */ struct ahc_transinfo curr; struct ahc_transinfo goal; struct ahc_transinfo user; }; /* * Per enabled target ID state. * Pointers to lun target state as well as sync/wide negotiation information * for each initiator<->target mapping. For the initiator role we pretend * that we are the target and the targets are the initiators since the * negotiation is the same regardless of role. */ struct ahc_tmode_tstate { struct ahc_tmode_lstate* enabled_luns[AHC_NUM_LUNS]; struct ahc_initiator_tinfo transinfo[AHC_NUM_TARGETS]; /* * Per initiator state bitmasks. */ uint16_t auto_negotiate;/* Auto Negotiation Required */ uint16_t ultraenb; /* Using ultra sync rate */ uint16_t discenable; /* Disconnection allowed */ uint16_t tagenable; /* Tagged Queuing allowed */ }; /* * Data structure for our table of allowed synchronous transfer rates. */ struct ahc_syncrate { u_int sxfr_u2; /* Value of the SXFR parameter for Ultra2+ Chips */ u_int sxfr; /* Value of the SXFR parameter for <= Ultra Chips */ #define ULTRA_SXFR 0x100 /* Rate Requires Ultra Mode set */ #define ST_SXFR 0x010 /* Rate Single Transition Only */ #define DT_SXFR 0x040 /* Rate Double Transition Only */ uint8_t period; /* Period to send to SCSI target */ char *rate; }; /* Safe and valid period for async negotiations. */ #define AHC_ASYNC_XFER_PERIOD 0x45 #define AHC_ULTRA2_XFER_PERIOD 0x0a /* * Indexes into our table of synchronous transfer rates. */ #define AHC_SYNCRATE_DT 0 #define AHC_SYNCRATE_ULTRA2 1 #define AHC_SYNCRATE_ULTRA 3 #define AHC_SYNCRATE_FAST 6 #define AHC_SYNCRATE_MAX AHC_SYNCRATE_DT #define AHC_SYNCRATE_MIN 13 /***************************** Lookup Tables **********************************/ /* * Phase -> name and message out response * to parity errors in each phase table.
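The curr/goal/user triple is what drives renegotiation: the driver compares the current parameters against the goal and renegotiates when they differ (the AHC_NEG_TO_GOAL policy defined further down). A sketch of that comparison; ahc_neg_needed() is a hypothetical helper, not a driver function:

    static int
    ahc_neg_needed(struct ahc_initiator_tinfo *tinfo)
    {
            return (tinfo->curr.width != tinfo->goal.width
                 || tinfo->curr.period != tinfo->goal.period
                 || tinfo->curr.offset != tinfo->goal.offset
                 || tinfo->curr.ppr_options != tinfo->goal.ppr_options);
    }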
*/ struct ahc_phase_table_entry { uint8_t phase; uint8_t mesg_out; /* Message response to parity errors */ char *phasemsg; }; /************************** Serial EEPROM Format ******************************/ struct seeprom_config { /* * Per SCSI ID Configuration Flags */ uint16_t device_flags[16]; /* words 0-15 */ #define CFXFER 0x0007 /* synchronous transfer rate */ #define CFSYNCH 0x0008 /* enable synchronous transfer */ #define CFDISC 0x0010 /* enable disconnection */ #define CFWIDEB 0x0020 /* wide bus device */ #define CFSYNCHISULTRA 0x0040 /* CFSYNCH is an ultra offset (2940AU)*/ #define CFSYNCSINGLE 0x0080 /* Single-Transition signalling */ #define CFSTART 0x0100 /* send start unit SCSI command */ #define CFINCBIOS 0x0200 /* include in BIOS scan */ #define CFRNFOUND 0x0400 /* report even if not found */ #define CFMULTILUNDEV 0x0800 /* Probe multiple luns in BIOS scan */ #define CFWBCACHEENB 0x4000 /* Enable W-Behind Cache on disks */ #define CFWBCACHENOP 0xc000 /* Don't touch W-Behind Cache */ /* * BIOS Control Bits */ uint16_t bios_control; /* word 16 */ #define CFSUPREM 0x0001 /* support all removable drives */ #define CFSUPREMB 0x0002 /* support removable boot drives */ #define CFBIOSEN 0x0004 /* BIOS enabled */ #define CFBIOS_BUSSCAN 0x0008 /* Have the BIOS Scan the Bus */ #define CFSM2DRV 0x0010 /* support more than two drives */ #define CFSTPWLEVEL 0x0010 /* Termination level control */ #define CF284XEXTEND 0x0020 /* extended translation (284x cards) */ #define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */ #define CFTERM_MENU 0x0040 /* BIOS displays termination menu */ #define CFEXTEND 0x0080 /* extended translation enabled */ #define CFSCAMEN 0x0100 /* SCAM enable */ #define CFMSG_LEVEL 0x0600 /* BIOS Message Level */ #define CFMSG_VERBOSE 0x0000 #define CFMSG_SILENT 0x0200 #define CFMSG_DIAG 0x0400 #define CFBOOTCD 0x0800 /* Support Bootable CD-ROM */ /* UNUSED 0xff00 */ /* * Host Adapter Control Bits */ uint16_t adapter_control; /* word 17 */ #define CFAUTOTERM 0x0001 /* Perform Auto termination */ #define CFULTRAEN 0x0002 /* Ultra SCSI speed enable */ #define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */ #define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */ #define CFSTERM 0x0004 /* SCSI low byte termination */ #define CFWSTERM 0x0008 /* SCSI high byte termination */ #define CFSPARITY 0x0010 /* SCSI parity */ #define CF284XSTERM 0x0020 /* SCSI low byte term (284x cards) */ #define CFMULTILUN 0x0020 #define CFRESETB 0x0040 /* reset SCSI bus at boot */ #define CFCLUSTERENB 0x0080 /* Cluster Enable */ #define CFBOOTCHAN 0x0300 /* probe this channel first */ #define CFBOOTCHANSHIFT 8 #define CFSEAUTOTERM 0x0400 /* Ultra2 Perform secondary Auto Term*/ #define CFSELOWTERM 0x0800 /* Ultra2 secondary low term */ #define CFSEHIGHTERM 0x1000 /* Ultra2 secondary high term */ #define CFENABLEDV 0x4000 /* Perform Domain Validation*/ /* * Bus Release Time, Host Adapter ID */ uint16_t brtime_id; /* word 18 */ #define CFSCSIID 0x000f /* host adapter SCSI ID */ /* UNUSED 0x00f0 */ #define CFBRTIME 0xff00 /* bus release time */ /* * Maximum targets */ uint16_t max_targets; /* word 19 */ #define CFMAXTARG 0x00ff /* maximum targets */ #define CFBOOTLUN 0x0f00 /* Lun to boot from */ #define CFBOOTID 0xf000 /* Target to boot from */ uint16_t res_1[10]; /* words 20-29 */ uint16_t signature; /* Signature == 0x250 */ #define CFSIGNATURE 0x250 #define CFSIGNATURE2 0x300 uint16_t checksum; /* word 31 */ }; /**************************** Message Buffer
*********************************/ typedef enum { MSG_TYPE_NONE = 0x00, MSG_TYPE_INITIATOR_MSGOUT = 0x01, MSG_TYPE_INITIATOR_MSGIN = 0x02, MSG_TYPE_TARGET_MSGOUT = 0x03, MSG_TYPE_TARGET_MSGIN = 0x04 } ahc_msg_type; typedef enum { MSGLOOP_IN_PROG, MSGLOOP_MSGCOMPLETE, MSGLOOP_TERMINATED } msg_loop_stat; /*********************** Software Configuration Structure *********************/ TAILQ_HEAD(scb_tailq, scb); struct ahc_aic7770_softc { /* * Saved register state used for chip_init(). */ uint8_t busspd; uint8_t bustime; }; struct ahc_pci_softc { /* * Saved register state used for chip_init(). */ uint32_t devconfig; uint16_t targcrccnt; uint8_t command; uint8_t csize_lattime; uint8_t optionmode; uint8_t crccontrol1; uint8_t dscommand0; uint8_t dspcistatus; uint8_t scbbaddr; uint8_t dff_thrsh; }; union ahc_bus_softc { struct ahc_aic7770_softc aic7770_softc; struct ahc_pci_softc pci_softc; }; typedef void (*ahc_bus_intr_t)(struct ahc_softc *); typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *); typedef int (*ahc_bus_suspend_t)(struct ahc_softc *); typedef int (*ahc_bus_resume_t)(struct ahc_softc *); typedef void ahc_callback_t (void *); struct ahc_softc { bus_space_tag_t tag; bus_space_handle_t bsh; #ifndef __linux__ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ #endif struct scb_data *scb_data; struct scb *next_queued_scb; /* * SCBs that have been sent to the controller */ LIST_HEAD(, scb) pending_scbs; /* + * SCBs whose timeout routine has been called. + */ + LIST_HEAD(, scb) timedout_scbs; + + /* * Counting lock for deferring the release of additional * untagged transactions from the untagged_queues. When * the lock is decremented to 0, all queues in the * untagged_queues array are run. */ u_int untagged_queue_lock; /* * Per-target queue of untagged-transactions. The * transaction at the head of the queue is the * currently pending untagged transaction for the * target. The driver only allows a single untagged * transaction per target. */ struct scb_tailq untagged_queues[AHC_NUM_TARGETS]; /* * Bus attachment specific data. */ union ahc_bus_softc bus_softc; /* * Platform specific data. */ struct ahc_platform_data *platform_data; /* * Platform specific device information. */ - ahc_dev_softc_t dev_softc; + aic_dev_softc_t dev_softc; /* * Bus specific device information. */ ahc_bus_intr_t bus_intr; /* * Bus specific initialization required * after a chip reset. */ ahc_bus_chip_init_t bus_chip_init; /* * Bus specific suspend routine. */ ahc_bus_suspend_t bus_suspend; /* * Bus specific resume routine. */ ahc_bus_resume_t bus_resume; /* * Target mode related state kept on a per enabled lun basis. * Targets that are not enabled will have null entries. * As an initiator, we keep one target entry for our initiator * ID to store our sync/wide transfer settings. */ struct ahc_tmode_tstate *enabled_targets[AHC_NUM_TARGETS]; /* * The black hole device responsible for handling requests for * disabled luns on enabled targets. */ struct ahc_tmode_lstate *black_hole; /* * Device instance currently on the bus awaiting a continue TIO * for a command that was not given the disconnect privilege.
*/ struct ahc_tmode_lstate *pending_device; /* * Card characteristics */ ahc_chip chip; ahc_feature features; ahc_bug bugs; ahc_flag flags; struct seeprom_config *seep_config; /* Values to store in the SEQCTL register for pause and unpause */ uint8_t unpause; uint8_t pause; /* Command Queues */ uint8_t qoutfifonext; uint8_t qinfifonext; uint8_t *qoutfifo; uint8_t *qinfifo; /* Critical Section Data */ struct cs *critical_sections; u_int num_critical_sections; /* Links for chaining softcs */ TAILQ_ENTRY(ahc_softc) links; /* Channel Names ('A', 'B', etc.) */ char channel; char channel_b; /* Initiator Bus ID */ uint8_t our_id; uint8_t our_id_b; /* * PCI error detection. */ int unsolicited_ints; /* * Target incoming command FIFO. */ struct target_cmd *targetcmds; uint8_t tqinfifonext; /* * Cached copy of the sequencer control register. */ uint8_t seqctl; /* * Incoming and outgoing message handling. */ uint8_t send_msg_perror; ahc_msg_type msg_type; uint8_t msgout_buf[12];/* Message we are sending */ uint8_t msgin_buf[12];/* Message we are receiving */ u_int msgout_len; /* Length of message to send */ u_int msgout_index; /* Current index in msgout */ u_int msgin_index; /* Current index in msgin */ /* * Mapping information for data structures shared * between the sequencer and kernel. */ bus_dma_tag_t parent_dmat; bus_dma_tag_t shared_data_dmat; bus_dmamap_t shared_data_dmamap; bus_addr_t shared_data_busaddr; /* * Bus address of the one byte buffer used to * work-around a DMA bug for chips <= aic7880 * in target mode. */ bus_addr_t dma_bug_buf; /* Number of enabled target mode device on this card */ u_int enabled_luns; /* Initialization level of this data structure */ u_int init_level; /* PCI cacheline size. */ u_int pci_cachesize; /* * Count of parity errors we have seen as a target. * We auto-disable parity error checking after seeing * AHC_PCI_TARGET_PERR_THRESH number of errors. */ u_int pci_target_perr_count; #define AHC_PCI_TARGET_PERR_THRESH 10 /* Maximum number of sequencer instructions supported. */ u_int instruction_ram_size; /* Per-Unit descriptive information */ const char *description; char *name; int unit; /* Selection Timer settings */ int seltime; int seltime_b; uint16_t user_discenable;/* Disconnection allowed */ uint16_t user_tagenable;/* Tagged Queuing allowed */ }; TAILQ_HEAD(ahc_softc_tailq, ahc_softc); extern struct ahc_softc_tailq ahc_tailq; /************************ Active Device Information ***************************/ typedef enum { ROLE_UNKNOWN, ROLE_INITIATOR, ROLE_TARGET } role_t; struct ahc_devinfo { int our_scsiid; int target_offset; uint16_t target_mask; u_int target; u_int lun; char channel; role_t role; /* * Only guaranteed to be correct if not * in the busfree state. 
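The target_offset/target_mask pair in ahc_devinfo mirrors the SCB_GET_TARGET_OFFSET()/SCB_GET_TARGET_MASK() macros near the top of this header: on a twin-channel controller, B-channel targets occupy offsets 8-15 and the mask is a single bit at that offset. A sketch of filling a devinfo by hand, which is presumably what ahc_compile_devinfo() (declared below) does for its callers:

    struct ahc_devinfo di;

    di.target = 4;
    di.lun = 0;
    di.channel = 'B';
    di.role = ROLE_INITIATOR;
    di.our_scsiid = 7;                          /* arbitrary initiator ID */
    di.target_offset = di.target + (di.channel == 'B' ? 8 : 0);
    di.target_mask = 0x01 << di.target_offset;  /* bit 12 for B:4 */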
*/ }; /****************************** PCI Structures ********************************/ +#define AHC_PCI_IOADDR PCIR_BAR(0) /* I/O Address */ +#define AHC_PCI_MEMADDR PCIR_BAR(1) /* Mem I/O Address */ + typedef int (ahc_device_setup_t)(struct ahc_softc *); struct ahc_pci_identity { uint64_t full_id; uint64_t id_mask; char *name; ahc_device_setup_t *setup; }; extern struct ahc_pci_identity ahc_pci_ident_table[]; extern const u_int ahc_num_pci_devs; /***************************** VL/EISA Declarations ***************************/ struct aic7770_identity { uint32_t full_id; uint32_t id_mask; const char *name; ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 /*************************** Function Declarations ****************************/ /******************************************************************************/ u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl); void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl); void ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int busyid); /***************************** PCI Front End *********************************/ -struct ahc_pci_identity *ahc_find_pci_device(ahc_dev_softc_t); +struct ahc_pci_identity *ahc_find_pci_device(aic_dev_softc_t); int ahc_pci_config(struct ahc_softc *, struct ahc_pci_identity *); int ahc_pci_test_register_access(struct ahc_softc *); /*************************** EISA/VL Front End ********************************/ struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); /************************** SCB and SCB queue management **********************/ int ahc_probe_scbs(struct ahc_softc *); void ahc_run_untagged_queues(struct ahc_softc *ahc); void ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue); void ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb); int ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); /****************************** Initialization ********************************/ struct ahc_softc *ahc_alloc(void *platform_arg, char *name); int ahc_softc_init(struct ahc_softc *); void ahc_controller_info(struct ahc_softc *ahc, char *buf); int ahc_chip_init(struct ahc_softc *ahc); int ahc_init(struct ahc_softc *ahc); void ahc_intr_enable(struct ahc_softc *ahc, int enable); void ahc_pause_and_flushwork(struct ahc_softc *ahc); int ahc_suspend(struct ahc_softc *ahc); int ahc_resume(struct ahc_softc *ahc); void ahc_softc_insert(struct ahc_softc *); struct ahc_softc *ahc_find_softc(struct ahc_softc *ahc); void ahc_set_unit(struct ahc_softc *, int); void ahc_set_name(struct ahc_softc *, char *); void ahc_alloc_scbs(struct ahc_softc *ahc); void ahc_free(struct ahc_softc *ahc); int ahc_reset(struct ahc_softc *ahc, int reinit); void ahc_shutdown(void *arg); /*************************** Interrupt Services *******************************/ void ahc_clear_intstat(struct ahc_softc *ahc); void ahc_run_qoutfifo(struct ahc_softc *ahc); #ifdef AHC_TARGET_MODE void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused); #endif void ahc_handle_brkadrint(struct ahc_softc *ahc); void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat); void ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat); void ahc_clear_critical_section(struct ahc_softc *ahc); /***************************** Error Recovery 
*********************************/ typedef enum { SEARCH_COMPLETE, SEARCH_COUNT, SEARCH_REMOVE } ahc_search_action; int ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahc_search_action action); int ahc_search_untagged_queues(struct ahc_softc *ahc, - ahc_io_ctx_t ctx, + aic_io_ctx_t ctx, int target, char channel, int lun, uint32_t status, ahc_search_action action); int ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, int stop_on_first, int remove, int save_state); void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb); int ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset); int ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); void ahc_restart(struct ahc_softc *ahc); void ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb); +void ahc_timeout(struct scb *scb); +void ahc_recover_commands(struct ahc_softc *ahc); /*************************** Utility Functions ********************************/ struct ahc_phase_table_entry* ahc_lookup_phase_entry(int phase); void ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target, u_int lun, char channel, role_t role); /************************** Transfer Negotiation ******************************/ struct ahc_syncrate* ahc_find_syncrate(struct ahc_softc *ahc, u_int *period, u_int *ppr_options, u_int maxsync); u_int ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync); void ahc_validate_offset(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, struct ahc_syncrate *syncrate, u_int *offset, int wide, role_t role); void ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, u_int *bus_width, role_t role); /* * Negotiation types. These are used to qualify if we should renegotiate * even if our goal and current transport parameters are identical. */ typedef enum { AHC_NEG_TO_GOAL, /* Renegotiate only if goal and curr differ. */ AHC_NEG_IF_NON_ASYNC, /* Renegotiate so long as goal is non-async. */ AHC_NEG_ALWAYS /* Renegotiate even if goal is async.
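ahc_timeout() and ahc_recover_commands() are new in this revision and tie together the other additions here: the SCB_TIMEDOUT flag, the timedout_links list entry, and the timedout_scbs list head in the softc. A heavily hedged sketch of how those pieces plausibly fit together; the wakeup call is a hypothetical stand-in for the platform-specific hand-off to the recovery thread (see AHC_SHUTDOWN_RECOVERY above):

    void
    ahc_timeout(struct scb *scb)
    {
            struct ahc_softc *ahc = scb->ahc_softc;

            if ((scb->flags & SCB_TIMEDOUT) == 0) {
                    LIST_INSERT_HEAD(&ahc->timedout_scbs, scb, timedout_links);
                    scb->flags |= SCB_TIMEDOUT;
            }
            ahc_wakeup_recovery_thread(ahc);    /* hypothetical hand-off; the
                                                 * thread would then call
                                                 * ahc_recover_commands().
                                                 */
    }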
*/ } ahc_neg_type; int ahc_update_neg_request(struct ahc_softc*, struct ahc_devinfo*, struct ahc_tmode_tstate*, struct ahc_initiator_tinfo*, ahc_neg_type); void ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, u_int width, u_int type, int paused); void ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, struct ahc_syncrate *syncrate, u_int period, u_int offset, u_int ppr_options, u_int type, int paused); typedef enum { AHC_QUEUE_NONE, AHC_QUEUE_BASIC, AHC_QUEUE_TAGGED } ahc_queue_alg; void ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, ahc_queue_alg alg); /**************************** Target Mode *************************************/ #ifdef AHC_TARGET_MODE void ahc_send_lstate_events(struct ahc_softc *, struct ahc_tmode_lstate *); void ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb); cam_status ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, struct ahc_tmode_tstate **tstate, struct ahc_tmode_lstate **lstate, int notfound_failure); #ifndef AHC_TMODE_ENABLE #define AHC_TMODE_ENABLE 0 #endif #endif /******************************* Debug ***************************************/ #ifdef AHC_DEBUG extern uint32_t ahc_debug; #define AHC_SHOW_MISC 0x0001 #define AHC_SHOW_SENSE 0x0002 #define AHC_DUMP_SEEPROM 0x0004 #define AHC_SHOW_TERMCTL 0x0008 #define AHC_SHOW_MEMORY 0x0010 #define AHC_SHOW_MESSAGES 0x0020 #define AHC_SHOW_DV 0x0040 #define AHC_SHOW_SELTO 0x0080 #define AHC_SHOW_QFULL 0x0200 #define AHC_SHOW_QUEUE 0x0400 #define AHC_SHOW_TQIN 0x0800 #define AHC_SHOW_MASKED_ERRORS 0x1000 #define AHC_DEBUG_SEQUENCER 0x2000 #endif void ahc_print_scb(struct scb *scb); void ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *dev); void ahc_dump_card_state(struct ahc_softc *ahc); int ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries, const char *name, u_int address, u_int value, u_int *cur_column, u_int wrap_point); /******************************* SEEPROM *************************************/ int ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd); void ahc_release_seeprom(struct seeprom_descriptor *sd); #endif /* _AIC7XXX_H_ */ Index: stable/4/sys/dev/aic7xxx/aic7xxx.reg =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx.reg (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx.reg (revision 125849) @@ -1,1594 +1,1594 @@ /* * Aic7xxx register and scratch ram definitions. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ -VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $" +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $" /* * This file is processed by the aic7xxx_asm utility for use in assembling * firmware for the aic7xxx family of SCSI host adapters as well as to generate * a C header file for use in the kernel portion of the Aic7xxx driver. * * All page numbers refer to the Adaptec AIC-7770 Data Book available from * Adaptec's Technical Documents Department 1-800-934-2766 */ /* * SCSI Sequence Control (p. 3-11). * Each bit, when set starts a specific SCSI sequence on the bus */ register SCSISEQ { address 0x000 access_mode RW field TEMODE 0x80 field ENSELO 0x40 field ENSELI 0x20 field ENRSELI 0x10 field ENAUTOATNO 0x08 field ENAUTOATNI 0x04 field ENAUTOATNP 0x02 field SCSIRSTO 0x01 } /* * SCSI Transfer Control 0 Register (pp. 3-13). * Controls the SCSI module data path. */ register SXFRCTL0 { address 0x001 access_mode RW field DFON 0x80 field DFPEXP 0x40 field FAST20 0x20 field CLRSTCNT 0x10 field SPIOEN 0x08 field SCAMEN 0x04 field CLRCHN 0x02 } /* * SCSI Transfer Control 1 Register (pp. 3-14,15). * Controls the SCSI module data path. */ register SXFRCTL1 { address 0x002 access_mode RW field BITBUCKET 0x80 field SWRAPEN 0x40 field ENSPCHK 0x20 mask STIMESEL 0x18 field ENSTIMER 0x04 field ACTNEGEN 0x02 field STPWEN 0x01 /* Powered Termination */ } /* * SCSI Control Signal Read Register (p. 3-15). * Reads the actual state of the SCSI bus pins */ register SCSISIGI { address 0x003 access_mode RO field CDI 0x80 field IOI 0x40 field MSGI 0x20 field ATNI 0x10 field SELI 0x08 field BSYI 0x04 field REQI 0x02 field ACKI 0x01 /* * Possible phases in SCSISIGI */ mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_DATAOUT_DT P_DATAOUT|MSGI mask P_DATAIN_DT P_DATAIN|MSGI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI } /* * SCSI Control Signal Write Register (p. 3-16). * Writing to this register modifies the control signals on the bus. Only * those signals that are allowed in the current mode (Initiator/Target) are * asserted. */ register SCSISIGO { address 0x003 access_mode WO field CDO 0x80 field IOO 0x40 field MSGO 0x20 field ATNO 0x10 field SELO 0x08 field BSYO 0x04 field REQO 0x02 field ACKO 0x01 /* * Possible phases to write into SCSISIG0 */ mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI } /* * SCSI Rate Control (p. 3-17). 
* Contents of this register determine the Synchronous SCSI data transfer * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the * SOFS (3:0) bits disables synchronous data transfers. Any offset value * greater than 0 enables synchronous transfers. */ register SCSIRATE { address 0x004 access_mode RW field WIDEXFER 0x80 /* Wide transfer control */ field ENABLE_CRC 0x40 /* CRC for D-Phases */ field SINGLE_EDGE 0x10 /* Disable DT Transfers */ mask SXFR 0x70 /* Sync transfer rate */ mask SXFR_ULTRA2 0x0f /* Sync transfer rate */ mask SOFS 0x0f /* Sync offset */ } /* * SCSI ID (p. 3-18). * Contains the ID of the board and the current target on the * selected channel. */ register SCSIID { address 0x005 access_mode RW mask TID 0xf0 /* Target ID mask */ mask TWIN_TID 0x70 field TWIN_CHNLB 0x80 mask OID 0x0f /* Our ID mask */ /* * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book) * The aic7890/91 allow an offset of up to 127 transfers in both wide * and narrow mode. */ alias SCSIOFFSET mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ } /* * SCSI Latched Data (p. 3-19). * Read/Write latches used to transfer data on the SCSI bus during * Automatic or Manual PIO mode. SCSIDATH can be used for the * upper byte of a 16bit wide asynchronous data phase transfer. */ register SCSIDATL { address 0x006 access_mode RW } register SCSIDATH { address 0x007 access_mode RW } /* * SCSI Transfer Count (pp. 3-19,20) * These registers count down the number of bytes transferred * across the SCSI bus. The counter is decremented only once * the data has been safely transferred. SDONE in SSTAT0 is * set when STCNT goes to 0 */ register STCNT { address 0x008 size 3 access_mode RW } /* ALT_MODE registers (Ultra2 and Ultra160 chips) */ register SXFRCTL2 { address 0x013 access_mode RW field AUTORSTDIS 0x10 field CMDDMAEN 0x08 mask ASYNC_SETUP 0x07 } /* ALT_MODE register on Ultra160 chips */ register OPTIONMODE { address 0x008 access_mode RW field AUTORATEEN 0x80 field AUTOACKEN 0x40 field ATNMGMNTEN 0x20 field BUSFREEREV 0x10 field EXPPHASEDIS 0x08 field SCSIDATL_IMGEN 0x04 field AUTO_MSGOUT_DE 0x02 field DIS_MSGIN_DUALEDGE 0x01 mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE } /* ALT_MODE register on Ultra160 chips */ register TARGCRCCNT { address 0x00a size 2 access_mode RW } /* * Clear SCSI Interrupt 0 (p. 3-20) * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. */ register CLRSINT0 { address 0x00b access_mode WO field CLRSELDO 0x40 field CLRSELDI 0x20 field CLRSELINGO 0x10 field CLRSWRAP 0x08 field CLRIOERR 0x08 /* Ultra2 Only */ field CLRSPIORDY 0x02 } /* * SCSI Status 0 (p. 3-21) * Contains one set of SCSI Interrupt codes * These are most likely of interest to the sequencer */ register SSTAT0 { address 0x00b access_mode RO field TARGET 0x80 /* Board acting as target */ field SELDO 0x40 /* Selection Done */ field SELDI 0x20 /* Board has been selected */ field SELINGO 0x10 /* Selection In Progress */ field SWRAP 0x08 /* 24bit counter wrap */ field IOERR 0x08 /* LVD Transceiver mode changed */ field SDONE 0x04 /* STCNT = 0x000000 */ field SPIORDY 0x02 /* SCSI PIO Ready */ field DMADONE 0x01 /* DMA transfer completed */ } /* * Clear SCSI Interrupt 1 (p. 3-23) * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. */ register CLRSINT1 { address 0x00c access_mode WO field CLRSELTIMEO 0x80 field CLRATNO 0x40 field CLRSCSIRSTI 0x20 field CLRBUSFREE 0x08 field CLRSCSIPERR 0x04 field CLRPHASECHG 0x02 field CLRREQINIT 0x01 } /* * SCSI Status 1 (p.
3-24) */ register SSTAT1 { address 0x00c access_mode RO field SELTO 0x80 field ATNTARG 0x40 field SCSIRSTI 0x20 field PHASEMIS 0x10 field BUSFREE 0x08 field SCSIPERR 0x04 field PHASECHG 0x02 field REQINIT 0x01 } /* * SCSI Status 2 (pp. 3-25,26) */ register SSTAT2 { address 0x00d access_mode RO field OVERRUN 0x80 field SHVALID 0x40 /* Shadow Layer non-zero */ field EXP_ACTIVE 0x10 /* SCSI Expander Active */ field CRCVALERR 0x08 /* CRC doesn't match (U3 only) */ field CRCENDERR 0x04 /* No terminal CRC packet (U3 only) */ field CRCREQERR 0x02 /* Illegal CRC packet req (U3 only) */ field DUAL_EDGE_ERR 0x01 /* Incorrect data phase (U3 only) */ mask SFCNT 0x1f } /* * SCSI Status 3 (p. 3-26) */ register SSTAT3 { address 0x00e access_mode RO mask SCSICNT 0xf0 mask OFFCNT 0x0f mask U2OFFCNT 0x7f } /* * SCSI ID for the aic7890/91 chips */ register SCSIID_ULTRA2 { address 0x00f access_mode RW mask TID 0xf0 /* Target ID mask */ mask OID 0x0f /* Our ID mask */ } /* * SCSI Interrupt Mode 0 (p. 3-28) * Setting any bit will enable the corresponding function * in SIMODE0 to interrupt via the IRQ pin. */ register SIMODE0 { address 0x010 access_mode RW field ENSELDO 0x40 field ENSELDI 0x20 field ENSELINGO 0x10 field ENSWRAP 0x08 field ENIOERR 0x08 /* LVD Transceiver mode changes */ field ENSDONE 0x04 field ENSPIORDY 0x02 field ENDMADONE 0x01 } /* * SCSI Interrupt Mode 1 (pp. 3-28,29) * Setting any bit will enable the corresponding function * in SIMODE1 to interrupt via the IRQ pin. */ register SIMODE1 { address 0x011 access_mode RW field ENSELTIMO 0x80 field ENATNTARG 0x40 field ENSCSIRST 0x20 field ENPHASEMIS 0x10 field ENBUSFREE 0x08 field ENSCSIPERR 0x04 field ENPHASECHG 0x02 field ENREQINIT 0x01 } /* * SCSI Data Bus (High) (p. 3-29) * This register reads data on the SCSI Data bus directly. */ register SCSIBUSL { address 0x012 access_mode RW } register SCSIBUSH { address 0x013 access_mode RW } /* * SCSI/Host Address (p. 3-30) * These registers hold the host address for the byte about to be * transferred on the SCSI bus. They are counted up in the same * manner as STCNT is counted down. SHADDR should always be used * to determine the address of the last byte transferred since HADDR * can be skewed by write ahead. */ register SHADDR { address 0x014 size 4 access_mode RO } /* * Selection Timeout Timer (p. 3-30) */ register SELTIMER { address 0x018 access_mode RW field STAGE6 0x20 field STAGE5 0x10 field STAGE4 0x08 field STAGE3 0x04 field STAGE2 0x02 field STAGE1 0x01 alias TARGIDIN } /* * Selection/Reselection ID (p. 3-31) * Upper four bits are the device id. The ONEBIT is set when the re/selecting * device did not set its own ID. */ register SELID { address 0x019 access_mode RW mask SELID_MASK 0xf0 field ONEBIT 0x08 } register SCAMCTL { address 0x01a access_mode RW field ENSCAMSELO 0x80 field CLRSCAMSELID 0x40 field ALTSTIM 0x20 field DFLTTID 0x10 mask SCAMLVL 0x03 } /* * Target Mode Selecting in ID bitmask (aic7890/91/96/97) */ register TARGID { address 0x01b size 2 access_mode RW } /* * Serial Port I/O Capability register (p. 4-95 aic7860 Data Book) * Indicates if external logic has been attached to the chip to * perform the tasks of accessing a serial eeprom, testing termination * strength, and performing cable detection. On the aic7860, most of * these features are handled on chip, but on the aic7855 an attached * aic3800 does the grunt work.
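The PHASE_MASK/P_* masks defined for SCSISIGI earlier in this file reduce bus-phase decoding to one masked compare. A fragment of the idea, assuming the driver's usual ahc_inb() register accessor (defined in the inline header, not shown here):

    u_int sigi = ahc_inb(ahc, SCSISIGI);

    switch (sigi & PHASE_MASK) {
    case P_MESGIN:
            /* message-in bytes can be latched from SCSIDATL */
            break;
    case P_DATAOUT:
            /* host -> target data phase */
            break;
    default:
            break;
    }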
*/ register SPIOCAP { address 0x01b access_mode RW field SOFT1 0x80 field SOFT0 0x40 field SOFTCMDEN 0x20 field EXT_BRDCTL 0x10 /* External Board control */ field SEEPROM 0x08 /* External serial eeprom logic */ field EEPROM 0x04 /* Writable external BIOS ROM */ field ROM 0x02 /* Logic for accessing external ROM */ field SSPIOCPS 0x01 /* Termination and cable detection */ } register BRDCTL { address 0x01d field BRDDAT7 0x80 field BRDDAT6 0x40 field BRDDAT5 0x20 field BRDSTB 0x10 field BRDCS 0x08 field BRDRW 0x04 field BRDCTL1 0x02 field BRDCTL0 0x01 /* 7890 Definitions */ field BRDDAT4 0x10 field BRDDAT3 0x08 field BRDDAT2 0x04 field BRDRW_ULTRA2 0x02 field BRDSTB_ULTRA2 0x01 } /* * Serial EEPROM Control (p. 4-92 in 7870 Databook) * Controls the reading and writing of an external serial 1-bit * EEPROM Device. In order to access the serial EEPROM, you must * first set the SEEMS bit that generates a request to the memory * port for access to the serial EEPROM device. When the memory * port is not busy servicing another request, it reconfigures * to allow access to the serial EEPROM. When this happens, SEERDY * gets set high to verify that the memory port access has been * granted. * * After successful arbitration for the memory port, the SEECS bit of * the SEECTL register is connected to the chip select. The SEECK, * SEEDO, and SEEDI are connected to the clock, data out, and data in * lines respectively. The SEERDY bit of SEECTL is useful in that it * gives us an 800 nsec timer. After a write to the SEECTL register, * the SEERDY goes high 800 nsec later. The one exception to this is * when we first request access to the memory port. The SEERDY goes * high to signify that access has been granted and, for this case, has * no implied timing. * * See 93cx6.c for detailed information on the protocol necessary to * read the serial EEPROM. */ register SEECTL { address 0x01e field EXTARBACK 0x80 field EXTARBREQ 0x40 field SEEMS 0x20 field SEERDY 0x10 field SEECS 0x08 field SEECK 0x04 field SEEDO 0x02 field SEEDI 0x01 } /* * SCSI Block Control (p. 3-32) * Controls Bus type and channel selection. In a twin channel configuration * addresses 0x00-0x1e are gated to the appropriate channel based on this * register. SELWIDE allows for the coexistence of 8bit and 16bit devices * on a wide bus. */ register SBLKCTL { address 0x01f access_mode RW field DIAGLEDEN 0x80 /* Aic78X0 only */ field DIAGLEDON 0x40 /* Aic78X0 only */ field AUTOFLUSHDIS 0x20 field SELBUSB 0x08 field ENAB40 0x08 /* LVD transceiver active */ field ENAB20 0x04 /* SE/HVD transceiver active */ field SELWIDE 0x02 field XCVR 0x01 /* External transceiver active */ } /* * Sequencer Control (p. 3-33) * Error detection mode and speed configuration */ register SEQCTL { address 0x060 access_mode RW field PERRORDIS 0x80 field PAUSEDIS 0x40 field FAILDIS 0x20 field FASTMODE 0x10 field BRKADRINTEN 0x08 field STEP 0x04 field SEQRESET 0x02 field LOADRAM 0x01 } /* * Sequencer RAM Data (p. 3-34) * Single byte window into the Scratch Ram area starting at the address * specified by SEQADDR0 and SEQADDR1. To write a full word, simply write * four bytes in succession. The SEQADDRs will increment after the most * significant byte is written */ register SEQRAM { address 0x061 access_mode RW } /* * Sequencer Address Registers (p. 
3-35) * Only the first bit of SEQADDR1 holds addressing information */ register SEQADDR0 { address 0x062 access_mode RW } register SEQADDR1 { address 0x063 access_mode RW mask SEQADDR1_MASK 0x01 } /* * Accumulator * We cheat by passing arguments in the Accumulator up to the kernel driver */ register ACCUM { address 0x064 access_mode RW accumulator } register SINDEX { address 0x065 access_mode RW sindex } register DINDEX { address 0x066 access_mode RW } register ALLONES { address 0x069 access_mode RO allones } register ALLZEROS { address 0x06a access_mode RO allzeros } register NONE { address 0x06a access_mode WO none } register FLAGS { address 0x06b access_mode RO field ZERO 0x02 field CARRY 0x01 } register SINDIR { address 0x06c access_mode RO } register DINDIR { address 0x06d access_mode WO } register FUNCTION1 { address 0x06e access_mode RW } register STACK { address 0x06f access_mode RO } const STACK_SIZE 4 /* * Board Control (p. 3-43) */ register BCTL { address 0x084 access_mode RW field ACE 0x08 field ENABLE 0x01 } /* * On the aic78X0 chips, Board Control is replaced by the DSCommand * register (p. 4-64) */ register DSCOMMAND0 { address 0x084 access_mode RW field CACHETHEN 0x80 /* Cache Threshold enable */ field DPARCKEN 0x40 /* Data Parity Check Enable */ field MPARCKEN 0x20 /* Memory Parity Check Enable */ field EXTREQLCK 0x10 /* External Request Lock */ /* aic7890/91/96/97 only */ field INTSCBRAMSEL 0x08 /* Internal SCB RAM Select */ field RAMPS 0x04 /* External SCB RAM Present */ field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */ field CIOPARCKEN 0x01 /* Internal bus parity error enable */ } register DSCOMMAND1 { address 0x085 access_mode RW mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */ field HADDLDSEL1 0x02 /* Host Address Load Select Bits */ field HADDLDSEL0 0x01 } /* * Bus On/Off Time (p. 3-44) aic7770 only */ register BUSTIME { address 0x085 access_mode RW mask BOFF 0xf0 mask BON 0x0f } /* * Bus Speed (p. 3-45) aic7770 only */ register BUSSPD { address 0x086 access_mode RW mask DFTHRSH 0xc0 mask STBOFF 0x38 mask STBON 0x07 mask DFTHRSH_100 0xc0 mask DFTHRSH_75 0x80 } /* aic7850/55/60/70/80/95 only */ register DSPCISTATUS { address 0x086 mask DFTHRSH_100 0xc0 } /* aic7890/91/96/97 only */ register HS_MAILBOX { address 0x086 mask HOST_MAILBOX 0xF0 mask SEQ_MAILBOX 0x0F mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */ } const HOST_MAILBOX_SHIFT 4 const SEQ_MAILBOX_SHIFT 0 /* * Host Control (p. 3-47) R/W * Overall host control of the device. */ register HCNTRL { address 0x087 access_mode RW field POWRDN 0x40 field SWINT 0x10 field IRQMS 0x08 field PAUSE 0x04 field INTEN 0x02 field CHIPRST 0x01 field CHIPRSTACK 0x01 } /* * Host Address (p. 3-48) * This register contains the address of the byte about * to be transferred across the host bus. */ register HADDR { address 0x088 size 4 access_mode RW } register HCNT { address 0x08c size 3 access_mode RW } /* * SCB Pointer (p. 3-49) * Gate one of the SCBs into the SCBARRAY window. */ register SCBPTR { address 0x090 access_mode RW } /* * Interrupt Status (p. 
3-50) * Status for system interrupts */ register INTSTAT { address 0x091 access_mode RW field BRKADRINT 0x08 field SCSIINT 0x04 field CMDCMPLT 0x02 field SEQINT 0x01 mask BAD_PHASE SEQINT /* unknown scsi bus phase */ mask SEND_REJECT 0x10|SEQINT /* sending a message reject */ mask PROTO_VIOLATION 0x20|SEQINT /* SCSI protocol violation */ mask NO_MATCH 0x30|SEQINT /* no cmd match for reconnect */ mask IGN_WIDE_RES 0x40|SEQINT /* Complex IGN Wide Res Msg */ mask PDATA_REINIT 0x50|SEQINT /* * Returned to data phase * that requires data * transfer pointers to be * recalculated from the * transfer residual. */ mask HOST_MSG_LOOP 0x60|SEQINT /* * The bus is ready for the * host to perform another * message transaction. This * mechanism is used for things * like sync/wide negotiation * that require a kernel based * message state engine. */ mask BAD_STATUS 0x70|SEQINT /* Bad status from target */ mask PERR_DETECTED 0x80|SEQINT /* * Either the phase_lock * or inb_next routine has * noticed a parity error. */ mask DATA_OVERRUN 0x90|SEQINT /* * Target attempted to write * beyond the bounds of its * command. */ mask MKMSG_FAILED 0xa0|SEQINT /* * Target completed command * without honoring our ATN * request to issue a message. */ mask MISSED_BUSFREE 0xb0|SEQINT /* * The sequencer never saw * the bus go free after * either a command complete * or disconnect message. */ mask SCB_MISMATCH 0xc0|SEQINT /* * Downloaded SCB's tag does * not match the entry we * intended to download. */ mask NO_FREE_SCB 0xd0|SEQINT /* * get_free_or_disc_scb failed. */ mask OUT_OF_RANGE 0xe0|SEQINT mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */ mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) } /* * Hard Error (p. 3-53) * Reporting of catastrophic errors. You usually cannot recover from * these without a full board reset. */ register ERROR { address 0x092 access_mode RO field CIOPARERR 0x80 /* Ultra2 only */ field PCIERRSTAT 0x40 /* PCI only */ field MPARERR 0x20 /* PCI only */ field DPARERR 0x10 /* PCI only */ field SQPARERR 0x08 field ILLOPCODE 0x04 field ILLSADDR 0x02 field ILLHADDR 0x01 } /* * Clear Interrupt Status (p. 3-52) */ register CLRINT { address 0x092 access_mode WO field CLRPARERR 0x10 /* PCI only */ field CLRBRKADRINT 0x08 field CLRSCSIINT 0x04 field CLRCMDINT 0x02 field CLRSEQINT 0x01 } register DFCNTRL { address 0x093 access_mode RW field PRELOADEN 0x80 /* aic7890 only */ field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 field FIFOFLUSH 0x02 field FIFORESET 0x01 } register DFSTATUS { address 0x094 access_mode RO field PRELOAD_AVAIL 0x80 field DFCACHETH 0x40 field FIFOQWDEMP 0x20 field MREQPEND 0x10 field HDONE 0x08 field DFTHRESH 0x04 field FIFOFULL 0x02 field FIFOEMP 0x01 } register DFWADDR { address 0x95 access_mode RW } register DFRADDR { address 0x97 access_mode RW } register DFDAT { address 0x099 access_mode RW } /* * SCB Auto Increment (p. 3-59) * Byte offset into the SCB Array and an optional bit to allow auto * incrementing of the address during download and upload operations */ register SCBCNT { address 0x09a access_mode RW field SCBAUTO 0x80 mask SCBCNT_MASK 0x1f } /* * Queue In FIFO (p. 3-60) * Input queue for queued SCBs (commands that the sequencer has yet to start) */ register QINFIFO { address 0x09b access_mode RW } /* * Queue In Count (p. 3-60) * Number of queued SCBs */ register QINCNT { address 0x09c access_mode RO } /* * Queue Out FIFO (p.
3-61) * Queue of SCBs that have completed and await the host */ register QOUTFIFO { address 0x09d access_mode WO } register CRCCONTROL1 { address 0x09d access_mode RW field CRCONSEEN 0x80 field CRCVALCHKEN 0x40 field CRCENDCHKEN 0x20 field CRCREQCHKEN 0x10 field TARGCRCENDEN 0x08 field TARGCRCCNTEN 0x04 } /* * Queue Out Count (p. 3-61) * Number of queued SCBs in the Out FIFO */ register QOUTCNT { address 0x09e access_mode RO } register SCSIPHASE { address 0x09e access_mode RO field STATUS_PHASE 0x20 field COMMAND_PHASE 0x10 field MSG_IN_PHASE 0x08 field MSG_OUT_PHASE 0x04 field DATA_IN_PHASE 0x02 field DATA_OUT_PHASE 0x01 mask DATA_PHASE_MASK 0x03 } /* * Special Function */ register SFUNCT { address 0x09f access_mode RW field ALT_MODE 0x80 } /* * SCB Definition (p. 5-4) */ scb { address 0x0a0 size 64 SCB_CDB_PTR { size 4 alias SCB_RESIDUAL_DATACNT alias SCB_CDB_STORE } SCB_RESIDUAL_SGPTR { size 4 } SCB_SCSI_STATUS { size 1 } SCB_TARGET_PHASES { size 1 } SCB_TARGET_DATA_DIR { size 1 } SCB_TARGET_ITAG { size 1 } SCB_DATAPTR { size 4 } SCB_DATACNT { /* * The last byte is really the high address bits for * the data address. */ size 4 field SG_LAST_SEG 0x80 /* In the fourth byte */ mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */ } SCB_SGPTR { size 4 field SG_RESID_VALID 0x04 /* In the first byte */ field SG_FULL_RESID 0x02 /* In the first byte */ field SG_LIST_NULL 0x01 /* In the first byte */ } SCB_CONTROL { size 1 field TARGET_SCB 0x80 field STATUS_RCVD 0x80 field DISCENB 0x40 field TAG_ENB 0x20 field MK_MESSAGE 0x10 field ULTRAENB 0x08 field DISCONNECTED 0x04 mask SCB_TAG_TYPE 0x03 } SCB_SCSIID { size 1 field TWIN_CHNLB 0x80 mask TWIN_TID 0x70 mask TID 0xf0 mask OID 0x0f } SCB_LUN { field SCB_XFERLEN_ODD 0x80 mask LID 0x3f size 1 } SCB_TAG { size 1 } SCB_CDB_LEN { size 1 } SCB_SCSIRATE { size 1 } SCB_SCSIOFFSET { size 1 } SCB_NEXT { size 1 } SCB_64_SPARE { size 16 } SCB_64_BTT { size 16 } } const SCB_UPLOAD_SIZE 32 const SCB_DOWNLOAD_SIZE 32 const SCB_DOWNLOAD_SIZE_64 48 const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */ /* --------------------- AHA-2840-only definitions -------------------- */ register SEECTL_2840 { address 0x0c0 access_mode RW field CS_2840 0x04 field CK_2840 0x02 field DO_2840 0x01 } register STATUS_2840 { address 0x0c1 access_mode RW field EEPROM_TF 0x80 mask BIOS_SEL 0x60 mask ADSEL 0x1e field DI_2840 0x01 } /* --------------------- AIC-7870-only definitions -------------------- */ register CCHADDR { address 0x0E0 size 8 } register CCHCNT { address 0x0E8 } register CCSGRAM { address 0x0E9 } register CCSGADDR { address 0x0EA } register CCSGCTL { address 0x0EB field CCSGDONE 0x80 field CCSGEN 0x08 field SG_FETCH_NEEDED 0x02 /* Bit used for software state */ field CCSGRESET 0x01 } register CCSCBCNT { address 0xEF } register CCSCBCTL { address 0x0EE field CCSCBDONE 0x80 field ARRDONE 0x40 /* SCB Array prefetch done */ field CCARREN 0x10 field CCSCBEN 0x08 field CCSCBDIR 0x04 field CCSCBRESET 0x01 } register CCSCBADDR { address 0x0ED } register CCSCBRAM { address 0xEC } /* * SCB bank address (7895/7896/97 only) */ register SCBBADDR { address 0x0F0 access_mode RW } register CCSCBPTR { address 0x0F1 } register HNSCB_QOFF { address 0x0F4 } register SNSCB_QOFF { address 0x0F6 } register SDSCB_QOFF { address 0x0F8 } register QOFF_CTLSTA { address 0x0FA field SCB_AVAIL 0x40 field SNSCB_ROLLOVER 0x20 field SDSCB_ROLLOVER 0x10 mask SCB_QSIZE 0x07 mask SCB_QSIZE_256 0x06 } register DFF_THRSH { address 0x0FB mask WR_DFTHRSH 0x70 mask RD_DFTHRSH 0x07 mask RD_DFTHRSH_MIN 0x00 mask 
RD_DFTHRSH_25 0x01 mask RD_DFTHRSH_50 0x02 mask RD_DFTHRSH_63 0x03 mask RD_DFTHRSH_75 0x04 mask RD_DFTHRSH_85 0x05 mask RD_DFTHRSH_90 0x06 mask RD_DFTHRSH_MAX 0x07 mask WR_DFTHRSH_MIN 0x00 mask WR_DFTHRSH_25 0x10 mask WR_DFTHRSH_50 0x20 mask WR_DFTHRSH_63 0x30 mask WR_DFTHRSH_75 0x40 mask WR_DFTHRSH_85 0x50 mask WR_DFTHRSH_90 0x60 mask WR_DFTHRSH_MAX 0x70 } register SG_CACHE_PRE { access_mode WO address 0x0fc mask SG_ADDR_MASK 0xf8 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } register SG_CACHE_SHADOW { access_mode RO address 0x0fc mask SG_ADDR_MASK 0xf8 field LAST_SEG 0x02 field LAST_SEG_DONE 0x01 } /* ---------------------- Scratch RAM Offsets ------------------------- */ /* These offsets are either to values that are initialized by the board's * BIOS or are specified by the sequencer code. * * The host adapter card (at least the BIOS) uses 20-2f for SCSI * device information, 32-33 and 5a-5f as well. As it turns out, the * BIOS trashes 20-2f, writing the synchronous negotiation results * on top of the BIOS values, so we re-use those for our per-target * scratchspace (actually a value that can be copied directly into * SCSIRATE). The kernel driver will enable synchronous negotiation * for all targets that have a value other than 0 in the lower four * bits of the target scratch space. This should work regardless of * whether the BIOS has been installed. */ scratch_ram { address 0x020 size 58 /* * 1 byte per target starting at this address for configuration values */ BUSY_TARGETS { alias TARG_SCSIRATE size 16 } /* * Bit vector of targets that have ULTRA enabled as set by * the BIOS. The Sequencer relies on a per-SCB field to * control whether to enable Ultra transfers or not. During * initialization, we read this field and reuse it for 2 * entries in the busy target table. */ ULTRA_ENB { alias CMDSIZE_TABLE size 2 } /* * Bit vector of targets that have disconnection disabled as set by * the BIOS. The Sequencer relies on a per-SCB field to control the * disconnect privilege. During initialization, we read this field * and reuse it for 2 entries in the busy target table. */ DISC_DSB { size 2 } CMDSIZE_TABLE_TAIL { size 4 } /* * Partial transfer past cacheline end to be * transferred using an extra S/G. */ MWI_RESIDUAL { size 1 - alias TARG_IMMEDIATE_SCB } /* * SCBID of the next SCB to be started by the controller. */ NEXT_QUEUED_SCB { size 1 } /* * Single byte buffer used to designate the type of message * to send to a target. */ MSG_OUT { size 1 } /* Parameters for DMA Logic */ DMAPARAMS { size 1 field PRELOADEN 0x80 field WIDEODD 0x40 field SCSIEN 0x20 field SDMAEN 0x10 field SDMAENACK 0x10 field HDMAEN 0x08 field HDMAENACK 0x08 field DIRECTION 0x04 /* Set indicates PCI->SCSI */ field FIFOFLUSH 0x02 field FIFORESET 0x01 } SEQ_FLAGS { size 1 field NOT_IDENTIFIED 0x80 field NO_CDB_SENT 0x40 field TARGET_CMD_IS_TAGGED 0x40 field DPHASE 0x20 /* Target flags */ field TARG_CMD_PENDING 0x10 field CMDPHASE_PENDING 0x08 field DPHASE_PENDING 0x04 field SPHASE_PENDING 0x02 field NO_DISCONNECT 0x01 } /* * Temporary storage for the * target/channel/lun of a * reconnecting target */ SAVED_SCSIID { size 1 } SAVED_LUN { size 1 } /* * The last bus phase as seen by the sequencer. */ LASTPHASE { size 1 field CDI 0x80 field IOI 0x40 field MSGI 0x20 mask PHASE_MASK CDI|IOI|MSGI mask P_DATAOUT 0x00 mask P_DATAIN IOI mask P_COMMAND CDI mask P_MESGOUT CDI|MSGI mask P_STATUS CDI|IOI mask P_MESGIN CDI|IOI|MSGI mask P_BUSFREE 0x01 }
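The PHASE_MASK encodings above mirror the SCSI bus control signals (C/D, I/O, MSG). A minimal, hypothetical host-side helper for decoding a LASTPHASE value might look like the following; the constants come from the register definition above, while the function itself is illustrative rather than driver code:

    #include <stdint.h>

    #define CDI  0x80
    #define IOI  0x40
    #define MSGI 0x20
    #define PHASE_MASK (CDI | IOI | MSGI)
    #define P_BUSFREE  0x01

    /* Map a LASTPHASE value to a printable bus phase name. */
    static const char *
    lastphase_name(uint8_t lastphase)
    {
            if (lastphase == P_BUSFREE)     /* pseudo-phase kept by the sequencer */
                    return ("bus free");
            switch (lastphase & PHASE_MASK) {
            case 0:                 return ("data-out");    /* P_DATAOUT */
            case IOI:               return ("data-in");     /* P_DATAIN  */
            case CDI:               return ("command");     /* P_COMMAND */
            case CDI | MSGI:        return ("message-out"); /* P_MESGOUT */
            case CDI | IOI:         return ("status");      /* P_STATUS  */
            case CDI | IOI | MSGI:  return ("message-in");  /* P_MESGIN  */
            default:                return ("reserved");
            }
    }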
/* * head of list of SCBs awaiting * selection */ WAITING_SCBH { size 1 } /* * head of list of SCBs that are * disconnected. Used for SCB * paging. */ DISCONNECTED_SCBH { size 1 } /* * head of list of SCBs that are * not in use. Used for SCB paging. */ FREE_SCBH { size 1 } /* * head of list of SCBs that have * completed but have not been * put into the qoutfifo. */ COMPLETE_SCBH { size 1 } /* * Address of the hardware scb array in the host. */ HSCB_ADDR { size 4 } /* * Base address of our shared data with the kernel driver in host * memory. This includes the qoutfifo and target mode * incoming command queue. */ SHARED_DATA_ADDR { size 4 } KERNEL_QINPOS { size 1 } QINPOS { size 1 } QOUTPOS { size 1 } /* * Kernel and sequencer offsets into the queue of * incoming target mode command descriptors. The * queue is full when the KERNEL_TQINPOS == TQINPOS. */ KERNEL_TQINPOS { size 1 } TQINPOS { size 1 } ARG_1 { size 1 mask SEND_MSG 0x80 mask SEND_SENSE 0x40 mask SEND_REJ 0x20 mask MSGOUT_PHASEMIS 0x10 mask EXIT_MSG_LOOP 0x08 mask CONT_MSG_LOOP 0x04 mask CONT_TARG_SESSION 0x02 alias RETURN_1 } ARG_2 { size 1 alias RETURN_2 } /* * Snapshot of MSG_OUT taken after each message is sent. */ LAST_MSG { size 1 + alias TARG_IMMEDIATE_SCB } /* * Sequences the kernel driver has okayed for us. This allows * the driver to do things like prevent initiator or target * operations. */ SCSISEQ_TEMPLATE { size 1 field ENSELO 0x40 field ENSELI 0x20 field ENRSELI 0x10 field ENAUTOATNO 0x08 field ENAUTOATNI 0x04 field ENAUTOATNP 0x02 } } scratch_ram { address 0x056 size 4 /* * These scratch ram locations are initialized by the 274X BIOS. * We reuse them after capturing the BIOS settings during * initialization. */ /* * The initiator specified tag for this target mode transaction. */ HA_274_BIOSGLOBAL { size 1 field HA_274_EXTENDED_TRANS 0x01 alias INITIATOR_TAG } SEQ_FLAGS2 { size 1 field SCB_DMA 0x01 field TARGET_MSG_PENDING 0x02 } } scratch_ram { address 0x05a size 6 /* * These are reserved registers in the card's scratch ram on the 2742. * The EISA configuration chip is mapped here. On Rev E. of the * aic7770, the sequencer can use this area for scratch, but the * host cannot directly access these registers. On later chips, this * area can be read and written by both the host and the sequencer. * Even on later chips, many of these locations are initialized by * the BIOS. */ SCSICONF { size 1 field TERM_ENB 0x80 field RESET_SCSI 0x40 field ENSPCHK 0x20 mask HSCSIID 0x07 /* our SCSI ID */ mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */ } INTDEF { address 0x05c size 1 field EDGE_TRIG 0x80 mask VECTOR 0x0f } HOSTCONF { address 0x05d size 1 } HA_274_BIOSCTRL { address 0x05f size 1 mask BIOSMODE 0x30 mask BIOSDISABLED 0x30 field CHANNEL_B_PRIMARY 0x08 } } scratch_ram { address 0x070 size 16 /* * Per target SCSI offset values for Ultra2 controllers.
*/ TARG_OFFSET { size 16 } } const TID_SHIFT 4 const SCB_LIST_NULL 0xff const TARGET_CMD_CMPLT 0xfe const CCSGADDR_MAX 0x80 const CCSGRAM_MAXSEGS 16 /* WDTR Message values */ const BUS_8_BIT 0x00 const BUS_16_BIT 0x01 const BUS_32_BIT 0x02 /* Offset maximums */ const MAX_OFFSET_8BIT 0x0f const MAX_OFFSET_16BIT 0x08 const MAX_OFFSET_ULTRA2 0x7f const MAX_OFFSET 0x7f const HOST_MSG 0xff /* Target mode command processing constants */ const CMD_GROUP_CODE_SHIFT 0x05 const STATUS_BUSY 0x08 const STATUS_QUEUE_FULL 0x28 const TARGET_DATA_IN 1 /* * Downloaded (kernel inserted) constants */ /* Offsets into the SCBID array where different data is stored */ const QOUTFIFO_OFFSET download const QINFIFO_OFFSET download const CACHESIZE_MASK download const INVERTED_CACHESIZE_MASK download const SG_PREFETCH_CNT download const SG_PREFETCH_ALIGN_MASK download const SG_PREFETCH_ADDR_MASK download Index: stable/4/sys/dev/aic7xxx/aic7xxx.seq =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx.seq (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx.seq (revision 125849) @@ -1,2398 +1,2399 @@ /* * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ -VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $" +VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $" PATCH_ARG_LIST = "struct ahc_softc *ahc" PREFIX = "ahc_" #include "aic7xxx.reg" #include "scsi_message.h" /* * A few words on the waiting SCB list: * After starting the selection hardware, we check for reconnecting targets * as well as for our selection to complete just in case the reselection wins * bus arbitration. 
The problem with this is that we must keep track of the * SCB that we've already pulled from the QINFIFO and started the selection * on just in case the reselection wins so that we can retry the selection at * a later time. This problem cannot be resolved by holding a single entry * in scratch ram since a reconnecting target can request sense and this will * create yet another SCB waiting for selection. The solution used here is to * use byte 27 of the SCB as a pseudo-next pointer and to thread a list * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes, * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to * this list every time a request sense occurs or after completing a non-tagged * command for which a second SCB has been queued. The sequencer will * automatically consume the entries. */ bus_free_sel: /* * Turn off the selection hardware. We need to reset the * selection request in order to perform a new selection. */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP; and SIMODE1, ~ENBUSFREE; poll_for_work: call clear_target_state; and SXFRCTL0, ~SPIOEN; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } test SCSISEQ, ENSELO jnz poll_for_selection; if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ test SCSISEQ, ENSELO jnz poll_for_selection; } cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting; poll_for_work_loop: if ((ahc->features & AHC_TWIN) != 0) { xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ } test SSTAT0, SELDO|SELDI jnz selection; test_queue: /* Has the driver posted any work for us? */ BEGIN_CRITICAL; if ((ahc->features & AHC_QUEUE_REGS) != 0) { test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; } else { mov A, QINPOS; cmp KERNEL_QINPOS, A je poll_for_work_loop; } mov ARG_1, NEXT_QUEUED_SCB; /* * We have at least one queued SCB now and we don't have any * SCBs in the list of SCBs awaiting selection. Allocate a * card SCB for the host's SCB and get to work on it. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } else { /* In the non-paging case, the SCBID == hardware SCB index */ mov SCBPTR, ARG_1; } or SEQ_FLAGS2, SCB_DMA; END_CRITICAL; dma_queued_scb: /* * DMA the SCB from host ram into the current SCB location. */ mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 call dma_scb; /* * Check one last time to see if this SCB was canceled * before we completed the DMA operation. If it was, * the QINFIFO next pointer will not match our saved * value. */ mov A, ARG_1; BEGIN_CRITICAL; cmp NEXT_QUEUED_SCB, A jne abort_qinscb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { cmp SCB_TAG, A je . + 2; mvi SCB_MISMATCH call set_seqint; } mov NEXT_QUEUED_SCB, SCB_NEXT; mov SCB_NEXT,WAITING_SCBH; mov WAITING_SCBH, SCBPTR; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov NONE, SNSCB_QOFF; } else { inc QINPOS; } and SEQ_FLAGS2, ~SCB_DMA; END_CRITICAL; start_waiting: /* * Start the first entry on the waiting SCB list. */ mov SCBPTR, WAITING_SCBH; call start_selection; poll_for_selection: /* * Twin channel devices cannot handle things like SELTO * interrupts on the "background" channel. So, while * selecting, keep polling the current channel until * either a selection or reselection occurs. */ test SSTAT0, SELDO|SELDI jz poll_for_selection;
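The head insertion in dma_queued_scb above (SCB_NEXT gets the old WAITING_SCBH, WAITING_SCBH gets SCBPTR) is a classic intrusive singly linked list. A small host-side model of it, purely illustrative and assuming the 256-entry tag space described in the header comment:

    #include <stdint.h>

    #define SCB_LIST_NULL 0xff  /* 0x00-0xfe are valid SCB indexes */

    static uint8_t scb_next[256];                 /* models byte 27 of each SCB */
    static uint8_t waiting_scbh = SCB_LIST_NULL;  /* models WAITING_SCBH */

    /* dma_queued_scb: thread a freshly downloaded SCB onto the waiting list. */
    static void
    waiting_list_push(uint8_t scbptr)
    {
            scb_next[scbptr] = waiting_scbh;
            waiting_scbh = scbptr;
    }

    /* select_out: unlink the head once its selection has completed. */
    static uint8_t
    waiting_list_pop(void)
    {
            uint8_t scbptr = waiting_scbh;

            if (scbptr != SCB_LIST_NULL)
                    waiting_scbh = scb_next[scbptr];
            return (scbptr);
    }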
selection: /* * We aren't expecting a bus free, so interrupt * the kernel driver if it happens. */ mvi CLRSINT1,CLRBUSFREE; if ((ahc->features & AHC_DT) == 0) { or SIMODE1, ENBUSFREE; } /* * Guard against a bus free after (re)selection * but prior to enabling the busfree interrupt. SELDI * and SELDO will be cleared in that case. */ test SSTAT0, SELDI|SELDO jz bus_free_sel; test SSTAT0,SELDO jnz select_out; select_in: if ((ahc->flags & AHC_TARGETROLE) != 0) { if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT0, TARGET jz initiator_reselect; } mvi CLRSINT0, CLRSELDI; /* * We've just been selected. Assert BSY and * setup the phase for receiving messages * from the target. */ mvi SCSISIGO, P_MESGOUT|BSYO; /* * Setup the DMA for sending the identify and * command information. */ mvi SEQ_FLAGS, CMDPHASE_PENDING; mov A, TQINPOS; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi CCSCBCTL, CCSCBRESET; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_32byte_addr; mvi DFCNTRL, FIFORESET; } /* Initiator that selected us */ and SAVED_SCSIID, SELID_MASK, SELID; /* The Target ID we were selected at */ if ((ahc->features & AHC_MULTI_TID) != 0) { and A, OID, TARGIDIN; } else if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SAVED_SCSIID; } else { mov DFDAT, SAVED_SCSIID; } /* * If ATN isn't asserted, the target isn't interested * in talking to us. Go directly to bus free. * XXX SCSI-1 may require us to assume lun 0 if * ATN is false. */ test SCSISIGI, ATNI jz target_busfree; /* * Watch ATN closely now as we pull in messages from the * initiator. We follow the guidelines from section 6.5 * of the SCSI-2 spec for what messages are allowed when. */ call target_inb; /* * Our first message must be one of IDENTIFY, ABORT, or * BUS_DEVICE_RESET. */ test DINDEX, MSG_IDENTIFYFLAG jz host_target_message_loop; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } and SAVED_LUN, MSG_IDENTIFY_LUNMASK, DINDEX; /* Remember for disconnection decision */ test DINDEX, MSG_IDENTIFY_DISCFLAG jnz . + 2; /* XXX Honor per target settings too */ or SEQ_FLAGS, NO_DISCONNECT; test SCSISIGI, ATNI jz ident_messages_done; call target_inb; /* * If this is a tagged request, the tagged message must * immediately follow the identify. We test for a valid * tag message by seeing if it is >= MSG_SIMPLE_Q_TAG and * < MSG_IGN_WIDE_RESIDUE. */ add A, -MSG_SIMPLE_Q_TAG, DINDEX; jnc ident_messages_done_msg_pending; add A, -MSG_IGN_WIDE_RESIDUE, DINDEX; jc ident_messages_done_msg_pending;
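The two add/carry tests above implement an unsigned range check. A sketch of the same test in C, assuming the SCSI-2 message codes from scsi_message.h (MSG_SIMPLE_Q_TAG is 0x20 and MSG_IGN_WIDE_RESIDUE, 0x23, is the first code past the three queue-tag messages):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSG_SIMPLE_Q_TAG     0x20
    #define MSG_IGN_WIDE_RESIDUE 0x23  /* first code past the tag messages */

    /*
     * Simple, head-of-queue, and ordered queue tags (0x20-0x22) qualify;
     * anything else means the identify sequence is over.
     */
    static bool
    is_tag_message(uint8_t msg)
    {
            return (msg >= MSG_SIMPLE_Q_TAG && msg < MSG_IGN_WIDE_RESIDUE);
    }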
/* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } /* * If the initiator doesn't feel like providing a tag number, * we've got a failed selection and must transition to bus * free. */ test SCSISIGI, ATNI jz target_busfree; /* * Store the tag for the host. */ call target_inb; if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, DINDEX; } else { mov DFDAT, DINDEX; } mov INITIATOR_TAG, DINDEX; or SEQ_FLAGS, TARGET_CMD_IS_TAGGED; ident_messages_done: /* Terminate the ident list */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi CCSCBRAM, SCB_LIST_NULL; } else { mvi DFDAT, SCB_LIST_NULL; } or SEQ_FLAGS, TARG_CMD_PENDING; test SEQ_FLAGS2, TARGET_MSG_PENDING jnz target_mesgout_pending; test SCSISIGI, ATNI jnz target_mesgout_continue; jmp target_ITloop; ident_messages_done_msg_pending: or SEQ_FLAGS2, TARGET_MSG_PENDING; jmp ident_messages_done; /* * Pushed message loop to allow the kernel to * run its own target mode message state engine. */ host_target_message_loop: mvi HOST_MSG_LOOP call set_seqint; cmp RETURN_1, EXIT_MSG_LOOP je target_ITloop; test SSTAT0, SPIORDY jz .; jmp host_target_message_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Reselection has been initiated by a target. Make a note that we've been * reselected, but haven't seen an IDENTIFY message from the target yet. */ initiator_reselect: /* XXX test for and handle ONE BIT condition */ or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; and SAVED_SCSIID, SELID_MASK, SELID; if ((ahc->features & AHC_ULTRA2) != 0) { and A, OID, SCSIID_ULTRA2; } else { and A, OID, SCSIID; } or SAVED_SCSIID, A; if ((ahc->features & AHC_TWIN) != 0) { test SBLKCTL, SELBUSB jz . + 2; or SAVED_SCSIID, TWIN_CHNLB; } mvi CLRSINT0, CLRSELDI; jmp ITloop; } abort_qinscb: call add_scb_to_free_list; jmp poll_for_work_loop; start_selection: /* * If bus reset interrupts have been disabled (from a previous * reset), re-enable them now. Resets are only of interest * when we have outstanding transactions, so we can safely * defer re-enabling the interrupt until, as an initiator, * we start sending out transactions again. */ test SIMODE1, ENSCSIRST jnz . + 3; mvi CLRSINT1, CLRSCSIRSTI; or SIMODE1, ENSCSIRST; if ((ahc->features & AHC_TWIN) != 0) { and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ test SCB_SCSIID, TWIN_CHNLB jz . + 2; or SINDEX, SELBUSB; mov SBLKCTL,SINDEX; /* select channel */ } initialize_scsiid: if ((ahc->features & AHC_ULTRA2) != 0) { mov SCSIID_ULTRA2, SCB_SCSIID; } else if ((ahc->features & AHC_TWIN) != 0) { and SCSIID, TWIN_TID|OID, SCB_SCSIID; } else { mov SCSIID, SCB_SCSIID; } if ((ahc->flags & AHC_TARGETROLE) != 0) { mov SINDEX, SCSISEQ_TEMPLATE; test SCB_CONTROL, TARGET_SCB jz . + 2; or SINDEX, TEMODE; mov SCSISEQ, SINDEX ret; } else { mov SCSISEQ, SCSISEQ_TEMPLATE ret; } /* * Initialize transfer settings with SCB provided settings. */ set_transfer_settings: if ((ahc->features & AHC_ULTRA) != 0) { test SCB_CONTROL, ULTRAENB jz . + 2; or SXFRCTL0, FAST20; } /* * Initialize SCSIRATE with the appropriate value for this target. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, SCB_SCSIRATE, 2 ret; } else { mov SCSIRATE, SCB_SCSIRATE ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * We carefully toggle SPIOEN to allow us to return the * message byte we receive so it can be checked prior to * driving REQ on the bus for the next byte. */ target_inb: /* * Drive REQ on the bus by enabling SCSI PIO. */ or SXFRCTL0, SPIOEN; /* Wait for the byte */ test SSTAT0, SPIORDY jz .; /* Prevent our read from triggering another REQ */ and SXFRCTL0, ~SPIOEN; /* Save latched contents */ mov DINDEX, SCSIDATL ret; } /* * After the selection, remove this SCB from the "waiting SCB" * list. This is achieved by simply moving our "next" pointer into * WAITING_SCBH.
Our next pointer will be set to null the next time this * SCB is used, so don't bother with it now. */ select_out: /* Turn off the selection hardware */ and SCSISEQ, TEMODE|ENSELI|ENRSELI|ENAUTOATNP, SCSISEQ; mov SCBPTR, WAITING_SCBH; mov WAITING_SCBH,SCB_NEXT; mov SAVED_SCSIID, SCB_SCSIID; and SAVED_LUN, LID, SCB_LUN; call set_transfer_settings; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz initiator_select; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * Put tag in canonical location since not * all connections have an SCB. */ mov INITIATOR_TAG, SCB_TARGET_ITAG; /* * We've just re-selected an initiator. * Assert BSY and setup the phase for * sending our identify messages. */ mvi P_MESGIN|BSYO call change_phase; mvi CLRSINT0, CLRSELDO; /* * Start out with a simple identify message. */ or SAVED_LUN, MSG_IDENTIFYFLAG call target_outb; /* * If we are the result of a tagged command, send * a simple Q tag and the tag id. */ test SCB_CONTROL, TAG_ENB jz . + 3; mvi MSG_SIMPLE_Q_TAG call target_outb; mov SCB_TARGET_ITAG call target_outb; target_synccmd: /* * Now determine what phases the host wants us * to go through. */ mov SEQ_FLAGS, SCB_TARGET_PHASES; test SCB_CONTROL, MK_MESSAGE jz target_ITloop; mvi P_MESGIN|BSYO call change_phase; jmp host_target_message_loop; target_ITloop: /* * Start honoring ATN signals now that * we properly identified ourselves. */ test SCSISIGI, ATNI jnz target_mesgout; test SEQ_FLAGS, CMDPHASE_PENDING jnz target_cmdphase; test SEQ_FLAGS, DPHASE_PENDING jnz target_dphase; test SEQ_FLAGS, SPHASE_PENDING jnz target_sphase; /* * No more work to do. Either disconnect or not depending * on the state of NO_DISCONNECT. */ test SEQ_FLAGS, NO_DISCONNECT jz target_disconnect; mvi TARG_IMMEDIATE_SCB, SCB_LIST_NULL; call complete_target_cmd; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ALLZEROS call get_free_or_disc_scb; } cmp TARG_IMMEDIATE_SCB, SCB_LIST_NULL je .; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov TARG_IMMEDIATE_SCB call dma_scb; call set_transfer_settings; or SXFRCTL0, CLRSTCNT|CLRCHN; jmp target_synccmd; target_mesgout: mvi SCSISIGO, P_MESGOUT|BSYO; target_mesgout_continue: call target_inb; target_mesgout_pending: and SEQ_FLAGS2, ~TARGET_MSG_PENDING; /* Local Processing goes here... */ jmp host_target_message_loop; target_disconnect: mvi P_MESGIN|BSYO call change_phase; test SEQ_FLAGS, DPHASE jz . + 2; mvi MSG_SAVEDATAPOINTER call target_outb; mvi MSG_DISCONNECT call target_outb; target_busfree_wait: /* Wait for preceding I/O session to complete. */ test SCSISIGI, ACKI jnz .; target_busfree: and SIMODE1, ~ENBUSFREE; if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; } clr SCSISIGO; mvi LASTPHASE, P_BUSFREE; call complete_target_cmd; jmp poll_for_work; target_cmdphase: /* * The initiator has dropped ATN (doesn't want to abort or BDR) * and we believe this selection to be valid. If the ring * buffer for new commands is full, return busy or queue full. */ if ((ahc->features & AHC_HS_MAILBOX) != 0) { and A, HOST_TQINPOS, HS_MAILBOX; } else { mov A, KERNEL_TQINPOS; } cmp TQINPOS, A jne tqinfifo_has_space; mvi P_STATUS|BSYO call change_phase; test SEQ_FLAGS, TARGET_CMD_IS_TAGGED jz . + 3;
mvi STATUS_QUEUE_FULL call target_outb; jmp target_busfree_wait; mvi STATUS_BUSY call target_outb; jmp target_busfree_wait; tqinfifo_has_space: mvi P_COMMAND|BSYO call change_phase; call target_inb; mov A, DINDEX; /* Store for host */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, A; } else { mov DFDAT, A; } /* * Determine the number of bytes to read * based on the command group code via table lookup. * We reuse the first 8 bytes of the TARG_SCSIRATE * BIOS array for this table. Count is one less than * the total for the command since we've already fetched * the first byte. */ shr A, CMD_GROUP_CODE_SHIFT; add SINDEX, CMDSIZE_TABLE, A; mov A, SINDIR; test A, 0xFF jz command_phase_done; or SXFRCTL0, SPIOEN; command_loop: test SSTAT0, SPIORDY jz .; cmp A, 1 jne . + 2; and SXFRCTL0, ~SPIOEN; /* Last Byte */ if ((ahc->features & AHC_CMD_CHAN) != 0) { mov CCSCBRAM, SCSIDATL; } else { mov DFDAT, SCSIDATL; } dec A; test A, 0xFF jnz command_loop; command_phase_done: and SEQ_FLAGS, ~CMDPHASE_PENDING; jmp target_ITloop;
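The group-code dispatch above is a straight table lookup: the top three bits of the first CDB byte select one of eight group sizes. A hypothetical C rendering follows; the real table contents are downloaded into scratch RAM by the kernel driver, so the values shown (the standard SCSI CDB sizes for groups 0, 1, 2, 4, and 5) are an assumption for illustration:

    #include <stdint.h>

    #define CMD_GROUP_CODE_SHIFT 0x05  /* group code = top 3 bits of byte 0 */

    /*
     * Bytes still to fetch after the first CDB byte, indexed by group code.
     * Groups 3, 6, and 7 are reserved/vendor specific, so nothing further
     * is read for them (a zero entry short-circuits to command_phase_done).
     */
    static const uint8_t cmdsize_table[8] = {
            6 - 1,   /* group 0: 6-byte CDB  */
            10 - 1,  /* group 1: 10-byte CDB */
            10 - 1,  /* group 2: 10-byte CDB */
            0,       /* group 3: reserved    */
            16 - 1,  /* group 4: 16-byte CDB */
            12 - 1,  /* group 5: 12-byte CDB */
            0, 0     /* groups 6-7: vendor   */
    };

    static uint8_t
    cdb_bytes_remaining(uint8_t first_cdb_byte)
    {
            return (cmdsize_table[first_cdb_byte >> CMD_GROUP_CODE_SHIFT]);
    }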
target_dphase: /* * Data phases on the bus are from the * perspective of the initiator. The dma * code looks at LASTPHASE to determine the * data direction of the DMA. Toggle it for * target transfers. */ xor LASTPHASE, IOI, SCB_TARGET_DATA_DIR; or SCB_TARGET_DATA_DIR, BSYO call change_phase; jmp p_data; target_sphase: mvi P_STATUS|BSYO call change_phase; mvi LASTPHASE, P_STATUS; mov SCB_SCSI_STATUS call target_outb; /* XXX Watch for ATN or parity errors??? */ mvi SCSISIGO, P_MESGIN|BSYO; /* MSG_CMDCMPLT is 0, but we can't do an immediate of 0 */ mov ALLZEROS call target_outb; jmp target_busfree_wait; complete_target_cmd: test SEQ_FLAGS, TARG_CMD_PENDING jnz . + 2; mov SCB_TAG jmp complete_post; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Set the valid byte */ mvi CCSCBADDR, 24; mov CCSCBRAM, ALLONES; mvi CCHCNT, 28; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL; } else { /* Set the valid byte */ or DFCNTRL, FIFORESET; mvi DFWADDR, 3; /* Third 64bit word or byte 24 */ mov DFDAT, ALLONES; mvi 28 call set_hcnt; or DFCNTRL, HDMAEN|FIFOFLUSH; call dma_finish; } inc TQINPOS; mvi INTSTAT,CMDCMPLT ret; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { initiator_select: or SXFRCTL0, SPIOEN|CLRSTCNT|CLRCHN; /* * As soon as we get a successful selection, the target * should go into the message out phase since we have ATN * asserted. */ mvi MSG_OUT, MSG_IDENTIFYFLAG; mvi SEQ_FLAGS, NO_CDB_SENT; mvi CLRSINT0, CLRSELDO; /* * Main loop for information transfer phases. Wait for the * target to assert REQ before checking MSG, C/D and I/O for * the bus phase. */ mesgin_phasemis: ITloop: call phase_lock; mov A, LASTPHASE; test A, ~P_DATAIN jz p_data; cmp A,P_COMMAND je p_command; cmp A,P_MESGOUT je p_mesgout; cmp A,P_STATUS je p_status; cmp A,P_MESGIN je p_mesgin; mvi BAD_PHASE call set_seqint; jmp ITloop; /* Try reading the bus again. */ await_busfree: and SIMODE1, ~ENBUSFREE; mov NONE, SCSIDATL; /* Ack the last byte */ if ((ahc->features & AHC_ULTRA2) != 0) { clr SCSIBUSL; /* Prevent bit leakage during SELTO */ } and SXFRCTL0, ~SPIOEN; + mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT; test SSTAT1,REQINIT|BUSFREE jz .; test SSTAT1, BUSFREE jnz poll_for_work; mvi MISSED_BUSFREE call set_seqint; } clear_target_state: /* * We assume that the kernel driver may reset us * at any time, even in the middle of a DMA, so * clear DFCNTRL too. */ clr DFCNTRL; or SXFRCTL0, CLRSTCNT|CLRCHN; /* * We don't know the target we will connect to, * so default to narrow transfers to avoid * parity problems. */ if ((ahc->features & AHC_ULTRA2) != 0) { bmov SCSIRATE, ALLZEROS, 2; } else { clr SCSIRATE; if ((ahc->features & AHC_ULTRA) != 0) { and SXFRCTL0, ~(FAST20); } } mvi LASTPHASE, P_BUSFREE; /* clear target specific flags */ mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT ret; sg_advance: clr A; /* add sizeof(struct scatter) */ add SCB_RESIDUAL_SGPTR[0],SG_SIZEOF; adc SCB_RESIDUAL_SGPTR[1],A; adc SCB_RESIDUAL_SGPTR[2],A; adc SCB_RESIDUAL_SGPTR[3],A ret; if ((ahc->features & AHC_CMD_CHAN) != 0) { disable_ccsgen: test CCSGCTL, CCSGEN jz return; test CCSGCTL, CCSGDONE jz .; disable_ccsgen_fetch_done: clr CCSGCTL; test CCSGCTL, CCSGEN jnz .; ret; idle_loop: /* * Do we need any more segments for this transfer? */ test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jnz return; /* Did we just finish fetching segs? */ cmp CCSGCTL, CCSGEN|CCSGDONE je idle_sgfetch_complete; /* Are we actively fetching segments? */ test CCSGCTL, CCSGEN jnz return; /* * Do we have any prefetch left??? */ cmp CCSGADDR, SG_PREFETCH_CNT jne idle_sg_avail; /* * Need to fetch segments, but we can only do that * if the command channel is completely idle. Make * sure we don't have an SCB prefetch going on. */ test CCSCBCTL, CCSCBEN jnz return; /* * We fetch a "cacheline aligned" and sized amount of data * so we don't end up referencing a non-existent page. * Cacheline aligned is in quotes because the kernel will * set the prefetch amount to a reasonable level if the * cacheline size is unknown. */ mvi CCHCNT, SG_PREFETCH_CNT; and CCHADDR[0], SG_PREFETCH_ALIGN_MASK, SCB_RESIDUAL_SGPTR; bmov CCHADDR[1], SCB_RESIDUAL_SGPTR[1], 3; mvi CCSGCTL, CCSGEN|CCSGRESET ret; idle_sgfetch_complete: call disable_ccsgen_fetch_done; and CCSGADDR, SG_PREFETCH_ADDR_MASK, SCB_RESIDUAL_SGPTR; idle_sg_avail: if ((ahc->features & AHC_ULTRA2) != 0) { /* Does the hardware have space for another SG entry? */ test DFSTATUS, PRELOAD_AVAIL jz return; bmov HADDR, CCSGRAM, 7; bmov SCB_RESIDUAL_DATACNT[3], CCSGRAM, 1; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; } call sg_advance; mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; /* Load the segment */ or DFCNTRL, PRELOADEN; } ret; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { /* * Calculate the trailing portion of this S/G segment that cannot * be transferred using memory write and invalidate PCI transactions. * XXX Can we optimize this for PCI writes only??? */ calc_mwi_residual: /* * If the ending address is on a cacheline boundary, * there is no need for an extra segment. */ mov A, HCNT[0]; add A, A, HADDR[0]; and A, CACHESIZE_MASK; test A, 0xFF jz return; /* * If the transfer is less than a cacheline, * there is no need for an extra segment. */ test HCNT[1], 0xFF jnz calc_mwi_residual_final; test HCNT[2], 0xFF jnz calc_mwi_residual_final; add NONE, INVERTED_CACHESIZE_MASK, HCNT[0]; jnc return; calc_mwi_residual_final: mov MWI_RESIDUAL, A; not A; inc A; add HCNT[0], A; adc HCNT[1], -1; adc HCNT[2], -1 ret; }
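In C terms, calc_mwi_residual splits a segment so the main portion ends on a cacheline boundary (and so remains eligible for PCI memory-write-and-invalidate) while the trailing bytes are carried in MWI_RESIDUAL for a follow-up transfer. A minimal sketch of the arithmetic, assuming a power-of-two cacheline size of at most 256 bytes (which is why the firmware can work on the low address and count bytes alone):

    #include <stdint.h>

    /*
     * Returns the MWI residual (0 if no split is needed) and trims *hcnt
     * down to the cacheline-aligned main portion.
     */
    static uint32_t
    calc_mwi_residual(uint32_t haddr, uint32_t *hcnt, uint32_t cachesize)
    {
            uint32_t tail = (haddr + *hcnt) & (cachesize - 1);

            if (tail == 0 || *hcnt < cachesize)
                    return (0);     /* aligned end, or too small to split */
            *hcnt -= tail;          /* main portion now ends on a boundary */
            return (tail);          /* trailing bytes -> extra S/G segment */
    }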
p_data: test SEQ_FLAGS,NOT_IDENTIFIED|NO_CDB_SENT jz p_data_allowed; mvi PROTO_VIOLATION call set_seqint; p_data_allowed: if ((ahc->features & AHC_ULTRA2) != 0) { mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; } else { mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; } test LASTPHASE, IOI jnz . + 2; or DMAPARAMS, DIRECTION; if ((ahc->features & AHC_CMD_CHAN) != 0) { /* We don't have any valid S/G elements */ mvi CCSGADDR, SG_PREFETCH_CNT; } test SEQ_FLAGS, DPHASE jz data_phase_initialize; /* * If we re-enter the data phase after going through another * phase, our transfer location has almost certainly been * corrupted by the intervening, non-data, transfers. Ask * the host driver to fix us up based on the transfer residual. */ mvi PDATA_REINIT call set_seqint; jmp data_phase_loop; data_phase_initialize: /* We have seen a data phase for the first time */ or SEQ_FLAGS, DPHASE; /* * Initialize the DMA address and counter from the SCB. * Also set SCB_RESIDUAL_SGPTR, including the LAST_SEG * flag in the highest byte of the data count. We cannot * modify the saved values in the SCB until we see a save * data pointers message. */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* The lowest address byte must be loaded last. */ mov SCB_DATACNT[3] call set_hhaddr; } if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SCB_DATAPTR, 7; bmov SCB_RESIDUAL_DATACNT[3], SCB_DATACNT[3], 5; } else { mvi DINDEX, HADDR; mvi SCB_DATAPTR call bcopy_7; mvi DINDEX, SCB_RESIDUAL_DATACNT + 3; mvi SCB_DATACNT + 3 call bcopy_5; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } and SCB_RESIDUAL_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) == 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } } data_phase_loop: /* Guard against overruns */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz data_phase_inbounds; /* * Turn on `Bit Bucket' mode, wait until the target takes * us to another phase, and then notify the host. */ and DMAPARAMS, DIRECTION; mov DFCNTRL, DMAPARAMS; or SXFRCTL1,BITBUCKET; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz .; } else { test SCSIPHASE, DATA_PHASE_MASK jnz .; } and SXFRCTL1, ~BITBUCKET; mvi DATA_OVERRUN call set_seqint; jmp ITloop; data_phase_inbounds: if ((ahc->features & AHC_ULTRA2) != 0) { mov SINDEX, SCB_RESIDUAL_SGPTR[0]; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz . + 2; or SINDEX, LAST_SEG; mov SG_CACHE_PRE, SINDEX; mov DFCNTRL, DMAPARAMS; ultra2_dma_loop: call idle_loop; /* * The transfer is complete if either the last segment * completes or the target changes phase. */ test SG_CACHE_SHADOW, LAST_SEG_DONE jnz ultra2_dmafinish; if ((ahc->features & AHC_DT) == 0) { if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * As a target, we control the phases, * so ignore PHASEMIS. */ test SSTAT0, TARGET jnz ultra2_dma_loop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1,PHASEMIS jz ultra2_dma_loop; } } else { test DFCNTRL, SCSIEN jnz ultra2_dma_loop; } ultra2_dmafinish: /* * The transfer has terminated either due to a phase * change, and/or the completion of the last segment. * We have two goals here. Do as much other work * as possible while the data fifo drains on a read * and respond as quickly as possible to the standard * messages (save data pointers/disconnect and command * complete) that usually follow a data phase. */ if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On chips with broken auto-flush, start * the flushing process now. We'll poke * the chip from time to time to keep the * flush process going as we complete the * data phase. */ or DFCNTRL, FIFOFLUSH; } /* * We assume that, even though data may still be * transferring to the host, that the SCSI side of * the DMA engine is now in a static state.
This * allows us to update our notion of where we are * in this transfer. * * If, by chance, we stopped before being able * to fetch additional segments for this transfer, * yet the last S/G was completely exhausted, * call our idle loop until it is able to load * another segment. This will allow us to immediately * pick up on the next segment on the next data phase. * * If we happened to stop on the last segment, then * our residual information is still correct from * the idle loop and there is no need to perform * any fixups. */ ultra2_ensure_sg: test SG_CACHE_SHADOW, LAST_SEG jz ultra2_shvalid; /* Record if we've consumed all S/G entries */ test SSTAT2, SHVALID jnz residuals_correct; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp residuals_correct; ultra2_shvalid: test SSTAT2, SHVALID jnz sgptr_fixup; call idle_loop; jmp ultra2_ensure_sg; sgptr_fixup: /* * Fixup the residual next S/G pointer. The S/G preload * feature of the chip allows us to load two elements * in addition to the currently active element. We * store the bottom byte of the next S/G pointer in * the SG_CACHE_PRE register so we can restore the * correct value when the DMA completes. If the next * sg ptr value has advanced to the point where higher * bytes in the address have been affected, fix them * too. */ test SG_CACHE_SHADOW, 0x80 jz sgptr_fixup_done; test SCB_RESIDUAL_SGPTR[0], 0x80 jnz sgptr_fixup_done; add SCB_RESIDUAL_SGPTR[1], -1; adc SCB_RESIDUAL_SGPTR[2], -1; adc SCB_RESIDUAL_SGPTR[3], -1; sgptr_fixup_done: and SCB_RESIDUAL_SGPTR[0], SG_ADDR_MASK, SG_CACHE_SHADOW; /* We are not the last seg */ and SCB_RESIDUAL_DATACNT[3], ~SG_LAST_SEG;
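In C terms, the fixup reconstructs a 32-bit pointer from the hardware's cached low byte, borrowing from the upper bytes when prefetch has advanced past a 256-byte boundary that the in-flight element has not crossed. A hypothetical rendering (SG_ADDR_MASK is 0xf8 per the SG_CACHE_SHADOW definition; the function is illustrative):

    #include <stdint.h>

    #define SG_ADDR_MASK 0xf8  /* low-byte address bits kept by SG_CACHE_* */

    /*
     * Restore the "next S/G" pointer after a DMA stop.  next_sgptr is the
     * already-advanced residual pointer; cache_shadow holds the low byte
     * the chip latched for the element actually in flight.
     */
    static uint32_t
    sgptr_fixup(uint32_t next_sgptr, uint8_t cache_shadow)
    {
            /* Borrow: software pointer already wrapped into the next
             * 256-byte window, hardware pointer has not. */
            if ((cache_shadow & 0x80) != 0 && (next_sgptr & 0x80) == 0)
                    next_sgptr -= 0x100;
            return ((next_sgptr & ~0xffu) | (cache_shadow & SG_ADDR_MASK));
    }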
residuals_correct: /* * Go ahead and shut down the DMA engine now. * In the future, we'll want to handle end of * transfer messages prior to doing this, but this * requires similar restructuring for pre-ULTRA2 * controllers. */ test DMAPARAMS, DIRECTION jnz ultra2_fifoempty; ultra2_fifoflush: if ((ahc->features & AHC_DT) == 0) { if ((ahc->bugs & AHC_AUTOFLUSH_BUG) != 0) { /* * On Rev A of the aic7890, the autoflush * feature doesn't function correctly. * Perform an explicit manual flush. During * a manual flush, the FIFOEMP bit becomes * true every time the PCI FIFO empties * regardless of the state of the SCSI FIFO. * It can take up to 4 clock cycles for the * SCSI FIFO to get data into the PCI FIFO * and for FIFOEMP to de-assert. Here we * guard against this condition by making * sure the FIFOEMP bit stays on for 5 full * clock cycles. */ or DFCNTRL, FIFOFLUSH; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } test DFSTATUS, FIFOEMP jz ultra2_fifoflush; } else { /* * We enable the auto-ack feature on DT capable * controllers. This means that the controller may * have already transferred some overrun bytes into * the data FIFO and acked them on the bus. The only * way to detect this situation is to wait for * LAST_SEG_DONE to come true on a completed transfer * and then test to see if the data FIFO is non-empty. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz ultra2_wait_fifoemp; test SG_CACHE_SHADOW, LAST_SEG_DONE jz .; /* * FIFOEMP can lag LAST_SEG_DONE. Wait a few * clocks before calling this an overrun. */ test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; test DFSTATUS, FIFOEMP jnz ultra2_fifoempty; /* Overrun */ jmp data_phase_loop; ultra2_wait_fifoemp: test DFSTATUS, FIFOEMP jz .; } ultra2_fifoempty: /* Don't clobber an in-progress host data transfer */ test DFSTATUS, MREQPEND jnz ultra2_fifoempty; ultra2_dmahalt: and DFCNTRL, ~(SCSIEN|HDMAEN); test DFCNTRL, SCSIEN|HDMAEN jnz .; if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { /* * Keep HHADDR cleared for future, 32bit addressed * only, DMA operations. * * Due to bayonet style S/G handling, our residual * data must be "fixed up" once the transfer is halted. * Here we fixup the HSHADDR stored in the high byte * of the residual data cnt. By postponing the fixup, * we can batch the clearing of HADDR with the fixup. * If we halted on the last segment, the residual is * already correct. If we are not on the last * segment, copy the high address directly from HSHADDR. * We don't need to worry about maintaining the * SG_LAST_SEG flag as it will always be false in the * case where an update is required. */ or DSCOMMAND1, HADDLDSEL0; test SG_CACHE_SHADOW, LAST_SEG jnz . + 2; mov SCB_RESIDUAL_DATACNT[3], SHADDR; clr HADDR; and DSCOMMAND1, ~HADDLDSEL0; } } else { /* If we are the last SG block, tell the hardware. */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jnz dma_mid_sg; } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jz dma_last_sg; - if ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0) { + if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) { test DMAPARAMS, DIRECTION jz dma_mid_sg; } } dma_last_sg: and DMAPARAMS, ~WIDEODD; dma_mid_sg: /* Start DMA data transfer. */ mov DFCNTRL, DMAPARAMS; dma_loop: if ((ahc->features & AHC_CMD_CHAN) != 0) { call idle_loop; } test SSTAT0,DMADONE jnz dma_dmadone; test SSTAT1,PHASEMIS jz dma_loop; /* i.e. underrun */ dma_phasemis: /* * We will be "done" DMAing when the transfer count goes to * zero, or the target changes the phase (in light of this, * it makes sense that the DMA circuitry doesn't ACK when * PHASEMIS is active). If we are doing a SCSI->Host transfer, * the data FIFO should be flushed auto-magically on STCNT=0 * or a phase change, so just wait for FIFO empty status. */ dma_checkfifo: test DFCNTRL,DIRECTION jnz dma_fifoempty; dma_fifoflush: test DFSTATUS,FIFOEMP jz dma_fifoflush; dma_fifoempty: /* Don't clobber an in-progress host data transfer */ test DFSTATUS, MREQPEND jnz dma_fifoempty; /* * Now shut off the DMA and make sure that the DMA * hardware has actually stopped. Touching the DMA * counters, etc. while a DMA is active will result * in an ILLSADDR exception. */ dma_dmadone: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); dma_halt: /* * Some revisions of the aic78XX have a problem where, if the * data fifo is full, but the PCI input latch is not empty, * HDMAEN cannot be cleared. The fix used here is to drain * the prefetched but unused data from the data fifo until * there is space for the input latch to drain.
*/ if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { mov NONE, DFDAT; } test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; /* See if we have completed this last segment */ test STCNT[0], 0xff jnz data_phase_finish; test STCNT[1], 0xff jnz data_phase_finish; test STCNT[2], 0xff jnz data_phase_finish; /* * Advance the scatter-gather pointers if needed */ if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { test MWI_RESIDUAL, 0xFF jz no_mwi_resid; /* * Reload HADDR from SHADDR and setup the * count to be the size of our residual. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR, SHADDR, 4; mov HCNT, MWI_RESIDUAL; bmov HCNT[1], ALLZEROS, 2; } else { mvi DINDEX, HADDR; mvi SHADDR call bcopy_4; mov MWI_RESIDUAL call set_hcnt; } clr MWI_RESIDUAL; jmp sg_load_done; no_mwi_resid: } test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz sg_load; or SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL; jmp data_phase_finish; sg_load: /* * Load the next SG element's data address and length * into the DMA engine. If we don't have hardware * to perform a prefetch, we'll have to fetch the * segment from host memory first. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Wait for the idle loop to complete */ test CCSGCTL, CCSGEN jz . + 3; call idle_loop; test CCSGCTL, CCSGEN jnz . - 1; bmov HADDR, CCSGRAM, 7; /* * Workaround for flaky external SCB RAM * on certain aic7895 setups. It seems * unable to handle direct transfers from * S/G ram to certain SCB locations. */ mov SINDEX, CCSGRAM; mov SCB_RESIDUAL_DATACNT[3], SINDEX; } else { if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } mvi DINDEX, HADDR; mvi SCB_RESIDUAL_SGPTR call bcopy_4; mvi SG_SIZEOF call set_hcnt; or DFCNTRL, HDMAEN|DIRECTION|FIFORESET; call dma_finish; mvi DINDEX, HADDR; call dfdat_in_7; mov SCB_RESIDUAL_DATACNT[3], DFDAT; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov SCB_RESIDUAL_DATACNT[3] call set_hhaddr; /* * The lowest address byte must be loaded * last as it triggers the computation of * some items in the PCI block. The ULTRA2 * chips do this on PRELOAD. */ mov HADDR, HADDR; } if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { call calc_mwi_residual; } /* Point to the new next sg in memory */ call sg_advance; sg_load_done: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT, HCNT, 3; } else { call set_stcnt_from_hcnt; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_loop; } } data_phase_finish: /* * If the target has left us in data phase, loop through * the dma code again. In the case of ULTRA2 adapters, * we should only loop if there is a data overrun. For * all other adapters, we'll loop after each S/G element * is loaded as well as if there is an overrun. */ if ((ahc->flags & AHC_TARGETROLE) != 0) { test SSTAT0, TARGET jnz data_phase_done; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { test SSTAT1, REQINIT jz .; if ((ahc->features & AHC_DT) == 0) { test SSTAT1,PHASEMIS jz data_phase_loop; } else { test SCSIPHASE, DATA_PHASE_MASK jnz data_phase_loop; } } data_phase_done: /* * After a DMA finishes, save the SG and STCNT residuals back into * the SCB. We use STCNT instead of HCNT, since it's a reflection * of how many bytes were transferred on the SCSI (as opposed to the * host) bus. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { /* Kill off any pending prefetch */ call disable_ccsgen; } if ((ahc->features & AHC_ULTRA2) == 0) { /* * Clear the high address byte so that all other DMA * operations, which use 32bit addressing, can assume * HHADDR is 0. 
*/ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { mov ALLZEROS call set_hhaddr; } } /* * Update our residual information before the information is * lost by some other type of SCSI I/O (e.g. PIO). If we have * transferred all data, no update is needed. * */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jnz residual_update_done; if ((ahc->bugs & AHC_PCI_MWI_BUG) != 0 && ahc->pci_cachesize != 0) { if ((ahc->features & AHC_CMD_CHAN) != 0) { test MWI_RESIDUAL, 0xFF jz bmov_resid; } mov A, MWI_RESIDUAL; add SCB_RESIDUAL_DATACNT[0], A, STCNT[0]; clr A; adc SCB_RESIDUAL_DATACNT[1], A, STCNT[1]; adc SCB_RESIDUAL_DATACNT[2], A, STCNT[2]; clr MWI_RESIDUAL; if ((ahc->features & AHC_CMD_CHAN) != 0) { jmp . + 2; bmov_resid: bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_RESIDUAL_DATACNT, STCNT, 3; } else { mov SCB_RESIDUAL_DATACNT[0], STCNT[0]; mov SCB_RESIDUAL_DATACNT[1], STCNT[1]; mov SCB_RESIDUAL_DATACNT[2], STCNT[2]; } residual_update_done: /* * Since we've been through a data phase, the SCB_RESID* fields * are now initialized. Clear the full residual flag. */ and SCB_SGPTR[0], ~SG_FULL_RESID; if ((ahc->features & AHC_ULTRA2) != 0) { /* Clear the channel in case we return to data phase later */ or SXFRCTL0, CLRSTCNT|CLRCHN; or SXFRCTL0, CLRSTCNT|CLRCHN; } if ((ahc->flags & AHC_TARGETROLE) != 0) { test SEQ_FLAGS, DPHASE_PENDING jz ITloop; and SEQ_FLAGS, ~DPHASE_PENDING; /* * For data-in phases, wait for any pending acks from the * initiator before changing phase. We only need to * send Ignore Wide Residue messages for data-in phases. */ test DFCNTRL, DIRECTION jz target_ITloop; test SSTAT1, REQINIT jnz .; test SCB_LUN, SCB_XFERLEN_ODD jz target_ITloop; test SCSIRATE, WIDEXFER jz target_ITloop; /* * Issue an Ignore Wide Residue Message. */ mvi P_MESGIN|BSYO call change_phase; mvi MSG_IGN_WIDE_RESIDUE call target_outb; mvi 1 call target_outb; jmp target_ITloop; } else { jmp ITloop; } if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Command phase. Set up the DMA registers and let 'er rip. */ p_command: test SEQ_FLAGS, NOT_IDENTIFIED jz p_command_okay; mvi PROTO_VIOLATION call set_seqint; p_command_okay: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HCNT[0], SCB_CDB_LEN, 1; bmov HCNT[1], ALLZEROS, 2; mvi SG_CACHE_PRE, LAST_SEG; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov STCNT[0], SCB_CDB_LEN, 1; bmov STCNT[1], ALLZEROS, 2; } else { mov STCNT[0], SCB_CDB_LEN; clr STCNT[1]; clr STCNT[2]; } add NONE, -13, SCB_CDB_LEN; mvi SCB_CDB_STORE jnc p_command_embedded; p_command_from_host: if ((ahc->features & AHC_ULTRA2) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION); } else { if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov HADDR[0], SCB_CDB_PTR, 4; bmov HCNT, STCNT, 3; } else { mvi DINDEX, HADDR; mvi SCB_CDB_PTR call bcopy_4; mov SCB_CDB_LEN call set_hcnt; } mvi DFCNTRL, (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET); } jmp p_command_xfer; p_command_embedded: /* * The data fifo seems to require 4 byte aligned * transfers from the sequencer. Force this to * be the case by clearing HADDR[0] even though * we aren't going to touch host memory. */ clr HADDR[0]; if ((ahc->features & AHC_ULTRA2) != 0) { mvi DFCNTRL, (PRELOADEN|SCSIEN|DIRECTION); bmov DFDAT, SCB_CDB_STORE, 12; } else if ((ahc->features & AHC_CMD_CHAN) != 0) { if ((ahc->flags & AHC_SCB_BTT) != 0) { /* * On the 7895 the data FIFO will * get corrupted if you try to dump * data from external SCB memory into * the FIFO while it is enabled. 
So, * fill the fifo and then enable SCSI * transfers. */ mvi DFCNTRL, (DIRECTION|FIFORESET); } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); } bmov DFDAT, SCB_CDB_STORE, 12; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFOFLUSH); } else { or DFCNTRL, FIFOFLUSH; } } else { mvi DFCNTRL, (SCSIEN|SDMAEN|DIRECTION|FIFORESET); call copy_to_fifo_6; call copy_to_fifo_6; or DFCNTRL, FIFOFLUSH; } p_command_xfer: and SEQ_FLAGS, ~NO_CDB_SENT; if ((ahc->features & AHC_DT) == 0) { test SSTAT0, SDONE jnz . + 2; test SSTAT1, PHASEMIS jz . - 1; /* * Wait for our ACK to go away on its own * instead of being killed by SCSIEN getting cleared. */ test SCSISIGI, ACKI jnz .; } else { test DFCNTRL, SCSIEN jnz .; } test SSTAT0, SDONE jnz p_command_successful; /* * Don't allow a data phase if the command * was not fully transferred. */ or SEQ_FLAGS, NO_CDB_SENT; p_command_successful: and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz .; jmp ITloop; /* * Status phase. Wait for the data byte to appear, then read it * and store it into the SCB. */ p_status: test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; p_status_okay: mov SCB_SCSI_STATUS, SCSIDATL; or SCB_CONTROL, STATUS_RCVD; jmp ITloop; /* * Message out phase. If MSG_OUT is MSG_IDENTIFYFLAG, build a full * identify message sequence and send it to the target. The host may * override this behavior by setting the MK_MESSAGE bit in the SCB * control byte. This will cause us to interrupt the host and allow * it to handle the message phase completely on its own. If the bit * associated with this target is set, we will also interrupt the host, * thereby allowing it to send a message on the next selection regardless * of the transaction being sent. * * If MSG_OUT == HOST_MSG, also interrupt the host and take a message. * This is done to allow the host to send messages outside of an identify * sequence while protecting the sequencer from testing the MK_MESSAGE bit * on an SCB that might not be for the current nexus. (For example, a * BDR message in response to a bad reselection would leave us pointed to * an SCB that doesn't have anything to do with the current target). * * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag, * bus device reset). * * When there are no messages to send, MSG_OUT should be set to MSG_NOOP, * in case the target decides to put us in this phase for some strange * reason. */ p_mesgout_retry: /* Turn on ATN for the retry */ if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } p_mesgout: mov SINDEX, MSG_OUT; cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host; test SCB_CONTROL,MK_MESSAGE jnz host_message_loop; p_mesgout_identify: or SINDEX, MSG_IDENTIFYFLAG|DISCENB, SAVED_LUN; test SCB_CONTROL, DISCENB jnz . + 2; and SINDEX, ~DISCENB; /* * Send a tag message if TAG_ENB is set in the SCB control block. * Use SCB_TAG (the position in the kernel's SCB array) as the tag value. */ p_mesgout_tag: test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; mov SCSIDATL, SINDEX; /* Send the identify message */ call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; call phase_lock; cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; mov SCB_TAG jmp p_mesgout_onebyte;
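p_mesgout_identify and p_mesgout_tag construct at most three message-out bytes; the firmware exploits the fact that the SCB_CONTROL DISCENB bit (0x40) coincides with the identify message's disconnect-privilege bit, and that TAG_ENB (0x20) coincides with MSG_SIMPLE_Q_TAG. A host-side sketch of the same construction (SCSI-2 message values assumed for illustration):

    #include <stddef.h>
    #include <stdint.h>

    #define MSG_IDENTIFYFLAG 0x80
    #define DISCENB          0x40  /* disconnect privilege bit */
    #define TAG_ENB          0x20  /* doubles as MSG_SIMPLE_Q_TAG */
    #define SCB_TAG_TYPE     0x03  /* 0 simple, 1 head-of-queue, 2 ordered */

    /* Build the identify (and optional queue tag) message-out bytes. */
    static size_t
    build_mesgout(uint8_t *msg, uint8_t lun, uint8_t scb_control,
                  uint8_t scb_tag)
    {
            size_t n = 0;

            msg[n] = MSG_IDENTIFYFLAG | DISCENB | lun;
            if ((scb_control & DISCENB) == 0)
                    msg[n] &= ~DISCENB; /* disconnection disabled for this I/O */
            n++;
            if (scb_control & TAG_ENB) {
                    msg[n++] = scb_control & (TAG_ENB | SCB_TAG_TYPE);
                    msg[n++] = scb_tag; /* index into the kernel's SCB array */
            }
            return (n);
    }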
/* * Interrupt the driver, and allow it to handle this message * phase and any required retries. */ p_mesgout_from_host: cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; jmp host_message_loop; p_mesgout_onebyte: mvi CLRSINT1, CLRATNO; mov SCSIDATL, SINDEX; /* * If the next bus phase after ATN drops is message out, it means * that the target is requesting that the last message(s) be resent. */ call phase_lock; cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; p_mesgout_done: mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ mov LAST_MSG, MSG_OUT; mvi MSG_OUT, MSG_NOOP; /* No message left */ jmp ITloop; /* * Message in phase. Bytes are read using Automatic PIO mode. */ p_mesgin: mvi ACCUM call inb_first; /* read the 1st message byte */ test A,MSG_IDENTIFYFLAG jnz mesgin_identify; cmp A,MSG_DISCONNECT je mesgin_disconnect; cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; cmp ALLZEROS,A je mesgin_complete; cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_ign_wide_residue; cmp A,MSG_NOOP je mesgin_done; /* * Pushed message loop to allow the kernel to * run its own message state engine. To avoid an * extra nop instruction after signaling the kernel, * we perform the phase_lock before checking to see * if we should exit the loop and skip the phase_lock * in the ITloop. Performing back to back phase_locks * shouldn't hurt, but why do it twice... */ host_message_loop: mvi HOST_MSG_LOOP call set_seqint; call phase_lock; cmp RETURN_1, EXIT_MSG_LOOP je ITloop + 1; jmp host_message_loop; mesgin_ign_wide_residue: if ((ahc->features & AHC_WIDE) != 0) { test SCSIRATE, WIDEXFER jz mesgin_reject; /* Pull the residue byte */ mvi ARG_1 call inb_next; cmp ARG_1, 0x01 jne mesgin_reject; test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz . + 2; test SCB_LUN, SCB_XFERLEN_ODD jnz mesgin_done; mvi IGN_WIDE_RES call set_seqint; jmp mesgin_done; } mesgin_proto_violation: mvi PROTO_VIOLATION call set_seqint; jmp mesgin_done; mesgin_reject: mvi MSG_MESSAGE_REJECT call mk_mesg; mesgin_done: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ jmp ITloop; /* * We received a "command complete" message. Put the SCB_TAG into the QOUTFIFO, * and trigger a completion interrupt. Before doing so, check to see if there * is a residual or the status byte is something other than STATUS_GOOD (0). * In either of these conditions, we upload the SCB back to the host so it can * process this information. In the case of a non-zero status byte, we * additionally interrupt the kernel driver synchronously, allowing it to * decide if sense should be retrieved. If the kernel driver wishes to request * sense, it will fill the kernel SCB with a request sense command, requeue * it to the QINFIFO and tell us not to post to the QOUTFIFO by setting * RETURN_1 to SEND_SENSE. */
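complete_post (further below) implements the posting half of this handshake: it writes the finished tag into the shared-memory qoutfifo and raises CMDCMPLT. A sketch of a matching host-side drain loop, under the assumption that consumed slots are re-armed with SCB_LIST_NULL so fresh completions can be detected without a chip register read (an illustration of the scheme, not the driver's actual routine):

    #include <stdint.h>

    #define SCB_LIST_NULL 0xff

    /*
     * Drain the 256-entry qoutfifo that lives at SHARED_DATA_ADDR in host
     * memory.  qoutfifonext is the host's running read index.
     */
    static void
    drain_qoutfifo(volatile uint8_t qoutfifo[256], uint8_t *qoutfifonext)
    {
            uint8_t tag;

            while ((tag = qoutfifo[*qoutfifonext]) != SCB_LIST_NULL) {
                    qoutfifo[*qoutfifonext] = SCB_LIST_NULL; /* re-arm slot */
                    (*qoutfifonext)++;              /* uint8_t wraps at 256 */
                    /* ... complete the transaction for SCB 'tag' ... */
                    (void)tag;
            }
    }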
*/ test SEQ_FLAGS, NOT_IDENTIFIED jnz mesgin_proto_violation; /* * If we received good status but never successfully sent the * cdb, abort the command. */ test SCB_SCSI_STATUS,0xff jnz complete_accepted; test SEQ_FLAGS, NO_CDB_SENT jnz mesgin_proto_violation; complete_accepted: /* * See if we attempted to deliver a message but the target ignored us. */ test SCB_CONTROL, MK_MESSAGE jz . + 2; mvi MKMSG_FAILED call set_seqint; /* * Check for residuals */ test SCB_SGPTR, SG_LIST_NULL jnz check_status;/* No xfer */ test SCB_SGPTR, SG_FULL_RESID jnz upload_scb;/* Never xfered */ test SCB_RESIDUAL_SGPTR, SG_LIST_NULL jz upload_scb; check_status: test SCB_SCSI_STATUS,0xff jz complete; /* Good Status? */ upload_scb: or SCB_SGPTR, SG_RESID_VALID; mvi DMAPARAMS, FIFORESET; mov SCB_TAG call dma_scb; test SCB_SCSI_STATUS, 0xff jz complete; /* Just a residual? */ mvi BAD_STATUS call set_seqint; /* let driver know */ cmp RETURN_1, SEND_SENSE jne complete; call add_scb_to_free_list; jmp await_busfree; complete: mov SCB_TAG call complete_post; jmp await_busfree; } complete_post: /* Post the SCBID in SINDEX and issue an interrupt */ call add_scb_to_free_list; mov ARG_1, SINDEX; if ((ahc->features & AHC_QUEUE_REGS) != 0) { mov A, SDSCB_QOFF; } else { mov A, QOUTPOS; } mvi QOUTFIFO_OFFSET call post_byte_setup; mov ARG_1 call post_byte; if ((ahc->features & AHC_QUEUE_REGS) == 0) { inc QOUTPOS; } mvi INTSTAT,CMDCMPLT ret; if ((ahc->flags & AHC_INITIATORROLE) != 0) { /* * Is it a disconnect message? Set a flag in the SCB to remind us * and await the bus going free. If this is an untagged transaction * store the SCB id for it in our untagged target table for lookup on * a reselection. */ mesgin_disconnect: /* * If ATN is raised, we still want to give the target a message. * Perhaps there was a parity error on this last message byte * or we want to abort this command. Either way, the target * should take us to message out phase and then attempt to * disconnect again. * XXX - Wait for more testing. test SCSISIGI, ATNI jnz mesgin_done; */ test SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT jnz mesgin_proto_violation; or SCB_CONTROL,DISCONNECTED; if ((ahc->flags & AHC_PAGESCBS) != 0) { call add_scb_to_disc_list; } test SCB_CONTROL, TAG_ENB jnz await_busfree; mov ARG_1, SCB_TAG; and SAVED_LUN, LID, SCB_LUN; mov SCB_SCSIID call set_busy_target; jmp await_busfree; /* * Save data pointers message: * Copying RAM values back to SCB, for Save Data Pointers message, but * only if we've actually been into a data phase to change them. This * protects against bogus data in scratch ram and the residual counts * since they are only initialized when we go into data_in or data_out. * Ack the message as soon as possible. For chips without S/G pipelining, * we can only ack the message after SHADDR has been saved. On these * chips, SHADDR increments with every bus transaction, even PIO. */ mesgin_sdptrs: if ((ahc->features & AHC_ULTRA2) != 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ test SEQ_FLAGS, DPHASE jz ITloop; } else { test SEQ_FLAGS, DPHASE jz mesgin_done; } /* * If we are asked to save our position at the end of the * transfer, just mark us at the end rather than perform a * full save. */ test SCB_RESIDUAL_SGPTR[0], SG_LIST_NULL jz mesgin_sdptrs_full; or SCB_SGPTR, SG_LIST_NULL; if ((ahc->features & AHC_ULTRA2) != 0) { jmp ITloop; } else { jmp mesgin_done; } mesgin_sdptrs_full: /* * The SCB_SGPTR becomes the next one we'll download, * and the SCB_DATAPTR becomes the current SHADDR.
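 * In C terms the save amounts to (sketch only; the bmov/bcopy sequences
 * below do the real work):
 *
 *	scb->dataptr = SHADDR;			(current bus address)
 *	scb->datacnt = residual_datacnt;	(bytes left to move)
 *	scb->sgptr   = residual_sgptr;		(next S/G element)
 *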
* Use the residual number since STCNT is corrupted by * any message transfer. */ if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov SCB_DATAPTR, SHADDR, 4; if ((ahc->features & AHC_ULTRA2) == 0) { mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ } bmov SCB_DATACNT, SCB_RESIDUAL_DATACNT, 8; } else { mvi DINDEX, SCB_DATAPTR; mvi SHADDR call bcopy_4; mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ mvi SCB_RESIDUAL_DATACNT call bcopy_8; } jmp ITloop; /* * Restore pointers message? Data pointers are recopied from the * SCB anytime we enter a data phase for the first time, so all * we need to do is clear the DPHASE flag and let the data phase * code do the rest. We also reset/reallocate the FIFO to make * sure we have a clean start for the next data or command phase. */ mesgin_rdptrs: and SEQ_FLAGS, ~DPHASE; /* * We'll reload them * the next time through * the dataphase. */ or SXFRCTL0, CLRSTCNT|CLRCHN; jmp mesgin_done; /* * Index into our Busy Target table. SINDEX and DINDEX are modified * upon return. SCBPTR may be modified by this action. */ set_busy_target: shr DINDEX, 4, SINDEX; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, SAVED_LUN; add DINDEX, SCB_64_BTT; } else { add DINDEX, BUSY_TARGETS; } mov DINDIR, ARG_1 ret; /* * Identify message? For a reconnecting target, this tells us the lun * that the reconnection is for - find the correct SCB and switch to it, * clearing the "disconnected" bit so we don't "find" it by accident later. */ mesgin_identify: /* * Determine whether a target is using tagged or non-tagged * transactions by first looking at the transaction stored in * the busy target array. If there is no untagged transaction * for this target or the transaction is for a different lun, then * this must be a tagged transaction. */ shr SINDEX, 4, SAVED_SCSIID; and SAVED_LUN, MSG_IDENTIFY_LUNMASK, A; if ((ahc->flags & AHC_SCB_BTT) != 0) { add SINDEX, SCB_64_BTT; mov SCBPTR, SAVED_LUN; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -SCB_64_BTT, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(SCB_64_BTT + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } else { add SINDEX, BUSY_TARGETS; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { add NONE, -BUSY_TARGETS, SINDEX; jc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; add NONE, -(BUSY_TARGETS + 16), SINDEX; jnc . + 2; mvi INTSTAT, OUT_OF_RANGE; nop; } } mov ARG_1, SINDIR; cmp ARG_1, SCB_LIST_NULL je snoop_tag; if ((ahc->flags & AHC_PAGESCBS) != 0) { mov ARG_1 call findSCB; } else { mov SCBPTR, ARG_1; } if ((ahc->flags & AHC_SCB_BTT) != 0) { jmp setup_SCB_id_lun_okay; } else { /* * We only allow one untagged command per-target * at a time. So, if the lun doesn't match, look * for a tag message. */ and A, LID, SCB_LUN; cmp SAVED_LUN, A je setup_SCB_id_lun_okay; if ((ahc->flags & AHC_PAGESCBS) != 0) { /* * findSCB removes the SCB from the * disconnected list, so we must replace * it there should this SCB be for another * lun. */ call cleanup_scb; } } /* * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. * If we get one, we use the tag returned to find the proper * SCB. With SCB paging, we must search for non-tagged * transactions since the SCB may exist in any slot. If we're not * using SCB paging, we can use the tag as the direct index to the * SCB. 
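 *
 * A host-side sketch of the same lookup policy (find_scb() here is a
 * hypothetical stand-in for the findSCB routine below):
 *
 *	struct scb *lookup(u_int tag, int paging)
 *	{
 *		if (paging)
 *			return (find_scb(tag));	(search, may page in)
 *		return (&scb_array[tag]);	(tag == slot index)
 *	}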
*/ snoop_tag: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x80; } mov NONE,SCSIDATL; /* ACK Identify MSG */ call phase_lock; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x1; } cmp LASTPHASE, P_MESGIN jne not_found; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x2; } cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found; get_tag: if ((ahc->flags & AHC_PAGESCBS) != 0) { mvi ARG_1 call inb_next; /* tag value */ mov ARG_1 call findSCB; } else { mvi ARG_1 call inb_next; /* tag value */ mov SCBPTR, ARG_1; } /* * Ensure that the SCB the tag points to is for * an SCB transaction to the reconnecting target. */ setup_SCB: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x4; } mov A, SCB_SCSIID; cmp SAVED_SCSIID, A jne not_found_cleanup_scb; if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x8; } setup_SCB_id_okay: and A, LID, SCB_LUN; cmp SAVED_LUN, A jne not_found_cleanup_scb; setup_SCB_id_lun_okay: if ((ahc->flags & AHC_SEQUENCER_DEBUG) != 0) { or SEQ_FLAGS, 0x10; } test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb; and SCB_CONTROL,~DISCONNECTED; test SCB_CONTROL, TAG_ENB jnz setup_SCB_tagged; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov A, SCBPTR; } mvi ARG_1, SCB_LIST_NULL; mov SAVED_SCSIID call set_busy_target; if ((ahc->flags & AHC_SCB_BTT) != 0) { mov SCBPTR, A; } setup_SCB_tagged: clr SEQ_FLAGS; /* make note of IDENTIFY */ call set_transfer_settings; /* See if the host wants to send a message upon reconnection */ test SCB_CONTROL, MK_MESSAGE jz mesgin_done; mvi HOST_MSG call mk_mesg; jmp mesgin_done; not_found_cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { call cleanup_scb; } not_found: mvi NO_MATCH call set_seqint; jmp mesgin_done; mk_mesg: if ((ahc->features & AHC_DT) == 0) { or SCSISIGO, ATNO, LASTPHASE; } else { mvi SCSISIGO, ATNO; } mov MSG_OUT,SINDEX ret; /* * Functions to read data in Automatic PIO mode. * * According to Adaptec's documentation, an ACK is not sent on input from * the target until SCSIDATL is read from. So we wait until SCSIDATL is * latched (the usual way), then read the data byte directly off the bus * using SCSIBUSL. When we have pulled the ATN line, or we just want to * acknowledge the byte, then we do a dummy read from SCSIDATL. The SCSI * spec guarantees that the target will hold the data byte on the bus until * we send our ACK. * * The assumption here is that these are called in a particular sequence, * and that REQ is already set when inb_first is called. inb_{first,next} * use the same calling convention as inb. */ inb_next_wait_perr: mvi PERR_DETECTED call set_seqint; jmp inb_next_wait; inb_next: mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ inb_next_wait: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. */ test SSTAT1, REQINIT jz inb_next_wait; test SSTAT1, SCSIPERR jnz inb_next_wait_perr; inb_next_check_phase: and LASTPHASE, PHASE_MASK, SCSISIGI; cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; inb_first: mov DINDEX,SINDEX; mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/ inb_last: mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/ } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Change to a new phase. If we are changing the state of the I/O signal, * from out to in, wait an additional data release delay before continuing. */ change_phase: /* Wait for preceding I/O session to complete.
*/ test SCSISIGI, ACKI jnz .; /* Change the phase */ and DINDEX, IOI, SCSISIGI; mov SCSISIGO, SINDEX; and A, IOI, SINDEX; /* * If the data direction has changed, from * out (initiator driving) to in (target driving), * we must wait at least a data release delay plus * the normal bus settle delay. [SCSI III SPI 10.11.0] */ cmp DINDEX, A je change_phase_wait; test SINDEX, IOI jz change_phase_wait; call change_phase_wait; change_phase_wait: nop; nop; nop; nop ret; /* * Send a byte to an initiator in Automatic PIO mode. */ target_outb: or SXFRCTL0, SPIOEN; test SSTAT0, SPIORDY jz .; mov SCSIDATL, SINDEX; test SSTAT0, SPIORDY jz .; and SXFRCTL0, ~SPIOEN ret; } /* * Locate a disconnected SCB by SCBID. Upon return, SCBPTR and SINDEX will * be set to the position of the SCB. If the SCB cannot be found locally, * it will be paged in from host memory. RETURN_2 stores the address of the * preceding SCB in the disconnected list which can be used to speed up * removal of the found SCB from the disconnected list. */ if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; findSCB: mov A, SINDEX; /* Tag passed in SINDEX */ cmp DISCONNECTED_SCBH, SCB_LIST_NULL je findSCB_notFound; mov SCBPTR, DISCONNECTED_SCBH; /* Initialize SCBPTR */ mvi ARG_2, SCB_LIST_NULL; /* Head of list */ jmp findSCB_loop; findSCB_next: cmp SCB_NEXT, SCB_LIST_NULL je findSCB_notFound; mov ARG_2, SCBPTR; mov SCBPTR,SCB_NEXT; findSCB_loop: cmp SCB_TAG, A jne findSCB_next; rem_scb_from_disc_list: cmp ARG_2, SCB_LIST_NULL je rHead; mov DINDEX, SCB_NEXT; mov SINDEX, SCBPTR; mov SCBPTR, ARG_2; mov SCB_NEXT, DINDEX; mov SCBPTR, SINDEX ret; rHead: mov DISCONNECTED_SCBH,SCB_NEXT ret; END_CRITICAL; findSCB_notFound: /* * We didn't find it. Page in the SCB. */ mov ARG_1, A; /* Save tag */ mov ALLZEROS call get_free_or_disc_scb; mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; mov ARG_1 jmp dma_scb; } /* * Prepare the hardware to post a byte to host memory given an * index of (A + (256 * SINDEX)) and a base address of SHARED_DATA_ADDR. */ post_byte_setup: mov ARG_2, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi CCHCNT, 1; mvi CCSCBCTL, CCSCBRESET ret; } else { mvi DINDEX, HADDR; mvi SHARED_DATA_ADDR call set_1byte_addr; mvi 1 call set_hcnt; mvi DFCNTRL, FIFORESET ret; } post_byte: if ((ahc->features & AHC_CMD_CHAN) != 0) { bmov CCSCBRAM, SINDEX, 1; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; clr CCSCBCTL ret; } else { mov DFDAT, SINDEX; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; } phase_lock_perr: mvi PERR_DETECTED call set_seqint; phase_lock: /* * If there is a parity error, wait for the kernel to * see the interrupt and prepare our message response * before continuing. 
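 * (Sketch of the kernel side: on SCSIPERR the core driver picks its
 * response message by phase, roughly
 *
 *	msg = (lastphase == P_MESGIN) ? MSG_PARITY_ERROR
 *				      : MSG_INITIATOR_DET_ERR;
 *
 * and only then lets us proceed, so spinning here is safe.)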
*/ test SSTAT1, REQINIT jz phase_lock; test SSTAT1, SCSIPERR jnz phase_lock_perr; phase_lock_latch_phase: if ((ahc->features & AHC_DT) == 0) { and SCSISIGO, PHASE_MASK, SCSISIGI; } and LASTPHASE, PHASE_MASK, SCSISIGI ret; if ((ahc->features & AHC_CMD_CHAN) == 0) { set_hcnt: mov HCNT[0], SINDEX; clear_hcnt: clr HCNT[1]; clr HCNT[2] ret; set_stcnt_from_hcnt: mov STCNT[0], HCNT[0]; mov STCNT[1], HCNT[1]; mov STCNT[2], HCNT[2] ret; bcopy_8: mov DINDIR, SINDIR; bcopy_7: mov DINDIR, SINDIR; mov DINDIR, SINDIR; bcopy_5: mov DINDIR, SINDIR; bcopy_4: mov DINDIR, SINDIR; bcopy_3: mov DINDIR, SINDIR; mov DINDIR, SINDIR; mov DINDIR, SINDIR ret; } if ((ahc->flags & AHC_TARGETROLE) != 0) { /* * Setup addr assuming that A is an index into * an array of 32byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_32byte_addr: shr ARG_2, 3, A; shl A, 5; jmp set_1byte_addr; } /* * Setup addr assuming that A is an index into * an array of 64byte objects, SINDEX contains * the base address of that array, and DINDEX * contains the base address of the location * to store the indexed address. */ set_64byte_addr: shr ARG_2, 2, A; shl A, 6; /* * Setup addr assuming that A + (ARG_2 * 256) is an * index into an array of 1byte objects, SINDEX contains * the base address of that array, and DINDEX contains * the base address of the location to store the computed * address. */ set_1byte_addr: add DINDIR, A, SINDIR; mov A, ARG_2; adc DINDIR, A, SINDIR; clr A; adc DINDIR, A, SINDIR; adc DINDIR, A, SINDIR ret; /* * Either post or fetch an SCB from host memory based on the * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX. */ dma_scb: mov A, SINDEX; if ((ahc->features & AHC_CMD_CHAN) != 0) { mvi DINDEX, CCHADDR; mvi HSCB_ADDR call set_64byte_addr; mov CCSCBPTR, SCBPTR; test DMAPARAMS, DIRECTION jz dma_scb_tohost; if ((ahc->flags & AHC_SCB_BTT) != 0) { mvi CCHCNT, SCB_DOWNLOAD_SIZE_64; } else { mvi CCHCNT, SCB_DOWNLOAD_SIZE; } mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; jmp dma_scb_finish; dma_scb_tohost: mvi CCHCNT, SCB_UPLOAD_SIZE; if ((ahc->features & AHC_ULTRA2) == 0) { mvi CCSCBCTL, CCSCBRESET; bmov CCSCBRAM, SCB_BASE, SCB_UPLOAD_SIZE; or CCSCBCTL, CCSCBEN|CCSCBRESET; test CCSCBCTL, CCSCBDONE jz .; } else if ((ahc->bugs & AHC_SCBCHAN_UPLOAD_BUG) != 0) { mvi CCSCBCTL, CCARREN|CCSCBRESET; cmp CCSCBCTL, ARRDONE|CCARREN jne .; mvi CCHCNT, SCB_UPLOAD_SIZE; mvi CCSCBCTL, CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .; } else { mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; } dma_scb_finish: clr CCSCBCTL; test CCSCBCTL, CCARREN|CCSCBEN jnz .; ret; } else { mvi DINDEX, HADDR; mvi HSCB_ADDR call set_64byte_addr; mvi SCB_DOWNLOAD_SIZE call set_hcnt; mov DFCNTRL, DMAPARAMS; test DMAPARAMS, DIRECTION jnz dma_scb_fromhost; /* Fill it with the SCB data */ copy_scb_tofifo: mvi SINDEX, SCB_BASE; add A, SCB_DOWNLOAD_SIZE, SINDEX; copy_scb_tofifo_loop: call copy_to_fifo_8; cmp SINDEX, A jne copy_scb_tofifo_loop; or DFCNTRL, HDMAEN|FIFOFLUSH; jmp dma_finish; dma_scb_fromhost: mvi DINDEX, SCB_BASE; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0) { /* * The PCI module will only issue a PCI * retry if the data FIFO is empty. If the * host disconnects in the middle of a * transfer, we must empty the fifo of all * available data to force the chip to * continue the transfer. 
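 * In C terms, the drain loop below recovers the byte count from HCNT
 * (which counts down as the host side fetches data):
 *
 *	avail  = SCB_DOWNLOAD_SIZE - hcnt;	(bytes fetched so far)
 *	avail &= ~0x7;				(latch dumps 8 at a time)
 *
 * which is what the "not A, HCNT / add / and A, ~0x7" sequence computes,
 * two's-complement style, with SCB_BASE folded in as the destination.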
This does not * happen for SCSI transfers as the SCSI module * will drain the FIFO as data are made available. * When the hang occurs, we know that a multiple * of 8 bytes is in the FIFO because the PCI * module has an 8 byte input latch that only * dumps to the FIFO when HCNT == 0 or the * latch is full. */ clr A; /* Wait for at least 8 bytes of data to arrive. */ dma_scb_hang_fifo: test DFSTATUS, FIFOQWDEMP jnz dma_scb_hang_fifo; dma_scb_hang_wait: test DFSTATUS, MREQPEND jnz dma_scb_hang_wait; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; /* * The PCI module no longer intends to perform * a PCI transaction. Drain the fifo. */ dma_scb_hang_dma_drain_fifo: not A, HCNT; add A, SCB_DOWNLOAD_SIZE+SCB_BASE+1; and A, ~0x7; mov DINDIR,DFDAT; cmp DINDEX, A jne . - 1; cmp DINDEX, SCB_DOWNLOAD_SIZE+SCB_BASE je dma_finish_nowait; /* Restore A as the lines left to transfer. */ add A, -SCB_BASE, DINDEX; shr A, 3; jmp dma_scb_hang_fifo; dma_scb_hang_dma_done: and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; add SEQADDR0, A; } else { call dma_finish; } call dfdat_in_8; call dfdat_in_8; call dfdat_in_8; dfdat_in_8: mov DINDIR,DFDAT; dfdat_in_7: mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; mov DINDIR,DFDAT; dfdat_in_2: mov DINDIR,DFDAT; mov DINDIR,DFDAT ret; } copy_to_fifo_8: mov DFDAT,SINDIR; mov DFDAT,SINDIR; copy_to_fifo_6: mov DFDAT,SINDIR; copy_to_fifo_5: mov DFDAT,SINDIR; copy_to_fifo_4: mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR; mov DFDAT,SINDIR ret; /* * Wait for DMA from host memory to data FIFO to complete, then disable * DMA and wait for it to acknowledge that it's off. */ dma_finish: test DFSTATUS,HDONE jz dma_finish; dma_finish_nowait: /* Turn off DMA */ and DFCNTRL, ~HDMAEN; test DFCNTRL, HDMAEN jnz .; ret; /* * Restore an SCB that failed to match an incoming reselection * to the correct/safe state. If the SCB is for a disconnected * transaction, it must be returned to the disconnected list. * If it is not in the disconnected state, it must be free. */ cleanup_scb: if ((ahc->flags & AHC_PAGESCBS) != 0) { test SCB_CONTROL,DISCONNECTED jnz add_scb_to_disc_list; } add_scb_to_free_list: if ((ahc->flags & AHC_PAGESCBS) != 0) { BEGIN_CRITICAL; mov SCB_NEXT, FREE_SCBH; mvi SCB_TAG, SCB_LIST_NULL; mov FREE_SCBH, SCBPTR ret; END_CRITICAL; } else { mvi SCB_TAG, SCB_LIST_NULL ret; } if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { set_hhaddr: or DSCOMMAND1, HADDLDSEL0; and HADDR, SG_HIGH_ADDR_BITS, SINDEX; and DSCOMMAND1, ~HADDLDSEL0 ret; } if ((ahc->flags & AHC_PAGESCBS) != 0) { get_free_or_disc_scb: BEGIN_CRITICAL; cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; return_error: mvi NO_FREE_SCB call set_seqint; mvi SINDEX, SCB_LIST_NULL ret; dequeue_disc_scb: mov SCBPTR, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCB_NEXT; END_CRITICAL; mvi DMAPARAMS, FIFORESET; mov SCB_TAG jmp dma_scb; BEGIN_CRITICAL; dequeue_free_scb: mov SCBPTR, FREE_SCBH; mov FREE_SCBH, SCB_NEXT ret; END_CRITICAL; add_scb_to_disc_list: /* * Link this SCB into the DISCONNECTED list. This list holds the * candidates for paging out an SCB if one is needed for a new command. * Modifying the disconnected list is a critical (pause disabled) section.
*/ BEGIN_CRITICAL; mov SCB_NEXT, DISCONNECTED_SCBH; mov DISCONNECTED_SCBH, SCBPTR ret; END_CRITICAL; } set_seqint: mov INTSTAT, SINDEX; nop; return: ret; Index: stable/4/sys/dev/aic7xxx/aic7xxx_93cx6.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx_93cx6.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx_93cx6.c (revision 125849) @@ -1,306 +1,326 @@ /* * Interface for the 93C66/56/46/26/06 serial eeprom parts. * * Copyright (c) 1995, 1996 Daniel M. Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#17 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $ */ /* * The instruction set of the 93C66/56/46/26/06 chips are as follows: * * Start OP * * Function Bit Code Address** Data Description * ------------------------------------------------------------------- * READ 1 10 A5 - A0 Reads data stored in memory, * starting at specified address * EWEN 1 00 11XXXX Write enable must precede * all programming modes * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0 * WRITE 1 01 A5 - A0 D15 - D0 Writes register * ERAL 1 00 10XXXX Erase all registers * WRAL 1 00 01XXXX D15 - D0 Writes to all registers * EWDS 1 00 00XXXX Disables all programming * instructions * *Note: A value of X for address is a don't care condition. * **Note: There are 8 address bits for the 93C56/66 chips unlike * the 93C46/26/06 chips which have 6 address bits. * * The 93C46 has a four wire interface: clock, chip select, data in, and * data out. In order to perform one of the above functions, you need * to enable the chip select for a clock period (typically a minimum of * 1 usec, with the clock high and low a minimum of 750 and 250 nsec * respectively). While the chip select remains high, you can clock in * the instructions (above) starting with the start bit, followed by the * OP code, Address, and Data (if needed). For the READ instruction, the * requested 16-bit register contents is read from the data out line but * is preceded by an initial zero (leading 0, followed by 16-bits, MSB * first). The clock cycling from low to high initiates the next data * bit to be sent from the chip. 
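 *
 * For illustration, the bit stream for a READ of address "addr" (start
 * bit, opcode "10", then the 6 or 8 address bits MSB first) could be
 * assembled like this sketch (not part of the driver, which instead
 * sends the opcode from seeprom_read and clocks the address bits out
 * in ahc_read_seeprom() below):
 *
 *	bits[0] = 1; bits[1] = 1; bits[2] = 0;
 *	for (i = 0; i < addr_bits; i++)
 *		bits[3 + i] = (addr >> (addr_bits - 1 - i)) & 1;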
- * */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #else +#include +__FBSDID("$FreeBSD$"); #include #include #include #endif /* * Right now, we only have to read the SEEPROM. But we make it easier to * add other 93Cx6 functions. */ -static struct seeprom_cmd { +struct seeprom_cmd { uint8_t len; - uint8_t bits[9]; -} seeprom_read = {3, {1, 1, 0}}; + uint8_t bits[11]; +}; +/* Short opcodes for the c46 */ static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; + +/* Long opcodes for the C56/C66 */ +static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; +static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; + +/* Common opcodes */ static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; +static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; /* * Wait for the SEERDY to go high; about 800 ns. */ #define CLOCK_PULSE(sd, rdy) \ while ((SEEPROM_STATUS_INB(sd) & rdy) == 0) { \ ; /* Do nothing */ \ } \ (void)SEEPROM_INB(sd); /* Clear clock */ /* * Send a START condition and the given command */ static void send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd) { uint8_t temp; int i = 0; /* Send chip select for one clock cycle. */ temp = sd->sd_MS ^ sd->sd_CS; SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); for (i = 0; i < cmd->len; i++) { if (cmd->bits[i] != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if (cmd->bits[i] != 0) temp ^= sd->sd_DO; } } /* * Clear CS to put the chip in the reset state, where it can wait for new commands. */ static void reset_seeprom(struct seeprom_descriptor *sd) { uint8_t temp; temp = sd->sd_MS; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); } /* * Read the serial EEPROM, returning 1 if successful and 0 if * not successful. */ int ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, u_int start_addr, u_int count) { int i = 0; u_int k = 0; uint16_t v; uint8_t temp; /* * Read the requested registers of the seeprom. The loop * will range from 0 to count-1. */ for (k = start_addr; k < count + start_addr; k++) { /* * Now we're ready to send the read command followed by the * address of the 16-bit register we want to read. */ send_seeprom_cmd(sd, &seeprom_read); /* Send the 6 or 8 bit address (MSB first, LSB last). */ temp = sd->sd_MS ^ sd->sd_CS; for (i = (sd->sd_chip - 1); i >= 0; i--) { if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; } /* * Now read the 16 bit register. An initial 0 precedes the * register contents which begins with bit 15 (MSB) and ends * with bit 0 (LSB). The initial 0 will be shifted off the * top of our word as we let the loop run from 0 to 16. */ v = 0; for (i = 16; i >= 0; i--) { SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); v <<= 1; if (SEEPROM_DATA_INB(sd) & sd->sd_DI) v |= 1; SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); } buf[k - start_addr] = v; /* Reset the chip select for the next command cycle.
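 * (The part will not accept a new start bit until chip select has been
 * toggled.)  Once every requested word has been read, a caller would
 * typically validate the image; sketch using this file's own helpers:
 *
 *	struct seeprom_config sc;
 *
 *	if (ahc_read_seeprom(sd, (uint16_t *)&sc, 0, sizeof(sc) / 2) == 0
 *	 || ahc_verify_cksum(&sc) == 0)
 *		printf("SEEPROM data invalid\n");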
*/ reset_seeprom(sd); } #ifdef AHC_DUMP_EEPROM printf("\nSerial EEPROM:\n\t"); for (k = 0; k < count; k = k + 1) { if (((k % 8) == 0) && (k != 0)) { printf ("\n\t"); } printf (" 0x%x", buf[k]); } printf ("\n"); #endif return (1); } /* * Write the serial EEPROM and return 1 if successful and 0 if * not successful. */ int ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, u_int start_addr, u_int count) { + struct seeprom_cmd *ewen, *ewds; uint16_t v; uint8_t temp; int i, k; /* Place the chip into write-enable mode */ - send_seeprom_cmd(sd, &seeprom_ewen); + if (sd->sd_chip == C46) { + ewen = &seeprom_ewen; + ewds = &seeprom_ewds; + } else if (sd->sd_chip == C56_66) { + ewen = &seeprom_long_ewen; + ewds = &seeprom_long_ewds; + } else { + printf("ahc_write_seeprom: unsupported seeprom type %d\n", + sd->sd_chip); + return (0); + } + + send_seeprom_cmd(sd, ewen); reset_seeprom(sd); /* Write all requested data out to the seeprom. */ temp = sd->sd_MS ^ sd->sd_CS; for (k = start_addr; k < count + start_addr; k++) { /* Send the write command */ send_seeprom_cmd(sd, &seeprom_write); /* Send the 6 or 8 bit address (MSB first). */ for (i = (sd->sd_chip - 1); i >= 0; i--) { if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((k & (1 << i)) != 0) temp ^= sd->sd_DO; } /* Write the 16 bit value, MSB first */ v = buf[k - start_addr]; for (i = 15; i >= 0; i--) { if ((v & (1 << i)) != 0) temp ^= sd->sd_DO; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); if ((v & (1 << i)) != 0) temp ^= sd->sd_DO; } /* Wait for the chip to complete the write */ temp = sd->sd_MS; SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); temp = sd->sd_MS ^ sd->sd_CS; do { SEEPROM_OUTB(sd, temp); CLOCK_PULSE(sd, sd->sd_RDY); SEEPROM_OUTB(sd, temp ^ sd->sd_CK); CLOCK_PULSE(sd, sd->sd_RDY); } while ((SEEPROM_DATA_INB(sd) & sd->sd_DI) == 0); reset_seeprom(sd); } /* Put the chip back into write-protect mode */ - send_seeprom_cmd(sd, &seeprom_ewds); + send_seeprom_cmd(sd, ewds); reset_seeprom(sd); return (1); } int ahc_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return(1); } } Index: stable/4/sys/dev/aic7xxx/aic7xxx_inline.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx_inline.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx_inline.h (revision 125849) @@ -1,649 +1,649 @@ /* * Inline routines shareable across OS platforms. * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * - * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#47 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_INLINE_H_ #define _AIC7XXX_INLINE_H_ /************************* Sequencer Execution Control ************************/ static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc); static __inline int ahc_is_paused(struct ahc_softc *ahc); static __inline void ahc_pause(struct ahc_softc *ahc); static __inline void ahc_unpause(struct ahc_softc *ahc); /* * Work around any chip bugs related to halting sequencer execution. * On Ultra2 controllers, we must clear the CIOBUS stretch signal by * reading a register that will set this signal and deassert it. * Without this workaround, if the chip is paused, by an interrupt or * manual pause while accessing scb ram, accesses to certain registers * will hang the system (infinite pci retries). */ static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc) { if ((ahc->features & AHC_ULTRA2) != 0) (void)ahc_inb(ahc, CCSCBCTL); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ static __inline int ahc_is_paused(struct ahc_softc *ahc) { return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ static __inline void ahc_pause(struct ahc_softc *ahc) { ahc_outb(ahc, HCNTRL, ahc->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahc_is_paused(ahc) == 0) ; ahc_pause_bug_fix(ahc); } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. 
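 * The usual pattern for host code that must touch sequencer state is
 * therefore (sketch):
 *
 *	ahc_pause(ahc);		(spins until PAUSE is latched)
 *	... inspect or modify SCBPTR, SEQADDR, etc. ...
 *	ahc_unpause(ahc);	(re-checks INTSTAT, see below)
 *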
If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ static __inline void ahc_unpause(struct ahc_softc *ahc) { if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0) ahc_outb(ahc, HCNTRL, ahc->unpause); } /*********************** Untagged Transaction Routines ************************/ static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc); static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc); /* * Block our completion routine from starting the next untagged * transaction for this target or target lun. */ static __inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) ahc->untagged_queue_lock++; } /* * Allow the next untagged transaction for this target or target lun * to be executed. We use a counting semaphore to allow the lock * to be acquired recursively. Once the count drops to zero, the * transaction queues will be run. */ static __inline void ahc_release_untagged_queues(struct ahc_softc *ahc) { if ((ahc->flags & AHC_SCB_BTT) == 0) { ahc->untagged_queue_lock--; if (ahc->untagged_queue_lock == 0) ahc_run_untagged_queues(ahc); } } /************************** Memory mapping routines ***************************/ static __inline struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr); static __inline uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg); static __inline uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index); static __inline void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op); static __inline void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op); static __inline uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index); static __inline struct ahc_dma_seg * ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr) { int sg_index; sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg); /* sg_list_phys points to entry 1, not 0 */ sg_index++; return (&scb->sg_list[sg_index]); } static __inline uint32_t ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg) { int sg_index; /* sg_list_phys points to entry 1, not 0 */ sg_index = sg - &scb->sg_list[1]; return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list))); } static __inline uint32_t ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index) { return (ahc->scb_data->hscb_busaddr + (sizeof(struct hardware_scb) * index)); } static __inline void ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op) { - ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, + aic_dmamap_sync(ahc, ahc->scb_data->hscb_dmat, ahc->scb_data->hscb_dmamap, /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb), /*len*/sizeof(*scb->hscb), op); } static __inline void ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op) { if (scb->sg_count == 0) return; - ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, + aic_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap, /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr) * sizeof(struct ahc_dma_seg), /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op); } static __inline uint32_t ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index) { return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo); } /******************************** Debugging ***********************************/ static __inline char 
*ahc_name(struct ahc_softc *ahc); static __inline char * ahc_name(struct ahc_softc *ahc) { return (ahc->name); } /*********************** Miscellaneous Support Functions ***********************/ static __inline void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb); static __inline struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate); static __inline uint16_t ahc_inw(struct ahc_softc *ahc, u_int port); static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value); static __inline uint32_t ahc_inl(struct ahc_softc *ahc, u_int port); static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value); static __inline uint64_t ahc_inq(struct ahc_softc *ahc, u_int port); static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value); static __inline struct scb* ahc_get_scb(struct ahc_softc *ahc); static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb); static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb); static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb); static __inline struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb); static __inline uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb); /* * Determine whether the sequencer reported a residual * for this SCB/transaction. */ static __inline void ahc_update_residual(struct ahc_softc *ahc, struct scb *scb) { uint32_t sgptr; - sgptr = ahc_le32toh(scb->hscb->sgptr); + sgptr = aic_le32toh(scb->hscb->sgptr); if ((sgptr & SG_RESID_VALID) != 0) ahc_calc_residual(ahc, scb); } /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ static __inline struct ahc_initiator_tinfo * ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id, u_int remote_id, struct ahc_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target.
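 *
 * Sketch of the resulting indexing (channel B initiator IDs occupy the
 * upper half of the enabled_targets array, hence the "+ 8" below):
 *
 *	tstate = ahc->enabled_targets[our_id + (channel == 'B' ? 8 : 0)];
 *	tinfo  = &tstate->transinfo[remote_id];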
*/ if (channel == 'B') our_id += 8; *tstate = ahc->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } static __inline uint16_t ahc_inw(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port)); } static __inline void ahc_outw(struct ahc_softc *ahc, u_int port, u_int value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); } static __inline uint32_t ahc_inl(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24)); } static __inline void ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value) { ahc_outb(ahc, port, (value) & 0xFF); ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF); ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF); ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF); } static __inline uint64_t ahc_inq(struct ahc_softc *ahc, u_int port) { return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) | (ahc_inb(ahc, port+3) << 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) | (((uint64_t)ahc_inb(ahc, port+7)) << 56)); } static __inline void ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value) { ahc_outb(ahc, port, value & 0xFF); ahc_outb(ahc, port+1, (value >> 8) & 0xFF); ahc_outb(ahc, port+2, (value >> 16) & 0xFF); ahc_outb(ahc, port+3, (value >> 24) & 0xFF); ahc_outb(ahc, port+4, (value >> 32) & 0xFF); ahc_outb(ahc, port+5, (value >> 40) & 0xFF); ahc_outb(ahc, port+6, (value >> 48) & 0xFF); ahc_outb(ahc, port+7, (value >> 56) & 0xFF); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ static __inline struct scb * ahc_get_scb(struct ahc_softc *ahc) { struct scb *scb; if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) { ahc_alloc_scbs(ahc); scb = SLIST_FIRST(&ahc->scb_data->free_scbs); if (scb == NULL) return (NULL); } SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle); return (scb); } /* * Return an SCB resource to the free list. */ static __inline void ahc_free_scb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *hscb; hscb = scb->hscb; /* Clean up for the next user */ ahc->scb_data->scbindex[hscb->tag] = NULL; - scb->flags = SCB_FREE; + scb->flags = SCB_FLAG_NONE; hscb->control = 0; SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle); /* Notify the OSM that a resource is now available. */ - ahc_platform_scb_free(ahc, scb); + aic_platform_scb_free(ahc, scb); } static __inline struct scb * ahc_lookup_scb(struct ahc_softc *ahc, u_int tag) { struct scb* scb; scb = ahc->scb_data->scbindex[tag]; if (scb != NULL) ahc_sync_scb(ahc, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static __inline void ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb) { struct hardware_scb *q_hscb; u_int saved_tag; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB to download, and we * can't disappoint it. To achieve this, the next * SCB to download is saved off in ahc->next_queued_scb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. 
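 *
 * In outline, the code below:
 *
 *	1. memcpy()s the incoming HSCB over next_queued_scb->hscb,
 *	   preserving that slot's original tag,
 *	2. swaps the two hscb pointers, so this SCB now owns the slot
 *	   the sequencer was told about, and
 *	3. records scbindex[tag] so lookups by SCB_TAG keep working.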
*/ q_hscb = ahc->next_queued_scb->hscb; saved_tag = q_hscb->tag; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); if ((scb->flags & SCB_CDB32_PTR) != 0) { q_hscb->shared_data.cdb_ptr = - ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + aic_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag) + offsetof(struct hardware_scb, cdb32)); } q_hscb->tag = saved_tag; q_hscb->next = scb->hscb->tag; /* Now swap HSCB pointers. */ ahc->next_queued_scb->hscb = scb->hscb; scb->hscb = q_hscb; /* Now define the mapping from tag to SCB in the scbindex */ ahc->scb_data->scbindex[scb->hscb->tag] = scb; } /* * Tell the sequencer about a new transaction to execute. */ static __inline void ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb) { ahc_swap_with_next_hscb(ahc, scb); if (scb->hscb->tag == SCB_LIST_NULL || scb->hscb->next == SCB_LIST_NULL) panic("Attempt to queue invalid SCB tag %x:%x\n", scb->hscb->tag, scb->hscb->next); /* * Setup data "oddness". */ scb->hscb->lun &= LID; - if (ahc_get_transfer_length(scb) & 0x1) + if (aic_get_transfer_length(scb) & 0x1) scb->hscb->lun |= SCB_XFERLEN_ODD; /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag; /* * Make sure our data is consistent from the * perspective of the adapter. */ ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* Tell the adapter about the newly queued SCB */ if ((ahc->features & AHC_QUEUE_REGS) != 0) { ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); } else { if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_pause(ahc); ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); if ((ahc->features & AHC_AUTOPAUSE) == 0) ahc_unpause(ahc); } } static __inline struct scsi_sense_data * ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (&ahc->scb_data->sense[offset]); } static __inline uint32_t ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb) { int offset; offset = scb - ahc->scb_data->scbarray; return (ahc->scb_data->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } /************************** Interrupt Processing ******************************/ static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op); static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op); static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc); static __inline int ahc_intr(struct ahc_softc *ahc); static __inline void ahc_sync_qoutfifo(struct ahc_softc *ahc, int op) { - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/0, /*len*/256, op); } static __inline void ahc_sync_tqinfifo(struct ahc_softc *ahc, int op) { #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) { - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, 0), sizeof(struct target_cmd) * AHC_TMODE_CMDS, op); } #endif } /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. 
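 * Only the next expected slot needs a DMA sync and a one-byte test, so
 * the check is cheap.  The consumer side works roughly like this sketch
 * (ahc_run_qoutfifo() in the core code):
 *
 *	while (qoutfifo[qoutfifonext] != SCB_LIST_NULL) {
 *		tag = qoutfifo[qoutfifonext];
 *		qoutfifo[qoutfifonext++] = SCB_LIST_NULL;
 *		ahc_done(ahc, ahc_lookup_scb(ahc, tag));
 *	}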
*/ #define AHC_RUN_QOUTFIFO 0x1 #define AHC_RUN_TQINFIFO 0x2 static __inline u_int ahc_check_cmdcmpltqueues(struct ahc_softc *ahc) { u_int retval; retval = 0; - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, /*offset*/ahc->qoutfifonext, /*len*/1, BUS_DMASYNC_POSTREAD); if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) retval |= AHC_RUN_QOUTFIFO; #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) { - ahc_dmamap_sync(ahc, ahc->shared_data_dmat, + aic_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, ahc_targetcmd_offset(ahc, ahc->tqinfifonext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0) retval |= AHC_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ static __inline int ahc_intr(struct ahc_softc *ahc) { u_int intstat; if ((ahc->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0 && (ahc_check_cmdcmpltqueues(ahc) != 0)) intstat = CMDCMPLT; else { intstat = ahc_inb(ahc, INTSTAT); } if ((intstat & INT_PEND) == 0) { -#if AHC_PCI_CONFIG > 0 +#if AIC_PCI_CONFIG > 0 if (ahc->unsolicited_ints > 500) { ahc->unsolicited_ints = 0; if ((ahc->chip & AHC_PCI) != 0 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0) ahc->bus_intr(ahc); } #endif ahc->unsolicited_ints++; return (0); } ahc->unsolicited_ints = 0; if (intstat & CMDCMPLT) { ahc_outb(ahc, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. */ ahc_flush_device_writes(ahc); ahc_run_qoutfifo(ahc); #ifdef AHC_TARGET_MODE if ((ahc->flags & AHC_TARGETROLE) != 0) ahc_run_tqinfifo(ahc, /*paused*/FALSE); #endif } /* * Handle statuses that may invalidate our cached * copy of INTSTAT separately. */ if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) { /* Hot eject. Do nothing */ } else if (intstat & BRKADRINT) { ahc_handle_brkadrint(ahc); } else if ((intstat & (SEQINT|SCSIINT)) != 0) { ahc_pause_bug_fix(ahc); if ((intstat & SEQINT) != 0) ahc_handle_seqint(ahc, intstat); if ((intstat & SCSIINT) != 0) ahc_handle_scsiint(ahc, intstat); } return (1); } #endif /* _AIC7XXX_INLINE_H_ */ Index: stable/4/sys/dev/aic7xxx/aic7xxx_osm.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx_osm.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx_osm.c (revision 125849) @@ -1,1973 +1,1677 @@ /* - * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers + * Bus independent FreeBSD shim for the aic7xxx based Adaptec SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#13 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#20 $ */ +#include +__FBSDID("$FreeBSD$"); + #include #include +#include + #ifndef AHC_TMODE_ENABLE #define AHC_TMODE_ENABLE 0 #endif +#include + #define ccb_scb_ptr spriv_ptr0 devclass_t ahc_devclass; #if UNUSED static void ahc_dump_targcmd(struct target_cmd *cmd); #endif static int ahc_modevent(module_t mod, int type, void *data); static void ahc_action(struct cam_sim *sim, union ccb *ccb); static void ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel, struct ccb_trans_settings *cts); static void ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error); static void ahc_poll(struct cam_sim *sim); static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim, struct ccb_scsiio *csio, struct scb *scb); static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb); static int ahc_create_path(struct ahc_softc *ahc, char channel, u_int target, u_int lun, struct cam_path **path); -static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb); static int ahc_create_path(struct ahc_softc *ahc, char channel, u_int target, u_int lun, struct cam_path **path) { path_id_t path_id; if (channel == 'B') path_id = cam_sim_path(ahc->platform_data->sim_b); else path_id = cam_sim_path(ahc->platform_data->sim); return (xpt_create_path(path, /*periph*/NULL, path_id, target, lun)); } int ahc_map_int(struct ahc_softc *ahc) { int error; /* Hook up our interrupt handler */ error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq, INTR_TYPE_CAM, ahc_platform_intr, ahc, &ahc->platform_data->ih); if (error != 0) device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n", error); return (error); } /* * Attach all the sub-devices we can find */ int ahc_attach(struct ahc_softc *ahc) { char ahc_info[256]; struct ccb_setasync csa; struct cam_devq *devq; int bus_id; int bus_id2; struct cam_sim *sim; struct cam_sim *sim2; struct cam_path *path; struct cam_path *path2; long s; int count; count = 0; sim = NULL; sim2 = NULL; + /* + * Create a thread to perform all recovery. 
+ */ + if (ahc_spawn_recovery_thread(ahc) != 0) + goto fail; + ahc_controller_info(ahc, ahc_info); printf("%s\n", ahc_info); ahc_lock(ahc, &s); + /* * Attach secondary channel first if the user has * declared it the primary channel. */ if ((ahc->features & AHC_TWIN) != 0 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) { bus_id = 1; bus_id2 = 0; } else { bus_id = 0; bus_id2 = 1; } /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(AHC_MAX_QUEUE); if (devq == NULL) goto fail; /* * Construct our first channel SIM entry */ sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, device_get_unit(ahc->dev_softc), 1, AHC_MAX_QUEUE, devq); if (sim == NULL) { cam_simq_free(devq); goto fail; } if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) { cam_sim_free(sim, /*free_devq*/TRUE); sim = NULL; goto fail; } if (xpt_create_path(&path, /*periph*/NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, /*free_devq*/TRUE); sim = NULL; goto fail; } xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = ahc_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); count++; if (ahc->features & AHC_TWIN) { sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, device_get_unit(ahc->dev_softc), 1, AHC_MAX_QUEUE, devq); if (sim2 == NULL) { printf("ahc_attach: Unable to attach second " "bus due to resource shortage"); goto fail; } if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) { printf("ahc_attach: Unable to attach second " "bus due to resource shortage"); /* * We do not want to destroy the device queue * because the first bus is using it. */ cam_sim_free(sim2, /*free_devq*/FALSE); goto fail; } if (xpt_create_path(&path2, /*periph*/NULL, cam_sim_path(sim2), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim2)); cam_sim_free(sim2, /*free_devq*/FALSE); sim2 = NULL; goto fail; } xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = ahc_async; csa.callback_arg = sim2; xpt_action((union ccb *)&csa); count++; } fail: if ((ahc->features & AHC_TWIN) != 0 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) { ahc->platform_data->sim_b = sim; ahc->platform_data->path_b = path; ahc->platform_data->sim = sim2; ahc->platform_data->path = path2; } else { ahc->platform_data->sim = sim; ahc->platform_data->path = path; ahc->platform_data->sim_b = sim2; ahc->platform_data->path_b = path2; } if (count != 0) { /* We have to wait until after any system dumps... */ ahc->platform_data->eh = EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, ahc, SHUTDOWN_PRI_DEFAULT); ahc_intr_enable(ahc, TRUE); } ahc_unlock(ahc, &s); return (count); } /* * Catch an interrupt from the adapter */ void ahc_platform_intr(void *arg) { struct ahc_softc *ahc; ahc = (struct ahc_softc *)arg; ahc_intr(ahc); } /* * We have an scb which has been processed by the * adaptor, now we look to see how the operation * went. 
*/ void ahc_done(struct ahc_softc *ahc, struct scb *scb) { union ccb *ccb; CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_done - scb %d\n", scb->hscb->tag)); ccb = scb->io_ctx; LIST_REMOVE(scb, pending_links); + if ((scb->flags & SCB_TIMEDOUT) != 0) + LIST_REMOVE(scb, timedout_links); if ((scb->flags & SCB_UNTAGGEDQ) != 0) { struct scb_tailq *untagged_q; int target_offset; target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); untagged_q = &ahc->untagged_queues[target_offset]; TAILQ_REMOVE(untagged_q, scb, links.tqe); scb->flags &= ~SCB_UNTAGGEDQ; ahc_run_untagged_queue(ahc, untagged_q); } - untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); + untimeout(ahc_platform_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); } if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct cam_path *ccb_path; /* * If we have finally disconnected, clean up our * pending device state. * XXX - There may be error states that cause us * to remain connected. */ ccb_path = ccb->ccb_h.path; if (ahc->pending_device != NULL && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { ahc->pending_device = NULL; } else { if (bootverbose) { xpt_print_path(ccb->ccb_h.path); printf("Still connected\n"); } - ahc_freeze_ccb(ccb); + aic_freeze_ccb(ccb); } } - if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) + if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) ccb->ccb_h.status |= CAM_REQ_CMP; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ahc_free_scb(ahc, scb); xpt_done(ccb); return; } /* * If the recovery SCB completes, we have to be * out of our timeout. */ if ((scb->flags & SCB_RECOVERY_SCB) != 0) { struct scb *list_scb; /* * We were able to complete the command successfully, - * so reinstate the timeouts for all other pending + * so renew the timeouts for all other pending * commands. */ LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) { - union ccb *ccb; - uint64_t time; - ccb = list_scb->io_ctx; - if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) - continue; - - time = ccb->ccb_h.timeout; - time *= hz; - time /= 1000; - ccb->ccb_h.timeout_ch = - timeout(ahc_timeout, list_scb, time); + aic_scb_timer_reset(scb, aic_get_timeout(scb)); } - if (ahc_get_transaction_status(scb) == CAM_BDR_SENT - || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) - ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); + if (aic_get_transaction_status(scb) == CAM_BDR_SENT + || aic_get_transaction_status(scb) == CAM_REQ_ABORTED) + aic_set_transaction_status(scb, CAM_CMD_TIMEOUT); ahc_print_path(ahc, scb); printf("no longer in timeout, status = %x\n", ccb->ccb_h.status); } /* Don't clobber any existing error state */ - if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { + if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) { ccb->ccb_h.status |= CAM_REQ_CMP; } else if ((scb->flags & SCB_SENSE) != 0) { /* * We performed autosense retrieval. * * Zero any sense not transferred by the * device. The SCSI spec mandates that any * untransferred data should be assumed to be * zero. Complete the 'bounce' of sense information * through buffers accessible via bus-space by * copying it into the client's csio.
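 * Worked example: with an 18 byte sense buffer and sense_resid == 8,
 * the copy length below is
 *
 *	(sg_list->len & AHC_SG_LEN_MASK) - sense_resid == 18 - 8 == 10
 *
 * and the 8 bytes that never arrived stay zero from the memset().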
*/ memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); memcpy(&ccb->csio.sense_data, ahc_get_sense_buf(ahc, scb), - (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK) + (aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK) - ccb->csio.sense_resid); scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ahc_free_scb(ahc, scb); xpt_done(ccb); } static void ahc_action(struct cam_sim *sim, union ccb *ccb) { struct ahc_softc *ahc; struct ahc_tmode_lstate *lstate; u_int target_id; u_int our_id; long s; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); ahc = (struct ahc_softc *)cam_sim_softc(sim); target_id = ccb->ccb_h.target_id; our_id = SIM_SCSI_ID(ahc, sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ { struct ahc_tmode_tstate *tstate; cam_status status; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { /* Response from the black hole device */ tstate = NULL; lstate = ahc->black_hole; } else { ccb->ccb_h.status = status; xpt_done(ccb); break; } } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { ahc_lock(ahc, &s); SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) ahc_run_tqinfifo(ahc, /*paused*/FALSE); ahc_unlock(ahc, &s); break; } /* * The target_id represents the target we attempt to * select. In target mode, this is the initiator of * the original command. */ our_id = target_id; target_id = ccb->csio.init_id; /* FALLTHROUGH */ } case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct scb *scb; struct hardware_scb *hscb; if ((ahc->flags & AHC_INITIATORROLE) == 0 && (ccb->ccb_h.func_code == XPT_SCSI_IO || ccb->ccb_h.func_code == XPT_RESET_DEV)) { ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); return; } /* * get an scb to use. 
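If allocation fails, the code below freezes the SIM queue and completes the CCB with CAM_REQUEUE_REQ so CAM retries it later. The release half of this idiom lives in ahc_platform_scb_free() (visible among the inlines removed from aic7xxx_osm.h later in this diff): the next SCB freed while AHC_RESOURCE_SHORTAGE is set carries CAM_RELEASE_SIMQ back to CAM, thawing the queue:

	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
	 && scb->io_ctx != NULL
	 && (scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
	}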
*/ ahc_lock(ahc, &s); if ((scb = ahc_get_scb(ahc)) == NULL) { xpt_freeze_simq(sim, /*count*/1); ahc->flags |= AHC_RESOURCE_SHORTAGE; ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } ahc_unlock(ahc, &s); hscb = scb->hscb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, ("start scb(%p)\n", scb)); scb->io_ctx = ccb; /* * So we can find the SCB when an abort is requested */ ccb->ccb_h.ccb_scb_ptr = scb; /* * Put all the arguments for the xfer in the scb */ hscb->control = 0; hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id); hscb->lun = ccb->ccb_h.target_lun; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { hscb->cdb_len = 0; scb->flags |= SCB_DEVICE_RESET; hscb->control |= MK_MESSAGE; ahc_execute_scb(scb, NULL, 0, 0); } else { if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct target_data *tdata; tdata = &hscb->shared_data.tdata; if (ahc->pending_device == lstate) scb->flags |= SCB_TARGET_IMMEDIATE; hscb->control |= TARGET_SCB; scb->flags |= SCB_TARGET_SCB; tdata->target_phases = 0; if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { tdata->target_phases |= SPHASE_PENDING; tdata->scsi_status = ccb->csio.scsi_status; } if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) tdata->target_phases |= NO_DISCONNECT; tdata->initiator_tag = ccb->csio.tag_id; } if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) hscb->control |= ccb->csio.tag_action; ahc_setup_data(ahc, sim, &ccb->csio, scb); } break; } case XPT_NOTIFY_ACK: case XPT_IMMED_NOTIFY: { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; cam_status status; status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; xpt_done(ccb); break; } SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, sim_links.sle); ccb->ccb_h.status = CAM_REQ_INPROG; ahc_send_lstate_events(ahc, lstate); break; } case XPT_EN_LUN: /* Enable LUN as a target */ ahc_handle_en_lun(ahc, sim, ccb); xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { ahc_abort_ccb(ahc, sim, ccb); break; } case XPT_SET_TRAN_SETTINGS: { #ifdef AHC_NEW_TRAN_SETTINGS struct ahc_devinfo devinfo; struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; uint16_t *discenable; uint16_t *tagenable; u_int update_type; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), cts->ccb_h.target_id, cts->ccb_h.target_lun, SIM_CHANNEL(ahc, sim), ROLE_UNKNOWN); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { update_type |= AHC_TRANS_GOAL; discenable = &tstate->discenable; tagenable = &tstate->tagenable; tinfo->curr.protocol_version = cts->protocol_version; tinfo->curr.transport_version = cts->transport_version; tinfo->goal.protocol_version = cts->protocol_version; tinfo->goal.transport_version = cts->transport_version; } else if (cts->type == CTS_TYPE_USER_SETTINGS) { update_type |= AHC_TRANS_USER; discenable = &ahc->user_discenable; tagenable = &ahc->user_tagenable; tinfo->user.protocol_version = cts->protocol_version; tinfo->user.transport_version = cts->transport_version; } else { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } ahc_lock(ahc, &s); if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *discenable |= 
devinfo.target_mask; else *discenable &= ~devinfo.target_mask; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *tagenable |= devinfo.target_mask; else *tagenable &= ~devinfo.target_mask; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { ahc_validate_width(ahc, /*tinfo limit*/NULL, &spi->bus_width, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, spi->bus_width, update_type, /*paused*/FALSE); } if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) { if (update_type == AHC_TRANS_USER) spi->ppr_options = tinfo->user.ppr_options; else spi->ppr_options = tinfo->goal.ppr_options; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { if (update_type == AHC_TRANS_USER) spi->sync_offset = tinfo->user.offset; else spi->sync_offset = tinfo->goal.offset; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { if (update_type == AHC_TRANS_USER) spi->sync_period = tinfo->user.period; else spi->sync_period = tinfo->goal.period; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { struct ahc_syncrate *syncrate; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) maxsync = AHC_SYNCRATE_DT; else if ((ahc->features & AHC_ULTRA) != 0) maxsync = AHC_SYNCRATE_ULTRA; else maxsync = AHC_SYNCRATE_FAST; if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT) spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; syncrate = ahc_find_syncrate(ahc, &spi->sync_period, &spi->ppr_options, maxsync); ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate, &spi->sync_offset, spi->bus_width, ROLE_UNKNOWN); /* We use a period of 0 to represent async */ if (spi->sync_offset == 0) { spi->sync_period = 0; spi->ppr_options = 0; } ahc_set_syncrate(ahc, &devinfo, syncrate, spi->sync_period, spi->sync_offset, spi->ppr_options, update_type, /*paused*/FALSE); } ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); #else struct ahc_devinfo devinfo; struct ccb_trans_settings *cts; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; uint16_t *discenable; uint16_t *tagenable; u_int update_type; long s; cts = &ccb->cts; ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), cts->ccb_h.target_id, cts->ccb_h.target_lun, SIM_CHANNEL(ahc, sim), ROLE_UNKNOWN); tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); update_type = 0; if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { update_type |= AHC_TRANS_GOAL; discenable = &tstate->discenable; tagenable = &tstate->tagenable; } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { update_type |= AHC_TRANS_USER; discenable = &ahc->user_discenable; tagenable = &ahc->user_tagenable; } else { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } ahc_lock(ahc, &s); if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) *discenable |= devinfo.target_mask; else *discenable &= ~devinfo.target_mask; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) *tagenable |= devinfo.target_mask; else *tagenable &= ~devinfo.target_mask; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { ahc_validate_width(ahc, /*tinfo limit*/NULL, &cts->bus_width, ROLE_UNKNOWN); ahc_set_width(ahc, &devinfo, cts->bus_width, update_type, /*paused*/FALSE); } if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { if (update_type == AHC_TRANS_USER) cts->sync_offset = tinfo->user.offset; else cts->sync_offset = tinfo->goal.offset; } if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { if (update_type == 
AHC_TRANS_USER) cts->sync_period = tinfo->user.period; else cts->sync_period = tinfo->goal.period; } if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { struct ahc_syncrate *syncrate; u_int ppr_options; u_int maxsync; if ((ahc->features & AHC_ULTRA2) != 0) maxsync = AHC_SYNCRATE_DT; else if ((ahc->features & AHC_ULTRA) != 0) maxsync = AHC_SYNCRATE_ULTRA; else maxsync = AHC_SYNCRATE_FAST; ppr_options = 0; if (cts->sync_period <= 9 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ppr_options = MSG_EXT_PPR_DT_REQ; syncrate = ahc_find_syncrate(ahc, &cts->sync_period, &ppr_options, maxsync); ahc_validate_offset(ahc, /*tinfo limit*/NULL, syncrate, &cts->sync_offset, MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN); /* We use a period of 0 to represent async */ if (cts->sync_offset == 0) { cts->sync_period = 0; ppr_options = 0; } if (ppr_options == MSG_EXT_PPR_DT_REQ && tinfo->user.transport_version >= 3) { tinfo->goal.transport_version = tinfo->user.transport_version; tinfo->curr.transport_version = tinfo->user.transport_version; } ahc_set_syncrate(ahc, &devinfo, syncrate, cts->sync_period, cts->sync_offset, ppr_options, update_type, /*paused*/FALSE); } ahc_unlock(ahc, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); #endif break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { ahc_lock(ahc, &s); ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim), SIM_CHANNEL(ahc, sim), &ccb->cts); ahc_unlock(ahc, &s); xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { - struct ccb_calc_geometry *ccg; - uint32_t size_mb; - uint32_t secs_per_cylinder; - int extended; + int extended; - ccg = &ccb->ccg; - size_mb = ccg->volume_size - / ((1024L * 1024L) / ccg->block_size); extended = SIM_IS_SCSIBUS_B(ahc, sim) - ? ahc->flags & AHC_EXTENDED_TRANS_B - : ahc->flags & AHC_EXTENDED_TRANS_A; - - if (size_mb > 1024 && extended) { - ccg->heads = 255; - ccg->secs_per_track = 63; - } else { - ccg->heads = 64; - ccg->secs_per_track = 32; - } - secs_per_cylinder = ccg->heads * ccg->secs_per_track; - ccg->cylinders = ccg->volume_size / secs_per_cylinder; - ccb->ccb_h.status = CAM_REQ_CMP; + ? ahc->flags & AHC_EXTENDED_TRANS_B + : ahc->flags & AHC_EXTENDED_TRANS_A; + aic_calc_geometry(&ccb->ccg, extended); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int found; ahc_lock(ahc, &s); found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), /*initiate reset*/TRUE); ahc_unlock(ahc, &s); if (bootverbose) { xpt_print_path(SIM_PATH(ahc, sim)); printf("SCSI bus reset delivered. " "%d SCBs aborted.\n", found); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((ahc->features & AHC_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; if ((ahc->features & AHC_TARGETMODE) != 0) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (ahc->features & AHC_WIDE) ? 
15 : 7; cpi->max_lun = AHC_NUM_LUNS - 1; if (SIM_IS_SCSIBUS_B(ahc, sim)) { cpi->initiator_id = ahc->our_id_b; if ((ahc->flags & AHC_RESET_BUS_B) == 0) cpi->hba_misc |= PIM_NOBUSRESET; } else { cpi->initiator_id = ahc->our_id; if ((ahc->flags & AHC_RESET_BUS_A) == 0) cpi->hba_misc |= PIM_NOBUSRESET; } cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); #ifdef AHC_NEW_TRAN_SETTINGS cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; if ((ahc->features & AHC_DT) != 0) { cpi->transport_version = 3; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; } #endif cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } } static void ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel, struct ccb_trans_settings *cts) { #ifdef AHC_NEW_TRAN_SETTINGS struct ahc_devinfo devinfo; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; ahc_compile_devinfo(&devinfo, our_id, cts->ccb_h.target_id, cts->ccb_h.target_lun, channel, ROLE_UNKNOWN); targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if (cts->type == CTS_TYPE_CURRENT_SETTINGS) tinfo = &targ_info->curr; else tinfo = &targ_info->user; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (cts->type == CTS_TYPE_USER_SETTINGS) { if ((ahc->user_discenable & devinfo.target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((ahc->user_tagenable & devinfo.target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { if ((tstate->discenable & devinfo.target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((tstate->tagenable & devinfo.target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } cts->protocol_version = tinfo->protocol_version; cts->transport_version = tinfo->transport_version; spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->ppr_options = tinfo->ppr_options; cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else { scsi->valid = 0; } cts->ccb_h.status = CAM_REQ_CMP; #else struct ahc_devinfo devinfo; struct ahc_initiator_tinfo *targ_info; struct ahc_tmode_tstate *tstate; struct ahc_transinfo *tinfo; ahc_compile_devinfo(&devinfo, our_id, cts->ccb_h.target_id, cts->ccb_h.target_lun, channel, ROLE_UNKNOWN); targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) tinfo = &targ_info->curr; else tinfo = &targ_info->user; cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { if ((ahc->user_discenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((ahc->user_tagenable & devinfo.target_mask) != 0) cts->flags |= 
CCB_TRANS_TAG_ENB; } else { if ((tstate->discenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((tstate->tagenable & devinfo.target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; } cts->sync_period = tinfo->period; cts->sync_offset = tinfo->offset; cts->bus_width = tinfo->width; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; cts->ccb_h.status = CAM_REQ_CMP; #endif } static void ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ahc_softc *ahc; struct cam_sim *sim; sim = (struct cam_sim *)callback_arg; ahc = (struct ahc_softc *)cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: { struct ahc_devinfo devinfo; long s; ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), xpt_path_target_id(path), xpt_path_lun_id(path), SIM_CHANNEL(ahc, sim), ROLE_UNKNOWN); /* * Revert to async/narrow transfers * for the next device. */ ahc_lock(ahc, &s); ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE); ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, /*period*/0, /*offset*/0, /*ppr_options*/0, AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE); ahc_unlock(ahc, &s); break; } default: break; } } static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error) { struct scb *scb; union ccb *ccb; struct ahc_softc *ahc; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; u_int mask; long s; scb = (struct scb *)arg; ccb = scb->io_ctx; ahc = scb->ahc_softc; if (error != 0) { if (error == EFBIG) - ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG); + aic_set_transaction_status(scb, CAM_REQ_TOO_BIG); else - ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR); + aic_set_transaction_status(scb, CAM_REQ_CMP_ERR); if (nsegments != 0) bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); ahc_lock(ahc, &s); ahc_free_scb(ahc, scb); ahc_unlock(ahc, &s); xpt_done(ccb); return; } if (nsegments != 0) { struct ahc_dma_seg *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nsegments; /* Copy the segments into our SG list */ sg = scb->sg_list; while (dm_segs < end_seg) { uint32_t len; - sg->addr = ahc_htole32(dm_segs->ds_addr); + sg->addr = aic_htole32(dm_segs->ds_addr); len = dm_segs->ds_len | ((dm_segs->ds_addr >> 8) & 0x7F000000); - sg->len = ahc_htole32(len); + sg->len = aic_htole32(len); sg++; dm_segs++; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs. */ - scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID); + scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { struct target_data *tdata; tdata = &scb->hscb->shared_data.tdata; tdata->target_phases |= DPHASE_PENDING; + /* + * CAM data direction is relative to the initiator. + */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) tdata->data_phase = P_DATAOUT; else tdata->data_phase = P_DATAIN; /* * If the transfer is of an odd length and in the * "in" direction (scsi->HostBus), then it may * trigger a bug in the 'WideODD' feature of * non-Ultra2 chips. 
Force the total data-length * to be even by adding an extra, 1-byte SG * element. We do this even if we are not currently * negotiated wide as negotiation could occur before * this command is executed. */ if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0 && (ccb->csio.dxfer_len & 0x1) != 0 - && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { nsegments++; if (nsegments > AHC_NSEG) { - ahc_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_REQ_TOO_BIG); bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); ahc_lock(ahc, &s); ahc_free_scb(ahc, scb); ahc_unlock(ahc, &s); xpt_done(ccb); return; } - sg->addr = ahc_htole32(ahc->dma_bug_buf); - sg->len = ahc_htole32(1); + sg->addr = aic_htole32(ahc->dma_bug_buf); + sg->len = aic_htole32(1); sg++; } } sg--; - sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); + sg->len |= aic_htole32(AHC_DMA_LAST_SEG); /* Copy the first SG into the "current" data pointer area */ scb->hscb->dataptr = scb->sg_list->addr; scb->hscb->datacnt = scb->sg_list->len; } else { - scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); + scb->hscb->sgptr = aic_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } scb->sg_count = nsegments; ahc_lock(ahc, &s); /* * This is the last time we check whether this SCB needs to * be aborted. */ - if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) { + if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) { if (nsegments != 0) bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); ahc_free_scb(ahc, scb); ahc_unlock(ahc, &s); xpt_done(ccb); return; } tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid), SCSIID_OUR_ID(scb->hscb->scsiid), SCSIID_TARGET(ahc, scb->hscb->scsiid), &tstate); mask = SCB_GET_TARGET_MASK(ahc, scb); scb->hscb->scsirate = tinfo->scsirate; scb->hscb->scsioffset = tinfo->curr.offset; if ((tstate->ultraenb & mask) != 0) scb->hscb->control |= ULTRAENB; if ((tstate->discenable & mask) != 0 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0) scb->hscb->control |= DISCENB; if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0 && (tinfo->goal.width != 0 || tinfo->goal.offset != 0 || tinfo->goal.ppr_options != 0)) { scb->flags |= SCB_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } else if ((tstate->auto_negotiate & mask) != 0) { scb->flags |= SCB_AUTO_NEGOTIATE; scb->hscb->control |= MK_MESSAGE; } LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { uint64_t time; if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) ccb->ccb_h.timeout = 5 * 1000; time = ccb->ccb_h.timeout; time *= hz; time /= 1000; ccb->ccb_h.timeout_ch = - timeout(ahc_timeout, (caddr_t)scb, time); + timeout(ahc_platform_timeout, (caddr_t)scb, time); } /* * We only allow one untagged transaction * per target in the initiator role unless * we are storing a full busy target *lun* * table in SCB space. */ if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0 && (ahc->flags & AHC_SCB_BTT) == 0) { struct scb_tailq *untagged_q; int target_offset; target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); untagged_q = &(ahc->untagged_queues[target_offset]); TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); scb->flags |= SCB_UNTAGGEDQ; if (TAILQ_FIRST(untagged_q) != scb) { ahc_unlock(ahc, &s); return; } } scb->flags |= SCB_ACTIVE; if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) { /* Define a mapping from our tag to the SCB. 
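(Editor's aside on the timeout scheduling above: CAM expresses ccb_h.timeout in milliseconds while timeout(9) takes ticks, hence the time * hz / 1000 conversion through a 64-bit intermediate to avoid overflow. The same arithmetic as a hypothetical helper, a sketch only:

	static __inline int
	cam_timeout_to_ticks(uint32_t timeout_ms)
	{
		return ((int)(((uint64_t)timeout_ms * hz) / 1000));
	}

With the 5000 ms default applied above and the traditional hz = 100, this comes to 500 ticks.)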
*/ ahc->scb_data->scbindex[scb->hscb->tag] = scb; ahc_pause(ahc); if ((ahc->flags & AHC_PAGESCBS) == 0) ahc_outb(ahc, SCBPTR, scb->hscb->tag); ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag); ahc_unpause(ahc); } else { ahc_queue_scb(ahc, scb); } ahc_unlock(ahc, &s); } static void ahc_poll(struct cam_sim *sim) { struct ahc_softc *ahc; ahc = (struct ahc_softc *)cam_sim_softc(sim); ahc_intr(ahc); } static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim, struct ccb_scsiio *csio, struct scb *scb) { struct hardware_scb *hscb; struct ccb_hdr *ccb_h; hscb = scb->hscb; ccb_h = &csio->ccb_h; csio->resid = 0; csio->sense_resid = 0; if (ccb_h->func_code == XPT_SCSI_IO) { hscb->cdb_len = csio->cdb_len; if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { if (hscb->cdb_len > sizeof(hscb->cdb32) || (ccb_h->flags & CAM_CDB_PHYS) != 0) { u_long s; - ahc_set_transaction_status(scb, + aic_set_transaction_status(scb, CAM_REQ_INVALID); ahc_lock(ahc, &s); ahc_free_scb(ahc, scb); ahc_unlock(ahc, &s); xpt_done((union ccb *)csio); return; } if (hscb->cdb_len > 12) { memcpy(hscb->cdb32, csio->cdb_io.cdb_ptr, hscb->cdb_len); scb->flags |= SCB_CDB32_PTR; } else { memcpy(hscb->shared_data.cdb, csio->cdb_io.cdb_ptr, hscb->cdb_len); } } else { if (hscb->cdb_len > 12) { memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes, hscb->cdb_len); scb->flags |= SCB_CDB32_PTR; } else { memcpy(hscb->shared_data.cdb, csio->cdb_io.cdb_bytes, hscb->cdb_len); } } } /* Only use S/G if there is a transfer */ if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { /* We've been given a pointer to a single buffer */ if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load(ahc->buffer_dmat, scb->dmamap, csio->data_ptr, csio->dxfer_len, ahc_execute_scb, scb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ xpt_freeze_simq(sim, /*count*/1); scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE) panic("ahc_setup_data - Transfer size " "larger than device max"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; ahc_execute_scb(scb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccb_h->flags & CAM_DATA_PHYS) != 0) panic("ahc_setup_data - Physical segment " "pointers unsupported"); if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) panic("ahc_setup_data - Virtual segment " "addresses unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *)csio->data_ptr; ahc_execute_scb(scb, segs, csio->sglist_cnt, 0); } } else { ahc_execute_scb(scb, NULL, 0, 0); } } static void -ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) { - - if ((scb->flags & SCB_RECOVERY_SCB) == 0) { - struct scb *list_scb; - - scb->flags |= SCB_RECOVERY_SCB; - - /* - * Take all queued, but not sent SCBs out of the equation. - * Also ensure that no new CCBs are queued to us while we - * try to fix this problem. - */ - if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { - xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1); - scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; - } - - /* - * Go through all of our pending SCBs and remove - * any scheduled timeouts for them. We will reschedule - * them after we've successfully fixed this problem. 
- */ - LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) { - union ccb *ccb; - - ccb = list_scb->io_ctx; - untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch); - } - } -} - -void -ahc_timeout(void *arg) -{ - struct scb *scb; - struct ahc_softc *ahc; - long s; - int found; - u_int last_phase; - int target; - int lun; - int i; - char channel; - - scb = (struct scb *)arg; - ahc = (struct ahc_softc *)scb->ahc_softc; - - ahc_lock(ahc, &s); - - ahc_pause_and_flushwork(ahc); - - if ((scb->flags & SCB_ACTIVE) == 0) { - /* Previous timeout took care of me already */ - printf("%s: Timedout SCB already complete. " - "Interrupts may not be functioning.\n", ahc_name(ahc)); - ahc_unpause(ahc); - ahc_unlock(ahc, &s); - return; - } - - target = SCB_GET_TARGET(ahc, scb); - channel = SCB_GET_CHANNEL(ahc, scb); - lun = SCB_GET_LUN(scb); - - ahc_print_path(ahc, scb); - printf("SCB 0x%x - timed out\n", scb->hscb->tag); - ahc_dump_card_state(ahc); - last_phase = ahc_inb(ahc, LASTPHASE); - if (scb->sg_count > 0) { - for (i = 0; i < scb->sg_count; i++) { - printf("sg[%d] - Addr 0x%x : Length %d\n", - i, - scb->sg_list[i].addr, - scb->sg_list[i].len & AHC_SG_LEN_MASK); - } - } - if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) { - /* - * Been down this road before. - * Do a full bus reset. - */ -bus_reset: - ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); - found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE); - printf("%s: Issued Channel %c Bus Reset. " - "%d SCBs aborted\n", ahc_name(ahc), channel, found); - } else { - /* - * If we are a target, transition to bus free and report - * the timeout. - * - * The target/initiator that is holding up the bus may not - * be the same as the one that triggered this timeout - * (different commands have different timeout lengths). - * If the bus is idle and we are acting as the initiator - * for this request, queue a BDR message to the timed out - * target. Otherwise, if the timed out transaction is - * active: - * Initiator transaction: - * Stuff the message buffer with a BDR message and assert - * ATN in the hopes that the target will let go of the bus - * and go to the mesgout phase. If this fails, we'll - * get another timeout 2 seconds later which will attempt - * a bus reset. - * - * Target transaction: - * Transition to BUS FREE and report the error. - * It's good to be the target! - */ - u_int active_scb_index; - u_int saved_scbptr; - - saved_scbptr = ahc_inb(ahc, SCBPTR); - active_scb_index = ahc_inb(ahc, SCB_TAG); - - if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0 - && (active_scb_index < ahc->scb_data->numscbs)) { - struct scb *active_scb; - - /* - * If the active SCB is not us, assume that - * the active SCB has a longer timeout than - * the timedout SCB, and wait for the active - * SCB to timeout. - */ - active_scb = ahc_lookup_scb(ahc, active_scb_index); - if (active_scb != scb) { - struct ccb_hdr *ccbh; - uint64_t newtimeout; - - ahc_print_path(ahc, scb); - printf("Other SCB Timeout%s", - (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 - ? " again\n" : "\n"); - scb->flags |= SCB_OTHERTCL_TIMEOUT; - newtimeout = - MAX(active_scb->io_ctx->ccb_h.timeout, - scb->io_ctx->ccb_h.timeout); - newtimeout *= hz; - newtimeout /= 1000; - ccbh = &scb->io_ctx->ccb_h; - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahc_timeout, scb, newtimeout); - ahc_unpause(ahc); - ahc_unlock(ahc, &s); - return; - } - - /* It's us */ - if ((scb->flags & SCB_TARGET_SCB) != 0) { - - /* - * Send back any queued up transactions - * and properly record the error condition. 
- */ - ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), - SCB_GET_CHANNEL(ahc, scb), - SCB_GET_LUN(scb), - scb->hscb->tag, - ROLE_TARGET, - CAM_CMD_TIMEOUT); - - /* Will clear us from the bus */ - ahc_restart(ahc); - ahc_unlock(ahc, &s); - return; - } - - ahc_set_recoveryscb(ahc, active_scb); - ahc_outb(ahc, MSG_OUT, HOST_MSG); - ahc_outb(ahc, SCSISIGO, last_phase|ATNO); - ahc_print_path(ahc, active_scb); - printf("BDR message in message buffer\n"); - active_scb->flags |= SCB_DEVICE_RESET; - active_scb->io_ctx->ccb_h.timeout_ch = - timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz); - ahc_unpause(ahc); - } else { - int disconnected; - - /* XXX Shouldn't panic. Just punt instead? */ - if ((scb->flags & SCB_TARGET_SCB) != 0) - panic("Timed-out target SCB but bus idle"); - - if (last_phase != P_BUSFREE - && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { - /* XXX What happened to the SCB? */ - /* Hung target selection. Goto busfree */ - printf("%s: Hung target selection\n", - ahc_name(ahc)); - ahc_restart(ahc); - ahc_unlock(ahc, &s); - return; - } - - if (ahc_search_qinfifo(ahc, target, channel, lun, - scb->hscb->tag, ROLE_INITIATOR, - /*status*/0, SEARCH_COUNT) > 0) { - disconnected = FALSE; - } else { - disconnected = TRUE; - } - - if (disconnected) { - - ahc_set_recoveryscb(ahc, scb); - /* - * Actually re-queue this SCB in an attempt - * to select the device before it reconnects. - * In either case (selection or reselection), - * we will now issue a target reset to the - * timed-out device. - * - * Set the MK_MESSAGE control bit indicating - * that we desire to send a message. We - * also set the disconnected flag since - * in the paging case there is no guarantee - * that our SCB control byte matches the - * version on the card. We don't want the - * sequencer to abort the command thinking - * an unsolicited reselection occurred. - */ - scb->hscb->control |= MK_MESSAGE|DISCONNECTED; - scb->flags |= SCB_DEVICE_RESET; - - /* - * Remove any cached copy of this SCB in the - * disconnected list in preparation for the - * queuing of our abort SCB. We use the - * same element in the SCB, SCB_NEXT, for - * both the qinfifo and the disconnected list. - */ - ahc_search_disc_list(ahc, target, channel, - lun, scb->hscb->tag, - /*stop_on_first*/TRUE, - /*remove*/TRUE, - /*save_state*/FALSE); - - /* - * In the non-paging case, the sequencer will - * never re-reference the in-core SCB. - * To make sure we are notified during - * reselection, set the MK_MESSAGE flag in - * the card's copy of the SCB. - */ - if ((ahc->flags & AHC_PAGESCBS) == 0) { - ahc_outb(ahc, SCBPTR, scb->hscb->tag); - ahc_outb(ahc, SCB_CONTROL, - ahc_inb(ahc, SCB_CONTROL) - | MK_MESSAGE); - } - - /* - * Clear out any entries in the QINFIFO first - * so we are the next SCB for this target - * to run. - */ - ahc_search_qinfifo(ahc, - SCB_GET_TARGET(ahc, scb), - channel, SCB_GET_LUN(scb), - SCB_LIST_NULL, - ROLE_INITIATOR, - CAM_REQUEUE_REQ, - SEARCH_COMPLETE); - ahc_print_path(ahc, scb); - printf("Queuing a BDR SCB\n"); - ahc_qinfifo_requeue_tail(ahc, scb); - ahc_outb(ahc, SCBPTR, saved_scbptr); - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahc_timeout, (caddr_t)scb, 2 * hz); - ahc_unpause(ahc); - } else { - /* Go "immediately" to the bus reset */ - /* This shouldn't happen */ - ahc_set_recoveryscb(ahc, scb); - ahc_print_path(ahc, scb); - printf("SCB %d: Immediate reset. 
" - "Flags = 0x%x\n", scb->hscb->tag, - scb->flags); - goto bus_reset; - } - } - } - ahc_unlock(ahc, &s); -} - -static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) { union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; switch (abort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: { struct ahc_tmode_tstate *tstate; struct ahc_tmode_lstate *lstate; struct ccb_hdr_slist *list; cam_status status; status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate, &lstate, TRUE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; break; } if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) list = &lstate->accept_tios; else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) list = &lstate->immed_notifies; else list = NULL; if (list != NULL) { struct ccb_hdr *curelm; int found; curelm = SLIST_FIRST(list); found = 0; if (curelm == &abort_ccb->ccb_h) { found = 1; SLIST_REMOVE_HEAD(list, sim_links.sle); } else { while(curelm != NULL) { struct ccb_hdr *nextelm; nextelm = SLIST_NEXT(curelm, sim_links.sle); if (nextelm == &abort_ccb->ccb_h) { found = 1; SLIST_NEXT(curelm, sim_links.sle) = SLIST_NEXT(nextelm, sim_links.sle); break; } curelm = nextelm; } } if (found) { abort_ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(abort_ccb); ccb->ccb_h.status = CAM_REQ_CMP; } else { xpt_print_path(abort_ccb->ccb_h.path); printf("Not found\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } break; } /* FALLTHROUGH */ } case XPT_SCSI_IO: /* XXX Fully implement the hard ones */ ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } void ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun, ac_code code, void *opt_arg) { struct ccb_trans_settings cts; struct cam_path *path; void *arg; int error; arg = NULL; error = ahc_create_path(ahc, channel, target, lun, &path); if (error != CAM_REQ_CMP) return; switch (code) { case AC_TRANSFER_NEG: { #ifdef AHC_NEW_TRAN_SETTINGS struct ccb_trans_settings_scsi *scsi; cts.type = CTS_TYPE_CURRENT_SETTINGS; scsi = &cts.proto_specific.scsi; #else cts.flags = CCB_TRANS_CURRENT_SETTINGS; #endif cts.ccb_h.path = path; cts.ccb_h.target_id = target; cts.ccb_h.target_lun = lun; ahc_get_tran_settings(ahc, channel == 'A' ? 
ahc->our_id : ahc->our_id_b, channel, &cts); arg = &cts; #ifdef AHC_NEW_TRAN_SETTINGS scsi->valid &= ~CTS_SCSI_VALID_TQ; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; #else cts.valid &= ~CCB_TRANS_TQ_VALID; cts.flags &= ~CCB_TRANS_TAG_ENB; #endif if (opt_arg == NULL) break; if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED) #ifdef AHC_NEW_TRAN_SETTINGS scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; scsi->valid |= CTS_SCSI_VALID_TQ; #else cts.flags |= CCB_TRANS_TAG_ENB; cts.valid |= CCB_TRANS_TQ_VALID; #endif break; } case AC_SENT_BDR: case AC_BUS_RESET: break; default: panic("ahc_send_async: Unexpected async event"); } xpt_async(code, path, arg); xpt_free_path(path); } void ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable) { } int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) { ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ahc->platform_data == NULL) return (ENOMEM); return (0); } void ahc_platform_free(struct ahc_softc *ahc) { struct ahc_platform_data *pdata; pdata = ahc->platform_data; if (pdata != NULL) { if (pdata->regs != NULL) bus_release_resource(ahc->dev_softc, pdata->regs_res_type, pdata->regs_res_id, pdata->regs); if (pdata->irq != NULL) bus_release_resource(ahc->dev_softc, pdata->irq_res_type, 0, pdata->irq); if (pdata->sim_b != NULL) { xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL); xpt_free_path(pdata->path_b); xpt_bus_deregister(cam_sim_path(pdata->sim_b)); cam_sim_free(pdata->sim_b, /*free_devq*/TRUE); } if (pdata->sim != NULL) { xpt_async(AC_LOST_DEVICE, pdata->path, NULL); xpt_free_path(pdata->path); xpt_bus_deregister(cam_sim_path(pdata->sim)); cam_sim_free(pdata->sim, /*free_devq*/TRUE); } if (pdata->eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh); free(ahc->platform_data, M_DEVBUF); } } int ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc) { /* We don't sort softcs under FreeBSD so report equal always */ return (0); } int ahc_detach(device_t dev) { struct ahc_softc *ahc; u_long l; u_long s; ahc_list_lock(&l); device_printf(dev, "detaching device\n"); ahc = device_get_softc(dev); ahc = ahc_find_softc(ahc); if (ahc == NULL) { device_printf(dev, "aic7xxx already detached\n"); ahc_list_unlock(&l); return (ENOENT); } + TAILQ_REMOVE(&ahc_tailq, ahc, links); + ahc_list_unlock(&l); ahc_lock(ahc, &s); ahc_intr_enable(ahc, FALSE); bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih); ahc_unlock(ahc, &s); ahc_free(ahc); - ahc_list_unlock(&l); return (0); } #if UNUSED static void ahc_dump_targcmd(struct target_cmd *cmd) { uint8_t *byte; uint8_t *last_byte; int i; byte = &cmd->initiator_channel; /* Debugging info for received commands */ last_byte = &cmd[1].initiator_channel; i = 0; while (byte < last_byte) { if (i == 0) printf("\t"); printf("%#x", *byte++); i++; if (i == 8) { printf("\n"); i = 0; } else { printf(", "); } } } #endif static int ahc_modevent(module_t mod, int type, void *data) { /* XXX Deal with busy status on unload. 
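One plausible way to address this, sketched against the driver's own ahc_tailq (an editor's assumption, not what this commit does):

	switch (type) {
	case MOD_UNLOAD:
		return (TAILQ_EMPTY(&ahc_tailq) ? 0 : EBUSY);
	default:
		return (0);
	}

i.e. refuse MOD_UNLOAD while any softc remains on the controller list.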
*/ return 0; } static moduledata_t ahc_mod = { "ahc", ahc_modevent, NULL }; DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(ahc, cam, 1, 1, 1); MODULE_VERSION(ahc, 1); Index: stable/4/sys/dev/aic7xxx/aic7xxx_osm.h =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx_osm.h (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx_osm.h (revision 125849) @@ -1,588 +1,309 @@ /* * FreeBSD platform specific driver option settings, data structures, * function declarations and includes. * * Copyright (c) 1994-2001 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.h#15 $ + * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.h#18 $ * * $FreeBSD$ */ #ifndef _AIC7XXX_FREEBSD_H_ #define _AIC7XXX_FREEBSD_H_ #include /* for config options */ #include #include #include /* For device_t */ #if __FreeBSD_version >= 500000 #include #endif #include #include #include #include #if __FreeBSD_version < 500000 #include #else #define NPCI 1 #endif #if NPCI > 0 -#define AHC_PCI_CONFIG 1 +#define AIC_PCI_CONFIG 1 #include #endif #include #include #include #include #include #include #if NPCI > 0 +#if __FreeBSD_version >= 500000 +#include +#include +#else #include #include #endif +#endif #include #include #include #include #include #include #include #ifdef CAM_NEW_TRAN_CODE #define AHC_NEW_TRAN_SETTINGS #endif /* CAM_NEW_TRAN_CODE */ /*************************** Attachment Bookkeeping ***************************/ extern devclass_t ahc_devclass; /****************************** Platform Macros *******************************/ #define SIM_IS_SCSIBUS_B(ahc, sim) \ ((sim) == ahc->platform_data->sim_b) #define SIM_CHANNEL(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? 'B' : 'A') #define SIM_SCSI_ID(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? ahc->our_id_b : ahc->our_id) #define SIM_PATH(ahc, sim) \ (((sim) == ahc->platform_data->sim_b) ? ahc->platform_data->path_b \ : ahc->platform_data->path) #define BUILD_SCSIID(ahc, sim, target_id, our_id) \ ((((target_id) << TID_SHIFT) & TID) | (our_id) \ | (SIM_IS_SCSIBUS_B(ahc, sim) ? 
TWIN_CHNLB : 0)) #define SCB_GET_SIM(ahc, scb) \ (SCB_GET_CHANNEL(ahc, scb) == 'A' ? (ahc)->platform_data->sim \ : (ahc)->platform_data->sim_b) #ifndef offsetof #define offsetof(type, member) ((size_t)(&((type *)0)->member)) #endif -/************************* Forward Declarations *******************************/ -typedef device_t ahc_dev_softc_t; -typedef union ccb *ahc_io_ctx_t; -/***************************** Bus Space/DMA **********************************/ -#define ahc_dma_tag_create(ahc, parent_tag, alignment, boundary, \ - lowaddr, highaddr, filter, filterarg, \ - maxsize, nsegments, maxsegsz, flags, \ - dma_tagp) \ - bus_dma_tag_create(parent_tag, alignment, boundary, \ - lowaddr, highaddr, filter, filterarg, \ - maxsize, nsegments, maxsegsz, flags, \ - dma_tagp) - -#define ahc_dma_tag_destroy(ahc, tag) \ - bus_dma_tag_destroy(tag) - -#define ahc_dmamem_alloc(ahc, dmat, vaddr, flags, mapp) \ - bus_dmamem_alloc(dmat, vaddr, flags, mapp) - -#define ahc_dmamem_free(ahc, dmat, vaddr, map) \ - bus_dmamem_free(dmat, vaddr, map) - -#define ahc_dmamap_create(ahc, tag, flags, mapp) \ - bus_dmamap_create(tag, flags, mapp) - -#define ahc_dmamap_destroy(ahc, tag, map) \ - bus_dmamap_destroy(tag, map) - -#define ahc_dmamap_load(ahc, dmat, map, addr, buflen, callback, \ - callback_arg, flags) \ - bus_dmamap_load(dmat, map, addr, buflen, callback, callback_arg, flags) - -#define ahc_dmamap_unload(ahc, tag, map) \ - bus_dmamap_unload(tag, map) - -/* XXX Need to update Bus DMA for partial map syncs */ -#define ahc_dmamap_sync(ahc, dma_tag, dmamap, offset, len, op) \ - bus_dmamap_sync(dma_tag, dmamap, op) - /************************ Tunable Driver Parameters **************************/ /* * The number of dma segments supported. The sequencer can handle any number * of physically contiguous S/G entries. To reduce the driver's memory * consumption, we limit the number supported to be sufficient to handle * the largest mapping supported by the kernel, MAXPHYS. Assuming the * transfer is as fragmented as possible and unaligned, this turns out to * be the number of page-sized transfers in MAXPHYS plus an extra element * to handle any unaligned residual. The sequencer fetches SG elements * in cacheline sized chunks, so make the number per-transaction an even * multiple of 16 which should align us on even the largest of cacheline * boundaries. */ #define AHC_NSEG (roundup(btoc(MAXPHYS) + 1, 16)) /* This driver supports target mode */ #define AHC_TARGET_MODE 1 /************************** Softc/SCB Platform Data ***************************/ struct ahc_platform_data { /* * Hooks into the XPT. 
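(Editor's aside: a worked instance of the AHC_NSEG computation above. Assuming MAXPHYS = 128 KB and 4 KB pages, btoc(MAXPHYS) = 32 page-sized pieces, plus one element for an unaligned residual, gives 33; roundup(33, 16) then yields AHC_NSEG = 48 segments per transaction.)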
*/ struct cam_sim *sim; struct cam_sim *sim_b; struct cam_path *path; struct cam_path *path_b; int regs_res_type; int regs_res_id; int irq_res_type; struct resource *regs; struct resource *irq; void *ih; eventhandler_tag eh; + struct proc *recovery_thread; }; struct scb_platform_data { }; -/********************************* Byte Order *********************************/ -#if __FreeBSD_version >= 500000 -#define ahc_htobe16(x) htobe16(x) -#define ahc_htobe32(x) htobe32(x) -#define ahc_htobe64(x) htobe64(x) -#define ahc_htole16(x) htole16(x) -#define ahc_htole32(x) htole32(x) -#define ahc_htole64(x) htole64(x) - -#define ahc_be16toh(x) be16toh(x) -#define ahc_be32toh(x) be32toh(x) -#define ahc_be64toh(x) be64toh(x) -#define ahc_le16toh(x) le16toh(x) -#define ahc_le32toh(x) le32toh(x) -#define ahc_le64toh(x) le64toh(x) -#else -#define ahc_htobe16(x) (x) -#define ahc_htobe32(x) (x) -#define ahc_htobe64(x) (x) -#define ahc_htole16(x) (x) -#define ahc_htole32(x) (x) -#define ahc_htole64(x) (x) - -#define ahc_be16toh(x) (x) -#define ahc_be32toh(x) (x) -#define ahc_be64toh(x) (x) -#define ahc_le16toh(x) (x) -#define ahc_le32toh(x) (x) -#define ahc_le64toh(x) (x) -#endif - -/************************** Timer DataStructures ******************************/ -typedef struct callout ahc_timer_t; - /***************************** Core Includes **********************************/ #if AHC_REG_PRETTY_PRINT #define AIC_DEBUG_REGISTERS 1 #else #define AIC_DEBUG_REGISTERS 0 #endif -#include +#define AIC_CORE_INCLUDE +#define AIC_LIB_PREFIX ahc +#define AIC_CONST_PREFIX AHC +#include -/***************************** Timer Facilities *******************************/ -timeout_t ahc_timeout; - -#if __FreeBSD_version >= 500000 -#define ahc_timer_init(timer) callout_init(timer, /*mpsafe*/0) -#else -#define ahc_timer_init callout_init -#endif -#define ahc_timer_stop callout_stop - -static __inline void -ahc_timer_reset(ahc_timer_t *timer, u_int usec, ahc_callback_t *func, void *arg) -{ - callout_reset(timer, (usec * hz)/1000000, func, arg); -} - -static __inline void -ahc_scb_timer_reset(struct scb *scb, u_int usec) -{ - untimeout(ahc_timeout, (caddr_t)scb, scb->io_ctx->ccb_h.timeout_ch); - scb->io_ctx->ccb_h.timeout_ch = - timeout(ahc_timeout, scb, (usec * hz)/1000000); -} - /*************************** Device Access ************************************/ #define ahc_inb(ahc, port) \ bus_space_read_1((ahc)->tag, (ahc)->bsh, port) #define ahc_outb(ahc, port, value) \ bus_space_write_1((ahc)->tag, (ahc)->bsh, port, value) #define ahc_outsb(ahc, port, valp, count) \ bus_space_write_multi_1((ahc)->tag, (ahc)->bsh, port, valp, count) #define ahc_insb(ahc, port, valp, count) \ bus_space_read_multi_1((ahc)->tag, (ahc)->bsh, port, valp, count) static __inline void ahc_flush_device_writes(struct ahc_softc *); static __inline void ahc_flush_device_writes(struct ahc_softc *ahc) { /* XXX Is this sufficient for all architectures??? 
*/ ahc_inb(ahc, INTSTAT); } /**************************** Locking Primitives ******************************/ /* Lock protecting internal data structures */ static __inline void ahc_lockinit(struct ahc_softc *); static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags); static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags); /* Lock held during command completion to the upper layer */ static __inline void ahc_done_lockinit(struct ahc_softc *); static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags); static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags); /* Lock held during ahc_list manipulation and ahc softc frees */ static __inline void ahc_list_lockinit(void); static __inline void ahc_list_lock(unsigned long *flags); static __inline void ahc_list_unlock(unsigned long *flags); static __inline void ahc_lockinit(struct ahc_softc *ahc) { } static __inline void ahc_lock(struct ahc_softc *ahc, unsigned long *flags) { *flags = splcam(); } static __inline void ahc_unlock(struct ahc_softc *ahc, unsigned long *flags) { splx(*flags); } /* Lock held during command completion to the upper layer */ static __inline void ahc_done_lockinit(struct ahc_softc *ahc) { } static __inline void ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags) { } static __inline void ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags) { } /* Lock held during ahc_list manipulation and ahc softc frees */ static __inline void ahc_list_lockinit(void) { } static __inline void ahc_list_lock(unsigned long *flags) { } static __inline void ahc_list_unlock(unsigned long *flags) { } -/****************************** OS Primitives *********************************/ -#define ahc_delay DELAY -/************************** Transaction Operations ****************************/ -static __inline void ahc_set_transaction_status(struct scb *, uint32_t); -static __inline void ahc_set_scsi_status(struct scb *, uint32_t); -static __inline uint32_t ahc_get_transaction_status(struct scb *); -static __inline uint32_t ahc_get_scsi_status(struct scb *); -static __inline void ahc_set_transaction_tag(struct scb *, int, u_int); -static __inline u_long ahc_get_transfer_length(struct scb *); -static __inline int ahc_get_transfer_dir(struct scb *); -static __inline void ahc_set_residual(struct scb *, u_long); -static __inline void ahc_set_sense_residual(struct scb *, u_long); -static __inline u_long ahc_get_residual(struct scb *); -static __inline int ahc_perform_autosense(struct scb *); -static __inline uint32_t ahc_get_sense_bufsize(struct ahc_softc*, struct scb*); -static __inline void ahc_freeze_ccb(union ccb *ccb); -static __inline void ahc_freeze_scb(struct scb *scb); -static __inline void ahc_platform_freeze_devq(struct ahc_softc *, struct scb *); -static __inline int ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, - char channel, int lun, u_int tag, - role_t role, uint32_t status); - -static __inline -void ahc_set_transaction_status(struct scb *scb, uint32_t status) -{ - scb->io_ctx->ccb_h.status &= ~CAM_STATUS_MASK; - scb->io_ctx->ccb_h.status |= status; -} - -static __inline -void ahc_set_scsi_status(struct scb *scb, uint32_t status) -{ - scb->io_ctx->csio.scsi_status = status; -} - -static __inline -uint32_t ahc_get_transaction_status(struct scb *scb) -{ - return (scb->io_ctx->ccb_h.status & CAM_STATUS_MASK); -} - -static __inline -uint32_t ahc_get_scsi_status(struct scb *scb) -{ - return (scb->io_ctx->csio.scsi_status); -} - -static __inline 
-void ahc_set_transaction_tag(struct scb *scb, int enabled, u_int type) -{ - scb->io_ctx->csio.tag_action = type; - if (enabled) - scb->io_ctx->ccb_h.flags |= CAM_TAG_ACTION_VALID; - else - scb->io_ctx->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; -} - -static __inline -u_long ahc_get_transfer_length(struct scb *scb) -{ - return (scb->io_ctx->csio.dxfer_len); -} - -static __inline -int ahc_get_transfer_dir(struct scb *scb) -{ - return (scb->io_ctx->ccb_h.flags & CAM_DIR_MASK); -} - -static __inline -void ahc_set_residual(struct scb *scb, u_long resid) -{ - scb->io_ctx->csio.resid = resid; -} - -static __inline -void ahc_set_sense_residual(struct scb *scb, u_long resid) -{ - scb->io_ctx->csio.sense_resid = resid; -} - -static __inline -u_long ahc_get_residual(struct scb *scb) -{ - return (scb->io_ctx->csio.resid); -} - -static __inline -int ahc_perform_autosense(struct scb *scb) -{ - return (!(scb->io_ctx->ccb_h.flags & CAM_DIS_AUTOSENSE)); -} - -static __inline uint32_t -ahc_get_sense_bufsize(struct ahc_softc *ahc, struct scb *scb) -{ - return (sizeof(struct scsi_sense_data)); -} - -static __inline void -ahc_freeze_ccb(union ccb *ccb) -{ - if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { - ccb->ccb_h.status |= CAM_DEV_QFRZN; - xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); - } -} - -static __inline void -ahc_freeze_scb(struct scb *scb) -{ - ahc_freeze_ccb(scb->io_ctx); -} - -static __inline void -ahc_platform_freeze_devq(struct ahc_softc *ahc, struct scb *scb) -{ - /* Nothing to do here for FreeBSD */ -} - -static __inline int -ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, - char channel, int lun, u_int tag, - role_t role, uint32_t status) -{ - /* Nothing to do here for FreeBSD */ - return (0); -} - -static __inline void -ahc_platform_scb_free(struct ahc_softc *ahc, struct scb *scb) -{ - /* What do we do to generically handle driver resource shortages??? 
*/ - if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0 - && scb->io_ctx != NULL - && (scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { - scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ; - ahc->flags &= ~AHC_RESOURCE_SHORTAGE; - } - scb->io_ctx = NULL; -} - /********************************** PCI ***************************************/ -#ifdef AHC_PCI_CONFIG -static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci, - int reg, int width); -static __inline void ahc_pci_write_config(ahc_dev_softc_t pci, - int reg, uint32_t value, - int width); -static __inline int ahc_get_pci_function(ahc_dev_softc_t); -static __inline int ahc_get_pci_slot(ahc_dev_softc_t); -static __inline int ahc_get_pci_bus(ahc_dev_softc_t); +#ifdef AIC_PCI_CONFIG +int ahc_pci_map_registers(struct ahc_softc *ahc); +int ahc_pci_map_int(struct ahc_softc *ahc); +#endif /*AIC_PCI_CONFIG*/ -int ahc_pci_map_registers(struct ahc_softc *ahc); -int ahc_pci_map_int(struct ahc_softc *ahc); - -static __inline uint32_t -ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width) -{ - return (pci_read_config(pci, reg, width)); -} - -static __inline void -ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width) -{ - pci_write_config(pci, reg, value, width); -} - -static __inline int -ahc_get_pci_function(ahc_dev_softc_t pci) -{ - return (pci_get_function(pci)); -} - -static __inline int -ahc_get_pci_slot(ahc_dev_softc_t pci) -{ - return (pci_get_slot(pci)); -} - -static __inline int -ahc_get_pci_bus(ahc_dev_softc_t pci) -{ - return (pci_get_bus(pci)); -} - -typedef enum -{ - AHC_POWER_STATE_D0, - AHC_POWER_STATE_D1, - AHC_POWER_STATE_D2, - AHC_POWER_STATE_D3 -} ahc_power_state; - -void ahc_power_state_change(struct ahc_softc *ahc, - ahc_power_state new_state); -#endif /******************************** VL/EISA *************************************/ int aic7770_map_registers(struct ahc_softc *ahc, u_int port); int aic7770_map_int(struct ahc_softc *ahc, int irq); /********************************* Debug **************************************/ static __inline void ahc_print_path(struct ahc_softc *, struct scb *); static __inline void ahc_platform_dump_card_state(struct ahc_softc *ahc); static __inline void ahc_print_path(struct ahc_softc *ahc, struct scb *scb) { xpt_print_path(scb->io_ctx->ccb_h.path); } static __inline void ahc_platform_dump_card_state(struct ahc_softc *ahc) { /* Nothing to do here for FreeBSD */ } /**************************** Transfer Settings *******************************/ void ahc_notify_xfer_settings_change(struct ahc_softc *, struct ahc_devinfo *); void ahc_platform_set_tags(struct ahc_softc *, struct ahc_devinfo *, int /*enable*/); /************************* Initialization/Teardown ****************************/ int ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg); void ahc_platform_free(struct ahc_softc *ahc); int ahc_map_int(struct ahc_softc *ahc); int ahc_attach(struct ahc_softc *); int ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc); int ahc_detach(device_t); /****************************** Interrupts ************************************/ void ahc_platform_intr(void *); static __inline void ahc_platform_flushwork(struct ahc_softc *ahc); static __inline void ahc_platform_flushwork(struct ahc_softc *ahc) { } /************************ Misc Function Declarations **************************/ void ahc_done(struct ahc_softc *ahc, struct scb *scb); void ahc_send_async(struct ahc_softc *, char /*channel*/, u_int /*target*/, u_int /*lun*/, ac_code, void *arg); 
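/*
 * Editor's sketch, not part of this header: the spl-based locking idiom
 * that the primitives above implement on FreeBSD 4.x.  The "lock" is an
 * interrupt priority level, so holders must never sleep.  The function
 * name below is hypothetical.
 */
static __inline void
ahc_example_locked_op(struct ahc_softc *ahc)
{
	u_long s;

	ahc_lock(ahc, &s);	/* s = splcam(): mask CAM interrupts */
	/* ... touch state shared with ahc_platform_intr() ... */
	ahc_unlock(ahc, &s);	/* splx(s): restore the saved level */
}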
#endif /* _AIC7XXX_FREEBSD_H_ */ Index: stable/4/sys/dev/aic7xxx/aic7xxx_pci.c =================================================================== --- stable/4/sys/dev/aic7xxx/aic7xxx_pci.c (revision 125848) +++ stable/4/sys/dev/aic7xxx/aic7xxx_pci.c (revision 125849) @@ -1,2484 +1,2502 @@ /* * Product specific probe and attach routines for: * 3940, 2940, aic7895, aic7890, aic7880, * aic7870, aic7860 and aic7850 SCSI controllers * * Copyright (c) 1994-2001 Justin T. Gibbs. * Copyright (c) 2000-2001 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* - * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#69 $ - * - * $FreeBSD$ + * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#78 $ */ #ifdef __linux__ #include "aic7xxx_osm.h" #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" #else +#include +__FBSDID("$FreeBSD$"); #include #include #include #endif -#define AHC_PCI_IOADDR PCIR_MAPS /* I/O Address */ -#define AHC_PCI_MEMADDR (PCIR_MAPS + 4) /* Mem I/O Address */ - static __inline uint64_t ahc_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor) { uint64_t id; id = subvendor | (subdevice << 16) | ((uint64_t)vendor << 32) | ((uint64_t)device << 48); return (id); } #define ID_ALL_MASK 0xFFFFFFFFFFFFFFFFull #define ID_DEV_VENDOR_MASK 0xFFFFFFFF00000000ull #define ID_9005_GENERIC_MASK 0xFFF0FFFF00000000ull #define ID_9005_SISL_MASK 0x000FFFFF00000000ull #define ID_9005_SISL_ID 0x0005900500000000ull #define ID_AIC7850 0x5078900400000000ull #define ID_AHA_2902_04_10_15_20C_30C 0x5078900478509004ull #define ID_AIC7855 0x5578900400000000ull #define ID_AIC7859 0x3860900400000000ull #define ID_AHA_2930CU 0x3860900438699004ull #define ID_AIC7860 0x6078900400000000ull #define ID_AIC7860C 0x6078900478609004ull #define ID_AHA_1480A 0x6075900400000000ull #define ID_AHA_2940AU_0 0x6178900400000000ull #define ID_AHA_2940AU_1 0x6178900478619004ull #define ID_AHA_2940AU_CN 0x2178900478219004ull #define ID_AHA_2930C_VAR 0x6038900438689004ull #define ID_AIC7870 0x7078900400000000ull #define ID_AHA_2940 0x7178900400000000ull #define ID_AHA_3940 0x7278900400000000ull #define ID_AHA_398X 0x7378900400000000ull #define ID_AHA_2944 0x7478900400000000ull #define ID_AHA_3944 0x7578900400000000ull #define ID_AHA_4944 0x7678900400000000ull #define ID_AIC7880 0x8078900400000000ull #define ID_AIC7880_B 0x8078900478809004ull #define ID_AHA_2940U 0x8178900400000000ull #define ID_AHA_3940U 0x8278900400000000ull #define ID_AHA_2944U 0x8478900400000000ull #define ID_AHA_3944U 0x8578900400000000ull #define ID_AHA_398XU 0x8378900400000000ull #define ID_AHA_4944U 0x8678900400000000ull #define ID_AHA_2940UB 0x8178900478819004ull #define ID_AHA_2930U 0x8878900478889004ull #define ID_AHA_2940U_PRO 0x8778900478879004ull #define ID_AHA_2940U_CN 0x0078900478009004ull #define ID_AIC7895 0x7895900478959004ull #define ID_AIC7895_ARO 0x7890900478939004ull #define ID_AIC7895_ARO_MASK 0xFFF0FFFFFFFFFFFFull #define ID_AHA_2940U_DUAL 0x7895900478919004ull #define ID_AHA_3940AU 0x7895900478929004ull #define ID_AHA_3944AU 0x7895900478949004ull #define ID_AIC7890 0x001F9005000F9005ull #define ID_AIC7890_ARO 0x00139005000F9005ull #define ID_AAA_131U2 0x0013900500039005ull #define ID_AHA_2930U2 0x0011900501819005ull #define ID_AHA_2940U2B 0x00109005A1009005ull #define ID_AHA_2940U2_OEM 0x0010900521809005ull #define ID_AHA_2940U2 0x00109005A1809005ull #define ID_AHA_2950U2B 0x00109005E1009005ull #define ID_AIC7892 0x008F9005FFFF9005ull #define ID_AIC7892_ARO 0x00839005FFFF9005ull #define ID_AHA_29160 0x00809005E2A09005ull #define ID_AHA_29160_CPQ 0x00809005E2A00E11ull #define ID_AHA_29160N 0x0080900562A09005ull #define ID_AHA_29160C 0x0080900562209005ull #define ID_AHA_29160B 0x00809005E2209005ull #define ID_AHA_19160B 0x0081900562A19005ull +#define ID_AHA_2915_30LP 0x0082900502109005ull #define ID_AIC7896 0x005F9005FFFF9005ull #define ID_AIC7896_ARO 0x00539005FFFF9005ull #define ID_AHA_3950U2B_0 0x00509005FFFF9005ull #define ID_AHA_3950U2B_1 0x00509005F5009005ull #define ID_AHA_3950U2D_0 0x00519005FFFF9005ull #define ID_AHA_3950U2D_1 0x00519005B5009005ull #define ID_AIC7899 
0x00CF9005FFFF9005ull
#define ID_AIC7899_ARO		0x00C39005FFFF9005ull
#define ID_AHA_3960D		0x00C09005F6209005ull
#define ID_AHA_3960D_CPQ	0x00C09005F6200E11ull

#define ID_AIC7810		0x1078900400000000ull
#define ID_AIC7815		0x7815900400000000ull

#define DEVID_9005_TYPE(id) ((id) & 0xF)
#define		DEVID_9005_TYPE_HBA		0x0	/* Standard Card */
#define		DEVID_9005_TYPE_AAA		0x3	/* RAID Card */
#define		DEVID_9005_TYPE_SISL		0x5	/* Container ROMB */
#define		DEVID_9005_TYPE_MB		0xF	/* On Motherboard */

#define DEVID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
#define		DEVID_9005_MAXRATE_U160		0x0
#define		DEVID_9005_MAXRATE_ULTRA2	0x1
#define		DEVID_9005_MAXRATE_ULTRA	0x2
#define		DEVID_9005_MAXRATE_FAST		0x3

#define DEVID_9005_MFUNC(id) (((id) & 0x40) >> 6)

#define DEVID_9005_CLASS(id) (((id) & 0xFF00) >> 8)
#define		DEVID_9005_CLASS_SPI		0x0	/* Parallel SCSI */

#define SUBID_9005_TYPE(id) ((id) & 0xF)
#define		SUBID_9005_TYPE_MB		0xF	/* On Motherboard */
#define		SUBID_9005_TYPE_CARD		0x0	/* Standard Card */
#define		SUBID_9005_TYPE_LCCARD		0x1	/* Low Cost Card */
#define		SUBID_9005_TYPE_RAID		0x3	/* Combined with Raid */

#define SUBID_9005_TYPE_KNOWN(id)			\
	((((id) & 0xF) == SUBID_9005_TYPE_MB)		\
	 || (((id) & 0xF) == SUBID_9005_TYPE_CARD)	\
	 || (((id) & 0xF) == SUBID_9005_TYPE_LCCARD)	\
	 || (((id) & 0xF) == SUBID_9005_TYPE_RAID))

#define SUBID_9005_MAXRATE(id) (((id) & 0x30) >> 4)
#define		SUBID_9005_MAXRATE_ULTRA2	0x0
#define		SUBID_9005_MAXRATE_ULTRA	0x1
#define		SUBID_9005_MAXRATE_U160		0x2
#define		SUBID_9005_MAXRATE_RESERVED	0x3

#define SUBID_9005_SEEPTYPE(id)						\
	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
	 ? ((id) & 0xC0) >> 6						\
	 : ((id) & 0x300) >> 8)
#define		SUBID_9005_SEEPTYPE_NONE	0x0
#define		SUBID_9005_SEEPTYPE_1K		0x1
#define		SUBID_9005_SEEPTYPE_2K_4K	0x2
#define		SUBID_9005_SEEPTYPE_RESERVED	0x3

#define SUBID_9005_AUTOTERM(id)						\
	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
	 ? (((id) & 0x400) >> 10) == 0					\
	 : (((id) & 0x40) >> 6) == 0)

#define SUBID_9005_NUMCHAN(id)						\
	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
	 ? ((id) & 0x300) >> 8						\
	 : ((id) & 0xC00) >> 10)

#define SUBID_9005_LEGACYCONN(id)					\
	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
	 ? 0								\
	 : ((id) & 0x80) >> 7)

#define SUBID_9005_MFUNCENB(id)						\
	((SUBID_9005_TYPE(id) == SUBID_9005_TYPE_MB)			\
	 ? ((id) & 0x800) >> 11						\
	 : ((id) & 0x1000) >> 12)
/*
 * Informational only. Should use chip register to be
 * certain, but may be used in identification strings.
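 */

/*
 * Editorial worked example (not part of the driver): the composed
 * 64-bit IDs above follow the ahc_compose_id() layout, with the
 * device, vendor, subdevice and subvendor words running from most to
 * least significant.  Decoding ID_AHA_29160 (0x00809005E2A09005ull):
 */
#if 0
	uint64_t id = 0x00809005E2A09005ull;

	uint16_t device    = id >> 48;		  /* 0x0080 */
	uint16_t vendor    = (id >> 32) & 0xFFFF; /* 0x9005, Adaptec */
	uint16_t subdevice = (id >> 16) & 0xFFFF; /* 0xE2A0 */
	uint16_t subvendor = id & 0xFFFF;	  /* 0x9005 */
#endif

/*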
*/ #define SUBID_9005_CARD_SCSIWIDTH_MASK 0x2000 #define SUBID_9005_CARD_PCIWIDTH_MASK 0x4000 #define SUBID_9005_CARD_SEDIFF_MASK 0x8000 static ahc_device_setup_t ahc_aic785X_setup; static ahc_device_setup_t ahc_aic7860_setup; static ahc_device_setup_t ahc_apa1480_setup; static ahc_device_setup_t ahc_aic7870_setup; static ahc_device_setup_t ahc_aha394X_setup; static ahc_device_setup_t ahc_aha494X_setup; static ahc_device_setup_t ahc_aha398X_setup; static ahc_device_setup_t ahc_aic7880_setup; static ahc_device_setup_t ahc_aha2940Pro_setup; static ahc_device_setup_t ahc_aha394XU_setup; static ahc_device_setup_t ahc_aha398XU_setup; static ahc_device_setup_t ahc_aic7890_setup; static ahc_device_setup_t ahc_aic7892_setup; static ahc_device_setup_t ahc_aic7895_setup; static ahc_device_setup_t ahc_aic7896_setup; static ahc_device_setup_t ahc_aic7899_setup; static ahc_device_setup_t ahc_aha29160C_setup; static ahc_device_setup_t ahc_raid_setup; static ahc_device_setup_t ahc_aha394XX_setup; static ahc_device_setup_t ahc_aha494XX_setup; static ahc_device_setup_t ahc_aha398XX_setup; struct ahc_pci_identity ahc_pci_ident_table [] = { /* aic7850 based controllers */ { ID_AHA_2902_04_10_15_20C_30C, ID_ALL_MASK, "Adaptec 2902/04/10/15/20C/30C SCSI adapter", ahc_aic785X_setup }, /* aic7860 based controllers */ { ID_AHA_2930CU, ID_ALL_MASK, "Adaptec 2930CU SCSI adapter", ahc_aic7860_setup }, { ID_AHA_1480A & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 1480A Ultra SCSI adapter", ahc_apa1480_setup }, { ID_AHA_2940AU_0 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2940AU_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940A/CN Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AHA_2930C_VAR & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930C Ultra SCSI adapter (VAR)", ahc_aic7860_setup }, /* aic7870 based controllers */ { ID_AHA_2940, ID_ALL_MASK, "Adaptec 2940 SCSI adapter", ahc_aic7870_setup }, { ID_AHA_3940, ID_ALL_MASK, "Adaptec 3940 SCSI adapter", ahc_aha394X_setup }, { ID_AHA_398X, ID_ALL_MASK, "Adaptec 398X SCSI RAID adapter", ahc_aha398X_setup }, { ID_AHA_2944, ID_ALL_MASK, "Adaptec 2944 SCSI adapter", ahc_aic7870_setup }, { ID_AHA_3944, ID_ALL_MASK, "Adaptec 3944 SCSI adapter", ahc_aha394X_setup }, { ID_AHA_4944, ID_ALL_MASK, "Adaptec 4944 SCSI adapter", ahc_aha494X_setup }, /* aic7880 based controllers */ { ID_AHA_2940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_3940U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3940 Ultra SCSI adapter", ahc_aha394XU_setup }, { ID_AHA_2944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2944 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_3944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 3944 Ultra SCSI adapter", ahc_aha394XU_setup }, { ID_AHA_398XU & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 398X Ultra SCSI RAID adapter", ahc_aha398XU_setup }, { /* * XXX Don't know the slot numbers * so we can't identify channels */ ID_AHA_4944U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 4944 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_2930U & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2930 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AHA_2940U_PRO & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940 Pro Ultra SCSI adapter", ahc_aha2940Pro_setup }, { ID_AHA_2940U_CN & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec 2940/CN Ultra SCSI adapter", ahc_aic7880_setup }, /* Ignore all SISL (AAC on 
MB) based controllers. */ { ID_9005_SISL_ID, ID_9005_SISL_MASK, NULL, NULL }, /* aic7890 based controllers */ { ID_AHA_2930U2, ID_ALL_MASK, "Adaptec 2930 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2B, ID_ALL_MASK, "Adaptec 2940B Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2940U2_OEM, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter (OEM)", ahc_aic7890_setup }, { ID_AHA_2940U2, ID_ALL_MASK, "Adaptec 2940 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AHA_2950U2B, ID_ALL_MASK, "Adaptec 2950 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7890_ARO, ID_ALL_MASK, "Adaptec aic7890/91 Ultra2 SCSI adapter (ARO)", ahc_aic7890_setup }, { ID_AAA_131U2, ID_ALL_MASK, "Adaptec AAA-131 Ultra2 RAID adapter", ahc_aic7890_setup }, /* aic7892 based controllers */ { ID_AHA_29160, ID_ALL_MASK, "Adaptec 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 29160 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160N, ID_ALL_MASK, "Adaptec 29160N Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_29160C, ID_ALL_MASK, "Adaptec 29160C Ultra160 SCSI adapter", ahc_aha29160C_setup }, { ID_AHA_29160B, ID_ALL_MASK, "Adaptec 29160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AHA_19160B, ID_ALL_MASK, "Adaptec 19160B Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7892_ARO, ID_ALL_MASK, "Adaptec aic7892 Ultra160 SCSI adapter (ARO)", ahc_aic7892_setup }, + { + ID_AHA_2915_30LP, + ID_ALL_MASK, + "Adaptec 2915/30LP Ultra160 SCSI adapter", + ahc_aic7892_setup + }, /* aic7895 based controllers */ { ID_AHA_2940U_DUAL, ID_ALL_MASK, "Adaptec 2940/DUAL Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3940AU, ID_ALL_MASK, "Adaptec 3940A Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AHA_3944AU, ID_ALL_MASK, "Adaptec 3944A Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AIC7895_ARO, ID_AIC7895_ARO_MASK, "Adaptec aic7895 Ultra SCSI adapter (ARO)", ahc_aic7895_setup }, /* aic7896/97 based controllers */ { ID_AHA_3950U2B_0, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2B_1, ID_ALL_MASK, "Adaptec 3950B Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_0, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AHA_3950U2D_1, ID_ALL_MASK, "Adaptec 3950D Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7896_ARO, ID_ALL_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter (ARO)", ahc_aic7896_setup }, /* aic7899 based controllers */ { ID_AHA_3960D, ID_ALL_MASK, "Adaptec 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AHA_3960D_CPQ, ID_ALL_MASK, "Adaptec (Compaq OEM) 3960D Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7899_ARO, ID_ALL_MASK, "Adaptec aic7899 Ultra160 SCSI adapter (ARO)", ahc_aic7899_setup }, /* Generic chip probes for devices we don't know 'exactly' */ { ID_AIC7850 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7850 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7855 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7855 SCSI adapter", ahc_aic785X_setup }, { ID_AIC7859 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7859 SCSI adapter", ahc_aic7860_setup }, { ID_AIC7860 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7860 Ultra SCSI adapter", ahc_aic7860_setup }, { ID_AIC7870 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7870 SCSI adapter", ahc_aic7870_setup }, { ID_AIC7880 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7880 Ultra SCSI adapter", ahc_aic7880_setup }, { ID_AIC7890 & ID_9005_GENERIC_MASK, 
ID_9005_GENERIC_MASK, "Adaptec aic7890/91 Ultra2 SCSI adapter", ahc_aic7890_setup }, { ID_AIC7892 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7892 Ultra160 SCSI adapter", ahc_aic7892_setup }, { ID_AIC7895 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7895 Ultra SCSI adapter", ahc_aic7895_setup }, { ID_AIC7896 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7896/97 Ultra2 SCSI adapter", ahc_aic7896_setup }, { ID_AIC7899 & ID_9005_GENERIC_MASK, ID_9005_GENERIC_MASK, "Adaptec aic7899 Ultra160 SCSI adapter", ahc_aic7899_setup }, { ID_AIC7810 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7810 RAID memory controller", ahc_raid_setup }, { ID_AIC7815 & ID_DEV_VENDOR_MASK, ID_DEV_VENDOR_MASK, "Adaptec aic7815 RAID memory controller", ahc_raid_setup } }; const u_int ahc_num_pci_devs = NUM_ELEMENTS(ahc_pci_ident_table); #define AHC_394X_SLOT_CHANNEL_A 4 #define AHC_394X_SLOT_CHANNEL_B 5 #define AHC_398X_SLOT_CHANNEL_A 4 #define AHC_398X_SLOT_CHANNEL_B 8 #define AHC_398X_SLOT_CHANNEL_C 12 #define AHC_494X_SLOT_CHANNEL_A 4 #define AHC_494X_SLOT_CHANNEL_B 5 #define AHC_494X_SLOT_CHANNEL_C 6 #define AHC_494X_SLOT_CHANNEL_D 7 #define DEVCONFIG 0x40 #define PCIERRGENDIS 0x80000000ul #define SCBSIZE32 0x00010000ul /* aic789X only */ #define REXTVALID 0x00001000ul /* ultra cards only */ #define MPORTMODE 0x00000400ul /* aic7870+ only */ #define RAMPSM 0x00000200ul /* aic7870+ only */ #define VOLSENSE 0x00000100ul #define PCI64BIT 0x00000080ul /* 64Bit PCI bus (Ultra2 Only)*/ #define SCBRAMSEL 0x00000080ul #define MRDCEN 0x00000040ul #define EXTSCBTIME 0x00000020ul /* aic7870 only */ #define EXTSCBPEN 0x00000010ul /* aic7870 only */ #define BERREN 0x00000008ul #define DACEN 0x00000004ul #define STPWLEVEL 0x00000002ul #define DIFACTNEGEN 0x00000001ul /* aic7870 only */ #define CSIZE_LATTIME 0x0c #define CACHESIZE 0x0000003ful /* only 5 bits */ #define LATTIME 0x0000ff00ul /* PCI STATUS definitions */ #define DPE 0x80 #define SSE 0x40 #define RMA 0x20 #define RTA 0x10 #define STA 0x08 #define DPR 0x01 static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device, uint16_t subvendor, uint16_t subdevice); static int ahc_ext_scbram_present(struct ahc_softc *ahc); static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large); static void ahc_probe_ext_scbram(struct ahc_softc *ahc); static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1); static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc); static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1); static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present); static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present); static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present); static void write_brdctl(struct ahc_softc *ahc, uint8_t value); static uint8_t read_brdctl(struct ahc_softc *ahc); static void ahc_pci_intr(struct ahc_softc *ahc); static int ahc_pci_chip_init(struct ahc_softc *ahc); static int ahc_pci_suspend(struct ahc_softc *ahc); static int ahc_pci_resume(struct ahc_softc *ahc); static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor, uint16_t subdevice, uint16_t subvendor) { int result; /* 
Default to invalid. */ result = 0; if (vendor == 0x9005 && subvendor == 0x9005 && subdevice != device && SUBID_9005_TYPE_KNOWN(subdevice) != 0) { switch (SUBID_9005_TYPE(subdevice)) { case SUBID_9005_TYPE_MB: break; case SUBID_9005_TYPE_CARD: case SUBID_9005_TYPE_LCCARD: /* * Currently only trust Adaptec cards to * get the sub device info correct. */ if (DEVID_9005_TYPE(device) == DEVID_9005_TYPE_HBA) result = 1; break; case SUBID_9005_TYPE_RAID: break; default: break; } } return (result); } struct ahc_pci_identity * -ahc_find_pci_device(ahc_dev_softc_t pci) +ahc_find_pci_device(aic_dev_softc_t pci) { uint64_t full_id; uint16_t device; uint16_t vendor; uint16_t subdevice; uint16_t subvendor; struct ahc_pci_identity *entry; u_int i; - vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); - device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); - subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); - subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); + vendor = aic_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); + device = aic_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); + subvendor = aic_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); + subdevice = aic_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); full_id = ahc_compose_id(device, vendor, subdevice, subvendor); /* * If the second function is not hooked up, ignore it. * Unfortunately, not all MB vendors implement the * subdevice ID as per the Adaptec spec, so do our best * to sanity check it prior to accepting the subdevice * ID as valid. */ - if (ahc_get_pci_function(pci) > 0 + if (aic_get_pci_function(pci) > 0 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice) && SUBID_9005_MFUNCENB(subdevice) == 0) return (NULL); for (i = 0; i < ahc_num_pci_devs; i++) { entry = &ahc_pci_ident_table[i]; if (entry->full_id == (full_id & entry->id_mask)) { /* Honor exclusion entries. */ if (entry->name == NULL) return (NULL); return (entry); } } return (NULL); } int ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry) { u_long l; u_int command; u_int our_id; u_int sxfrctl1; u_int scsiseq; u_int dscommand0; uint32_t devconfig; int error; uint8_t sblkctl; our_id = 0; error = entry->setup(ahc); if (error != 0) return (error); ahc->chip |= AHC_PCI; ahc->description = entry->name; - ahc_power_state_change(ahc, AHC_POWER_STATE_D0); + aic_power_state_change(ahc, AIC_POWER_STATE_D0); error = ahc_pci_map_registers(ahc); if (error != 0) return (error); /* * Before we continue probing the card, ensure that * its interrupts are *disabled*. We don't want * a misstep to hang the machine in an interrupt * storm. */ ahc_intr_enable(ahc, FALSE); - devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); /* * If we need to support high memory, enable dual * address cycles. This bit must be set to enable * high address bit generation even if we are on a * 64bit bus (PCI64BIT set in devconfig). */ if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) { if (bootverbose) printf("%s: Enabling 39Bit Addressing\n", ahc_name(ahc)); devconfig |= DACEN; } /* Ensure that pci error generation, a test feature, is disabled. 
*/ devconfig |= PCIERRGENDIS; - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); /* Ensure busmastering is enabled */ - command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); + command = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); command |= PCIM_CMD_BUSMASTEREN; - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2); + aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, /*bytes*/2); /* On all PCI adapters, we allow SCB paging */ ahc->flags |= AHC_PAGESCBS; error = ahc_softc_init(ahc); if (error != 0) return (error); /* * Disable PCI parity error checking. Users typically * do this to work around broken PCI chipsets that get * the parity timing wrong and thus generate lots of spurious * errors. The chip only allows us to disable *all* parity * error reporting when doing this, so CIO bus, scb ram, and * scratch ram parity errors will be ignored too. */ if ((ahc->flags & AHC_DISABLE_PCI_PERR) != 0) ahc->seqctl |= FAILDIS; ahc->bus_intr = ahc_pci_intr; ahc->bus_chip_init = ahc_pci_chip_init; ahc->bus_suspend = ahc_pci_suspend; ahc->bus_resume = ahc_pci_resume; /* Remeber how the card was setup in case there is no SEEPROM */ if ((ahc_inb(ahc, HCNTRL) & POWRDN) == 0) { ahc_pause(ahc); if ((ahc->features & AHC_ULTRA2) != 0) our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID; else our_id = ahc_inb(ahc, SCSIID) & OID; sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN; scsiseq = ahc_inb(ahc, SCSISEQ); } else { sxfrctl1 = STPWEN; our_id = 7; scsiseq = 0; } error = ahc_reset(ahc, /*reinit*/FALSE); if (error != 0) return (ENXIO); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; /* Perform ALT-Mode Setup */ sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, OPTIONMODE_DEFAULTS|AUTOACKEN|BUSFREEREV|EXPPHASEDIS); ahc_outb(ahc, SFUNCT, sfunct); /* Normal mode setup */ ahc_outb(ahc, CRCCONTROL1, CRCVALCHKEN|CRCENDCHKEN|CRCREQCHKEN |TARGCRCENDEN); } dscommand0 = ahc_inb(ahc, DSCOMMAND0); dscommand0 |= MPARCKEN|CACHETHEN; if ((ahc->features & AHC_ULTRA2) != 0) { /* * DPARCKEN doesn't work correctly on * some MBs so don't use it. */ dscommand0 &= ~DPARCKEN; } /* * Handle chips that must have cache line * streaming (dis/en)abled. */ if ((ahc->bugs & AHC_CACHETHEN_DIS_BUG) != 0) dscommand0 |= CACHETHEN; if ((ahc->bugs & AHC_CACHETHEN_BUG) != 0) dscommand0 &= ~CACHETHEN; ahc_outb(ahc, DSCOMMAND0, dscommand0); ahc->pci_cachesize = - ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, + aic_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1) & CACHESIZE; ahc->pci_cachesize *= 4; if ((ahc->bugs & AHC_PCI_2_1_RETRY_BUG) != 0 && ahc->pci_cachesize == 4) { - ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, + aic_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, 0, /*bytes*/1); ahc->pci_cachesize = 0; } /* * We cannot perform ULTRA speeds without the presense * of the external precision resistor. 
*/ if ((ahc->features & AHC_ULTRA) != 0) { uint32_t devconfig; - devconfig = ahc_pci_read_config(ahc->dev_softc, + devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((devconfig & REXTVALID) == 0) ahc->features &= ~AHC_ULTRA; } /* See if we have a SEEPROM and perform auto-term */ check_extport(ahc, &sxfrctl1); /* * Take the LED out of diagnostic mode */ sblkctl = ahc_inb(ahc, SBLKCTL); ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON))); if ((ahc->features & AHC_ULTRA2) != 0) { ahc_outb(ahc, DFF_THRSH, RD_DFTHRSH_MAX|WR_DFTHRSH_MAX); } else { ahc_outb(ahc, DSPCISTATUS, DFTHRSH_100); } if (ahc->flags & AHC_USEDEFAULTS) { /* * PCI Adapter default setup * Should only be used if the adapter does not have * a SEEPROM. */ /* See if someone else set us up already */ if ((ahc->flags & AHC_NO_BIOS_INIT) == 0 && scsiseq != 0) { printf("%s: Using left over BIOS settings\n", ahc_name(ahc)); ahc->flags &= ~AHC_USEDEFAULTS; ahc->flags |= AHC_BIOS_ENABLED; } else { /* * Assume only one connector and always turn * on termination. */ our_id = 0x07; sxfrctl1 = STPWEN; } ahc_outb(ahc, SCSICONF, our_id|ENSPCHK|RESET_SCSI); ahc->our_id = our_id; } /* * Take a look to see if we have external SRAM. * We currently do not attempt to use SRAM that is * shared among multiple controllers. */ ahc_probe_ext_scbram(ahc); /* * Record our termination setting for the * generic initialization routine. */ if ((sxfrctl1 & STPWEN) != 0) ahc->flags |= AHC_TERM_ENB_A; /* * Save chip register configuration data for chip resets * that occur during runtime and resume events. */ ahc->bus_softc.pci_softc.devconfig = - ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); ahc->bus_softc.pci_softc.command = - ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); + aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1); ahc->bus_softc.pci_softc.csize_lattime = - ahc_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1); + aic_pci_read_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1); ahc->bus_softc.pci_softc.dscommand0 = ahc_inb(ahc, DSCOMMAND0); ahc->bus_softc.pci_softc.dspcistatus = ahc_inb(ahc, DSPCISTATUS); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc->bus_softc.pci_softc.optionmode = ahc_inb(ahc, OPTIONMODE); ahc->bus_softc.pci_softc.targcrccnt = ahc_inw(ahc, TARGCRCCNT); ahc_outb(ahc, SFUNCT, sfunct); ahc->bus_softc.pci_softc.crccontrol1 = ahc_inb(ahc, CRCCONTROL1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc->bus_softc.pci_softc.scbbaddr = ahc_inb(ahc, SCBBADDR); if ((ahc->features & AHC_ULTRA2) != 0) ahc->bus_softc.pci_softc.dff_thrsh = ahc_inb(ahc, DFF_THRSH); /* Core initialization */ error = ahc_init(ahc); if (error != 0) return (error); /* * Allow interrupts now that we are completely setup. */ error = ahc_pci_map_int(ahc); if (error != 0) return (error); ahc_list_lock(&l); /* * Link this softc in with all other ahc instances. */ ahc_softc_insert(ahc); ahc_list_unlock(&l); return (0); } /* * Test for the presense of external sram in an * "unshared" configuration. 
*/ static int ahc_ext_scbram_present(struct ahc_softc *ahc) { u_int chip; int ramps; int single_user; uint32_t devconfig; chip = ahc->chip & AHC_CHIPID_MASK; - devconfig = ahc_pci_read_config(ahc->dev_softc, + devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); single_user = (devconfig & MPORTMODE) != 0; if ((ahc->features & AHC_ULTRA2) != 0) ramps = (ahc_inb(ahc, DSCOMMAND0) & RAMPS) != 0; else if (chip == AHC_AIC7895 || chip == AHC_AIC7895C) /* * External SCBRAM arbitration is flakey * on these chips. Unfortunately this means * we don't use the extra SCB ram space on the * 3940AUW. */ ramps = 0; else if (chip >= AHC_AIC7870) ramps = (devconfig & RAMPSM) != 0; else ramps = 0; if (ramps && single_user) return (1); return (0); } /* * Enable external scbram. */ static void ahc_scbram_config(struct ahc_softc *ahc, int enable, int pcheck, int fast, int large) { uint32_t devconfig; if (ahc->features & AHC_MULTI_FUNC) { /* * Set the SCB Base addr (highest address bit) * depending on which channel we are. */ - ahc_outb(ahc, SCBBADDR, ahc_get_pci_function(ahc->dev_softc)); + ahc_outb(ahc, SCBBADDR, aic_get_pci_function(ahc->dev_softc)); } ahc->flags &= ~AHC_LSCBS_ENABLED; if (large) ahc->flags |= AHC_LSCBS_ENABLED; - devconfig = ahc_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); + devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); if ((ahc->features & AHC_ULTRA2) != 0) { u_int dscommand0; dscommand0 = ahc_inb(ahc, DSCOMMAND0); if (enable) dscommand0 &= ~INTSCBRAMSEL; else dscommand0 |= INTSCBRAMSEL; if (large) dscommand0 &= ~USCBSIZE32; else dscommand0 |= USCBSIZE32; ahc_outb(ahc, DSCOMMAND0, dscommand0); } else { if (fast) devconfig &= ~EXTSCBTIME; else devconfig |= EXTSCBTIME; if (enable) devconfig &= ~SCBRAMSEL; else devconfig |= SCBRAMSEL; if (large) devconfig &= ~SCBSIZE32; else devconfig |= SCBSIZE32; } if (pcheck) devconfig |= EXTSCBPEN; else devconfig &= ~EXTSCBPEN; - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); + aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* * Take a look to see if we have external SRAM. * We currently do not attempt to use SRAM that is * shared among multiple controllers. */ static void ahc_probe_ext_scbram(struct ahc_softc *ahc) { int num_scbs; int test_num_scbs; int enable; int pcheck; int fast; int large; enable = FALSE; pcheck = FALSE; fast = FALSE; large = FALSE; num_scbs = 0; if (ahc_ext_scbram_present(ahc) == 0) goto done; /* * Probe for the best parameters to use. */ ahc_scbram_config(ahc, /*enable*/TRUE, pcheck, fast, large); num_scbs = ahc_probe_scbs(ahc); if (num_scbs == 0) { /* The SRAM wasn't really present. */ goto done; } enable = TRUE; /* * Clear any outstanding parity error * and ensure that parity error reporting * is enabled. 
*/ ahc_outb(ahc, SEQCTL, 0); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do parity */ ahc_scbram_config(ahc, enable, /*pcheck*/TRUE, fast, large); num_scbs = ahc_probe_scbs(ahc); if ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0) pcheck = TRUE; /* Clear any resulting parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); /* Now see if we can do fast timing */ ahc_scbram_config(ahc, enable, pcheck, /*fast*/TRUE, large); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs == num_scbs && ((ahc_inb(ahc, INTSTAT) & BRKADRINT) == 0 || (ahc_inb(ahc, ERROR) & MPARERR) == 0)) fast = TRUE; /* * See if we can use large SCBs and still maintain * the same overall count of SCBs. */ if ((ahc->features & AHC_LARGE_SCBS) != 0) { ahc_scbram_config(ahc, enable, pcheck, fast, /*large*/TRUE); test_num_scbs = ahc_probe_scbs(ahc); if (test_num_scbs >= num_scbs) { large = TRUE; num_scbs = test_num_scbs; if (num_scbs >= 64) { /* * We have enough space to move the * "busy targets table" into SCB space * and make it qualify all the way to the * lun level. */ ahc->flags |= AHC_SCB_BTT; } } } done: /* * Disable parity error reporting until we * can load instruction ram. */ ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); /* Clear any latched parity error */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); if (bootverbose && enable) { printf("%s: External SRAM, %s access%s, %dbytes/SCB\n", ahc_name(ahc), fast ? "fast" : "slow", pcheck ? ", parity checking enabled" : "", large ? 64 : 32); } ahc_scbram_config(ahc, enable, pcheck, fast, large); } /* * Perform some simple tests that should catch situations where * our registers are invalidly mapped. */ int ahc_pci_test_register_access(struct ahc_softc *ahc) { int error; u_int status1; uint32_t cmd; uint8_t hcntrl; error = EIO; /* * Enable PCI error interrupt status, but suppress NMIs * generated by SERR raised due to target aborts. */ - cmd = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, + cmd = aic_pci_read_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/2); + aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd & ~PCIM_CMD_SERRESPEN, /*bytes*/2); /* * First a simple test to see if any * registers can be read. Reading * HCNTRL has no side effects and has * at least one bit that is guaranteed to * be zero so it is a good register to * use for this test. */ hcntrl = ahc_inb(ahc, HCNTRL); + if (hcntrl == 0xFF) goto fail; + if ((hcntrl & CHIPRST) != 0) { + /* + * The chip has not been initialized since + * PCI/EISA/VLB bus reset. Don't trust + * "left over BIOS data". + */ + ahc->flags |= AHC_NO_BIOS_INIT; + } + /* * Next create a situation where write combining * or read prefetching could be initiated by the * CPU or host bridge. Our device does not support * either, so look for data corruption and/or flagged * PCI errors. First pause without causing another * chip reset. */ hcntrl &= ~CHIPRST; ahc_outb(ahc, HCNTRL, hcntrl|PAUSE); while (ahc_is_paused(ahc) == 0) ; /* Clear any PCI errors that occurred before our driver attached. 
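 */

/*
 * Editorial aside (illustration only): the self-test above treats an
 * all-ones HCNTRL read as a bad register mapping (reads that no
 * device claims return 0xFF), and the SCB_BASE write/read-back that
 * follows uses 0x5aa555aa, an alternating bit pattern, so that data
 * corruption from write combining or read prefetching, which this
 * device does not support, shows up as a miscompare:
 *
 *	ahc_outl(ahc, SCB_BASE, 0x5aa555aa);
 *	if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa)
 *		goto fail;
 */

/*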
*/ - status1 = ahc_pci_read_config(ahc->dev_softc, + status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); - ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS); ahc_outb(ahc, SCBPTR, 0); ahc_outl(ahc, SCB_BASE, 0x5aa555aa); if (ahc_inl(ahc, SCB_BASE) != 0x5aa555aa) goto fail; - status1 = ahc_pci_read_config(ahc->dev_softc, + status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); if ((status1 & STA) != 0) goto fail; error = 0; fail: /* Silently clear any latched errors. */ - status1 = ahc_pci_read_config(ahc->dev_softc, + status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); - ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); + aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, cmd, /*bytes*/2); return (error); } /* * Check the external port logic for a serial eeprom * and termination/cable detection contrls. */ static void check_extport(struct ahc_softc *ahc, u_int *sxfrctl1) { struct seeprom_descriptor sd; struct seeprom_config *sc; int have_seeprom; int have_autoterm; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; sc = ahc->seep_config; /* * For some multi-channel devices, the c46 is simply too * small to work. For the other controller types, we can * get our information from either SEEPROM type. Set the * type to start our probe with accordingly. */ if (ahc->flags & AHC_LARGE_SEEPROM) sd.sd_chip = C56_66; else sd.sd_chip = C46; sd.sd_MS = SEEMS; sd.sd_RDY = SEERDY; sd.sd_CS = SEECS; sd.sd_CK = SEECK; sd.sd_DO = SEEDO; sd.sd_DI = SEEDI; have_seeprom = ahc_acquire_seeprom(ahc, &sd); if (have_seeprom) { if (bootverbose) printf("%s: Reading SEEPROM...", ahc_name(ahc)); for (;;) { u_int start_addr; start_addr = 32 * (ahc->channel - 'A'); have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc, start_addr, sizeof(*sc)/2); if (have_seeprom) have_seeprom = ahc_verify_cksum(sc); if (have_seeprom != 0 || sd.sd_chip == C56_66) { if (bootverbose) { if (have_seeprom == 0) printf ("checksum error\n"); else printf ("done.\n"); } break; } sd.sd_chip = C56_66; } ahc_release_seeprom(&sd); + + /* Remember the SEEPROM type for later */ + if (sd.sd_chip == C56_66) + ahc->flags |= AHC_LARGE_SEEPROM; } if (!have_seeprom) { /* * Pull scratch ram settings and treat them as * if they are the contents of an seeprom if * the 'ADPT' signature is found in SCB2. * We manually compose the data as 16bit values * to avoid endian issues. 
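 */

/*
 * Editorial sketch: building each 16-bit word from two byte-wide
 * reads, as the loop below does, hard-codes the byte order in the C
 * expression itself, so the result is identical on big- and
 * little-endian hosts:
 *
 *	uint16_t word;
 *
 *	word = lo_byte | (hi_byte << 8);
 *
 * (lo_byte and hi_byte stand for the two ahc_inb() results.)
 */

/*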
*/ ahc_outb(ahc, SCBPTR, 2); if (ahc_inb(ahc, SCB_BASE) == 'A' && ahc_inb(ahc, SCB_BASE + 1) == 'D' && ahc_inb(ahc, SCB_BASE + 2) == 'P' && ahc_inb(ahc, SCB_BASE + 3) == 'T') { uint16_t *sc_data; int i; sc_data = (uint16_t *)sc; for (i = 0; i < 32; i++, sc_data++) { int j; j = i * 2; *sc_data = ahc_inb(ahc, SRAM_BASE + j) | ahc_inb(ahc, SRAM_BASE + j + 1) << 8; } have_seeprom = ahc_verify_cksum(sc); if (have_seeprom) ahc->flags |= AHC_SCB_CONFIG_USED; } /* * Clear any SCB parity errors in case this data and * its associated parity was not initialized by the BIOS */ ahc_outb(ahc, CLRINT, CLRPARERR); ahc_outb(ahc, CLRINT, CLRBRKADRINT); } if (!have_seeprom) { if (bootverbose) printf("%s: No SEEPROM available.\n", ahc_name(ahc)); ahc->flags |= AHC_USEDEFAULTS; free(ahc->seep_config, M_DEVBUF); ahc->seep_config = NULL; sc = NULL; } else { ahc_parse_pci_eeprom(ahc, sc); } /* * Cards that have the external logic necessary to talk to * a SEEPROM, are almost certain to have the remaining logic * necessary for auto-termination control. This assumption * hasn't failed yet... */ have_autoterm = have_seeprom; /* * Some low-cost chips have SEEPROM and auto-term control built * in, instead of using a GAL. They can tell us directly * if the termination logic is enabled. */ if ((ahc->features & AHC_SPIOCAP) != 0) { if ((ahc_inb(ahc, SPIOCAP) & SSPIOCPS) == 0) have_autoterm = FALSE; } if (have_autoterm) { ahc->flags |= AHC_HAS_TERM_LOGIC; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, sc->adapter_control, sxfrctl1); ahc_release_seeprom(&sd); } else if (have_seeprom) { *sxfrctl1 &= ~STPWEN; if ((sc->adapter_control & CFSTERM) != 0) *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: Low byte termination %sabled\n", ahc_name(ahc), (*sxfrctl1 & STPWEN) ? "en" : "dis"); } } static void ahc_parse_pci_eeprom(struct ahc_softc *ahc, struct seeprom_config *sc) { /* * Put the data we've collected down into SRAM * where ahc_init will find it. */ int i; int max_targ = sc->max_targets & CFMAXTARG; u_int scsi_conf; uint16_t discenable; uint16_t ultraenb; discenable = 0; ultraenb = 0; if ((sc->adapter_control & CFULTRAEN) != 0) { /* * Determine if this adapter has a "newstyle" * SEEPROM format. */ for (i = 0; i < max_targ; i++) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) { ahc->flags |= AHC_NEWEEPROM_FMT; break; } } } for (i = 0; i < max_targ; i++) { u_int scsirate; uint16_t target_mask; target_mask = 0x01 << i; if (sc->device_flags[i] & CFDISC) discenable |= target_mask; if ((ahc->flags & AHC_NEWEEPROM_FMT) != 0) { if ((sc->device_flags[i] & CFSYNCHISULTRA) != 0) ultraenb |= target_mask; } else if ((sc->adapter_control & CFULTRAEN) != 0) { ultraenb |= target_mask; } if ((sc->device_flags[i] & CFXFER) == 0x04 && (ultraenb & target_mask) != 0) { /* Treat 10MHz as a non-ultra speed */ sc->device_flags[i] &= ~CFXFER; ultraenb &= ~target_mask; } if ((ahc->features & AHC_ULTRA2) != 0) { u_int offset; if (sc->device_flags[i] & CFSYNCH) offset = MAX_OFFSET_ULTRA2; else offset = 0; ahc_outb(ahc, TARG_OFFSET + i, offset); /* * The ultra enable bits contain the * high bit of the ultra2 sync rate * field. */ scsirate = (sc->device_flags[i] & CFXFER) | ((ultraenb & target_mask) ? 
0x8 : 0x0); if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } else { scsirate = (sc->device_flags[i] & CFXFER) << 4; if (sc->device_flags[i] & CFSYNCH) scsirate |= SOFS; if (sc->device_flags[i] & CFWIDEB) scsirate |= WIDEXFER; } ahc_outb(ahc, TARG_SCSIRATE + i, scsirate); } ahc->our_id = sc->brtime_id & CFSCSIID; scsi_conf = (ahc->our_id & 0x7); if (sc->adapter_control & CFSPARITY) scsi_conf |= ENSPCHK; if (sc->adapter_control & CFRESETB) scsi_conf |= RESET_SCSI; ahc->flags |= (sc->adapter_control & CFBOOTCHAN) >> CFBOOTCHANSHIFT; if (sc->bios_control & CFEXTEND) ahc->flags |= AHC_EXTENDED_TRANS_A; if (sc->bios_control & CFBIOSEN) ahc->flags |= AHC_BIOS_ENABLED; if (ahc->features & AHC_ULTRA && (ahc->flags & AHC_NEWEEPROM_FMT) == 0) { /* Should we enable Ultra mode? */ if (!(sc->adapter_control & CFULTRAEN)) /* Treat us as a non-ultra card */ ultraenb = 0; } if (sc->signature == CFSIGNATURE || sc->signature == CFSIGNATURE2) { uint32_t devconfig; /* Honor the STPWLEVEL settings */ - devconfig = ahc_pci_read_config(ahc->dev_softc, + devconfig = aic_pci_read_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4); devconfig &= ~STPWLEVEL; if ((sc->bios_control & CFSTPWLEVEL) != 0) devconfig |= STPWLEVEL; - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, + aic_pci_write_config(ahc->dev_softc, DEVCONFIG, devconfig, /*bytes*/4); } /* Set SCSICONF info */ ahc_outb(ahc, SCSICONF, scsi_conf); ahc_outb(ahc, DISC_DSB, ~(discenable & 0xff)); ahc_outb(ahc, DISC_DSB + 1, ~((discenable >> 8) & 0xff)); ahc_outb(ahc, ULTRA_ENB, ultraenb & 0xff); ahc_outb(ahc, ULTRA_ENB + 1, (ultraenb >> 8) & 0xff); } static void configure_termination(struct ahc_softc *ahc, struct seeprom_descriptor *sd, u_int adapter_control, u_int *sxfrctl1) { uint8_t brddat; brddat = 0; /* * Update the settings in sxfrctl1 to match the * termination settings */ *sxfrctl1 = 0; /* * SEECS must be on for the GALS to latch * the data properly. Be sure to leave MS * on or we will release the seeprom. */ SEEPROM_OUTB(sd, sd->sd_MS | sd->sd_CS); if ((adapter_control & CFAUTOTERM) != 0 || (ahc->features & AHC_NEW_TERMCTL) != 0) { int internal50_present; int internal68_present; int externalcable_present; int eeprom_present; int enableSEC_low; int enableSEC_high; int enablePRI_low; int enablePRI_high; int sum; enableSEC_low = 0; enableSEC_high = 0; enablePRI_low = 0; enablePRI_high = 0; if ((ahc->features & AHC_NEW_TERMCTL) != 0) { ahc_new_term_detect(ahc, &enableSEC_low, &enableSEC_high, &enablePRI_low, &enablePRI_high, &eeprom_present); if ((adapter_control & CFSEAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual SE Termination\n", ahc_name(ahc)); enableSEC_low = (adapter_control & CFSELOWTERM); enableSEC_high = (adapter_control & CFSEHIGHTERM); } if ((adapter_control & CFAUTOTERM) == 0) { if (bootverbose) printf("%s: Manual LVD Termination\n", ahc_name(ahc)); enablePRI_low = (adapter_control & CFSTERM); enablePRI_high = (adapter_control & CFWSTERM); } /* Make the table calculations below happy */ internal50_present = 0; internal68_present = 1; externalcable_present = 1; } else if ((ahc->features & AHC_SPIOCAP) != 0) { aic785X_cable_detect(ahc, &internal50_present, &externalcable_present, &eeprom_present); /* Can never support a wide connector. 
*/ internal68_present = 0; } else { aic787X_cable_detect(ahc, &internal50_present, &internal68_present, &externalcable_present, &eeprom_present); } if ((ahc->features & AHC_WIDE) == 0) internal68_present = 0; if (bootverbose && (ahc->features & AHC_ULTRA2) == 0) { printf("%s: internal 50 cable %s present", ahc_name(ahc), internal50_present ? "is":"not"); if ((ahc->features & AHC_WIDE) != 0) printf(", internal 68 cable %s present", internal68_present ? "is":"not"); printf("\n%s: external cable %s present\n", ahc_name(ahc), externalcable_present ? "is":"not"); } if (bootverbose) printf("%s: BIOS eeprom %s present\n", ahc_name(ahc), eeprom_present ? "is" : "not"); if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) { /* * The 50 pin connector is a separate bus, * so force it to always be terminated. * In the future, perform current sensing * to determine if we are in the middle of * a properly terminated bus. */ internal50_present = 0; } /* * Now set the termination based on what * we found. * Flash Enable = BRDDAT7 * Secondary High Term Enable = BRDDAT6 * Secondary Low Term Enable = BRDDAT5 (7890) * Primary High Term Enable = BRDDAT4 (7890) */ if ((ahc->features & AHC_ULTRA2) == 0 && (internal50_present != 0) && (internal68_present != 0) && (externalcable_present != 0)) { printf("%s: Illegal cable configuration!!. " "Only two connectors on the " "adapter may be used at a " "time!\n", ahc_name(ahc)); /* * Pretend there are no cables in the hope * that having all of the termination on * gives us a more stable bus. */ internal50_present = 0; internal68_present = 0; externalcable_present = 0; } if ((ahc->features & AHC_WIDE) != 0 && ((externalcable_present == 0) || (internal68_present == 0) || (enableSEC_high != 0))) { brddat |= BRDDAT6; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printf("%s: 68 pin termination " "Enabled\n", ahc_name(ahc)); else printf("%s: %sHigh byte termination " "Enabled\n", ahc_name(ahc), enableSEC_high ? "Secondary " : ""); } } sum = internal50_present + internal68_present + externalcable_present; if (sum < 2 || (enableSEC_low != 0)) { if ((ahc->features & AHC_ULTRA2) != 0) brddat |= BRDDAT5; else *sxfrctl1 |= STPWEN; if (bootverbose) { if ((ahc->flags & AHC_INT50_SPEEDFLEX) != 0) printf("%s: 50 pin termination " "Enabled\n", ahc_name(ahc)); else printf("%s: %sLow byte termination " "Enabled\n", ahc_name(ahc), enableSEC_low ? "Secondary " : ""); } } if (enablePRI_low != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: Primary Low Byte termination " "Enabled\n", ahc_name(ahc)); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. */ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if (enablePRI_high != 0) { brddat |= BRDDAT4; if (bootverbose) printf("%s: Primary High Byte " "termination Enabled\n", ahc_name(ahc)); } write_brdctl(ahc, brddat); } else { if ((adapter_control & CFSTERM) != 0) { *sxfrctl1 |= STPWEN; if (bootverbose) printf("%s: %sLow byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Primary " : ""); } if ((adapter_control & CFWSTERM) != 0 && (ahc->features & AHC_WIDE) != 0) { brddat |= BRDDAT6; if (bootverbose) printf("%s: %sHigh byte termination Enabled\n", ahc_name(ahc), (ahc->features & AHC_ULTRA2) ? "Secondary " : ""); } /* * Setup STPWEN before setting up the rest of * the termination per the tech note on the U160 cards. 
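 */

/*
 * Editorial aside on the connector arithmetic above (illustration
 * only): a SCSI bus needs termination only at its two physical ends,
 * so with three possible connectors the adapter itself sits at an
 * end exactly when fewer than two connectors are in use:
 *
 *	sum = internal50_present + internal68_present
 *	    + externalcable_present;
 *	if (sum < 2 || enableSEC_low != 0)
 *		enable low byte termination;
 *
 * which is the test the code above applies before setting BRDDAT5 or
 * STPWEN.
 */

/*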
*/ ahc_outb(ahc, SXFRCTL1, *sxfrctl1); if ((ahc->features & AHC_WIDE) != 0) write_brdctl(ahc, brddat); } SEEPROM_OUTB(sd, sd->sd_MS); /* Clear CS */ } static void ahc_new_term_detect(struct ahc_softc *ahc, int *enableSEC_low, int *enableSEC_high, int *enablePRI_low, int *enablePRI_high, int *eeprom_present) { uint8_t brdctl; /* * BRDDAT7 = Eeprom * BRDDAT6 = Enable Secondary High Byte termination * BRDDAT5 = Enable Secondary Low Byte termination * BRDDAT4 = Enable Primary high byte termination * BRDDAT3 = Enable Primary low byte termination */ brdctl = read_brdctl(ahc); *eeprom_present = brdctl & BRDDAT7; *enableSEC_high = (brdctl & BRDDAT6); *enableSEC_low = (brdctl & BRDDAT5); *enablePRI_high = (brdctl & BRDDAT4); *enablePRI_low = (brdctl & BRDDAT3); } static void aic787X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *internal68_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; /* * First read the status of our cables. * Set the rom bank to 0 since the * bank setting serves as a multiplexor * for the cable detection logic. * BRDDAT5 controls the bank switch. */ write_brdctl(ahc, 0); /* * Now read the state of the internal * connectors. BRDDAT6 is INT50 and * BRDDAT7 is INT68. */ brdctl = read_brdctl(ahc); *internal50_present = (brdctl & BRDDAT6) ? 0 : 1; *internal68_present = (brdctl & BRDDAT7) ? 0 : 1; /* * Set the rom bank to 1 and determine * the other signals. */ write_brdctl(ahc, BRDDAT5); /* * Now read the state of the external * connectors. BRDDAT6 is EXT68 and * BRDDAT7 is EPROMPS. */ brdctl = read_brdctl(ahc); *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (brdctl & BRDDAT7) ? 1 : 0; } static void aic785X_cable_detect(struct ahc_softc *ahc, int *internal50_present, int *externalcable_present, int *eeprom_present) { uint8_t brdctl; uint8_t spiocap; spiocap = ahc_inb(ahc, SPIOCAP); spiocap &= ~SOFTCMDEN; spiocap |= EXT_BRDCTL; ahc_outb(ahc, SPIOCAP, spiocap); ahc_outb(ahc, BRDCTL, BRDRW|BRDCS); ahc_flush_device_writes(ahc); - ahc_delay(500); + aic_delay(500); ahc_outb(ahc, BRDCTL, 0); ahc_flush_device_writes(ahc); - ahc_delay(500); + aic_delay(500); brdctl = ahc_inb(ahc, BRDCTL); *internal50_present = (brdctl & BRDDAT5) ? 0 : 1; *externalcable_present = (brdctl & BRDDAT6) ? 0 : 1; *eeprom_present = (ahc_inb(ahc, SPIOCAP) & EEPROM) ? 1 : 0; } int ahc_acquire_seeprom(struct ahc_softc *ahc, struct seeprom_descriptor *sd) { int wait; if ((ahc->features & AHC_SPIOCAP) != 0 && (ahc_inb(ahc, SPIOCAP) & SEEPROM) == 0) return (0); /* * Request access of the memory port. When access is * granted, SEERDY will go high. We use a 1 second * timeout which should be near 1 second more than * is needed. Reason: after the chip reset, there * should be no contention. */ SEEPROM_OUTB(sd, sd->sd_MS); wait = 1000; /* 1 second timeout in msec */ while (--wait && ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0)) { - ahc_delay(1000); /* delay 1 msec */ + aic_delay(1000); /* delay 1 msec */ } if ((SEEPROM_STATUS_INB(sd) & sd->sd_RDY) == 0) { SEEPROM_OUTB(sd, 0); return (0); } return(1); } void ahc_release_seeprom(struct seeprom_descriptor *sd) { /* Release access to the memory port and the serial EEPROM. 
*/ SEEPROM_OUTB(sd, 0); } static void write_brdctl(struct ahc_softc *ahc, uint8_t value) { uint8_t brdctl; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDSTB; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = 0; } else { brdctl = BRDSTB|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); brdctl |= value; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl |= BRDSTB_ULTRA2; else brdctl &= ~BRDSTB; ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); if ((ahc->features & AHC_ULTRA2) != 0) brdctl = 0; else brdctl &= ~BRDCS; ahc_outb(ahc, BRDCTL, brdctl); } static uint8_t read_brdctl(struct ahc_softc *ahc) { uint8_t brdctl; uint8_t value; if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { brdctl = BRDRW; if (ahc->channel == 'B') brdctl |= BRDCS; } else if ((ahc->features & AHC_ULTRA2) != 0) { brdctl = BRDRW_ULTRA2; } else { brdctl = BRDRW|BRDCS; } ahc_outb(ahc, BRDCTL, brdctl); ahc_flush_device_writes(ahc); value = ahc_inb(ahc, BRDCTL); ahc_outb(ahc, BRDCTL, 0); return (value); } static void ahc_pci_intr(struct ahc_softc *ahc) { u_int error; u_int status1; error = ahc_inb(ahc, ERROR); if ((error & PCIERRSTAT) == 0) return; - status1 = ahc_pci_read_config(ahc->dev_softc, + status1 = aic_pci_read_config(ahc->dev_softc, PCIR_STATUS + 1, /*bytes*/1); printf("%s: PCI error Interrupt at seqaddr = 0x%x\n", ahc_name(ahc), ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8)); if (status1 & DPE) { ahc->pci_target_perr_count++; printf("%s: Data Parity Error Detected during address " "or write data phase\n", ahc_name(ahc)); } if (status1 & SSE) { printf("%s: Signal System Error Detected\n", ahc_name(ahc)); } if (status1 & RMA) { printf("%s: Received a Master Abort\n", ahc_name(ahc)); } if (status1 & RTA) { printf("%s: Received a Target Abort\n", ahc_name(ahc)); } if (status1 & STA) { printf("%s: Signaled a Target Abort\n", ahc_name(ahc)); } if (status1 & DPR) { printf("%s: Data Parity Error has been reported via PERR#\n", ahc_name(ahc)); } /* Clear latched errors. 
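 */

/*
 * Editorial aside: the error bits in the PCI status register are
 * write-one-to-clear, so writing back the very value just read, as
 * done below, clears exactly the latched error flags and leaves the
 * read-only bits untouched:
 *
 *	status1 = aic_pci_read_config(dev, PCIR_STATUS + 1, 1);
 *	aic_pci_write_config(dev, PCIR_STATUS + 1, status1, 1);
 *
 * (dev stands for ahc->dev_softc.)
 */

/*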
*/ - ahc_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, + aic_pci_write_config(ahc->dev_softc, PCIR_STATUS + 1, status1, /*bytes*/1); if ((status1 & (DPE|SSE|RMA|RTA|STA|DPR)) == 0) { printf("%s: Latched PCIERR interrupt with " "no status bits set\n", ahc_name(ahc)); } else { ahc_outb(ahc, CLRINT, CLRPARERR); } if (ahc->pci_target_perr_count > AHC_PCI_TARGET_PERR_THRESH) { printf( "%s: WARNING WARNING WARNING WARNING\n" "%s: Too many PCI parity errors observed as a target.\n" "%s: Some device on this bus is generating bad parity.\n" "%s: This is an error *observed by*, not *generated by*, this controller.\n" "%s: PCI parity error checking has been disabled.\n" "%s: WARNING WARNING WARNING WARNING\n", ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc), ahc_name(ahc)); ahc->seqctl |= FAILDIS; ahc_outb(ahc, SEQCTL, ahc->seqctl); } ahc_unpause(ahc); } static int ahc_pci_chip_init(struct ahc_softc *ahc) { ahc_outb(ahc, DSCOMMAND0, ahc->bus_softc.pci_softc.dscommand0); ahc_outb(ahc, DSPCISTATUS, ahc->bus_softc.pci_softc.dspcistatus); if ((ahc->features & AHC_DT) != 0) { u_int sfunct; sfunct = ahc_inb(ahc, SFUNCT) & ~ALT_MODE; ahc_outb(ahc, SFUNCT, sfunct | ALT_MODE); ahc_outb(ahc, OPTIONMODE, ahc->bus_softc.pci_softc.optionmode); ahc_outw(ahc, TARGCRCCNT, ahc->bus_softc.pci_softc.targcrccnt); ahc_outb(ahc, SFUNCT, sfunct); ahc_outb(ahc, CRCCONTROL1, ahc->bus_softc.pci_softc.crccontrol1); } if ((ahc->features & AHC_MULTI_FUNC) != 0) ahc_outb(ahc, SCBBADDR, ahc->bus_softc.pci_softc.scbbaddr); if ((ahc->features & AHC_ULTRA2) != 0) ahc_outb(ahc, DFF_THRSH, ahc->bus_softc.pci_softc.dff_thrsh); return (ahc_chip_init(ahc)); } static int ahc_pci_suspend(struct ahc_softc *ahc) { return (ahc_suspend(ahc)); } static int ahc_pci_resume(struct ahc_softc *ahc) { - ahc_power_state_change(ahc, AHC_POWER_STATE_D0); + aic_power_state_change(ahc, AIC_POWER_STATE_D0); /* * We assume that the OS has restored our register * mappings, etc. Just update the config space registers * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. 
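 */

/*
 * Editorial note: elsewhere in this file the config-space writer is
 * called as (device, register, value, width), but the resume calls
 * below pass the width before the saved value, which appears to be a
 * latent argument swap.  With the ordering used everywhere else, the
 * restore sequence would read:
 *
 *	aic_pci_write_config(ahc->dev_softc, DEVCONFIG,
 *			     ahc->bus_softc.pci_softc.devconfig, 4);
 *	aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
 *			     ahc->bus_softc.pci_softc.command, 1);
 *	aic_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
 *			     ahc->bus_softc.pci_softc.csize_lattime, 1);
 */

/*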
*/ - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, + aic_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, ahc->bus_softc.pci_softc.devconfig); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, + aic_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, ahc->bus_softc.pci_softc.command); - ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, + aic_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, ahc->bus_softc.pci_softc.csize_lattime); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; sd.sd_ahc = ahc; sd.sd_control_offset = SEECTL; sd.sd_status_offset = SEECTL; sd.sd_dataout_offset = SEECTL; ahc_acquire_seeprom(ahc, &sd); configure_termination(ahc, &sd, ahc->seep_config->adapter_control, &sxfrctl1); ahc_release_seeprom(&sd); } return (ahc_resume(ahc)); } static int ahc_aic785X_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7850; ahc->features = AHC_AIC7850_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; - rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7860_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7860; ahc->features = AHC_AIC7860_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; - rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_apa1480_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7860_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_aic7870_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7870; ahc->features = AHC_AIC7870_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; ahc->instruction_ram_size = 512; return (0); } static int ahc_aha394X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha398X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aha494X_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7870_setup(ahc); if (error == 0) error = ahc_aha494XX_setup(ahc); return (error); } static int ahc_aic7880_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7880; ahc->features = AHC_AIC7880_FE; ahc->bugs |= AHC_TMODE_WIDEODD_BUG; - rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 1) { ahc->bugs |= AHC_PCI_2_1_RETRY_BUG; } else { ahc->bugs |= AHC_CACHETHEN_BUG|AHC_PCI_MWI_BUG; } ahc->instruction_ram_size = 512; return (0); } static int ahc_aha2940Pro_setup(struct ahc_softc *ahc) { ahc->flags |= AHC_INT50_SPEEDFLEX; return (ahc_aic7880_setup(ahc)); } static int ahc_aha394XU_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7880_setup(ahc); if (error == 0) error = 
ahc_aha394XX_setup(ahc); return (error); } static int ahc_aha398XU_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7880_setup(ahc); if (error == 0) error = ahc_aha398XX_setup(ahc); return (error); } static int ahc_aic7890_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; ahc->channel = 'A'; ahc->chip = AHC_AIC7890; ahc->features = AHC_AIC7890_FE; ahc->flags |= AHC_NEWEEPROM_FMT; - rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev == 0) ahc->bugs |= AHC_AUTOFLUSH_BUG|AHC_CACHETHEN_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7892_setup(struct ahc_softc *ahc) { ahc->channel = 'A'; ahc->chip = AHC_AIC7892; ahc->features = AHC_AIC7892_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aic7895_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; uint8_t rev; pci = ahc->dev_softc; - ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; + ahc->channel = aic_get_pci_function(pci) == 1 ? 'B' : 'A'; /* * The 'C' revision of the aic7895 has a few additional features. */ - rev = ahc_pci_read_config(pci, PCIR_REVID, /*bytes*/1); + rev = aic_pci_read_config(pci, PCIR_REVID, /*bytes*/1); if (rev >= 4) { ahc->chip = AHC_AIC7895C; ahc->features = AHC_AIC7895C_FE; } else { u_int command; ahc->chip = AHC_AIC7895; ahc->features = AHC_AIC7895_FE; /* * The BIOS disables the use of MWI transactions * since it does not have the MWI bug work around * we have. Disabling MWI reduces performance, so * turn it on again. */ - command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1); + command = aic_pci_read_config(pci, PCIR_COMMAND, /*bytes*/1); command |= PCIM_CMD_MWRICEN; - ahc_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1); + aic_pci_write_config(pci, PCIR_COMMAND, command, /*bytes*/1); ahc->bugs |= AHC_PCI_MWI_BUG; } /* * XXX Does CACHETHEN really not work??? What about PCI retry? * on C level chips. Need to test, but for now, play it safe. */ ahc->bugs |= AHC_TMODE_WIDEODD_BUG|AHC_PCI_2_1_RETRY_BUG | AHC_CACHETHEN_BUG; #if 0 uint32_t devconfig; /* * Cachesize must also be zero due to stray DAC * problem when sitting behind some bridges. */ - ahc_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1); - devconfig = ahc_pci_read_config(pci, DEVCONFIG, /*bytes*/1); + aic_pci_write_config(pci, CSIZE_LATTIME, 0, /*bytes*/1); + devconfig = aic_pci_read_config(pci, DEVCONFIG, /*bytes*/1); devconfig |= MRDCEN; - ahc_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1); + aic_pci_write_config(pci, DEVCONFIG, devconfig, /*bytes*/1); #endif ahc->flags |= AHC_NEWEEPROM_FMT; ahc->instruction_ram_size = 512; return (0); } static int ahc_aic7896_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; pci = ahc->dev_softc; - ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; + ahc->channel = aic_get_pci_function(pci) == 1 ? 'B' : 'A'; ahc->chip = AHC_AIC7896; ahc->features = AHC_AIC7896_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_CACHETHEN_DIS_BUG; ahc->instruction_ram_size = 768; return (0); } static int ahc_aic7899_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; pci = ahc->dev_softc; - ahc->channel = ahc_get_pci_function(pci) == 1 ? 'B' : 'A'; + ahc->channel = aic_get_pci_function(pci) == 1 ? 
'B' : 'A'; ahc->chip = AHC_AIC7899; ahc->features = AHC_AIC7899_FE; ahc->flags |= AHC_NEWEEPROM_FMT; ahc->bugs |= AHC_SCBCHAN_UPLOAD_BUG; ahc->instruction_ram_size = 1024; return (0); } static int ahc_aha29160C_setup(struct ahc_softc *ahc) { int error; error = ahc_aic7899_setup(ahc); if (error != 0) return (error); ahc->features |= AHC_REMOVABLE; return (0); } static int ahc_raid_setup(struct ahc_softc *ahc) { printf("RAID functionality unsupported\n"); return (ENXIO); } static int ahc_aha394XX_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; pci = ahc->dev_softc; - switch (ahc_get_pci_slot(pci)) { + switch (aic_get_pci_slot(pci)) { case AHC_394X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_394X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", - ahc_get_pci_slot(pci)); + aic_get_pci_slot(pci)); ahc->channel = 'A'; } return (0); } static int ahc_aha398XX_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; pci = ahc->dev_softc; - switch (ahc_get_pci_slot(pci)) { + switch (aic_get_pci_slot(pci)) { case AHC_398X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_398X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_398X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", - ahc_get_pci_slot(pci)); + aic_get_pci_slot(pci)); ahc->channel = 'A'; break; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); } static int ahc_aha494XX_setup(struct ahc_softc *ahc) { - ahc_dev_softc_t pci; + aic_dev_softc_t pci; pci = ahc->dev_softc; - switch (ahc_get_pci_slot(pci)) { + switch (aic_get_pci_slot(pci)) { case AHC_494X_SLOT_CHANNEL_A: ahc->channel = 'A'; break; case AHC_494X_SLOT_CHANNEL_B: ahc->channel = 'B'; break; case AHC_494X_SLOT_CHANNEL_C: ahc->channel = 'C'; break; case AHC_494X_SLOT_CHANNEL_D: ahc->channel = 'D'; break; default: printf("adapter at unexpected slot %d\n" "unable to map to a channel\n", - ahc_get_pci_slot(pci)); + aic_get_pci_slot(pci)); ahc->channel = 'A'; } ahc->flags |= AHC_LARGE_SEEPROM; return (0); }
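/*
 * Editorial recap (illustration only): supporting a new board in the
 * probe table takes three pieces, as the 2915/30LP addition in this
 * change shows: an ID constant composed in the
 * device:vendor:subdevice:subvendor order of ahc_compose_id(), a
 * table entry pairing that ID with a match mask and a description,
 * and a setup routine (an existing one, ahc_aic7892_setup, is reused
 * here):
 *
 *	#define ID_AHA_2915_30LP	0x0082900502109005ull
 *
 *	{
 *		ID_AHA_2915_30LP,
 *		ID_ALL_MASK,
 *		"Adaptec 2915/30LP Ultra160 SCSI adapter",
 *		ahc_aic7892_setup
 *	},
 */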