Index: head/sys/dev/aac/aac.c =================================================================== --- head/sys/dev/aac/aac.c (revision 85520) +++ head/sys/dev/aac/aac.c (revision 85521) @@ -1,2609 +1,2612 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001 Adaptec, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. */ #include "opt_aac.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t aac_devclass; static void aac_startup(void *arg); static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesponse *mir, int f); /* Command Processing */ static void aac_startio(struct aac_softc *sc); static void aac_timeout(struct aac_softc *sc); static int aac_start(struct aac_command *cm); static void aac_complete(void *context, int pending); static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); static void aac_bio_complete(struct aac_command *cm); static int aac_wait_command(struct aac_command *cm, int timeout); static void aac_host_command(struct aac_softc *sc); static void aac_host_response(struct aac_softc *sc); /* Command Buffer Management */ static int aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp); static void aac_release_command(struct aac_command *cm); static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_map_command(struct aac_command *cm); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_init(struct aac_softc *sc); static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp); static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, void *data, u_int16_t datasize, void *result, u_int16_t *resultsize); static 
int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm); static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr); static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib); /* StrongARM interface */ static int aac_sa_get_fwstatus(struct aac_softc *sc); static void aac_sa_qnotify(struct aac_softc *sc, int qbit); static int aac_sa_get_istatus(struct aac_softc *sc); static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_sa_get_mailboxstatus(struct aac_softc *sc); static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); struct aac_interface aac_sa_interface = { aac_sa_get_fwstatus, aac_sa_qnotify, aac_sa_get_istatus, aac_sa_clear_istatus, aac_sa_set_mailbox, aac_sa_get_mailboxstatus, aac_sa_set_interrupts }; /* i960Rx interface */ static int aac_rx_get_fwstatus(struct aac_softc *sc); static void aac_rx_qnotify(struct aac_softc *sc, int qbit); static int aac_rx_get_istatus(struct aac_softc *sc); static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rx_get_mailboxstatus(struct aac_softc *sc); static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); struct aac_interface aac_rx_interface = { aac_rx_get_fwstatus, aac_rx_qnotify, aac_rx_get_istatus, aac_rx_clear_istatus, aac_rx_set_mailbox, aac_rx_get_mailboxstatus, aac_rx_set_interrupts }; /* Debugging and Diagnostics */ static void aac_describe_controller(struct aac_softc *sc); static char *aac_describe_code(struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_close_t aac_close; static d_ioctl_t aac_ioctl; static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); #define AAC_CDEV_MAJOR 150 static struct cdevsw aac_cdevsw = { aac_open, /* open */ aac_close, /* close */ noread, /* read */ nowrite, /* write */ aac_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "aac", /* name */ AAC_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0, /* flags */ #if __FreeBSD_version < 500005 -1, /* bmaj */ #endif }; MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters"); /* * Device Interface */ /* * Initialise the controller and softc */ int aac_attach(struct aac_softc *sc) { int error, unit; debug_called(1); /* * Initialise per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); aac_initq_complete(sc); aac_initq_bio(sc); #if __FreeBSD_version >= 500005 /* * Initialise command-completion task. 
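 *
 * For orientation, a minimal sketch of the taskqueue(9) lifecycle in
 * play here (illustration only; handler_fn and arg are placeholder
 * names, the macros come from <sys/taskqueue.h>). TASK_INIT binds a
 * handler to a task, taskqueue_enqueue defers it to software-interrupt
 * context, and TASK_DESTROY is the teardown counterpart paired with it
 * in aac_free below:
 *
 *	struct task t;
 *	TASK_INIT(&t, 0, handler_fn, arg);
 *	taskqueue_enqueue(taskqueue_swi, &t);
 *	...
 *	TASK_DESTROY(&t);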
*/ TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); #endif /* disable interrupts before we enable anything */ AAC_MASK_INTERRUPTS(sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Allocate command structures. */ if ((error = aac_alloc_commands(sc)) != 0) return(error); /* * Initialise the adapter. */ if ((error = aac_init(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Register to probe our containers later. */ TAILQ_INIT(&sc->aac_container_tqh); AAC_LOCK_INIT(&sc->aac_container_lock); sc->aac_ich.ich_func = aac_startup; sc->aac_ich.ich_arg = sc; if (config_intrhook_establish(&sc->aac_ich) != 0) { device_printf(sc->aac_dev, "can't establish configuration hook\n"); return(ENXIO); } /* * Make the control device. */ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_WHEEL, 0644, "aac%d", unit); #if __FreeBSD_version > 500005 (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); #endif sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ #if __FreeBSD_version > 500005 if (kthread_create((void(*)(void *))aac_host_command, sc, &sc->aifthread, 0, "aac%daif", unit)) #else if (kthread_create((void(*)(void *))aac_host_command, sc, &sc->aifthread, "aac%daif", unit)) #endif panic("Could not create AIF thread\n"); /* Register the shutdown method to only be called post-dump */ if ((EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); return(0); } /* * Probe for containers, create disks. */ static void aac_startup(void *arg) { struct aac_softc *sc; struct aac_mntinfo mi; struct aac_mntinforesponse mir; u_int16_t rsize; int i = 0; debug_called(1); sc = (struct aac_softc *)arg; /* disconnect ourselves from the intrhook chain */ config_intrhook_disestablish(&sc->aac_ich); /* loop over possible containers */ mi.Command = VM_NameServe; mi.MntType = FT_FILESYS; do { /* request information on this container */ mi.MntCount = i; rsize = sizeof(mir); if (aac_sync_fib(sc, ContainerCommand, 0, &mi, sizeof(struct aac_mntinfo), &mir, &rsize)) { debug(2, "error probing container %d", i); continue; } /* check response size */ if (rsize != sizeof(mir)) { debug(2, "container info response wrong size " "(%d should be %d)", rsize, sizeof(mir)); continue; } aac_add_container(sc, &mir, 0); i++; } while ((i < mir.MntRespCount) && (i < AAC_MAX_CONTAINERS)); /* poke the bus to actually attach the child devices */ if (bus_generic_attach(sc->aac_dev)) device_printf(sc->aac_dev, "bus_generic_attach failed\n"); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* enable interrupts now */ AAC_UNMASK_INTERRUPTS(sc); /* enable the timeout watchdog */ timeout((timeout_t*)aac_timeout, sc, AAC_PERIODIC_INTERVAL * hz); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesponse *mir, int f) { struct aac_container *co; device_t child; /* * Check container volume type for validity. Note that many of * the possible types may never show up. 
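 *
 * (For context: the probe loop in aac_startup above asks the firmware
 * for one container per VM_NameServe FIB. A distilled restatement of
 * that handshake, not additional driver code:
 *
 *	mi.MntCount = i;
 *	aac_sync_fib(sc, ContainerCommand, 0, &mi,
 *	    sizeof(struct aac_mntinfo), &mir, &rsize);
 *
 * with mir.MntTable[0] describing slot i and mir.MntRespCount bounding
 * the loop; this function then vets mir.MntTable[0].VolType as noted.)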
*/ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { MALLOC(co, struct aac_container *, sizeof *co, M_AACBUF, M_NOWAIT); if (co == NULL) panic("Out of memory?!\n"); debug(1, "id %x name '%.16s' size %u type %d", mir->MntTable[0].ObjectId, mir->MntTable[0].FileSystemName, mir->MntTable[0].Capacity, mir->MntTable[0].VolType); if ((child = device_add_child(sc->aac_dev, NULL, -1)) == NULL) device_printf(sc->aac_dev, "device_add_child failed\n"); else device_set_ivars(child, co); device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); co->co_disk = child; co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); AAC_LOCK_AQUIRE(&sc->aac_container_lock); TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); AAC_LOCK_RELEASE(&sc->aac_container_lock); } } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void aac_free(struct aac_softc *sc) { debug_called(1); /* remove the control device */ if (sc->aac_dev_t != NULL) destroy_dev(sc->aac_dev_t); /* throw away any FIB buffers, discard the FIB DMA tag */ if (sc->aac_fibs != NULL) aac_free_commands(sc); if (sc->aac_fib_dmat) bus_dma_tag_destroy(sc->aac_fib_dmat); /* destroy the common area */ if (sc->aac_common) { bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, sc->aac_common_dmamap); } if (sc->aac_common_dmat) bus_dma_tag_destroy(sc->aac_common_dmat); /* disconnect the interrupt handler */ if (sc->aac_intr) bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); if (sc->aac_irq != NULL) bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid, sc->aac_irq); /* destroy data-transfer DMA tag */ if (sc->aac_buffer_dmat) bus_dma_tag_destroy(sc->aac_buffer_dmat); /* destroy the parent DMA tag */ if (sc->aac_parent_dmat) bus_dma_tag_destroy(sc->aac_parent_dmat); /* release the register window mapping */ if (sc->aac_regs_resource != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid, sc->aac_regs_resource); +#if __FreeBSD_version >= 500005 + TASK_DESTROY(&sc->aac_task_complete); +#endif } /* * Disconnect from the controller completely, in preparation for unload. */ int aac_detach(device_t dev) { struct aac_softc *sc; #if AAC_BROKEN int error; #endif debug_called(1); sc = device_get_softc(dev); if (sc->aac_state & AAC_STATE_OPEN) return(EBUSY); #if AAC_BROKEN if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_EXIT; wakeup(sc->aifthread); tsleep(sc->aac_dev, PUSER | PCATCH, "aacdch", 30 * hz); } if (sc->aifflags & AAC_AIFFLAGS_RUNNING) panic("Cannot shutdown AIF thread\n"); if ((error = aac_shutdown(dev))) return(error); aac_free(sc); return(0); #else return (EBUSY); #endif } /* * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach or system shutdown. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int aac_shutdown(device_t dev) { struct aac_softc *sc; struct aac_close_command cc; int s, i; debug_called(1); sc = device_get_softc(dev); s = splbio(); sc->aac_state |= AAC_STATE_SUSPEND; /* * Send a Container shutdown followed by a HostShutdown FIB to the * controller to convince it that we don't want to talk to it anymore. 
* We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); cc.Command = VM_CloseAll; cc.ContainerId = 0xffffffff; if (aac_sync_fib(sc, ContainerCommand, 0, &cc, sizeof(cc), NULL, NULL)) printf("FAILED.\n"); else { i = 0; /* * XXX Issuing this command to the controller makes it shut down * but also keeps it from coming back up without a reset of the * PCI bus. This is not desirable if you are just unloading the * driver module with the intent to reload it later. */ if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, &i, sizeof(i), NULL, NULL)) { printf("FAILED.\n"); } else { printf("done.\n"); } } AAC_MASK_INTERRUPTS(sc); splx(s); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aac_suspend(device_t dev) { struct aac_softc *sc; int s; debug_called(1); sc = device_get_softc(dev); s = splbio(); sc->aac_state |= AAC_STATE_SUSPEND; AAC_MASK_INTERRUPTS(sc); splx(s); return(0); } /* * Bring the controller back to a state ready for operation. */ int aac_resume(device_t dev) { struct aac_softc *sc; debug_called(1); sc = device_get_softc(dev); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_UNMASK_INTERRUPTS(sc); return(0); } /* * Take an interrupt. */ void aac_intr(void *arg) { struct aac_softc *sc; u_int16_t reason; debug_called(2); sc = (struct aac_softc *)arg; reason = AAC_GET_ISTATUS(sc); /* controller wants to talk to the log? Defer it to the AIF thread */ if (reason & AAC_DB_PRINTF) { AAC_CLEAR_ISTATUS(sc, AAC_DB_PRINTF); if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_PENDING; wakeup(sc->aifthread); } else aac_print_printf(sc); } /* controller has a message for us? */ if (reason & AAC_DB_COMMAND_READY) { AAC_CLEAR_ISTATUS(sc, AAC_DB_COMMAND_READY); /* XXX What happens if the thread is already awake? */ if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_PENDING; wakeup(sc->aifthread); } } /* controller has a response for us? */ if (reason & AAC_DB_RESPONSE_READY) { AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); aac_host_response(sc); } /* * spurious interrupts that we don't use - reset the mask and clear the * interrupts */ if (reason & (AAC_DB_COMMAND_NOT_FULL | AAC_DB_RESPONSE_NOT_FULL)) { AAC_UNMASK_INTERRUPTS(sc); AAC_CLEAR_ISTATUS(sc, AAC_DB_COMMAND_NOT_FULL | AAC_DB_RESPONSE_NOT_FULL); } }; /* * Command Processing */ /* * Start as much queued I/O as possible on the controller */ static void aac_startio(struct aac_softc *sc) { struct aac_command *cm; debug_called(2); for (;;) { /* * Try to get a command that's been put off for lack of * resources */ cm = aac_dequeue_ready(sc); /* * Try to build a command off the bio queue (ignore error * return) */ if (cm == NULL) aac_bio_command(sc, &cm); /* nothing to do? */ if (cm == NULL) break; /* try to give the command to the controller */ if (aac_start(cm) == EBUSY) { /* put it on the ready queue for later */ aac_requeue_ready(cm); break; } } } /* * Deliver a command to the controller; allocate controller resources at the * last moment when possible. 
*/ static int aac_start(struct aac_command *cm) { struct aac_softc *sc; int error; debug_called(2); sc = cm->cm_sc; /* get the command mapped */ aac_map_command(cm); /* fix up the address values in the FIB */ cm->cm_fib->Header.SenderFibAddress = (u_int32_t)cm->cm_fib; cm->cm_fib->Header.ReceiverFibAddress = cm->cm_fibphys; /* save a pointer to the command for speedy reverse-lookup */ cm->cm_fib->Header.SenderData = (u_int32_t)cm; /* XXX 64-bit physical * address issue */ /* put the FIB on the outbound queue */ error = aac_enqueue_fib(sc, cm->cm_queue, cm); return(error); } /* * Handle notification of one or more FIBs coming from the controller. */ static void aac_host_command(struct aac_softc *sc) { struct aac_fib *fib; u_int32_t fib_size; int size; debug_called(2); sc->aifflags |= AAC_AIFFLAGS_RUNNING; while (!(sc->aifflags & AAC_AIFFLAGS_EXIT)) { if (!(sc->aifflags & AAC_AIFFLAGS_PENDING)) tsleep(sc->aifthread, PRIBIO, "aifthd", 15 * hz); sc->aifflags &= ~AAC_AIFFLAGS_PENDING; for (;;) { if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, &fib_size, &fib)) break; /* nothing to do */ AAC_PRINT_FIB(sc, fib); switch (fib->Header.Command) { case AifRequest: aac_handle_aif(sc, fib); break; default: device_printf(sc->aac_dev, "unknown command " "from controller\n"); break; } /* Return the AIF to the controller. */ if ((fib->Header.XferState == 0) || (fib->Header.StructType != AAC_FIBTYPE_TFIB)) break; if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; *(AAC_FSAStatus*)fib->data = ST_OK; /* XXX Compute the Size field? */ size = fib->Header.Size; if (size > sizeof(struct aac_fib)) { size = sizeof(struct aac_fib); fib->Header.Size = size; } /* * Since we did not generate this command, it * cannot go through the normal * enqueue->startio chain. */ aac_enqueue_response(sc, AAC_ADAP_NORM_RESP_QUEUE, fib); } } aac_print_printf(sc); } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; wakeup(sc->aac_dev); #if __FreeBSD_version > 500005 mtx_lock(&Giant); #endif kthread_exit(0); } /* * Handle notification of one or more FIBs completed by the controller */ static void aac_host_response(struct aac_softc *sc) { struct aac_command *cm; struct aac_fib *fib; u_int32_t fib_size; debug_called(2); for (;;) { /* look for completed FIBs on our queue */ if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, &fib)) break; /* nothing to do */ /* get the command, unmap and queue for later processing */ cm = (struct aac_command *)fib->Header.SenderData; if (cm == NULL) { AAC_PRINT_FIB(sc, fib); } else { aac_remove_busy(cm); aac_unmap_command(cm); /* XXX defer? */ aac_enqueue_complete(cm); } } /* handle completion processing */ #if __FreeBSD_version >= 500005 taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete); #else aac_complete(sc, 0); #endif } /* * Process completed commands. */ static void aac_complete(void *context, int pending) { struct aac_softc *sc; struct aac_command *cm; debug_called(2); sc = (struct aac_softc *)context; /* pull completed commands off the queue */ for (;;) { cm = aac_dequeue_complete(sc); if (cm == NULL) break; cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } } /* see if we can start some more I/O */ aac_startio(sc); } /* * Handle a bio submitted from a disk device. 
*/ void aac_submit_bio(struct bio *bp) { struct aac_disk *ad; struct aac_softc *sc; debug_called(2); ad = (struct aac_disk *)bp->bio_dev->si_drv1; sc = ad->ad_controller; /* queue the BIO and try to get some work done */ aac_enqueue_bio(sc, bp); aac_startio(sc); } /* * Get a bio and build a command to go with it. */ static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; struct aac_fib *fib; struct aac_blockread *br; struct aac_blockwrite *bw; struct aac_disk *ad; struct bio *bp; debug_called(2); /* get the resources we will need */ cm = NULL; if ((bp = aac_dequeue_bio(sc)) == NULL) goto fail; if (aac_alloc_command(sc, &cm)) /* get a command */ goto fail; /* fill out the command */ cm->cm_data = (void *)bp->bio_data; cm->cm_datalen = bp->bio_bcount; cm->cm_complete = aac_bio_complete; cm->cm_private = bp; cm->cm_timestamp = time_second; cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; /* build the FIB */ fib = cm->cm_fib; fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM; fib->Header.Command = ContainerCommand; fib->Header.Size = sizeof(struct aac_fib_header); /* build the read/write request */ ad = (struct aac_disk *)bp->bio_dev->si_drv1; if (BIO_IS_READ(bp)) { br = (struct aac_blockread *)&fib->data[0]; br->Command = VM_CtBlockRead; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->BlockNumber = bp->bio_pblkno; br->ByteCount = bp->bio_bcount; fib->Header.Size += sizeof(struct aac_blockread); cm->cm_sgtable = &br->SgMap; cm->cm_flags |= AAC_CMD_DATAIN; } else { bw = (struct aac_blockwrite *)&fib->data[0]; bw->Command = VM_CtBlockWrite; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->BlockNumber = bp->bio_pblkno; bw->ByteCount = bp->bio_bcount; bw->Stable = CUNSTABLE; /* XXX what's appropriate here? */ fib->Header.Size += sizeof(struct aac_blockwrite); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = &bw->SgMap; } *cmp = cm; return(0); fail: if (bp != NULL) aac_enqueue_bio(sc, bp); if (cm != NULL) aac_release_command(cm); return(ENOMEM); } /* * Handle a bio-instigated command that has been completed. */ static void aac_bio_complete(struct aac_command *cm) { struct aac_blockread_response *brr; struct aac_blockwrite_response *bwr; struct bio *bp; AAC_FSAStatus status; /* fetch relevant status and then release the command */ bp = (struct bio *)cm->cm_private; if (BIO_IS_READ(bp)) { brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; status = brr->Status; } else { bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; status = bwr->Status; } aac_release_command(cm); /* fix up the bio based on status */ if (status == ST_OK) { bp->bio_resid = 0; } else { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; /* pass an error string out to the disk layer */ bp->bio_driver1 = aac_describe_code(aac_command_status_table, status); } aac_biodone(bp); } /* * Dump a block of data to the controller. If the queue is full, tell the * caller to hold off and wait for the queue to drain. 
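 *
 * The expected caller pattern, sketched (illustration only; va and
 * pages are placeholder arguments, and the real consumer is the disk
 * layer's crash-dump path). aac_dump_complete drains outstanding
 * responses, so a full queue eventually opens up:
 *
 *	while (aac_dump_enqueue(ad, lba, va, pages) == EBUSY)
 *		aac_dump_complete(sc);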
*/ int aac_dump_enqueue(struct aac_disk *ad, u_int32_t lba, void *data, int dumppages) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; struct aac_blockwrite *bw; sc = ad->ad_controller; cm = NULL; if (aac_alloc_command(sc, &cm)) return (EBUSY); /* fill out the command */ cm->cm_data = data; cm->cm_datalen = dumppages * PAGE_SIZE; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_timestamp = time_second; cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; /* build the FIB */ fib = cm->cm_fib; fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM; fib->Header.Command = ContainerCommand; fib->Header.Size = sizeof(struct aac_fib_header); bw = (struct aac_blockwrite *)&fib->data[0]; bw->Command = VM_CtBlockWrite; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->BlockNumber = lba; bw->ByteCount = dumppages * PAGE_SIZE; bw->Stable = CUNSTABLE; /* XXX what's appropriate here? */ fib->Header.Size += sizeof(struct aac_blockwrite); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = &bw->SgMap; return (aac_start(cm)); } /* * Wait for the card's queue to drain when dumping. Also check for monitor * printf's */ void aac_dump_complete(struct aac_softc *sc) { struct aac_fib *fib; struct aac_command *cm; u_int16_t reason; u_int32_t pi, ci, fib_size; do { reason = AAC_GET_ISTATUS(sc); if (reason & AAC_DB_RESPONSE_READY) { AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); for (;;) { if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, &fib)) break; cm = (struct aac_command *) fib->Header.SenderData; if (cm == NULL) AAC_PRINT_FIB(sc, fib); else { aac_remove_busy(cm); aac_unmap_command(cm); aac_enqueue_complete(cm); aac_release_command(cm); } } } if (reason & AAC_DB_PRINTF) { AAC_CLEAR_ISTATUS(sc, AAC_DB_PRINTF); aac_print_printf(sc); } pi = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][ AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][ AAC_CONSUMER_INDEX]; } while (ci != pi); return; } /* * Submit a command to the controller, return when it completes. */ static int aac_wait_command(struct aac_command *cm, int timeout) { int s, error = 0; debug_called(2); /* Put the command on the ready queue and get things going */ cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; aac_enqueue_ready(cm); aac_startio(cm->cm_sc); s = splbio(); while (!(cm->cm_flags & AAC_CMD_COMPLETED) && (error != EWOULDBLOCK)) { error = tsleep(cm, PRIBIO | PCATCH, "aacwait", 0); if ((error == ERESTART) || (error == EINTR)) break; } splx(s); return(error); } /* * Command Buffer Management */ /* * Allocate a command. */ static int aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; debug_called(3); if ((cm = aac_dequeue_free(sc)) == NULL) return(ENOMEM); *cmp = cm; return(0); } /* * Release a command back to the freelist. */ static void aac_release_command(struct aac_command *cm) { debug_called(3); /* (re)initialise the command/FIB */ cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Flags = 0; cm->cm_fib->Header.SenderSize = sizeof(struct aac_fib); /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialised here for debugging purposes only. 
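 *
 * Related bookkeeping, shown for reference: aac_start also stuffs the
 * command pointer itself into the header so the response path can
 * recover it without a search, which is where the 64-bit address
 * caveat flagged in aac_start comes from:
 *
 *	cm->cm_fib->Header.SenderData = (u_int32_t)cm;		(outbound)
 *	cm = (struct aac_command *)fib->Header.SenderData;	(inbound)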
*/ cm->cm_fib->Header.SenderFibAddress = (u_int32_t)cm->cm_fib; cm->cm_fib->Header.ReceiverFibAddress = cm->cm_fibphys; aac_enqueue_free(cm); } /* * Map helper for command/FIB allocation. */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; debug_called(3); sc->aac_fibphys = segs[0].ds_addr; } /* * Allocate and initialise commands/FIBs for this adapter. */ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; int i; debug_called(1); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&sc->aac_fibs, BUS_DMA_NOWAIT, &sc->aac_fibmap)) { return(ENOMEM); } bus_dmamap_load(sc->aac_fib_dmat, sc->aac_fibmap, sc->aac_fibs, AAC_FIB_COUNT * sizeof(struct aac_fib), aac_map_command_helper, sc, 0); /* initialise constant fields in the command structure */ for (i = 0; i < AAC_FIB_COUNT; i++) { cm = &sc->aac_command[i]; cm->cm_sc = sc; cm->cm_fib = sc->aac_fibs + i; cm->cm_fibphys = sc->aac_fibphys + (i * sizeof(struct aac_fib)); if (!bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) aac_release_command(cm); } return(0); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { int i; debug_called(1); for (i = 0; i < AAC_FIB_COUNT; i++) bus_dmamap_destroy(sc->aac_buffer_dmat, sc->aac_command[i].cm_datamap); bus_dmamap_unload(sc->aac_fib_dmat, sc->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, sc->aac_fibs, sc->aac_fibmap); } /* * Command-mapping helper function - populate this command's s/g table. */ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_command *cm; struct aac_fib *fib; struct aac_sg_table *sg; int i; debug_called(3); cm = (struct aac_command *)arg; fib = cm->cm_fib; /* find the s/g table */ sg = cm->cm_sgtable; /* copy into the FIB */ if (sg != NULL) { sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg * sizeof(struct aac_sg_entry); } } /* * Map a command into controller-visible space. */ static void aac_map_command(struct aac_command *cm) { struct aac_softc *sc; debug_called(2); sc = cm->cm_sc; /* don't map more than once */ if (cm->cm_flags & AAC_CMD_MAPPED) return; if (cm->cm_datalen != 0) { bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aac_map_command_sg, cm, 0); if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); } cm->cm_flags |= AAC_CMD_MAPPED; } /* * Unmap a command from controller-visible space. */ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; debug_called(2); sc = cm->cm_sc; if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialise the adapter. 
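 *
 * A note on the bus_dma(9) idiom used below: with a DMA tag limited to
 * one segment, the bus_dmamap_load callback only has to capture
 * segs[0].ds_addr. A minimal callback of that shape (map_cb is a
 * hypothetical example, not driver code):
 *
 *	static void
 *	map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}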
*/ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; debug_called(1); sc = (struct aac_softc *)arg; sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; time_t then; u_int32_t code; u_int8_t *qaddr; debug_called(1); /* * First wait for the adapter to come ready. */ then = time_second; do { code = AAC_GET_FWSTATUS(sc); if (code & AAC_SELF_TEST_FAILED) { device_printf(sc->aac_dev, "FATAL: selftest failed\n"); return(ENXIO); } if (code & AAC_KERNEL_PANIC) { device_printf(sc->aac_dev, "FATAL: controller kernel panic\n"); return(ENXIO); } if (time_second > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING)); /* * Create DMA tag for the common structure and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(struct aac_common), /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return(ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return(ENOMEM); } bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, sizeof(*sc->aac_common), aac_common_map, sc, 0); bzero(sc->aac_common, sizeof(*sc->aac_common)); /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. */ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = &sc->aac_common->ac_fibs[0]; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; ip->HostPhysMemPages = 0; /* not used? */ ip->HostElapsedSeconds = time_second; /* reset later if invalid */ /* * Initialise FIB queues. Note that it appears that the layout of the * indexes and the segmentation of the entries may be mandated by the * adapter, which is only told about the base of the queue index fields. * * The initial values of the indices are assumed to inform the adapter * of the sizes of the respective queues, and theoretically it could * work out the entire layout of the queue structures from this. We * take the easy route and just lay this area out like everyone else * does. * * The Linux driver uses a much more complex scheme whereby several * header records are kept for each queue. We use a couple of generic * list manipulation functions which 'know' the size of each list by * virtue of a table. 
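 *
 * Concretely, the seeding below applies one rule per queue: write the
 * queue's entry count into both words of its index pair, which both
 * declares the queue's size to the adapter and starts the queue out
 * empty (PI == CI). With q and ENTRIES standing in for each pair
 * listed below:
 *
 *	qt_qindex[q][AAC_PRODUCER_INDEX] = ENTRIES;
 *	qt_qindex[q][AAC_CONSUMER_INDEX] = ENTRIES;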
*/ qaddr = &sc->aac_common->ac_qbuf[0] + AAC_QUEUE_ALIGN; qaddr -= (u_int32_t)qaddr % AAC_QUEUE_ALIGN; sc->aac_queues = (struct aac_queue_table *)qaddr; ip->CommHeaderAddress = sc->aac_common_busaddr + ((u_int32_t)sc->aac_queues - (u_int32_t)sc->aac_common); bzero(sc->aac_queues, sizeof(struct aac_queue_table)); sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = &sc->aac_queues->qt_HostNormCmdQueue[0]; sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_HostHighCmdQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = &sc->aac_queues->qt_AdapNormCmdQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_AdapHighCmdQueue[0]; sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = &sc->aac_queues->qt_HostNormRespQueue[0]; sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_HostHighRespQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = &sc->aac_queues->qt_AdapNormRespQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_AdapHighRespQueue[0]; /* * Do controller-type-specific initialisation */ switch (sc->aac_hwif) { case AAC_HWIF_I960RX: AAC_SETREG4(sc, AAC_RX_ODBR, ~0); break; } /* * Give the init structure to the controller. */ if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); return(EIO); } return(0); } /* * Send a synchronous command to the controller and wait for a result. 
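 *
 * The handshake in miniature (a condensed restatement of the function
 * below, with the AAC_IMMEDIATE_TIMEOUT bound on the spin omitted):
 * load the arguments, clear any stale ack, ring the doorbell, spin
 * until the same doorbell bit re-asserts, then read back the status:
 *
 *	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
 *	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
 *	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
 *	while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND))
 *		;
 *	status = AAC_GET_MAILBOXSTATUS(sc);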
*/ static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp) { time_t then; u_int32_t status; debug_called(3); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); /* spin waiting for the command to complete */ then = time_second; do { if (time_second > (then + AAC_IMMEDIATE_TIMEOUT)) { debug(2, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOXSTATUS(sc); if (sp != NULL) *sp = status; return(0); } /* * Send a synchronous FIB to the controller and wait for a result. */ static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, void *data, u_int16_t datasize, void *result, u_int16_t *resultsize) { struct aac_fib *fib; debug_called(3); fib = &sc->aac_common->ac_sync_fib; if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = (u_int32_t)fib; fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); /* * Copy in data. */ if (data != NULL) { KASSERT(datasize <= sizeof(fib->data), ("aac_sync_fib: datasize too large")); bcopy(data, fib->data, datasize); fib->Header.XferState |= AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_NORM; } /* * Give the FIB to the controller, wait for a response. */ if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { debug(2, "IO error"); return(EIO); } /* * Copy out the result */ if (result != NULL) { u_int copysize; copysize = fib->Header.Size - sizeof(struct aac_fib_header); if (copysize > *resultsize) copysize = *resultsize; *resultsize = fib->Header.Size - sizeof(struct aac_fib_header); bcopy(fib->data, result, copysize); } return(0); } /* * Adapter-space FIB queue manipulation * * Note that the queue implementation here is a little funky; neither the PI nor * CI will ever be zero. This behaviour is a controller feature. */ static struct { int size; int notify; } aac_qinfo[] = { {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, {AAC_HOST_HIGH_CMD_ENTRIES, 0}, {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, {AAC_HOST_HIGH_RESP_ENTRIES, 0}, {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, {AAC_ADAP_HIGH_RESP_ENTRIES, 0} }; /* * Atomically insert an entry into the nominated queue, returning 0 on success or * EBUSY if the queue is full. * * Note: it would be more efficient to defer notifying the controller in * the case where we may be inserting several entries in rapid succession, * but implementing this usefully may be difficult (it would involve a * separate queue/notify interface). 
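 *
 * A worked example of the full test used below: with a queue of size
 * 8, pi == 4 and ci == 5, the check (pi + 1) == ci fires and the
 * insert is refused with EBUSY. One slot is always held in reserve so
 * a full queue (pi + 1 == ci) stays distinguishable from an empty one
 * (pi == ci); the wrap test (pi >= size resets pi to 0) runs before
 * the store of pi + 1, which is why neither stored index is ever zero.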
*/ static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) { u_int32_t pi, ci; int s, error; u_int32_t fib_size; u_int32_t fib_addr; debug_called(3); fib_size = cm->cm_fib->Header.Size; fib_addr = cm->cm_fib->Header.ReceiverFibAddress; s = splbio(); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* * To avoid a race with its completion interrupt, place this command on * the busy queue prior to advertising it to the controller. */ aac_enqueue_busy(cm); /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: splx(s); return(error); } /* * Atomically remove one entry from the nominated queue, returning 0 on * success or ENOENT if the queue is empty. */ static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr) { u_int32_t pi, ci; int s, error; int notify; debug_called(3); s = splbio(); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* check for queue empty */ if (ci == pi) { error = ENOENT; goto out; } notify = 0; if (ci == pi + 1) notify++; /* wrap the queue? */ if (ci >= aac_qinfo[queue].size) ci = 0; /* fetch the entry */ *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; *fib_addr = (struct aac_fib *)(sc->aac_qentries[queue] + ci)->aq_fib_addr; /* update consumer index */ sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; /* if we have made the queue un-full, notify the adapter */ if (notify && (aac_qinfo[queue].notify != 0)) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: splx(s); return(error); } /* * Put our response to an Adapter Initiated Fib on the response queue */ static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) { u_int32_t pi, ci; int s, error; u_int32_t fib_size; u_int32_t fib_addr; debug_called(1); /* Tell the adapter where the FIB is */ fib_size = fib->Header.Size; fib_addr = fib->Header.SenderFibAddress; fib->Header.ReceiverFibAddress = fib_addr; s = splbio(); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: splx(s); return(error); } /* * Check for commands that have been outstanding for a suspiciously long time, * and complain about them. 
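 *
 * The sweep below is plain arithmetic: with AAC_CMD_TIMEOUT at, say,
 * 30 seconds and time_second at 1000, deadline is 970 and any busy
 * command stamped earlier than that is reported and its FIB dumped.
 * Note that the once-only guard on AAC_CMD_TIMEDOUT is currently
 * commented out, so a stuck command is reported on every sweep.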
*/ static void aac_timeout(struct aac_softc *sc) { int s; struct aac_command *cm; time_t deadline; #if 0 /* simulate an interrupt to handle possibly-missed interrupts */ /* * XXX This was done to work around another bug which has since been * fixed. It is dangerous anyways because you don't want multiple * threads in the interrupt handler at the same time! If calling * is deemed necessary in the future, proper mutexes must be used. */ s = splbio(); aac_intr(sc); splx(s); /* kick the I/O queue to restart it in the case of deadlock */ aac_startio(sc); #endif /* * traverse the busy command list, bitch about late commands once * only. */ deadline = time_second - AAC_CMD_TIMEOUT; s = splbio(); TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { if ((cm->cm_timestamp < deadline) /* && !(cm->cm_flags & AAC_CMD_TIMEDOUT) */) { cm->cm_flags |= AAC_CMD_TIMEDOUT; device_printf(sc->aac_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_second-cm->cm_timestamp)); AAC_PRINT_FIB(sc, cm->cm_fib); } } splx(s); /* reset the timer for next time */ timeout((timeout_t*)aac_timeout, sc, AAC_PERIODIC_INTERVAL * hz); return; } /* * Interface Function Vectors */ /* * Read the current firmware status word. */ static int aac_sa_get_fwstatus(struct aac_softc *sc) { debug_called(3); return(AAC_GETREG4(sc, AAC_SA_FWSTATUS)); } static int aac_rx_get_fwstatus(struct aac_softc *sc) { debug_called(3); return(AAC_GETREG4(sc, AAC_RX_FWSTATUS)); } /* * Notify the controller of a change in a given queue */ static void aac_sa_qnotify(struct aac_softc *sc, int qbit) { debug_called(3); AAC_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); } static void aac_rx_qnotify(struct aac_softc *sc, int qbit) { debug_called(3); AAC_SETREG4(sc, AAC_RX_IDBR, qbit); } /* * Get the interrupt reason bits */ static int aac_sa_get_istatus(struct aac_softc *sc) { debug_called(3); return(AAC_GETREG2(sc, AAC_SA_DOORBELL0)); } static int aac_rx_get_istatus(struct aac_softc *sc) { debug_called(3); return(AAC_GETREG4(sc, AAC_RX_ODBR)); } /* * Clear some interrupt reason bits */ static void aac_sa_clear_istatus(struct aac_softc *sc, int mask) { debug_called(3); AAC_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); } static void aac_rx_clear_istatus(struct aac_softc *sc, int mask) { debug_called(3); AAC_SETREG4(sc, AAC_RX_ODBR, mask); } /* * Populate the mailbox and set the command word */ static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { debug_called(4); AAC_SETREG4(sc, AAC_SA_MAILBOX, command); AAC_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); AAC_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); AAC_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); AAC_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); } static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { debug_called(4); AAC_SETREG4(sc, AAC_RX_MAILBOX, command); AAC_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); AAC_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); AAC_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); AAC_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_sa_get_mailboxstatus(struct aac_softc *sc) { debug_called(4); return(AAC_GETREG4(sc, AAC_SA_MAILBOX)); } static int aac_rx_get_mailboxstatus(struct aac_softc *sc) { debug_called(4); return(AAC_GETREG4(sc, AAC_RX_MAILBOX)); } /* * Set/clear interrupt masks */ static void aac_sa_set_interrupts(struct aac_softc *sc, int enable) { debug(2, "%sable interrupts", enable ? 
"en" : "dis"); if (enable) { AAC_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); } else { AAC_SETREG2((sc), AAC_SA_MASK0_SET, ~0); } } static void aac_rx_set_interrupts(struct aac_softc *sc, int enable) { debug(2, "%sable interrupts", enable ? "en" : "dis"); if (enable) { AAC_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_SETREG4(sc, AAC_RX_OIMR, ~0); } } /* * Debugging and Diagnostics */ /* * Print some information about the controller. */ static void aac_describe_controller(struct aac_softc *sc) { u_int8_t buf[AAC_FIB_DATASIZE]; /* XXX really a bit big * for the stack */ u_int16_t bufsize; struct aac_adapter_info *info; u_int8_t arg; debug_called(2); arg = 0; bufsize = sizeof(buf); if (aac_sync_fib(sc, RequestAdapterInfo, 0, &arg, sizeof(arg), &buf, &bufsize)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return; } if (bufsize != sizeof(*info)) { device_printf(sc->aac_dev, "RequestAdapterInfo returned wrong data size " "(%d != %d)\n", bufsize, sizeof(*info)); /*return;*/ } info = (struct aac_adapter_info *)&buf[0]; device_printf(sc->aac_dev, "%s %dMHz, %dMB cache memory, %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->BufferMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); /* save the kernel revision structure for later use */ sc->aac_revision = info->KernelRevision; device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); } /* * Look up a text description of a numeric error code and return a pointer to * same. */ static char * aac_describe_code(struct aac_code_lookup *table, u_int32_t code) { int i; for (i = 0; table[i].string != NULL; i++) if (table[i].code == code) return(table[i].string); return(table[i + 1].string); } /* * Management Interface */ static int aac_open(dev_t dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; debug_called(2); sc = dev->si_drv1; /* Check to make sure the device isn't already open */ if (sc->aac_state & AAC_STATE_OPEN) { return EBUSY; } sc->aac_state |= AAC_STATE_OPEN; return 0; } static int aac_close(dev_t dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; debug_called(2); sc = dev->si_drv1; /* Mark this unit as no longer open */ sc->aac_state &= ~AAC_STATE_OPEN; return 0; } static int aac_ioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union aac_statrequest *as; struct aac_softc *sc; int error = 0; int i; debug_called(2); as = (union aac_statrequest *)arg; sc = dev->si_drv1; switch (cmd) { case AACIO_STATS: switch (as->as_item) { case AACQ_FREE: case AACQ_BIO: case AACQ_READY: case AACQ_BUSY: case AACQ_COMPLETE: bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, sizeof(struct aac_qstat)); break; default: error = ENOENT; break; } break; case FSACTL_SENDFIB: arg = *(caddr_t*)arg; case FSACTL_LNX_SENDFIB: debug(1, "FSACTL_SENDFIB"); error = aac_ioctl_sendfib(sc, arg); break; case FSACTL_AIF_THREAD: case FSACTL_LNX_AIF_THREAD: debug(1, "FSACTL_AIF_THREAD"); error = EINVAL; break; case FSACTL_OPEN_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: debug(1, "FSACTL_OPEN_GET_ADAPTER_FIB"); /* * Pass the caller out an AdapterFibContext. * * Note that because we only support one opener, we * basically ignore this. 
Set the caller's context to a magic * number just in case. * * The Linux code hands the driver a pointer into kernel space, * and then trusts it when the caller hands it back. Aiee! * Here, we give it the proc pointer of the per-adapter aif * thread. It's only used as a sanity check in other calls. */ i = (int)sc->aifthread; error = copyout(&i, arg, sizeof(i)); break; case FSACTL_GET_NEXT_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: debug(1, "FSACTL_GET_NEXT_ADAPTER_FIB"); error = aac_getnext_aif(sc, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: debug(1, "FSACTL_CLOSE_GET_ADAPTER_FIB"); /* don't do anything here */ break; case FSACTL_MINIPORT_REV_CHECK: arg = *(caddr_t*)arg; case FSACTL_LNX_MINIPORT_REV_CHECK: debug(1, "FSACTL_MINIPORT_REV_CHECK"); error = aac_rev_check(sc, arg); break; case FSACTL_QUERY_DISK: arg = *(caddr_t*)arg; case FSACTL_LNX_QUERY_DISK: debug(1, "FSACTL_QUERY_DISK"); error = aac_query_disk(sc, arg); break; case FSACTL_DELETE_DISK: case FSACTL_LNX_DELETE_DISK: /* * We don't trust the userland to tell us when to delete a * container; rather, we rely on an AIF coming from the * controller */ error = 0; break; default: device_printf(sc->aac_dev, "unsupported cmd 0x%lx\n", cmd); error = EINVAL; break; } return(error); } /* * Send a FIB supplied from userspace */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; debug_called(2); cm = NULL; /* * Get a command */ if (aac_alloc_command(sc, &cm)) { error = EBUSY; goto out; } /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sizeof(struct aac_fib)) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sizeof(struct aac_fib)); size = sizeof(struct aac_fib); } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_second; /* * Pass the FIB to the controller, wait for it to complete. */ if ((error = aac_wait_command(cm, 30)) != 0) /* XXX user timeout? */ goto out; /* * Copy the FIB and data back out to the caller. */ size = cm->cm_fib->Header.Size; if (size > sizeof(struct aac_fib)) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sizeof(struct aac_fib)); size = sizeof(struct aac_fib); } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { aac_release_command(cm); } return(error); } /* * Handle an AIF sent to us by the controller; queue it for later reference. * If the queue fills up, then drop the older entries. */ static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) { struct aac_aif_command *aif; struct aac_container *co, *co_next; struct aac_mntinfo mi; struct aac_mntinforesponse mir; u_int16_t rsize; int next, s, found; int added = 0, i = 0; debug_called(2); aif = (struct aac_aif_command*)&fib->data[0]; aac_print_aif(sc, aif); /* Is it an event that we should care about? */ switch (aif->command) { case AifCmdEventNotify: switch (aif->data.EN.type) { case AifEnAddContainer: case AifEnDeleteContainer: /* * A container was added or deleted, but the message * doesn't tell us anything else! Re-enumerate the * containers and sort things out. */ mi.Command = VM_NameServe; mi.MntType = FT_FILESYS; do { /* * Ask the controller for its containers one at * a time. 
* XXX What if the controller's list changes * midway through this enumeration? * XXX This should be done async. */ mi.MntCount = i; rsize = sizeof(mir); if (aac_sync_fib(sc, ContainerCommand, 0, &mi, sizeof(mi), &mir, &rsize)) { debug(2, "Error probing container %d\n", i); continue; } if (rsize != sizeof(mir)) { debug(2, "Container response size too " "large\n"); continue; } /* * Check the container against our list. * co->co_found was already set to 0 in a * previous run. */ if ((mir.Status == ST_OK) && (mir.MntTable[0].VolType != CT_NONE)) { found = 0; TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == mir.MntTable[0].ObjectId) { co->co_found = 1; found = 1; break; } } /* * If the container matched, continue * in the list. */ if (found) { i++; continue; } /* * This is a new container. Do all the * appropriate things to set it up. */ aac_add_container(sc, &mir, 1); added = 1; } i++; } while ((i < mir.MntRespCount) && (i < AAC_MAX_CONTAINERS)); /* * Go through our list of containers and see which ones * were not marked 'found'. Since the controller didn't * list them they must have been deleted. Do the * appropriate steps to destroy the device. Also reset * the co->co_found field. */ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { device_delete_child(sc->aac_dev, co->co_disk); co_next = TAILQ_NEXT(co, co_link); AAC_LOCK_AQUIRE(&sc-> aac_container_lock); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); AAC_LOCK_RELEASE(&sc-> aac_container_lock); FREE(co, M_AACBUF); co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (added) bus_generic_attach(sc->aac_dev); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ s = splbio(); next = (sc->aac_aifq_head + 1) % AAC_AIFQ_LENGTH; if (next != sc->aac_aifq_tail) { bcopy(aif, &sc->aac_aifq[next], sizeof(struct aac_aif_command)); sc->aac_aifq_head = next; if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); } splx(s); return; } /* * Linux Management Interface * This is soon to be removed! */ #ifdef AAC_COMPAT_LINUX #include #include #include #include /* There are multiple ioctl number ranges that need to be handled */ #define AAC_LINUX_IOCTL_MIN 0x0000 #define AAC_LINUX_IOCTL_MAX 0x21ff static linux_ioctl_function_t aac_linux_ioctl; static struct linux_ioctl_handler aac_handler = {aac_linux_ioctl, AAC_LINUX_IOCTL_MIN, AAC_LINUX_IOCTL_MAX}; SYSINIT(aac_register, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &aac_handler); SYSUNINIT(aac_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &aac_handler); MODULE_DEPEND(aac, linux, 1, 1, 1); static int aac_linux_ioctl(struct thread *td, struct linux_ioctl_args *args) { struct file *fp; u_long cmd; debug_called(2); fp = td->td_proc->p_fd->fd_ofiles[args->fd]; cmd = args->cmd; /* * Pass the ioctl off to our standard handler. */ return(fo_ioctl(fp, cmd, (caddr_t)args->arg, td)); } #endif /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. 
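 *
 * A hypothetical userland invocation, for reference. Note the extra
 * indirection in aac_ioctl above: the ioctl argument is a pointer to
 * the user buffer pointer, which is dereferenced before this function
 * sees it, and the same buffer is rewritten with a struct
 * aac_rev_check_resp on the way out, so it must be large enough for
 * either structure:
 *
 *	union {
 *		struct aac_rev_check req;
 *		struct aac_rev_check_resp resp;
 *	} u;
 *	caddr_t p = (caddr_t)&u;
 *
 *	u.req.callingRevision.buildNumber = 0;
 *	ioctl(fd, FSACTL_MINIPORT_REV_CHECK, &p);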
*/ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; debug_called(2); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } debug(2, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. */ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.ul = sc->aac_revision.external.ul; rev_check_resp.adapterSWRevision.buildNumber = sc->aac_revision.buildNumber; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; int error, s; debug_called(2); if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { /* * Check the magic number that we gave the caller. */ if (agf.AdapterFibContext != (int)sc->aifthread) { error = EFAULT; } else { s = splbio(); error = aac_return_aif(sc, agf.AifFib); if ((error == EAGAIN) && (agf.Wait)) { sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); if (error == 0) error = aac_return_aif(sc, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } splx(s); } } return(error); } /* * Hand the next AIF off the top of the queue out to userspace. */ static int aac_return_aif(struct aac_softc *sc, caddr_t uptr) { int error, s; debug_called(2); s = splbio(); if (sc->aac_aifq_tail == sc->aac_aifq_head) { error = EAGAIN; } else { error = copyout(&sc->aac_aifq[sc->aac_aifq_tail], uptr, sizeof(struct aac_aif_command)); if (error) printf("aac_return_aif: copyout returned %d\n", error); if (!error) sc->aac_aifq_tail = (sc->aac_aifq_tail + 1) % AAC_AIFQ_LENGTH; } splx(s); return(error); } /* * Give the userland some information about the container. The AAC arch * expects the driver to be a SCSI passthrough type driver, so it expects * the containers to have b:t:l numbers. Fake it. */ static int aac_query_disk(struct aac_softc *sc, caddr_t uptr) { struct aac_query_disk query_disk; struct aac_container *co; struct aac_disk *disk; int error, id; debug_called(2); disk = NULL; error = copyin(uptr, (caddr_t)&query_disk, sizeof(struct aac_query_disk)); if (error) return (error); id = query_disk.ContainerNumber; if (id == -1) return (EINVAL); AAC_LOCK_AQUIRE(&sc->aac_container_lock); TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == id) break; } if (co == NULL) { query_disk.Valid = 0; query_disk.Locked = 0; query_disk.Deleted = 1; /* XXX is this right? */ } else { disk = device_get_softc(co->co_disk); query_disk.Valid = 1; query_disk.Locked = (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0; query_disk.Deleted = 0; query_disk.Bus = 0; query_disk.Target = disk->unit; query_disk.Lun = 0; query_disk.UnMapped = 0; bcopy(disk->ad_dev_t->si_name, &query_disk.diskDeviceName[0], 10); } AAC_LOCK_RELEASE(&sc->aac_container_lock); error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); return (error); } Index: head/sys/dev/acpica/Osd/OsdSchedule.c =================================================================== --- head/sys/dev/acpica/Osd/OsdSchedule.c (revision 85520) +++ head/sys/dev/acpica/Osd/OsdSchedule.c (revision 85521) @@ -1,157 +1,158 @@ /*- * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * 6.3 : Scheduling services */ #include "acpi.h" #include #include #include #include #include #define _COMPONENT ACPI_OS_SERVICES MODULE_NAME("SCHEDULE") /* * This is a little complicated due to the fact that we need to build and then * free a 'struct task' for each task we enqueue. * * We use the default taskqueue_swi queue, since it really doesn't matter what * else we're queued along with. */ MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task"); static void AcpiOsExecuteQueue(void *arg, int pending); struct acpi_task { struct task at_task; OSD_EXECUTION_CALLBACK at_function; void *at_context; }; ACPI_STATUS AcpiOsQueueForExecution(UINT32 Priority, OSD_EXECUTION_CALLBACK Function, void *Context) { struct acpi_task *at; int pri; FUNCTION_TRACE(__func__); if (Function == NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); at = malloc(sizeof(*at), M_ACPITASK, M_NOWAIT); /* Interrupt Context */ if (at == NULL) return_ACPI_STATUS(AE_NO_MEMORY); bzero(at, sizeof(*at)); at->at_function = Function; at->at_context = Context; switch (Priority) { case OSD_PRIORITY_GPE: pri = 4; break; case OSD_PRIORITY_HIGH: pri = 3; break; case OSD_PRIORITY_MED: pri = 2; break; case OSD_PRIORITY_LO: pri = 1; break; default: free(at, M_ACPITASK); return_ACPI_STATUS(AE_BAD_PARAMETER); } TASK_INIT(&at->at_task, pri, AcpiOsExecuteQueue, at); taskqueue_enqueue(taskqueue_swi, (struct task *)at); return_ACPI_STATUS(AE_OK); } static void AcpiOsExecuteQueue(void *arg, int pending) { struct acpi_task *at = (struct acpi_task *)arg; OSD_EXECUTION_CALLBACK Function; void *Context; FUNCTION_TRACE(__func__); Function = (OSD_EXECUTION_CALLBACK)at->at_function; Context = at->at_context; + TASK_DESTROY(at); free(at, M_ACPITASK); Function(Context); return_VOID; } /* * We don't have any sleep granularity better than hz, so * make do with that. 
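 *
 * The tick arithmetic truncates: with hz = 100, a 5ms request gives
 * (5 * 100) / 1000 == 0 ticks, which is why the computed timeout is
 * clamped to a minimum of one tick:
 *
 *	timo = (Seconds * hz) + Milliseconds * hz / 1000;
 *	if (timo == 0)
 *		timo = 1;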
*/ void AcpiOsSleep (UINT32 Seconds, UINT32 Milliseconds) { int timo; static int dummy; FUNCTION_TRACE(__func__); timo = (Seconds * hz) + Milliseconds * hz / 1000; if (timo == 0) timo = 1; tsleep(&dummy, 0, "acpislp", timo); return_VOID; } void AcpiOsStall (UINT32 Microseconds) { FUNCTION_TRACE(__func__); DELAY(Microseconds); return_VOID; } UINT32 AcpiOsGetThreadId (void) { /* XXX do not add FUNCTION_TRACE here, results in recursive call */ KASSERT(curproc != NULL, (__func__ ": curproc is NULL!")); return(curproc->p_pid + 1); /* can't return 0 */ } Index: head/sys/dev/amr/amr.c =================================================================== --- head/sys/dev/amr/amr.c (revision 85520) +++ head/sys/dev/amr/amr.c (revision 85521) @@ -1,1641 +1,1645 @@ /*- * Copyright (c) 1999,2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for the AMI MegaRaid family of controllers. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define AMR_DEFINE_TABLES #include #define AMR_CDEV_MAJOR 132 static d_open_t amr_open; static d_close_t amr_close; static d_ioctl_t amr_ioctl; static struct cdevsw amr_cdevsw = { /* open */ amr_open, /* close */ amr_close, /* read */ noread, /* write */ nowrite, /* ioctl */ amr_ioctl, /* poll */ nopoll, /* mmap */ nommap, /* strategy */ nostrategy, /* name */ "amr", /* maj */ AMR_CDEV_MAJOR, /* dump */ nodump, /* psize */ nopsize, /* flags */ 0, }; /* * Initialisation, bus interface. */ static void amr_startup(void *arg); /* * Command wrappers */ static int amr_query_controller(struct amr_softc *sc); static void *amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual); static void amr_completeio(struct amr_command *ac); /* * Command buffer allocation. */ static void amr_alloccmd_cluster(struct amr_softc *sc); static void amr_freecmd_cluster(struct amr_command_cluster *acc); /* * Command processing. 
*/ static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp); static int amr_wait_command(struct amr_command *ac); static int amr_poll_command(struct amr_command *ac); static int amr_getslot(struct amr_command *ac); static void amr_mapcmd(struct amr_command *ac); static void amr_unmapcmd(struct amr_command *ac); static int amr_start(struct amr_command *ac); static void amr_complete(void *context, int pending); /* * Status monitoring */ static void amr_periodic(void *data); /* * Interface-specific shims */ static int amr_quartz_submit_command(struct amr_softc *sc); static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); static int amr_std_submit_command(struct amr_softc *sc); static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave); static void amr_std_attach_mailbox(struct amr_softc *sc); #ifdef AMR_BOARD_INIT static int amr_quartz_init(struct amr_softc *sc); static int amr_std_init(struct amr_softc *sc); #endif /* * Debugging */ static void amr_describe_controller(struct amr_softc *sc); #ifdef AMR_DEBUG static void amr_printcommand(struct amr_command *ac); #endif /******************************************************************************** ******************************************************************************** Inline Glue ******************************************************************************** ********************************************************************************/ /******************************************************************************** ******************************************************************************** Public Interfaces ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Initialise the controller and softc. */ int amr_attach(struct amr_softc *sc) { debug_called(1); /* * Initialise per-controller queues. */ TAILQ_INIT(&sc->amr_completed); TAILQ_INIT(&sc->amr_freecmds); TAILQ_INIT(&sc->amr_cmd_clusters); TAILQ_INIT(&sc->amr_ready); bioq_init(&sc->amr_bioq); #if __FreeBSD_version >= 500005 /* * Initialise command-completion task. */ TASK_INIT(&sc->amr_task_complete, 0, amr_complete, sc); #endif debug(2, "queue init done"); /* * Configure for this controller type. */ if (AMR_IS_QUARTZ(sc)) { sc->amr_submit_command = amr_quartz_submit_command; sc->amr_get_work = amr_quartz_get_work; } else { sc->amr_submit_command = amr_std_submit_command; sc->amr_get_work = amr_std_get_work; amr_std_attach_mailbox(sc); } #ifdef AMR_BOARD_INIT if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))) return(ENXIO); #endif /* * Quiz controller for features and limits. */ if (amr_query_controller(sc)) return(ENXIO); debug(2, "controller query complete"); #ifdef AMR_SCSI_PASSTHROUGH /* * Attach our 'real' SCSI channels to CAM. */ if (amr_cam_attach(sc)) return(ENXIO); debug(2, "CAM attach done"); #endif /* * Create the control device. */ sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev)); sc->amr_dev_t->si_drv1 = sc; /* * Schedule ourselves to bring the controller up once interrupts are * available.
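 *
 * The intr_config_hook contract as used here: ich_func runs once
 * interrupts are available and must remove itself, i.e.
 *
 *	sc->amr_ich.ich_func = amr_startup;
 *	sc->amr_ich.ich_arg = sc;
 *	config_intrhook_establish(&sc->amr_ich);
 *
 * and amr_startup() begins by calling config_intrhook_disestablish().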
*/ bzero(&sc->amr_ich, sizeof(struct intr_config_hook)); sc->amr_ich.ich_func = amr_startup; sc->amr_ich.ich_arg = sc; if (config_intrhook_establish(&sc->amr_ich) != 0) { device_printf(sc->amr_dev, "can't establish configuration hook\n"); return(ENOMEM); } /* * Print a little information about the controller. */ amr_describe_controller(sc); debug(2, "attach complete"); return(0); } /******************************************************************************** * Locate disk resources and attach children to them. */ static void amr_startup(void *arg) { struct amr_softc *sc = (struct amr_softc *)arg; struct amr_logdrive *dr; int i, error; debug_called(1); /* pull ourselves off the intrhook chain */ config_intrhook_disestablish(&sc->amr_ich); /* get up-to-date drive information */ if (amr_query_controller(sc)) { device_printf(sc->amr_dev, "can't scan controller for drives\n"); return; } /* iterate over available drives */ for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) { /* are we already attached to this drive? */ if (dr->al_disk == 0) { /* generate geometry information */ if (dr->al_size > 0x200000) { /* extended translation? */ dr->al_heads = 255; dr->al_sectors = 63; } else { dr->al_heads = 64; dr->al_sectors = 32; } dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors); dr->al_disk = device_add_child(sc->amr_dev, NULL, -1); if (dr->al_disk == 0) device_printf(sc->amr_dev, "device_add_child failed\n"); device_set_ivars(dr->al_disk, dr); } } if ((error = bus_generic_attach(sc->amr_dev)) != 0) device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error); /* mark controller back up */ sc->amr_state &= ~AMR_STATE_SHUTDOWN; /* interrupts will be enabled before we do anything more */ sc->amr_state |= AMR_STATE_INTEN; /* * Start the timeout routine. */ /* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/ return; } /******************************************************************************* * Free resources associated with a controller instance */ void amr_free(struct amr_softc *sc) { struct amr_command_cluster *acc; #ifdef AMR_SCSI_PASSTHROUGH /* detach from CAM */ amr_cam_detach(sc); #endif /* cancel status timeout */ untimeout(amr_periodic, sc, sc->amr_timeout); /* throw away any command buffers */ while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) { TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link); amr_freecmd_cluster(acc); } + +#if __FreeBSD_version >= 500005 + TASK_DESTROY(&sc->amr_task_complete); +#endif } /******************************************************************************* * Receive a bio structure from a child device and queue it on a particular * disk resource, then poke the disk resource to start as much work as it can. */ int amr_submit_bio(struct amr_softc *sc, struct bio *bio) { debug_called(2); amr_enqueue_bio(sc, bio); amr_startio(sc); return(0); } /******************************************************************************** * Accept an open operation on the control device. */ int amr_open(dev_t dev, int flags, int fmt, struct thread *td) { int unit = minor(dev); struct amr_softc *sc = devclass_get_softc(amr_devclass, unit); debug_called(1); sc->amr_state |= AMR_STATE_OPEN; return(0); } /******************************************************************************** * Accept the last close on the control device. 
*/ int amr_close(dev_t dev, int flags, int fmt, struct thread *td) { int unit = minor(dev); struct amr_softc *sc = devclass_get_softc(amr_devclass, unit); debug_called(1); sc->amr_state &= ~AMR_STATE_OPEN; return (0); } /******************************************************************************** * Handle controller-specific control operations. */ int amr_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct amr_softc *sc = (struct amr_softc *)dev->si_drv1; int *arg = (int *)addr; struct amr_user_ioctl *au = (struct amr_user_ioctl *)addr; struct amr_command *ac; struct amr_mailbox_ioctl *mbi; struct amr_passthrough *ap; void *dp; int error; debug_called(1); error = 0; dp = NULL; ap = NULL; ac = NULL; switch(cmd) { case AMR_IO_VERSION: debug(1, "AMR_IO_VERSION"); *arg = AMR_IO_VERSION_NUMBER; break; case AMR_IO_COMMAND: debug(1, "AMR_IO_COMMAND 0x%x", au->au_cmd[0]); /* handle inbound data buffer */ if (au->au_length != 0) { if ((dp = malloc(au->au_length, M_DEVBUF, M_WAITOK)) == NULL) { error = ENOMEM; break; } if ((error = copyin(au->au_buffer, dp, au->au_length)) != 0) break; debug(2, "copyin %ld bytes from %p -> %p", au->au_length, au->au_buffer, dp); } if ((ac = amr_alloccmd(sc)) == NULL) { error = ENOMEM; break; } /* handle SCSI passthrough command */ if (au->au_cmd[0] == AMR_CMD_PASS) { if ((ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO)) == NULL) { error = ENOMEM; break; } /* copy cdb */ ap->ap_cdb_length = au->au_cmd[2]; bcopy(&au->au_cmd[3], &ap->ap_cdb[0], ap->ap_cdb_length); /* build passthrough */ ap->ap_timeout = au->au_cmd[ap->ap_cdb_length + 3] & 0x07; ap->ap_ars = (au->au_cmd[ap->ap_cdb_length + 3] & 0x08) ? 1 : 0; ap->ap_islogical = (au->au_cmd[ap->ap_cdb_length + 3] & 0x80) ? 1 : 0; ap->ap_logical_drive_no = au->au_cmd[ap->ap_cdb_length + 4]; ap->ap_channel = au->au_cmd[ap->ap_cdb_length + 5]; ap->ap_scsi_id = au->au_cmd[ap->ap_cdb_length + 6]; ap->ap_request_sense_length = 14; /* XXX what about the request-sense area? does the caller want it? 
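 * (As written, ap_request_sense_length is fixed at 14 and no sense
 * data is ever copied back out to the caller, so apparently not yet.)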
*/ /* build command */ ac->ac_data = ap; ac->ac_length = sizeof(*ap); ac->ac_flags |= AMR_CMD_DATAOUT; ac->ac_ccb_data = dp; ac->ac_ccb_length = au->au_length; if (au->au_direction & AMR_IO_READ) ac->ac_flags |= AMR_CMD_CCB_DATAIN; if (au->au_direction & AMR_IO_WRITE) ac->ac_flags |= AMR_CMD_CCB_DATAOUT; ac->ac_mailbox.mb_command = AMR_CMD_PASS; } else { /* direct command to controller */ mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox; /* copy pertinent mailbox items */ mbi->mb_command = au->au_cmd[0]; mbi->mb_channel = au->au_cmd[1]; mbi->mb_param = au->au_cmd[2]; mbi->mb_pad[0] = au->au_cmd[3]; mbi->mb_drive = au->au_cmd[4]; /* build the command */ ac->ac_data = dp; ac->ac_length = au->au_length; if (au->au_direction & AMR_IO_READ) ac->ac_flags |= AMR_CMD_DATAIN; if (au->au_direction & AMR_IO_WRITE) ac->ac_flags |= AMR_CMD_DATAOUT; } /* run the command */ if ((error = amr_wait_command(ac)) != 0) break; /* copy out data and set status */ if (au->au_length != 0) error = copyout(dp, au->au_buffer, au->au_length); debug(2, "copyout %ld bytes from %p -> %p", au->au_length, dp, au->au_buffer); if (dp != NULL) debug(2, "%16D", dp, " "); au->au_status = ac->ac_status; break; default: debug(1, "unknown ioctl 0x%lx", cmd); error = ENOIOCTL; break; } if (dp != NULL) free(dp, M_DEVBUF); if (ap != NULL) free(ap, M_DEVBUF); if (ac != NULL) amr_releasecmd(ac); return(error); } /******************************************************************************** ******************************************************************************** Status Monitoring ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Perform a periodic check of the controller status */ static void amr_periodic(void *data) { struct amr_softc *sc = (struct amr_softc *)data; debug_called(2); /* XXX perform periodic status checks here */ /* compensate for missed interrupts */ amr_done(sc); /* reschedule */ sc->amr_timeout = timeout(amr_periodic, sc, hz); } /******************************************************************************** ******************************************************************************** Command Wrappers ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Interrogate the controller for the operational parameters we require. */ static int amr_query_controller(struct amr_softc *sc) { struct amr_enquiry3 *aex; struct amr_prodinfo *ap; struct amr_enquiry *ae; int ldrv; /* * If we haven't found the real limit yet, let us have a couple of commands in * order to be able to probe. */ if (sc->amr_maxio == 0) sc->amr_maxio = 2; /* * Try to issue an ENQUIRY3 command */ if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) { /* * Fetch current state of logical drives. 
*/ for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) { sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv]; sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv]; sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv]; debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size, sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties); } free(aex, M_DEVBUF); /* * Get product info for channel count. */ if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) { device_printf(sc->amr_dev, "can't obtain product data from controller\n"); return(1); } sc->amr_maxdrives = 40; sc->amr_maxchan = ap->ap_nschan; sc->amr_maxio = ap->ap_maxio; sc->amr_type |= AMR_TYPE_40LD; free(ap, M_DEVBUF); } else { /* failed, try the 8LD ENQUIRY commands */ if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0)) == NULL) { if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0)) == NULL) { device_printf(sc->amr_dev, "can't obtain configuration data from controller\n"); return(1); } ae->ae_signature = 0; } /* * Fetch current state of logical drives. */ for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) { sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv]; sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv]; sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv]; debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size, sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties); } sc->amr_maxdrives = 8; sc->amr_maxchan = ae->ae_adapter.aa_channels; sc->amr_maxio = ae->ae_adapter.aa_maxio; free(ae, M_DEVBUF); } /* * Mark remaining drives as unused. */ for (; ldrv < AMR_MAXLD; ldrv++) sc->amr_drive[ldrv].al_size = 0xffffffff; /* * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust * the controller's reported value, and lockups have been seen when we do. */ sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD); return(0); } /******************************************************************************** * Run a generic enquiry-style command. */ static void * amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual) { struct amr_command *ac; void *result; u_int8_t *mbox; int error; debug_called(1); error = 1; result = NULL; /* get ourselves a command buffer */ if ((ac = amr_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(bufsize, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; /* point the command at our data */ ac->ac_data = result; ac->ac_length = bufsize; /* build the command proper */ mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */ mbox[0] = cmd; mbox[2] = cmdsub; mbox[3] = cmdqual; /* can't assume that interrupts are going to work here, so play it safe */ if (amr_poll_command(ac)) goto out; error = ac->ac_status; out: if (ac != NULL) amr_releasecmd(ac); if ((error != 0) && (result != NULL)) { free(result, M_DEVBUF); result = NULL; } return(result); } /******************************************************************************** * Flush the controller's internal cache, return status. 
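 *
 * This is intended for shutdown-time use, when interrupts may already
 * be dead, hence the polled amr_poll_command() below rather than
 * amr_wait_command(); a typical caller (illustrative only, not in this
 * file) is just:
 *
 *	if (amr_flush(sc))
 *		printf("amr: cache flush failed\n");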
*/ int amr_flush(struct amr_softc *sc) { struct amr_command *ac; int error; /* get ourselves a command buffer */ error = 1; if ((ac = amr_alloccmd(sc)) == NULL) goto out; /* set command flags */ ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; /* build the command proper */ ac->ac_mailbox.mb_command = AMR_CMD_FLUSH; /* we have to poll, as the system may be going down or otherwise damaged */ if (amr_poll_command(ac)) goto out; error = ac->ac_status; out: if (ac != NULL) amr_releasecmd(ac); return(error); } /******************************************************************************** * Try to find I/O work for the controller from one or more of the work queues. * * We make the assumption that if the controller is not ready to take a command * at some given time, it will generate an interrupt at some later time when * it is. */ void amr_startio(struct amr_softc *sc) { struct amr_command *ac; /* spin until something prevents us from doing any work */ for (;;) { /* try to get a ready command */ ac = amr_dequeue_ready(sc); /* if that failed, build a command from a bio */ if (ac == NULL) (void)amr_bio_command(sc, &ac); #ifdef AMR_SCSI_PASSTHROUGH /* if that failed, build a command from a ccb */ if (ac == NULL) (void)amr_cam_command(sc, &ac); #endif /* if we don't have anything to do, give up */ if (ac == NULL) break; /* try to give the command to the controller; if this fails save it for later and give up */ if (amr_start(ac)) { debug(2, "controller busy, command deferred"); amr_requeue_ready(ac); /* XXX schedule retry very soon? */ break; } } } /******************************************************************************** * Handle completion of an I/O command. */ static void amr_completeio(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */ ac->ac_bio->bio_error = EIO; ac->ac_bio->bio_flags |= BIO_ERROR; device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status); /* amr_printcommand(ac);*/ } amrd_intr(ac->ac_bio); amr_releasecmd(ac); } /******************************************************************************** ******************************************************************************** Command Processing ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Convert a bio off the top of the bio queue into a command. 
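 *
 * The transfer length is rounded up to whole controller blocks with the
 * usual idiom (see below):
 *
 *	blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
 *
 * so with 512-byte blocks a 513-byte request maps to 2 blocks.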
*/ static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp) { struct amr_command *ac; struct amrd_softc *amrd; struct bio *bio; int error; int blkcount; int driveno; int cmd; ac = NULL; error = 0; /* get a bio to work on */ if ((bio = amr_dequeue_bio(sc)) == NULL) goto out; /* get a command */ if ((ac = amr_alloccmd(sc)) == NULL) { error = ENOMEM; goto out; } /* connect the bio to the command */ ac->ac_complete = amr_completeio; ac->ac_bio = bio; ac->ac_data = bio->bio_data; ac->ac_length = bio->bio_bcount; if (BIO_IS_READ(bio)) { ac->ac_flags |= AMR_CMD_DATAIN; cmd = AMR_CMD_LREAD; } else { ac->ac_flags |= AMR_CMD_DATAOUT; cmd = AMR_CMD_LWRITE; } amrd = (struct amrd_softc *)bio->bio_dev->si_drv1; driveno = amrd->amrd_drive - sc->amr_drive; blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE; ac->ac_mailbox.mb_command = cmd; ac->ac_mailbox.mb_blkcount = blkcount; ac->ac_mailbox.mb_lba = bio->bio_pblkno; ac->ac_mailbox.mb_drive = driveno; /* we fill in the s/g related data when the command is mapped */ if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) device_printf(sc->amr_dev, "I/O beyond end of unit (%u,%d > %u)\n", bio->bio_pblkno, blkcount, sc->amr_drive[driveno].al_size); out: if (error != 0) { if (ac != NULL) amr_releasecmd(ac); if (bio != NULL) /* this breaks ordering... */ amr_enqueue_bio(sc, bio); } *acp = ac; return(error); } /******************************************************************************** * Take a command, submit it to the controller and sleep until it completes * or fails. Interrupts must be enabled, returns nonzero on error. */ static int amr_wait_command(struct amr_command *ac) { int error, count; debug_called(1); ac->ac_complete = NULL; ac->ac_flags |= AMR_CMD_SLEEP; if ((error = amr_start(ac)) != 0) return(error); count = 0; /* XXX better timeout? */ while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 30)) { tsleep(ac, PRIBIO | PCATCH, "amrwcmd", hz); } return(0); } /******************************************************************************** * Take a command, submit it to the controller and busy-wait for it to return. * Returns nonzero on error. Can be safely called with interrupts enabled. */ static int amr_poll_command(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int error, count; debug_called(2); ac->ac_complete = NULL; if ((error = amr_start(ac)) != 0) return(error); count = 0; do { /* * Poll for completion, although the interrupt handler may beat us to it. * Note that the timeout here is somewhat arbitrary. */ amr_done(sc); DELAY(1000); } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000)); if (!(ac->ac_flags & AMR_CMD_BUSY)) { error = 0; } else { /* XXX the slot is now marked permanently busy */ error = EIO; device_printf(sc->amr_dev, "polled command timeout\n"); } return(error); } /******************************************************************************** * Get a free command slot for a command if it doesn't already have one. * * May be safely called multiple times for a given command. */ static int amr_getslot(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int s, slot, limit, error; debug_called(3); /* if the command already has a slot, don't try to give it another one */ if (ac->ac_slot != 0) return(0); /* enforce slot usage limit */ limit = (ac->ac_flags & AMR_CMD_PRIORITY) ? sc->amr_maxio : sc->amr_maxio - 4; if (sc->amr_busyslots > limit) return(EBUSY); /* * Allocate a slot.
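 * A cheaper alternative to the linear scan flagged below would be a
 * free-slot bitmap searched with ffs(); a sketch only, in which
 * 'amr_slotmap' is a hypothetical field (one word covers 32 slots),
 * not part of this softc:
 *
 *	slot = ffs(~sc->amr_slotmap) - 1;	- lowest clear bit
 *	if ((slot >= 0) && (slot < sc->amr_maxio))
 *		sc->amr_slotmap |= 1 << slot;
 *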
XXX linear scan is slow */ error = EBUSY; s = splbio(); for (slot = 0; slot < sc->amr_maxio; slot++) { if (sc->amr_busycmd[slot] == NULL) { sc->amr_busycmd[slot] = ac; sc->amr_busyslots++; ac->ac_slot = slot; error = 0; break; } } splx(s); return(error); } /******************************************************************************** * Map/unmap (ac)'s data in the controller's addressable space as required. * * These functions may be safely called multiple times on a given command. */ static void amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct amr_command *ac = (struct amr_command *)arg; struct amr_softc *sc = ac->ac_sc; struct amr_sgentry *sg; int i; u_int8_t *sgc; debug_called(3); /* get base address of s/g table */ sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); /* save data physical address */ ac->ac_dataphys = segs[0].ds_addr; /* for AMR_CMD_CONFIG the s/g count goes elsewhere */ if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG) { sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param); } else { sgc = &ac->ac_mailbox.mb_nsgelem; } /* decide whether we need to populate the s/g table */ if (nsegments < 2) { *sgc = 0; ac->ac_mailbox.mb_physaddr = ac->ac_dataphys; } else { *sgc = nsegments; ac->ac_mailbox.mb_physaddr = sc->amr_sgbusaddr + (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry)); for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; } } } static void amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct amr_command *ac = (struct amr_command *)arg; struct amr_softc *sc = ac->ac_sc; struct amr_sgentry *sg; struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data; int i; /* get base address of s/g table */ sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); /* save s/g table information in passthrough */ ap->ap_no_sg_elements = nsegments; ap->ap_data_transfer_address = sc->amr_sgbusaddr + (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry)); /* save pointer to passthrough in command XXX is this already done above? 
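 *
 * (Passthrough commands are mapped in two stages: amr_setup_dmamap()
 * maps the amr_passthrough structure itself, then this callback maps
 * the caller's data buffer and rewrites the same s/g table, as the
 * comment below notes.)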
*/ ac->ac_mailbox.mb_physaddr = ac->ac_dataphys; debug(3, "slot %d %d segments at 0x%x, passthrough at 0x%x", ac->ac_slot, ap->ap_no_sg_elements, ap->ap_data_transfer_address, ac->ac_dataphys); /* populate s/g table (overwrites previous call which mapped the passthrough) */ for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count); } } static void amr_mapcmd(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; debug_called(3); /* if the command involves data at all, and hasn't been mapped */ if (!(ac->ac_flags & AMR_CMD_MAPPED)) { if (ac->ac_data != NULL) { /* map the data buffers into bus space and build the s/g list */ bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_dmamap, ac->ac_data, ac->ac_length, amr_setup_dmamap, ac, 0); if (ac->ac_flags & AMR_CMD_DATAIN) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, BUS_DMASYNC_PREREAD); if (ac->ac_flags & AMR_CMD_DATAOUT) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, BUS_DMASYNC_PREWRITE); } if (ac->ac_ccb_data != NULL) { bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccbmap, ac, 0); if (ac->ac_flags & AMR_CMD_CCB_DATAIN) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, BUS_DMASYNC_PREREAD); if (ac->ac_flags & AMR_CMD_CCB_DATAOUT) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, BUS_DMASYNC_PREWRITE); } ac->ac_flags |= AMR_CMD_MAPPED; } } static void amr_unmapcmd(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; debug_called(3); /* if the command involved data at all and was mapped */ if (ac->ac_flags & AMR_CMD_MAPPED) { if (ac->ac_data != NULL) { if (ac->ac_flags & AMR_CMD_DATAIN) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, BUS_DMASYNC_POSTREAD); if (ac->ac_flags & AMR_CMD_DATAOUT) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap); } if (ac->ac_ccb_data != NULL) { if (ac->ac_flags & AMR_CMD_CCB_DATAIN) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, BUS_DMASYNC_POSTREAD); if (ac->ac_flags & AMR_CMD_CCB_DATAOUT) bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap); } ac->ac_flags &= ~AMR_CMD_MAPPED; } } /******************************************************************************** * Take a command and give it to the controller, returns 0 if successful, or * EBUSY if the command should be retried later. */ static int amr_start(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; int done, s, i; debug_called(3); /* mark command as busy so that polling consumer can tell */ ac->ac_flags |= AMR_CMD_BUSY; /* get a command slot (freed in amr_done) */ if (amr_getslot(ac)) return(EBUSY); /* now we have a slot, we can map the command (unmapped in amr_complete) */ amr_mapcmd(ac); /* mark the new mailbox we are going to copy in as busy */ ac->ac_mailbox.mb_busy = 1; /* clear the poll/ack fields in the mailbox */ sc->amr_mailbox->mb_poll = 0; sc->amr_mailbox->mb_ack = 0; /* * Save the slot number so that we can locate this command when complete. * Note that ident = 0 seems to be special, so we don't use it. */ ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* * Spin waiting for the mailbox, give up after ~1 second. We expect the * controller to be able to handle our I/O. * * XXX perhaps we should wait for less time, and count on the deferred command * handling to deal with retries? 
*/ debug(4, "wait for mailbox"); for (i = 10000, done = 0; (i > 0) && !done; i--) { s = splbio(); /* is the mailbox free? */ if (sc->amr_mailbox->mb_busy == 0) { debug(4, "got mailbox"); sc->amr_mailbox64->mb64_segment = 0; bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE); done = 1; /* not free, spin waiting */ } else { debug(4, "busy flag %x\n", sc->amr_mailbox->mb_busy); /* this is somewhat ugly */ DELAY(100); } splx(s); /* drop spl to allow completion interrupts */ } /* * Now give the command to the controller */ if (done) { if (sc->amr_submit_command(sc)) { /* the controller wasn't ready to take the command, forget that we tried to post it */ sc->amr_mailbox->mb_busy = 0; return(EBUSY); } debug(3, "posted command"); return(0); } /* * The controller wouldn't take the command. Return the command as busy * so that it is retried later. */ return(EBUSY); } /******************************************************************************** * Extract one or more completed commands from the controller (sc) * * Returns nonzero if any commands on the work queue were marked as completed. */ int amr_done(struct amr_softc *sc) { struct amr_command *ac; struct amr_mailbox mbox; int i, idx, result; debug_called(3); /* See if there's anything for us to do */ result = 0; /* loop collecting completed commands */ for (;;) { /* poll for a completed command's identifier and status */ if (sc->amr_get_work(sc, &mbox)) { result = 1; /* iterate over completed commands in this result */ for (i = 0; i < mbox.mb_nstatus; i++) { /* get pointer to busy command */ idx = mbox.mb_completed[i] - 1; ac = sc->amr_busycmd[idx]; /* really a busy command? */ if (ac != NULL) { /* pull the command from the busy index */ sc->amr_busycmd[idx] = NULL; sc->amr_busyslots--; /* save status for later use */ ac->ac_status = mbox.mb_status; amr_enqueue_completed(ac); debug(3, "completed command with status %x", mbox.mb_status); } else { device_printf(sc->amr_dev, "bad slot %d completed\n", idx); } } } else { break; /* no work */ } } /* if we've completed any commands, try posting some more */ if (result) amr_startio(sc); /* handle completion and timeouts */ #if __FreeBSD_version >= 500005 if (sc->amr_state & AMR_STATE_INTEN) taskqueue_enqueue(taskqueue_swi, &sc->amr_task_complete); else #endif amr_complete(sc, 0); return(result); } /******************************************************************************** * Do completion processing on done commands on (sc) */ static void amr_complete(void *context, int pending) { struct amr_softc *sc = (struct amr_softc *)context; struct amr_command *ac; debug_called(3); /* pull completed commands off the queue */ for (;;) { ac = amr_dequeue_completed(sc); if (ac == NULL) break; /* unmap the command's data buffer */ amr_unmapcmd(ac); /* unbusy the command */ ac->ac_flags &= ~AMR_CMD_BUSY; /* * Is there a completion handler? */ if (ac->ac_complete != NULL) { ac->ac_complete(ac); /* * Is someone sleeping on this one? */ } else if (ac->ac_flags & AMR_CMD_SLEEP) { wakeup(ac); } } } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Get a new command buffer. 
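 *
 * Allocation falls back from the free list to carving a fresh cluster:
 *
 *	ac = amr_dequeue_free(sc);
 *	if (ac == NULL) {
 *		amr_alloccmd_cluster(sc);	- may fail quietly (M_NOWAIT)
 *		ac = amr_dequeue_free(sc);
 *	}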
* * This may return NULL in low-memory cases. * * If possible, we recycle a command buffer that's been used before. */ struct amr_command * amr_alloccmd(struct amr_softc *sc) { struct amr_command *ac; debug_called(3); ac = amr_dequeue_free(sc); if (ac == NULL) { amr_alloccmd_cluster(sc); ac = amr_dequeue_free(sc); } if (ac == NULL) return(NULL); /* clear out significant fields */ ac->ac_slot = 0; ac->ac_status = 0; bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox)); ac->ac_flags = 0; ac->ac_bio = NULL; ac->ac_data = NULL; ac->ac_ccb_data = NULL; ac->ac_complete = NULL; return(ac); } /******************************************************************************** * Release a command buffer for recycling. */ void amr_releasecmd(struct amr_command *ac) { debug_called(3); amr_enqueue_free(ac); } /******************************************************************************** * Allocate a new command cluster and initialise it. */ void amr_alloccmd_cluster(struct amr_softc *sc) { struct amr_command_cluster *acc; struct amr_command *ac; int s, i; acc = malloc(AMR_CMD_CLUSTERSIZE, M_DEVBUF, M_NOWAIT); if (acc != NULL) { s = splbio(); TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link); splx(s); for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { ac = &acc->acc_command[i]; bzero(ac, sizeof(*ac)); ac->ac_sc = sc; if (!bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) && !bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap)) amr_releasecmd(ac); } } } /******************************************************************************** * Free a command cluster */ void amr_freecmd_cluster(struct amr_command_cluster *acc) { struct amr_softc *sc = acc->acc_command[0].ac_sc; int i; for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap); free(acc, M_DEVBUF); } /******************************************************************************** ******************************************************************************** Interface-specific Shims ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Tell the controller that the mailbox contains a valid command */ static int amr_quartz_submit_command(struct amr_softc *sc) { debug_called(3); if (AMR_QGET_IDB(sc) & AMR_QIDB_SUBMIT) return(EBUSY); AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT); return(0); } static int amr_std_submit_command(struct amr_softc *sc) { debug_called(3); if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) return(EBUSY); AMR_SPOST_COMMAND(sc); return(0); } /******************************************************************************** * Claim any work that the controller has completed; acknowledge completion, * save details of the completion in (mbsave) */ static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) { int s, worked; u_int32_t outd; debug_called(3); worked = 0; s = splbio(); /* work waiting for us? 
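 *
 * The completion handshake below is: read the outbound doorbell,
 * snapshot the mailbox (it holds the list of completed idents), write
 * AMR_QODB_READY back to acknowledge the interrupt, then raise
 * AMR_QIDB_ACK to tell the controller its mailbox may be reused.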
*/ if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) { /* save mailbox, which contains a list of completed commands */ bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave)); /* acknowledge interrupt */ AMR_QPUT_ODB(sc, AMR_QODB_READY); /* acknowledge that we have the commands */ AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK); #ifndef AMR_QUARTZ_GOFASTER /* * This waits for the controller to notice that we've taken the * command from it. It's very inefficient, and we shouldn't do it, * but if we remove this code, we stop completing commands under * load. * * Peter J says we shouldn't do this. The documentation says we * should. Who is right? */ while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) ; /* XXX aiee! what if it dies? */ #endif worked = 1; /* got some work */ } splx(s); return(worked); } static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) { int s, worked; u_int8_t istat; debug_called(3); worked = 0; s = splbio(); /* check for valid interrupt status */ istat = AMR_SGET_ISTAT(sc); if ((istat & AMR_SINTR_VALID) != 0) { AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */ /* save mailbox, which contains a list of completed commands */ bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave)); AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */ worked = 1; } splx(s); return(worked); } /******************************************************************************** * Notify the controller of the mailbox location. */ static void amr_std_attach_mailbox(struct amr_softc *sc) { /* program the mailbox physical address */ AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff); AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR); /* clear any outstanding interrupt and enable interrupts proper */ AMR_SACK_INTERRUPT(sc); AMR_SENABLE_INTR(sc); } #ifdef AMR_BOARD_INIT /******************************************************************************** * Initialise the controller */ static int amr_quartz_init(struct amr_softc *sc) { int status, ostatus; device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc)); AMR_QRESET(sc); ostatus = 0xff; while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) { if (status != ostatus) { device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status)); ostatus = status; } switch (status) { case AMR_QINIT_NOMEM: return(ENOMEM); case AMR_QINIT_SCAN: /* XXX we could print channel/target here */ break; } } return(0); } static int amr_std_init(struct amr_softc *sc) { int status, ostatus; device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc)); AMR_SRESET(sc); ostatus = 0xff; while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) { if (status != ostatus) { device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status)); ostatus = status; } switch (status) { case AMR_SINIT_NOMEM: return(ENOMEM); case AMR_SINIT_INPROG: /* XXX we could print channel/target here? 
*/ break; } } return(0); } #endif /******************************************************************************** ******************************************************************************** Debugging ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Identify the controller and print some information about it. */ static void amr_describe_controller(struct amr_softc *sc) { struct amr_prodinfo *ap; struct amr_enquiry *ae; char *prod; /* * Try to get 40LD product info, which tells us what the card is labelled as. */ if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) != NULL) { device_printf(sc->amr_dev, "<%.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n", ap->ap_product, ap->ap_firmware, ap->ap_bios, ap->ap_memsize); free(ap, M_DEVBUF); return; } /* * Try 8LD extended ENQUIRY to get controller signature, and use lookup table. */ if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0)) != NULL) { prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature); } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0)) != NULL) { /* * Try to work it out based on the PCI signatures. */ switch (pci_get_device(sc->amr_dev)) { case 0x9010: prod = "Series 428"; break; case 0x9060: prod = "Series 434"; break; default: prod = "unknown controller"; break; } } else { prod = "unsupported controller"; } /* * HP NetRaid controllers have a special encoding of the firmware and * BIOS versions. The AMI version seems to have it as strings whereas * the HP version does it with a leading uppercase character and two * binary numbers. 
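 *
 * For example, HP-style firmware bytes {0x02, 0x09, 'D'} print as
 * "D.09.02" via the %c.%02d.%02d format below, while AMI firmware is a
 * printable string such as "3.00". (Note that this test dereferences
 * 'ae', which is NULL on the "unsupported controller" path above.)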
*/ if(ae->ae_adapter.aa_firmware[2] >= 'A' && ae->ae_adapter.aa_firmware[2] <= 'Z' && ae->ae_adapter.aa_firmware[1] < ' ' && ae->ae_adapter.aa_firmware[0] < ' ' && ae->ae_adapter.aa_bios[2] >= 'A' && ae->ae_adapter.aa_bios[2] <= 'Z' && ae->ae_adapter.aa_bios[1] < ' ' && ae->ae_adapter.aa_bios[0] < ' ') { /* this looks like we have an HP NetRaid version of the MegaRaid */ if(ae->ae_signature == AMR_SIG_438) { /* the AMI 438 is an NetRaid 3si in HP-land */ prod = "HP NetRaid 3si"; } device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n", prod, ae->ae_adapter.aa_firmware[2], ae->ae_adapter.aa_firmware[1], ae->ae_adapter.aa_firmware[0], ae->ae_adapter.aa_bios[2], ae->ae_adapter.aa_bios[1], ae->ae_adapter.aa_bios[0], ae->ae_adapter.aa_memorysize); } else { device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n", prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios, ae->ae_adapter.aa_memorysize); } free(ae, M_DEVBUF); } #ifdef AMR_DEBUG /******************************************************************************** * Print the command (ac) in human-readable format */ static void amr_printcommand(struct amr_command *ac) { struct amr_softc *sc = ac->ac_sc; struct amr_sgentry *sg; int i; device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n", ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive); device_printf(sc->amr_dev, "blkcount %d lba %d\n", ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba); device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length); device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n", ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem); device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio); /* get base address of s/g table */ sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++) device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count); } #endif Index: head/sys/dev/mly/mly.c =================================================================== --- head/sys/dev/mly/mly.c (revision 85520) +++ head/sys/dev/mly/mly.c (revision 85521) @@ -1,2942 +1,2946 @@ /*- * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mly_probe(device_t dev); static int mly_attach(device_t dev); static int mly_pci_attach(struct mly_softc *sc); static int mly_detach(device_t dev); static int mly_shutdown(device_t dev); static void mly_intr(void *arg); static int mly_sg_map(struct mly_softc *sc); static void mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_mmbox_map(struct mly_softc *sc); static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void mly_free(struct mly_softc *sc); static int mly_get_controllerinfo(struct mly_softc *sc); static void mly_scan_devices(struct mly_softc *sc); static void mly_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_complete_rescan(struct mly_command *mc); static int mly_get_eventstatus(struct mly_softc *sc); static int mly_enable_mmbox(struct mly_softc *sc); static int mly_flush(struct mly_softc *sc); static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length); static void mly_check_event(struct mly_softc *sc); static void mly_fetch_event(struct mly_softc *sc); static void mly_complete_event(struct mly_command *mc); static void mly_process_event(struct mly_softc *sc, struct mly_event *me); static void mly_periodic(void *data); static int mly_immediate_command(struct mly_command *mc); static int mly_start(struct mly_command *mc); static void mly_done(struct mly_softc *sc); static void mly_complete(void *context, int pending); static int mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp); static void mly_release_command(struct mly_command *mc); static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int mly_alloc_commands(struct mly_softc *sc); static void mly_release_commands(struct mly_softc *sc); static void mly_map_command(struct mly_command *mc); static void mly_unmap_command(struct mly_command *mc); static int mly_cam_attach(struct mly_softc *sc); static void mly_cam_detach(struct mly_softc *sc); static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target); static void mly_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb); static void mly_cam_action(struct cam_sim *sim, union ccb *ccb); static int mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static void mly_cam_poll(struct cam_sim *sim); static void mly_cam_complete(struct mly_command *mc); static struct cam_periph *mly_find_periph(struct mly_softc *sc, int bus, int target); static int mly_name_device(struct mly_softc *sc, int bus, int target); static int mly_fwhandshake(struct mly_softc *sc); static void mly_describe_controller(struct mly_softc *sc); #ifdef MLY_DEBUG static void mly_printstate(struct mly_softc *sc); static void mly_print_command(struct mly_command *mc); static void mly_print_packet(struct mly_command *mc); static void mly_panic(struct mly_softc *sc, char *reason); #endif void mly_print_controller(int controller); static d_open_t mly_user_open; static d_close_t mly_user_close; static d_ioctl_t mly_user_ioctl; static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); static int mly_user_health(struct mly_softc *sc, 
struct mly_user_health *uh); static device_method_t mly_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mly_probe), DEVMETHOD(device_attach, mly_attach), DEVMETHOD(device_detach, mly_detach), DEVMETHOD(device_shutdown, mly_shutdown), { 0, 0 } }; static driver_t mly_pci_driver = { "mly", mly_methods, sizeof(struct mly_softc) }; static devclass_t mly_devclass; DRIVER_MODULE(mly, pci, mly_pci_driver, mly_devclass, 0, 0); #define MLY_CDEV_MAJOR 158 static struct cdevsw mly_cdevsw = { mly_user_open, mly_user_close, noread, nowrite, mly_user_ioctl, nopoll, nommap, nostrategy, "mly", MLY_CDEV_MAJOR, nodump, nopsize, 0 }; /******************************************************************************** ******************************************************************************** Device Interface ******************************************************************************** ********************************************************************************/ static struct mly_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } mly_identifiers[] = { {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 2000"}, {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM, "Mylex eXtremeRAID 3000"}, {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX, "Mylex AcceleRAID 352"}, {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX, "Mylex AcceleRAID 170"}, {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX, "Mylex AcceleRAID 160"}, {0, 0, 0, 0, 0, 0} }; /******************************************************************************** * Compare the provided PCI device with the list we support. */ static int mly_probe(device_t dev) { struct mly_ident *m; debug_called(1); for (m = mly_identifiers; m->vendor != 0; m++) { if ((m->vendor == pci_get_vendor(dev)) && (m->device == pci_get_device(dev)) && ((m->subvendor == 0) || ((m->subvendor == pci_get_subvendor(dev)) && (m->subdevice == pci_get_subdevice(dev))))) { device_set_desc(dev, m->desc); #ifdef MLY_MODULE return(-5); #else return(-10); /* allow room to be overridden */ #endif } } return(ENXIO); } /******************************************************************************** * Initialise the controller and softc */ int mly_attach(device_t dev) { struct mly_softc *sc = device_get_softc(dev); int error; debug_called(1); sc->mly_dev = dev; #ifdef MLY_DEBUG if (device_get_unit(sc->mly_dev) == 0) mly_softc0 = sc; #endif /* * Do PCI-specific initialisation. */ if ((error = mly_pci_attach(sc)) != 0) goto out; /* * Initialise per-controller queues. */ mly_initq_free(sc); mly_initq_busy(sc); mly_initq_complete(sc); #if __FreeBSD_version >= 500005 /* * Initialise command-completion task. */ TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc); #endif /* disable interrupts before we start talking to the controller */ MLY_MASK_INTERRUPTS(sc); /* * Wait for the controller to come ready, handshake with the firmware if required. * This is typically only necessary on platforms where the controller BIOS does not * run. */ if ((error = mly_fwhandshake(sc))) goto out; /* * Allocate initial command buffers. */ if ((error = mly_alloc_commands(sc))) goto out; /* * Obtain controller feature information */ if ((error = mly_get_controllerinfo(sc))) goto out; /* * Reallocate command buffers now we know how many we want. */ mly_release_commands(sc); if ((error = mly_alloc_commands(sc))) goto out; /* * Get the current event counter for health purposes, populate the initial * health status buffer. 
*/ if ((error = mly_get_eventstatus(sc))) goto out; /* * Enable memory-mailbox mode. */ if ((error = mly_enable_mmbox(sc))) goto out; /* * Attach to CAM. */ if ((error = mly_cam_attach(sc))) goto out; /* * Print a little information about the controller */ mly_describe_controller(sc); /* * Mark all attached devices for rescan. */ mly_scan_devices(sc); /* * Instigate the first status poll immediately. Rescan completions won't * happen until interrupts are enabled, which should still be before * the SCSI subsystem gets to us, courtesy of the "SCSI settling delay". */ mly_periodic((void *)sc); /* * Create the control device. */ sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev)); sc->mly_dev_t->si_drv1 = sc; /* enable interrupts now */ MLY_UNMASK_INTERRUPTS(sc); out: if (error != 0) mly_free(sc); return(error); } /******************************************************************************** * Perform PCI-specific initialisation. */ static int mly_pci_attach(struct mly_softc *sc) { int i, error; u_int32_t command; debug_called(1); /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. * * XXX we shouldn't do this; the PCI code should. */ command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN; pci_write_config(sc->mly_dev, PCIR_COMMAND, command, 2); command = pci_read_config(sc->mly_dev, PCIR_COMMAND, 2); if (!(command & PCIM_CMD_BUSMASTEREN)) { mly_printf(sc, "can't enable busmaster feature\n"); goto fail; } if ((command & PCIM_CMD_MEMEN) == 0) { mly_printf(sc, "memory window not available\n"); goto fail; } /* * Allocate the PCI register window. */ sc->mly_regs_rid = PCIR_MAPS; /* first base address register */ if ((sc->mly_regs_resource = bus_alloc_resource(sc->mly_dev, SYS_RES_MEMORY, &sc->mly_regs_rid, 0, ~0, 1, RF_ACTIVE)) == NULL) { mly_printf(sc, "can't allocate register window\n"); goto fail; } sc->mly_btag = rman_get_bustag(sc->mly_regs_resource); sc->mly_bhandle = rman_get_bushandle(sc->mly_regs_resource); /* * Allocate and connect our interrupt. */ sc->mly_irq_rid = 0; if ((sc->mly_irq = bus_alloc_resource(sc->mly_dev, SYS_RES_IRQ, &sc->mly_irq_rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { mly_printf(sc, "can't allocate interrupt\n"); goto fail; } if (bus_setup_intr(sc->mly_dev, sc->mly_irq, INTR_TYPE_CAM | INTR_ENTROPY, mly_intr, sc, &sc->mly_intr)) { mly_printf(sc, "can't set up interrupt\n"); goto fail; } /* assume failure is 'out of memory' */ error = ENOMEM; /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that all of these controllers are 64-bit capable. */ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ &sc->mly_parent_dmat)) { mly_printf(sc, "can't allocate parent DMA tag\n"); goto fail; } /* * Create DMA tag for mapping buffers into controller-addressable space. 
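     *
     * This tag descends from mly_parent_dmat above; data buffers, command
     * packets and the s/g tables each get a child tag of their own, so that
     * their size and segment constraints can differ while inheriting the
     * parent's addressing limits.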
*/ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, MLY_MAX_SGENTRIES, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ &sc->mly_buffer_dmat)) { mly_printf(sc, "can't allocate buffer DMA tag\n"); goto fail; } /* * Initialise the DMA tag for command packets. */ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(union mly_command_packet) * MLY_MAX_COMMANDS, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ &sc->mly_packet_dmat)) { mly_printf(sc, "can't allocate command packet DMA tag\n"); goto fail; } /* * Detect the hardware interface version */ for (i = 0; mly_identifiers[i].vendor != 0; i++) { if ((mly_identifiers[i].vendor == pci_get_vendor(sc->mly_dev)) && (mly_identifiers[i].device == pci_get_device(sc->mly_dev))) { sc->mly_hwif = mly_identifiers[i].hwif; switch(sc->mly_hwif) { case MLY_HWIF_I960RX: debug(1, "set hardware up for i960RX"); sc->mly_doorbell_true = 0x00; sc->mly_command_mailbox = MLY_I960RX_COMMAND_MAILBOX; sc->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX; sc->mly_idbr = MLY_I960RX_IDBR; sc->mly_odbr = MLY_I960RX_ODBR; sc->mly_error_status = MLY_I960RX_ERROR_STATUS; sc->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS; sc->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK; break; case MLY_HWIF_STRONGARM: debug(1, "set hardware up for StrongARM"); sc->mly_doorbell_true = 0xff; /* doorbell 'true' is 0 */ sc->mly_command_mailbox = MLY_STRONGARM_COMMAND_MAILBOX; sc->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX; sc->mly_idbr = MLY_STRONGARM_IDBR; sc->mly_odbr = MLY_STRONGARM_ODBR; sc->mly_error_status = MLY_STRONGARM_ERROR_STATUS; sc->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS; sc->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK; break; } break; } } /* * Create the scatter/gather mappings. */ if ((error = mly_sg_map(sc))) goto fail; /* * Allocate and map the memory mailbox */ if ((error = mly_mmbox_map(sc))) goto fail; error = 0; fail: return(error); } /******************************************************************************** * Shut the controller down and detach all our resources. */ static int mly_detach(device_t dev) { int error; if ((error = mly_shutdown(dev)) != 0) return(error); mly_free(device_get_softc(dev)); return(0); } /******************************************************************************** * Bring the controller to a state where it can be safely left alone. * * Note that it should not be necessary to wait for any outstanding commands, * as they should be completed prior to calling here. * * XXX this applies for I/O, but not status polls; we should beware of * the case where a status command is running while we detach. */ static int mly_shutdown(device_t dev) { struct mly_softc *sc = device_get_softc(dev); debug_called(1); if (sc->mly_state & MLY_STATE_OPEN) return(EBUSY); /* kill the periodic event */ untimeout(mly_periodic, sc, sc->mly_periodic); /* flush controller */ mly_printf(sc, "flushing cache..."); printf("%s\n", mly_flush(sc) ? "failed" : "done"); MLY_MASK_INTERRUPTS(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. 
 */
static void
mly_intr(void *arg)
{
    struct mly_softc    *sc = (struct mly_softc *)arg;

    debug_called(2);

    mly_done(sc);
}

/********************************************************************************
 ********************************************************************************
                                           Bus-dependent Resource Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Allocate memory for the scatter/gather tables
 */
static int
mly_sg_map(struct mly_softc *sc)
{
    size_t      segsize;

    debug_called(1);

    /*
     * Create a single tag describing a region large enough to hold all of
     * the s/g lists we will need.
     */
    segsize = sizeof(struct mly_sg_entry) * MLY_MAX_COMMANDS * MLY_MAX_SGENTRIES;
    if (bus_dma_tag_create(sc->mly_parent_dmat,        /* parent */
                           1, 0,                       /* alignment, boundary */
                           BUS_SPACE_MAXADDR,          /* lowaddr */
                           BUS_SPACE_MAXADDR,          /* highaddr */
                           NULL, NULL,                 /* filter, filterarg */
                           segsize, 1,                 /* maxsize, nsegments */
                           BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
                           0,                          /* flags */
                           &sc->mly_sg_dmat)) {
        mly_printf(sc, "can't allocate scatter/gather DMA tag\n");
        return(ENOMEM);
    }

    /*
     * Allocate enough s/g maps for all commands and permanently map them into
     * controller-visible space.
     *
     * XXX this assumes we can get enough space for all the s/g maps in one
     * contiguous slab.
     */
    if (bus_dmamem_alloc(sc->mly_sg_dmat, (void **)&sc->mly_sg_table,
                         BUS_DMA_NOWAIT, &sc->mly_sg_dmamap)) {
        mly_printf(sc, "can't allocate s/g table\n");
        return(ENOMEM);
    }
    bus_dmamap_load(sc->mly_sg_dmat, sc->mly_sg_dmamap, sc->mly_sg_table,
                    segsize, mly_sg_map_helper, sc, 0);
    return(0);
}

/********************************************************************************
 * Save the physical address of the base of the s/g table.
 */
static void
mly_sg_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc    *sc = (struct mly_softc *)arg;

    debug_called(1);

    /* save base of s/g table's address in bus space */
    sc->mly_sg_busaddr = segs->ds_addr;
}

/********************************************************************************
 * Allocate memory for the memory-mailbox interface
 */
static int
mly_mmbox_map(struct mly_softc *sc)
{

    /*
     * Create a DMA tag for a single contiguous region large enough for the
     * memory mailbox structure.
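     *
     * (A note on layout, from mly_enable_mmbox() below: the controller is
     * later given the bus addresses of the mmm_command, mmm_status and
     * mmm_health members of this one allocation, computed as, e.g.,
     *
     *    sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command)
     *
     * so the command ring, status ring and health buffer share the region.)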
*/ if (bus_dma_tag_create(sc->mly_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sizeof(struct mly_mmbox), 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ &sc->mly_mmbox_dmat)) { mly_printf(sc, "can't allocate memory mailbox DMA tag\n"); return(ENOMEM); } /* * Allocate the buffer */ if (bus_dmamem_alloc(sc->mly_mmbox_dmat, (void **)&sc->mly_mmbox, BUS_DMA_NOWAIT, &sc->mly_mmbox_dmamap)) { mly_printf(sc, "can't allocate memory mailbox\n"); return(ENOMEM); } bus_dmamap_load(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap, sc->mly_mmbox, sizeof(struct mly_mmbox), mly_mmbox_map_helper, sc, 0); bzero(sc->mly_mmbox, sizeof(*sc->mly_mmbox)); return(0); } /******************************************************************************** * Save the physical address of the memory mailbox */ static void mly_mmbox_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_softc *sc = (struct mly_softc *)arg; debug_called(1); sc->mly_mmbox_busaddr = segs->ds_addr; } /******************************************************************************** * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void mly_free(struct mly_softc *sc) { debug_called(1); /* detach from CAM */ mly_cam_detach(sc); /* release command memory */ mly_release_commands(sc); /* throw away the controllerinfo structure */ if (sc->mly_controllerinfo != NULL) free(sc->mly_controllerinfo, M_DEVBUF); /* throw away the controllerparam structure */ if (sc->mly_controllerparam != NULL) free(sc->mly_controllerparam, M_DEVBUF); /* destroy data-transfer DMA tag */ if (sc->mly_buffer_dmat) bus_dma_tag_destroy(sc->mly_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->mly_sg_table) { bus_dmamap_unload(sc->mly_sg_dmat, sc->mly_sg_dmamap); bus_dmamem_free(sc->mly_sg_dmat, sc->mly_sg_table, sc->mly_sg_dmamap); } if (sc->mly_sg_dmat) bus_dma_tag_destroy(sc->mly_sg_dmat); /* free and destroy DMA memory and tag for memory mailbox */ if (sc->mly_mmbox) { bus_dmamap_unload(sc->mly_mmbox_dmat, sc->mly_mmbox_dmamap); bus_dmamem_free(sc->mly_mmbox_dmat, sc->mly_mmbox, sc->mly_mmbox_dmamap); } if (sc->mly_mmbox_dmat) bus_dma_tag_destroy(sc->mly_mmbox_dmat); /* disconnect the interrupt handler */ if (sc->mly_intr) bus_teardown_intr(sc->mly_dev, sc->mly_irq, sc->mly_intr); if (sc->mly_irq != NULL) bus_release_resource(sc->mly_dev, SYS_RES_IRQ, sc->mly_irq_rid, sc->mly_irq); /* destroy the parent DMA tag */ if (sc->mly_parent_dmat) bus_dma_tag_destroy(sc->mly_parent_dmat); /* release the register window mapping */ if (sc->mly_regs_resource != NULL) bus_release_resource(sc->mly_dev, SYS_RES_MEMORY, sc->mly_regs_rid, sc->mly_regs_resource); + +#if __FreeBSD_version >= 500005 + TASK_DESTROY(&sc->mly_task_complete); +#endif } /******************************************************************************** ******************************************************************************** Command Wrappers ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc. 
*/ static int mly_get_controllerinfo(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t status; int error; debug_called(1); if (sc->mly_controllerinfo != NULL) free(sc->mly_controllerinfo, M_DEVBUF); /* build the getcontrollerinfo ioctl and send it */ bzero(&mci, sizeof(mci)); sc->mly_controllerinfo = NULL; mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO; if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); if (sc->mly_controllerparam != NULL) free(sc->mly_controllerparam, M_DEVBUF); /* build the getcontrollerparameter ioctl and send it */ bzero(&mci, sizeof(mci)); sc->mly_controllerparam = NULL; mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER; if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); return(0); } /******************************************************************************** * Schedule all possible devices for a rescan. * */ static void mly_scan_devices(struct mly_softc *sc) { int bus, target; debug_called(1); /* * Clear any previous BTL information. */ bzero(&sc->mly_btl, sizeof(sc->mly_btl)); /* * Mark all devices as requiring a rescan, and let the next * periodic scan collect them. */ for (bus = 0; bus < sc->mly_cam_channels; bus++) if (MLY_BUS_IS_VALID(sc, bus)) for (target = 0; target < MLY_MAX_TARGETS; target++) sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN; } /******************************************************************************** * Rescan a device, possibly as a consequence of getting an event which suggests * that it may have changed. * * If we suffer resource starvation, we can abandon the rescan as we'll be * retried. */ static void mly_rescan_btl(struct mly_softc *sc, int bus, int target) { struct mly_command *mc; struct mly_command_ioctl *mci; debug_called(1); /* check that this bus is valid */ if (!MLY_BUS_IS_VALID(sc, bus)) return; /* get a command */ if (mly_alloc_command(sc, &mc)) return; /* set up the data buffer */ if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { mly_release_command(mc); return; } mc->mc_flags |= MLY_CMD_DATAIN; mc->mc_complete = mly_complete_rescan; /* * Build the ioctl. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; mci->opcode = MDACMD_IOCTL; mci->addr.phys.controller = 0; mci->timeout.value = 30; mci->timeout.scale = MLY_TIMEOUT_SECONDS; if (MLY_BUS_IS_VIRTUAL(sc, bus)) { mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid); mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID; mci->addr.log.logdev = MLY_LOGDEV_ID(sc, bus, target); debug(1, "logical device %d", mci->addr.log.logdev); } else { mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid); mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID; mci->addr.phys.lun = 0; mci->addr.phys.target = target; mci->addr.phys.channel = bus; debug(1, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target); } /* * Dispatch the command. If we successfully send the command, clear the rescan * bit. 
*/ if (mly_start(mc) != 0) { mly_release_command(mc); } else { sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN; /* success */ } } /******************************************************************************** * Handle the completion of a rescan operation */ static void mly_complete_rescan(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_ioctl_getlogdevinfovalid *ldi; struct mly_ioctl_getphysdevinfovalid *pdi; struct mly_command_ioctl *mci; struct mly_btl btl, *btlp; int bus, target, rescan; debug_called(1); /* * Recover the bus and target from the command. We need these even in * the case where we don't have a useful response. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) { bus = MLY_LOGDEV_BUS(sc, mci->addr.log.logdev); target = MLY_LOGDEV_TARGET(sc, mci->addr.log.logdev); } else { bus = mci->addr.phys.channel; target = mci->addr.phys.target; } /* XXX validate bus/target? */ /* the default result is 'no device' */ bzero(&btl, sizeof(btl)); /* if the rescan completed OK, we have possibly-new BTL data */ if (mc->mc_status == 0) { if (mc->mc_length == sizeof(*ldi)) { ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data; if ((MLY_LOGDEV_BUS(sc, ldi->logical_device_number) != bus) || (MLY_LOGDEV_TARGET(sc, ldi->logical_device_number) != target)) { mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n", bus, target, MLY_LOGDEV_BUS(sc, ldi->logical_device_number), MLY_LOGDEV_TARGET(sc, ldi->logical_device_number)); /* XXX what can we do about this? */ } btl.mb_flags = MLY_BTL_LOGICAL; btl.mb_type = ldi->raid_level; btl.mb_state = ldi->state; debug(1, "BTL rescan for %d returns %s, %s", ldi->logical_device_number, mly_describe_code(mly_table_device_type, ldi->raid_level), mly_describe_code(mly_table_device_state, ldi->state)); } else if (mc->mc_length == sizeof(*pdi)) { pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data; if ((pdi->channel != bus) || (pdi->target != target)) { mly_printf(sc, "WARNING: BTL rescan for %d:%d returned data for %d:%d instead\n", bus, target, pdi->channel, pdi->target); /* XXX what can we do about this? */ } btl.mb_flags = MLY_BTL_PHYSICAL; btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL; btl.mb_state = pdi->state; btl.mb_speed = pdi->speed; btl.mb_width = pdi->width; if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED) sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED; debug(1, "BTL rescan for %d:%d returns %s", bus, target, mly_describe_code(mly_table_device_state, pdi->state)); } else { mly_printf(sc, "BTL rescan result invalid\n"); } } free(mc->mc_data, M_DEVBUF); mly_release_command(mc); /* * Decide whether we need to rescan the device. */ rescan = 0; /* device type changes (usually between 'nothing' and 'something') */ btlp = &sc->mly_btl[bus][target]; if (btl.mb_flags != btlp->mb_flags) { debug(1, "flags changed, rescanning"); rescan = 1; } /* XXX other reasons? */ /* * Update BTL information. */ *btlp = btl; /* * Perform CAM rescan if required. */ if (rescan) mly_cam_rescan_btl(sc, bus, target); } /******************************************************************************** * Get the current health status and set the 'next event' counter to suit. 
*/ static int mly_get_eventstatus(struct mly_softc *sc) { struct mly_command_ioctl mci; struct mly_health_status *mh; u_int8_t status; int error; /* build the gethealthstatus ioctl and send it */ bzero(&mci, sizeof(mci)); mh = NULL; mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS; if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL))) return(error); if (status != 0) return(EIO); /* get the event counter */ sc->mly_event_change = mh->change_counter; sc->mly_event_waiting = mh->next_event; sc->mly_event_counter = mh->next_event; /* save the health status into the memory mailbox */ bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh)); debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event); free(mh, M_DEVBUF); return(0); } /******************************************************************************** * Enable the memory mailbox mode. */ static int mly_enable_mmbox(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t *sp, status; int error; debug_called(1); /* build the ioctl and send it */ bzero(&mci, sizeof(mci)); mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX; /* set buffer addresses */ mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); /* set buffer sizes - abuse of data_size field is revolting */ sp = (u_int8_t *)&mci.data_size; sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024); sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024; mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024; debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox, mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0], mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1], mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size); if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) return(error); if (status != 0) return(EIO); sc->mly_state |= MLY_STATE_MMBOX_ACTIVE; debug(1, "memory mailbox active"); return(0); } /******************************************************************************** * Flush all pending I/O from the controller. */ static int mly_flush(struct mly_softc *sc) { struct mly_command_ioctl mci; u_int8_t status; int error; debug_called(1); /* build the ioctl */ bzero(&mci, sizeof(mci)); mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA; mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER; /* pass it off to the controller */ if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) return(error); return((status == 0) ? 0 : EIO); } /******************************************************************************** * Perform an ioctl command. * * If (data) is not NULL, the command requires data transfer. If (*data) is NULL * the command requires data transfer from the controller, and we will allocate * a buffer for it. If (*data) is not NULL, the command requires data transfer * to the controller. * * XXX passing in the whole ioctl structure is ugly. Better ideas? * * XXX we don't even try to handle the case where datasize > 4k. We should. 
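 *
 * A typical 'data from controller' caller looks like the sketch below
 * (compare mly_get_controllerinfo() above); because *data is NULL on entry,
 * mly_ioctl() allocates the return buffer and hands it back:
 *
 *    struct mly_ioctl_getcontrollerinfo *info = NULL;
 *    u_int8_t status;
 *
 *    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
 *    error = mly_ioctl(sc, &mci, (void **)&info, sizeof(*info),
 *                      &status, NULL, NULL);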
 */
static int
mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
          u_int8_t *status, void *sense_buffer, size_t *sense_length)
{
    struct mly_command          *mc;
    struct mly_command_ioctl    *mci;
    int                         error;

    debug_called(1);

    mc = NULL;
    if (mly_alloc_command(sc, &mc)) {
        error = ENOMEM;
        goto out;
    }

    /* copy the ioctl structure, but save some important fields and then fixup */
    mci = &mc->mc_packet->ioctl;
    ioctl->sense_buffer_address = mci->sense_buffer_address;
    ioctl->maximum_sense_size = mci->maximum_sense_size;
    *mci = *ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;

    /* handle the data buffer */
    if (data != NULL) {
        if (*data == NULL) {
            /* allocate data buffer */
            if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
                error = ENOMEM;
                goto out;
            }
            mc->mc_flags |= MLY_CMD_DATAIN;
        } else {
            mc->mc_data = *data;
            mc->mc_flags |= MLY_CMD_DATAOUT;
        }
        mc->mc_length = datasize;
        mc->mc_packet->generic.data_size = datasize;
    }

    /* run the command */
    if ((error = mly_immediate_command(mc)))
        goto out;

    /* clean up and return any data */
    *status = mc->mc_status;
    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
        bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
        *sense_length = mc->mc_sense;
        goto out;
    }

    /* should we return a data pointer? */
    if ((data != NULL) && (*data == NULL))
        *data = mc->mc_data;

    /* command completed OK */
    error = 0;

out:
    if (mc != NULL) {
        /* do we need to free a data buffer we allocated? */
        if (error && (data != NULL) && (*data == NULL) && (mc->mc_data != NULL))
            free(mc->mc_data, M_DEVBUF);
        mly_release_command(mc);
    }
    return(error);
}

/********************************************************************************
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *sc)
{

    /*
     * The controller may have updated the health status information, so check
     * for it here.  Note that the counters are all in host memory, so this
     * check is very cheap.  Also note that we depend on this check being
     * repeated from mly_periodic() and on the completion of each event fetch,
     * so that successive events are picked up promptly.
     */
    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
        sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
        debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
              sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
        sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;

        /* wake up anyone that might be interested in this */
        wakeup(&sc->mly_event_change);
    }
    if (sc->mly_event_counter != sc->mly_event_waiting)
        mly_fetch_event(sc);
}

/********************************************************************************
 * Fetch one event from the controller.
 *
 * If we fail due to resource starvation, we'll be retried the next time a
 * command completes.
 */
static void
mly_fetch_event(struct mly_softc *sc)
{
    struct mly_command          *mc;
    struct mly_command_ioctl    *mci;
    int                         s;
    u_int32_t                   event;

    debug_called(1);

    /* get a command */
    if (mly_alloc_command(sc, &mc))
        return;

    /* set up the data buffer */
    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
        mly_release_command(mc);
        return;
    }
    mc->mc_length = sizeof(struct mly_event);
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_event;

    /*
     * Get an event number to fetch.  It's possible that we've raced with another
     * context for the last event, in which case there will be no more events.
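     *
     * (The splcam() section below makes the 'any events left?' test and the
     * increment of mly_event_counter atomic with respect to other contexts,
     * so each event number is claimed by exactly one command.)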
*/ s = splcam(); if (sc->mly_event_counter == sc->mly_event_waiting) { mly_release_command(mc); splx(s); return; } event = sc->mly_event_counter++; splx(s); /* * Build the ioctl. * * At this point we are committed to sending this request, as it * will be the only one constructed for this particular event number. */ mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; mci->opcode = MDACMD_IOCTL; mci->data_size = sizeof(struct mly_event); mci->addr.phys.lun = (event >> 16) & 0xff; mci->addr.phys.target = (event >> 24) & 0xff; mci->addr.phys.channel = 0; mci->addr.phys.controller = 0; mci->timeout.value = 30; mci->timeout.scale = MLY_TIMEOUT_SECONDS; mci->sub_ioctl = MDACIOCTL_GETEVENT; mci->param.getevent.sequence_number_low = event & 0xffff; debug(1, "fetch event %u", event); /* * Submit the command. * * Note that failure of mly_start() will result in this event never being * fetched. */ if (mly_start(mc) != 0) { mly_printf(sc, "couldn't fetch event %u\n", event); mly_release_command(mc); } } /******************************************************************************** * Handle the completion of an event poll. */ static void mly_complete_event(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_event *me = (struct mly_event *)mc->mc_data; debug_called(1); /* * If the event was successfully fetched, process it. */ if (mc->mc_status == SCSI_STATUS_OK) { mly_process_event(sc, me); free(me, M_DEVBUF); } mly_release_command(mc); /* * Check for another event. */ mly_check_event(sc); } /******************************************************************************** * Process a controller event. */ static void mly_process_event(struct mly_softc *sc, struct mly_event *me) { struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0]; char *fp, *tp; int bus, target, event, class, action; /* * Errors can be reported using vendor-unique sense data. In this case, the * event code will be 0x1c (Request sense data present), the sense key will * be 0x09 (vendor specific), the MSB of the ASC will be set, and the * actual event code will be a 16-bit value comprised of the ASCQ (low byte) * and low seven bits of the ASC (low seven bits of the high byte). */ if ((me->code == 0x1c) && ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) && (ssd->add_sense_code & 0x80)) { event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual; } else { event = me->code; } /* look up event, get codes */ fp = mly_describe_code(mly_table_event, event); debug(1, "Event %d code 0x%x", me->sequence_number, me->code); /* quiet event? */ class = fp[0]; if (isupper(class) && bootverbose) class = tolower(class); /* get action code, text string */ action = fp[1]; tp = &fp[2]; /* * Print some information about the event. * * This code uses a table derived from the corresponding portion of the Linux * driver, and thus the parser is very similar. 
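     *
     * As a worked example of the sense-data encoding described above: sense
     * key 0x9 with ASC 0x85 and ASCQ 0x02 yields event code
     * ((0x85 & ~0x80) << 8) + 0x02 = 0x0502.  In the table string, fp[0] is
     * the class code, fp[1] the action code, and the remainder the text.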
     */
    switch(class) {
    case 'p':           /* error on physical device */
        mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
        if (action == 'r')
            sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
        break;
    case 'l':           /* error on logical unit */
    case 'm':           /* message about logical unit */
        bus = MLY_LOGDEV_BUS(sc, me->lun);
        target = MLY_LOGDEV_TARGET(sc, me->lun);
        mly_name_device(sc, bus, target);
        mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
        if (action == 'r')
            sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
        break;
    case 's':           /* report of sense data */
        if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
            (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
             (ssd->add_sense_code == 0x04) &&
             ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
            break;      /* ignore NO_SENSE or NOT_READY in one case */

        mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
        mly_printf(sc, " sense key %d asc %02x ascq %02x\n",
                   ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
        mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
        if (action == 'r')
            sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
        break;
    case 'e':
        mly_printf(sc, tp, me->target, me->lun);
        break;
    case 'c':
        mly_printf(sc, "controller %s\n", tp);
        break;
    case '?':
        mly_printf(sc, "%s - %d\n", tp, me->code);
        break;
    default:    /* probably a 'noisy' event being ignored */
        break;
    }
}

/********************************************************************************
 * Perform periodic activities.
 */
static void
mly_periodic(void *data)
{
    struct mly_softc    *sc = (struct mly_softc *)data;
    int                 bus, target;

    debug_called(2);

    /*
     * Scan devices.
     */
    for (bus = 0; bus < sc->mly_cam_channels; bus++) {
        if (MLY_BUS_IS_VALID(sc, bus)) {
            for (target = 0; target < MLY_MAX_TARGETS; target++) {

                /* ignore the controller in this scan */
                if (target == sc->mly_controllerparam->initiator_id)
                    continue;

                /* perform device rescan? */
                if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
                    mly_rescan_btl(sc, bus, target);
            }
        }
    }

    /* check for controller events */
    mly_check_event(sc);

    /* reschedule ourselves */
    sc->mly_periodic = timeout(mly_periodic, sc, MLY_PERIODIC_INTERVAL * hz);
}

/********************************************************************************
 ********************************************************************************
                                                              Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Run a command and wait for it to complete.
 *
 */
static int
mly_immediate_command(struct mly_command *mc)
{
    struct mly_softc    *sc = mc->mc_sc;
    int                 error, s;

    debug_called(1);

    /* spinning at splcam is ugly, but we're only used during controller init */
    s = splcam();
    if ((error = mly_start(mc))) {
        splx(s);
        return(error);
    }

    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
        /* sleep on the command */
        while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
            tsleep(mc, PRIBIO, "mlywait", 0);
        }
    } else {
        /* spin and collect status while we do */
        while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
            mly_done(mc->mc_sc);
        }
    }
    splx(s);
    return(0);
}

/********************************************************************************
 * Deliver a command to the controller.
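 *
 * Two delivery paths exist: until mly_enable_mmbox() has succeeded, commands
 * are passed through the hardware mailbox registers one at a time; after that
 * they are copied into the next free slot of the memory-mailbox command ring
 * and the controller is prodded via the inbound doorbell.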
* * XXX it would be good to just queue commands that we can't submit immediately * and send them later, but we probably want a wrapper for that so that * we don't hang on a failed submission for an immediate command. */ static int mly_start(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; union mly_command_packet *pkt; int s; debug_called(2); /* * Set the command up for delivery to the controller. */ mly_map_command(mc); mc->mc_packet->generic.command_id = mc->mc_slot; s = splcam(); /* * Do we have to use the hardware mailbox? */ if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) { /* * Check to see if the controller is ready for us. */ if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) { splx(s); return(EBUSY); } mc->mc_flags |= MLY_CMD_BUSY; /* * It's ready, send the command. */ MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys); MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT); } else { /* use memory-mailbox mode */ pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index]; /* check to see if the next index is free yet */ if (pkt->mmbox.flag != 0) { splx(s); return(EBUSY); } mc->mc_flags |= MLY_CMD_BUSY; /* copy in new command */ bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data)); /* barrier to ensure completion of previous write before we write the flag */ bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle? */ /* copy flag last */ pkt->mmbox.flag = mc->mc_packet->mmbox.flag; /* barrier to ensure completion of previous write before we notify the controller */ bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle */ /* signal controller, update index */ MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT); sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS; } mly_enqueue_busy(mc); splx(s); return(0); } /******************************************************************************** * Pick up command status from the controller, schedule a completion event */ void mly_done(struct mly_softc *sc) { struct mly_command *mc; union mly_status_packet *sp; u_int16_t slot; int s, worked; s = splcam(); worked = 0; /* pick up hardware-mailbox commands */ if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) { slot = MLY_GET_REG2(sc, sc->mly_status_mailbox); if (slot < MLY_SLOT_MAX) { mc = &sc->mly_command[slot - MLY_SLOT_START]; mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2); mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3); mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4); mly_remove_busy(mc); mc->mc_flags &= ~MLY_CMD_BUSY; mly_enqueue_complete(mc); worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ mly_printf(sc, "got HM completion for illegal slot %u\n", slot); } /* unconditionally acknowledge status */ MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY); MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); } /* pick up memory-mailbox commands */ if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) { for (;;) { sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index]; /* check for more status */ if (sp->mmbox.flag == 0) break; /* get slot number */ slot = sp->status.command_id; if (slot < MLY_SLOT_MAX) { mc = &sc->mly_command[slot - MLY_SLOT_START]; mc->mc_status = sp->status.status; mc->mc_sense = sp->status.sense_length; mc->mc_resid = sp->status.residue; mly_remove_busy(mc); mc->mc_flags &= ~MLY_CMD_BUSY; mly_enqueue_complete(mc); worked = 1; } else { /* slot 0xffff may mean "extremely bogus command" */ mly_printf(sc, "got AM completion for illegal slot %u at %d\n", 
                           slot, sc->mly_mmbox_status_index);
            }

            /* clear and move to next index */
            sp->mmbox.flag = 0;
            sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
        }

        /* acknowledge that we have collected status value(s) */
        MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
    }
    splx(s);

    if (worked) {
#if __FreeBSD_version >= 500005
        if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
            taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
        else
#endif
            mly_complete(sc, 0);
    }
}

/********************************************************************************
 * Process completed commands
 */
static void
mly_complete(void *context, int pending)
{
    struct mly_softc    *sc = (struct mly_softc *)context;
    struct mly_command  *mc;
    void                (* mc_complete)(struct mly_command *mc);

    debug_called(2);

    /*
     * Spin pulling commands off the completed queue and processing them.
     */
    while ((mc = mly_dequeue_complete(sc)) != NULL) {

        /*
         * Free controller resources, mark command complete.
         *
         * Note that as soon as we mark the command complete, it may be freed
         * out from under us, so we need to save the mc_complete field in
         * order to later avoid dereferencing mc.  (We would not expect to
         * have a polling/sleeping consumer with mc_complete != NULL).
         */
        mly_unmap_command(mc);
        mc_complete = mc->mc_complete;
        mc->mc_flags |= MLY_CMD_COMPLETE;

        /*
         * Call completion handler or wake up sleeping consumer.
         */
        if (mc_complete != NULL) {
            mc_complete(mc);
        } else {
            wakeup(mc);
        }
    }

    /*
     * XXX if we are deferring commands due to controller-busy status, we should
     * retry submitting them here.
     */
}

/********************************************************************************
 ********************************************************************************
                                                        Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Allocate a command.
 */
int
mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
{
    struct mly_command  *mc;

    debug_called(3);

    if ((mc = mly_dequeue_free(sc)) == NULL)
        return(ENOMEM);

    *mcp = mc;
    return(0);
}

/********************************************************************************
 * Release a command back to the freelist.
 */
void
mly_release_command(struct mly_command *mc)
{
    debug_called(3);

    /*
     * Reset fields that might confuse a later consumer if they were left
     * over from this command's previous use.
     */
    mc->mc_data = NULL;
    mc->mc_flags = 0;
    mc->mc_complete = NULL;
    mc->mc_private = NULL;

    /*
     * By default, we set up to overwrite the command packet with
     * sense information.
     */
    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);

    mly_enqueue_free(mc);
}

/********************************************************************************
 * Map helper for command allocation.
 */
static void
mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_softc    *sc = (struct mly_softc *)arg;

    debug_called(1);

    sc->mly_packetphys = segs[0].ds_addr;
}

/********************************************************************************
 * Allocate and initialise command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_COMMANDS commands, limit our
 * allocation to that number.
If we don't yet know how many commands the * controller supports, allocate a very small set (suitable for initialisation * purposes only). */ static int mly_alloc_commands(struct mly_softc *sc) { struct mly_command *mc; int i, ncmd; if (sc->mly_controllerinfo == NULL) { ncmd = 4; } else { ncmd = min(MLY_MAX_COMMANDS, sc->mly_controllerinfo->maximum_parallel_commands); } /* * Allocate enough space for all the command packets in one chunk and * map them permanently into controller-visible space. */ if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, BUS_DMA_NOWAIT, &sc->mly_packetmap)) { return(ENOMEM); } bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, ncmd * sizeof(union mly_command_packet), mly_alloc_commands_map, sc, 0); for (i = 0; i < ncmd; i++) { mc = &sc->mly_command[i]; bzero(mc, sizeof(*mc)); mc->mc_sc = sc; mc->mc_slot = MLY_SLOT_START + i; mc->mc_packet = sc->mly_packet + i; mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet)); if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) mly_release_command(mc); } return(0); } /******************************************************************************** * Free all the storage held by commands. * * Must be called with all commands on the free list. */ static void mly_release_commands(struct mly_softc *sc) { struct mly_command *mc; /* throw away command buffer DMA maps */ while (mly_alloc_command(sc, &mc) == 0) bus_dmamap_destroy(sc->mly_buffer_dmat, mc->mc_datamap); /* release the packet storage */ if (sc->mly_packet != NULL) { bus_dmamap_unload(sc->mly_packet_dmat, sc->mly_packetmap); bus_dmamem_free(sc->mly_packet_dmat, sc->mly_packet, sc->mly_packetmap); sc->mly_packet = NULL; } } /******************************************************************************** * Command-mapping helper function - populate this command's s/g table * with the s/g entries for its data. */ static void mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_command *mc = (struct mly_command *)arg; struct mly_softc *sc = mc->mc_sc; struct mly_command_generic *gen = &(mc->mc_packet->generic); struct mly_sg_entry *sg; int i, tabofs; debug_called(2); /* can we use the transfer structure directly? */ if (nseg <= 2) { sg = &gen->transfer.direct.sg[0]; gen->command_control.extended_sg_table = 0; } else { tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAX_SGENTRIES); sg = sc->mly_sg_table + tabofs; gen->transfer.indirect.entries[0] = nseg; gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry)); gen->command_control.extended_sg_table = 1; } /* copy the s/g table */ for (i = 0; i < nseg; i++) { sg[i].physaddr = segs[i].ds_addr; sg[i].length = segs[i].ds_len; } } #if 0 /******************************************************************************** * Command-mapping helper function - save the cdb's physical address. * * We don't support 'large' SCSI commands at this time, so this is unused. */ static void mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mly_command *mc = (struct mly_command *)arg; debug_called(2); /* XXX can we safely assume that a CDB will never cross a page boundary? 
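 *
 * (Illustrative arithmetic for the check below, assuming PAGE_SIZE 4096:
 * a CDB at 0x1ffe of length 4 gives offsets 0xffe > 0x002 and would panic,
 * while a CDB at 0x1000 of length 16 gives 0x000 <= 0x010 and passes.)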
     */
    if ((segs[0].ds_addr % PAGE_SIZE) >
        ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
        panic("cdb crosses page boundary");

    /* fix up fields in the command packet */
    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
}
#endif

/********************************************************************************
 * Map a command into controller-visible space
 */
static void
mly_map_command(struct mly_command *mc)
{
    struct mly_softc    *sc = mc->mc_sc;

    debug_called(2);

    /* don't map more than once */
    if (mc->mc_flags & MLY_CMD_MAPPED)
        return;

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL) {
        bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data,
                        mc->mc_length, mly_map_command_sg, mc, 0);

        if (mc->mc_flags & MLY_CMD_DATAIN)
            bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
        if (mc->mc_flags & MLY_CMD_DATAOUT)
            bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
    }
    mc->mc_flags |= MLY_CMD_MAPPED;
}

/********************************************************************************
 * Unmap a command from controller-visible space
 */
static void
mly_unmap_command(struct mly_command *mc)
{
    struct mly_softc    *sc = mc->mc_sc;

    debug_called(2);

    if (!(mc->mc_flags & MLY_CMD_MAPPED))
        return;

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL) {
        if (mc->mc_flags & MLY_CMD_DATAIN)
            bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
        if (mc->mc_flags & MLY_CMD_DATAOUT)
            bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
    }
    mc->mc_flags &= ~MLY_CMD_MAPPED;
}

/********************************************************************************
 ********************************************************************************
                                                                   CAM interface
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Attach the physical and virtual SCSI busses to CAM.
 *
 * Physical bus numbering starts from 0, virtual bus numbering from one greater
 * than the highest physical bus.  Physical busses are only registered if
 * the kernel environment variable "hw.mly.register_physical_channels" is set.
 *
 * When we refer to a "bus", we are referring to the bus number registered with
 * the SIM, whereas a "channel" is a channel number given to the adapter.  In
 * order to keep things simple, we map these 1:1, so "bus" and "channel" may be
 * used interchangeably.
 */
int
mly_cam_attach(struct mly_softc *sc)
{
    struct cam_devq     *devq;
    int                 chn, i;

    debug_called(1);

    /*
     * Allocate a devq for all our channels combined.
     */
    if ((devq = cam_simq_alloc(sc->mly_controllerinfo->maximum_parallel_commands)) == NULL) {
        mly_printf(sc, "can't allocate CAM SIM queue\n");
        return(ENOMEM);
    }

    /*
     * If physical channel registration has been requested, register these first.
     * Note that we enable tagged command queueing for physical channels.
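     *
     * (The tunable is read from the kernel environment, so it would normally
     * be set from the loader, e.g. hw.mly.register_physical_channels="1" in
     * /boot/loader.conf; only its presence is tested, not its value.)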
*/ if (getenv("hw.mly.register_physical_channels") != NULL) { chn = 0; for (i = 0; i < sc->mly_controllerinfo->physical_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), sc->mly_controllerinfo->maximum_parallel_commands, 1, devq)) == NULL) { return(ENOMEM); } if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) { mly_printf(sc, "CAM XPT phsyical channel registration failed\n"); return(ENXIO); } debug(1, "registered physical channel %d", chn); } } /* * Register our virtual channels, with bus numbers matching channel numbers. */ chn = sc->mly_controllerinfo->physical_channels_present; for (i = 0; i < sc->mly_controllerinfo->virtual_channels_present; i++, chn++) { if ((sc->mly_cam_sim[chn] = cam_sim_alloc(mly_cam_action, mly_cam_poll, "mly", sc, device_get_unit(sc->mly_dev), sc->mly_controllerinfo->maximum_parallel_commands, 0, devq)) == NULL) { return(ENOMEM); } if (xpt_bus_register(sc->mly_cam_sim[chn], chn)) { mly_printf(sc, "CAM XPT virtual channel registration failed\n"); return(ENXIO); } debug(1, "registered virtual channel %d", chn); } /* * This is the total number of channels that (might have been) registered with * CAM. Some may not have been; check the mly_cam_sim array to be certain. */ sc->mly_cam_channels = sc->mly_controllerinfo->physical_channels_present + sc->mly_controllerinfo->virtual_channels_present; return(0); } /******************************************************************************** * Detach from CAM */ void mly_cam_detach(struct mly_softc *sc) { int i; debug_called(1); for (i = 0; i < sc->mly_cam_channels; i++) { if (sc->mly_cam_sim[i] != NULL) { xpt_bus_deregister(cam_sim_path(sc->mly_cam_sim[i])); cam_sim_free(sc->mly_cam_sim[i], 0); } } if (sc->mly_cam_devq != NULL) cam_simq_free(sc->mly_cam_devq); } /************************************************************************ * Rescan a device. 
*/ static void mly_cam_rescan_btl(struct mly_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO)) == NULL) { mly_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&sc->mly_cam_path, xpt_periph, cam_sim_path(sc->mly_cam_sim[bus]), target, 0) != CAM_REQ_CMP) { mly_printf(sc, "rescan failed (can't create path)\n"); return; } xpt_setup_ccb(&ccb->ccb_h, sc->mly_cam_path, 5/*priority (low)*/); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = mly_cam_rescan_callback; ccb->crcn.flags = CAM_FLAG_NONE; debug(1, "rescan target %d:%d", bus, target); xpt_action(ccb); } static void mly_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb) { free(ccb, M_TEMP); } /******************************************************************************** * Handle an action requested by CAM */ static void mly_cam_action(struct cam_sim *sim, union ccb *ccb) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!mly_cam_action_io(sim, (struct ccb_scsiio *)&ccb->csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; u_int32_t secs_per_cylinder; debug(2, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (sc->mly_controllerparam->bios_geometry == MLY_BIOSGEOM_8G) { ccg->heads = 255; ccg->secs_per_track = 63; } else { /* MLY_BIOSGEOM_2G */ ccg->heads = 128; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; debug(2, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX extra flags for physical channels? */ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = MLY_MAX_TARGETS - 1; cpi->max_lun = MLY_MAX_LUNS - 1; cpi->initiator_id = sc->mly_controllerparam->initiator_id; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; /* XXX validate bus/target? */ debug(2, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); cts->valid = 0; /* logical device? */ if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) { /* nothing special for these */ /* physical device? 
         */
        } else if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PHYSICAL) {

            /* allow CAM to try tagged transactions */
            cts->flags |= CCB_TRANS_TAG_ENB;
            cts->valid |= CCB_TRANS_TQ_VALID;

            /* convert speed (MHz) to usec */
            if (sc->mly_btl[bus][target].mb_speed == 0) {
                cts->sync_period = 1000000 / 5;
            } else {
                cts->sync_period = 1000000 / sc->mly_btl[bus][target].mb_speed;
            }

            /* convert bus width to CAM internal encoding */
            switch (sc->mly_btl[bus][target].mb_width) {
            case 32:
                cts->bus_width = MSG_EXT_WDTR_BUS_32_BIT;
                break;
            case 16:
                cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                break;
            case 8:
            default:
                cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
                break;
            }
            cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_BUS_WIDTH_VALID;

            /* not a device, bail out */
        } else {
            cts->ccb_h.status = CAM_REQ_CMP_ERR;
            break;
        }

        /* disconnect always OK */
        cts->flags |= CCB_TRANS_DISC_ENB;
        cts->valid |= CCB_TRANS_DISC_VALID;

        cts->ccb_h.status = CAM_REQ_CMP;
        break;
    }

    default:            /* we can't do this */
        debug(2, "unsupported func_code = 0x%x", ccb->ccb_h.func_code);
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }

    xpt_done(ccb);
}

/********************************************************************************
 * Handle an I/O operation requested by CAM
 */
static int
mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
{
    struct mly_softc                    *sc = cam_sim_softc(sim);
    struct mly_command                  *mc;
    struct mly_command_scsi_small       *ss;
    int                                 bus, target;
    int                                 error;

    bus = cam_sim_bus(sim);
    target = csio->ccb_h.target_id;

    debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun);

    /* validate bus number */
    if (!MLY_BUS_IS_VALID(sc, bus)) {
        debug(0, " invalid bus %d", bus);
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* check for I/O attempt to a protected device */
    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_PROTECTED) {
        debug(2, " device protected");
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* check for I/O attempt to nonexistent device */
    if (!(sc->mly_btl[bus][target].mb_flags & (MLY_BTL_LOGICAL | MLY_BTL_PHYSICAL))) {
        debug(2, " device %d:%d does not exist", bus, target);
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* XXX increase if/when we support large SCSI commands */
    if (csio->cdb_len > MLY_CMD_SCSI_SMALL_CDB) {
        debug(0, " command too large (%d > %d)", csio->cdb_len, MLY_CMD_SCSI_SMALL_CDB);
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* check that the CDB pointer is not to a physical address */
    if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) {
        debug(0, " CDB pointer is to physical address");
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
    }

    /* if there is data transfer, it must be to/from a virtual address */
    if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if (csio->ccb_h.flags & CAM_DATA_PHYS) {        /* we can't map it */
            debug(0, " data pointer is to physical address");
            csio->ccb_h.status = CAM_REQ_CMP_ERR;
        }
        if (csio->ccb_h.flags & CAM_SCATTER_VALID) {    /* we want to do the s/g setup */
            debug(0, " data has premature s/g setup");
            csio->ccb_h.status = CAM_REQ_CMP_ERR;
        }
    }

    /* abandon aborted ccbs or those that have failed validation */
    if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        debug(2, "abandoning CCB due to abort/validation failure");
        return(EINVAL);
    }

    /*
     * Get a command, or push the ccb back to CAM and freeze the queue.
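     *
     * (CAM_REQUEUE_REQ asks CAM to hold the CCB and resubmit it later;
     * freezing the SIM queue first stops further requests from arriving
     * while we are out of command slots.)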
*/ if ((error = mly_alloc_command(sc, &mc))) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; return(error); } /* build the command */ mc->mc_data = csio->data_ptr; mc->mc_length = csio->dxfer_len; mc->mc_complete = mly_cam_complete; mc->mc_private = csio; /* save the bus number in the ccb for later recovery XXX should be a better way */ csio->ccb_h.sim_priv.entries[0].field = bus; /* build the packet for the controller */ ss = &mc->mc_packet->scsi_small; ss->opcode = MDACMD_SCSI; if (csio->ccb_h.flags & CAM_DIS_DISCONNECT) ss->command_control.disable_disconnect = 1; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ss->command_control.data_direction = MLY_CCB_WRITE; ss->data_size = csio->dxfer_len; ss->addr.phys.lun = csio->ccb_h.target_lun; ss->addr.phys.target = csio->ccb_h.target_id; ss->addr.phys.channel = bus; if (csio->ccb_h.timeout < (60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / 1000; ss->timeout.scale = MLY_TIMEOUT_SECONDS; } else if (csio->ccb_h.timeout < (60 * 60 * 1000)) { ss->timeout.value = csio->ccb_h.timeout / (60 * 1000); ss->timeout.scale = MLY_TIMEOUT_MINUTES; } else { ss->timeout.value = csio->ccb_h.timeout / (60 * 60 * 1000); /* overflow? */ ss->timeout.scale = MLY_TIMEOUT_HOURS; } ss->maximum_sense_size = csio->sense_len; ss->cdb_length = csio->cdb_len; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, ss->cdb, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, ss->cdb, csio->cdb_len); } /* give the command to the controller */ if ((error = mly_start(mc))) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_REQUEUE_REQ; return(error); } return(0); } /******************************************************************************** * Check for possibly-completed commands. */ static void mly_cam_poll(struct cam_sim *sim) { struct mly_softc *sc = cam_sim_softc(sim); debug_called(2); mly_done(sc); } /******************************************************************************** * Handle completion of a command - pass results back through the CCB */ static void mly_cam_complete(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct ccb_scsiio *csio = (struct ccb_scsiio *)mc->mc_private; struct scsi_inquiry_data *inq = (struct scsi_inquiry_data *)csio->data_ptr; struct mly_btl *btl; u_int8_t cmd; int bus, target; debug_called(2); csio->scsi_status = mc->mc_status; switch(mc->mc_status) { case SCSI_STATUS_OK: /* * In order to report logical device type and status, we overwrite * the result of the INQUIRY command to logical devices. */ bus = csio->ccb_h.sim_priv.entries[0].field; target = csio->ccb_h.target_id; /* XXX validate bus/target? 
         */
        if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_LOGICAL) {
            if (csio->ccb_h.flags & CAM_CDB_POINTER) {
                cmd = *csio->cdb_io.cdb_ptr;
            } else {
                cmd = csio->cdb_io.cdb_bytes[0];
            }
            if (cmd == INQUIRY) {
                btl = &sc->mly_btl[bus][target];
                padstr(inq->vendor, mly_describe_code(mly_table_device_type, btl->mb_type), 8);
                padstr(inq->product, mly_describe_code(mly_table_device_state, btl->mb_state), 16);
                padstr(inq->revision, "", 4);
            }
        }

        debug(2, "SCSI_STATUS_OK");
        csio->ccb_h.status = CAM_REQ_CMP;
        break;

    case SCSI_STATUS_CHECK_COND:
        debug(1, "SCSI_STATUS_CHECK_COND sense %d resid %d", mc->mc_sense, mc->mc_resid);
        csio->ccb_h.status = CAM_SCSI_STATUS_ERROR;
        bzero(&csio->sense_data, SSD_FULL_SIZE);
        bcopy(mc->mc_packet, &csio->sense_data, mc->mc_sense);
        csio->sense_len = mc->mc_sense;
        csio->ccb_h.status |= CAM_AUTOSNS_VALID;
        csio->resid = mc->mc_resid;     /* XXX this is a signed value... */
        break;

    case SCSI_STATUS_BUSY:
        debug(1, "SCSI_STATUS_BUSY");
        csio->ccb_h.status = CAM_SCSI_BUSY;
        break;

    default:
        debug(1, "unknown status 0x%x", csio->scsi_status);
        csio->ccb_h.status = CAM_REQ_CMP_ERR;
        break;
    }
    xpt_done((union ccb *)csio);
    mly_release_command(mc);
}

/********************************************************************************
 * Find a peripheral attached at (bus),(target)
 */
static struct cam_periph *
mly_find_periph(struct mly_softc *sc, int bus, int target)
{
    struct cam_periph   *periph;
    struct cam_path     *path;
    int                 status;

    status = xpt_create_path(&path, NULL, cam_sim_path(sc->mly_cam_sim[bus]), target, 0);
    if (status == CAM_REQ_CMP) {
        periph = cam_periph_find(path, NULL);
        xpt_free_path(path);
    } else {
        periph = NULL;
    }
    return(periph);
}

/********************************************************************************
 * Name the device at (bus)(target)
 */
int
mly_name_device(struct mly_softc *sc, int bus, int target)
{
    struct cam_periph   *periph;

    if ((periph = mly_find_periph(sc, bus, target)) != NULL) {
        sprintf(sc->mly_btl[bus][target].mb_name, "%s%d", periph->periph_name, periph->unit_number);
        return(0);
    }
    sc->mly_btl[bus][target].mb_name[0] = 0;
    return(ENOENT);
}

/********************************************************************************
 ********************************************************************************
                                                                 Hardware Control
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Handshake with the firmware while the card is being initialised.
 */
static int
mly_fwhandshake(struct mly_softc *sc)
{
    u_int8_t    error, param0, param1;
    int         spinup = 0;

    debug_called(1);

    /* set HM_STSACK and let the firmware initialise */
    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    DELAY(1000);        /* too short? */

    /* if HM_STSACK is still true, the controller is initialising */
    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
        return(0);
    mly_printf(sc, "controller initialisation started\n");

    /* spin waiting for initialisation to finish, or for a message to be delivered */
    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
        /* check for a message */
        if (MLY_ERROR_VALID(sc)) {
            error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
            param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
            param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);

            switch(error) {
            case MLY_MSG_SPINUP:
                if (!spinup) {
                    mly_printf(sc, "drive spinup in progress\n");
                    spinup = 1; /* only print this once (should print drive being spun?
*/ } break; case MLY_MSG_RACE_RECOVERY_FAIL: mly_printf(sc, "mirror race recovery failed, one or more drives offline\n"); break; case MLY_MSG_RACE_IN_PROGRESS: mly_printf(sc, "mirror race recovery in progress\n"); break; case MLY_MSG_RACE_ON_CRITICAL: mly_printf(sc, "mirror race recovery on a critical drive\n"); break; case MLY_MSG_PARITY_ERROR: mly_printf(sc, "FATAL MEMORY PARITY ERROR\n"); return(ENXIO); default: mly_printf(sc, "unknown initialisation code 0x%x\n", error); } } } return(0); } /******************************************************************************** ******************************************************************************** Debugging and Diagnostics ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Print some information about the controller. */ static void mly_describe_controller(struct mly_softc *sc) { struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo; mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "", mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? */ mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day, mi->memory_size); if (bootverbose) { mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n", mly_describe_code(mly_table_oemname, mi->oem_information), mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type, mi->interface_speed, mi->interface_width, mi->interface_name); mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n", mi->memory_size, mi->memory_speed, mi->memory_width, mly_describe_code(mly_table_memorytype, mi->memory_type), mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "", mi->cache_size); mly_printf(sc, "CPU: %s @ %dMHZ\n", mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed); if (mi->l2cache_size != 0) mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size); if (mi->exmemory_size != 0) mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n", mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width, mly_describe_code(mly_table_memorytype, mi->exmemory_type), mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": ""); mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed"); mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n", mi->maximum_block_count, mi->maximum_sg_entries); mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n", mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline); mly_printf(sc, "physical devices present %d\n", mi->physical_devices_present); mly_printf(sc, "physical disks present/offline %d/%d\n", mi->physical_disks_present, mi->physical_disks_offline); mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n", mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s", mi->virtual_channels_present, mi->virtual_channels_present == 1 ? 
"" : "s", mi->virtual_channels_possible); mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands); mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n", mi->flash_size, mi->flash_age, mi->flash_maximum_age); } } #ifdef MLY_DEBUG /******************************************************************************** * Print some controller state */ static void mly_printstate(struct mly_softc *sc) { mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n", MLY_GET_REG(sc, sc->mly_idbr), MLY_GET_REG(sc, sc->mly_odbr), MLY_GET_REG(sc, sc->mly_error_status), sc->mly_idbr, sc->mly_odbr, sc->mly_error_status); mly_printf(sc, "IMASK %02x ISTATUS %02x\n", MLY_GET_REG(sc, sc->mly_interrupt_mask), MLY_GET_REG(sc, sc->mly_interrupt_status)); mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n", MLY_GET_REG(sc, sc->mly_command_mailbox), MLY_GET_REG(sc, sc->mly_command_mailbox + 1), MLY_GET_REG(sc, sc->mly_command_mailbox + 2), MLY_GET_REG(sc, sc->mly_command_mailbox + 3), MLY_GET_REG(sc, sc->mly_command_mailbox + 4), MLY_GET_REG(sc, sc->mly_command_mailbox + 5), MLY_GET_REG(sc, sc->mly_command_mailbox + 6), MLY_GET_REG(sc, sc->mly_command_mailbox + 7)); mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n", MLY_GET_REG(sc, sc->mly_status_mailbox), MLY_GET_REG(sc, sc->mly_status_mailbox + 1), MLY_GET_REG(sc, sc->mly_status_mailbox + 2), MLY_GET_REG(sc, sc->mly_status_mailbox + 3), MLY_GET_REG(sc, sc->mly_status_mailbox + 4), MLY_GET_REG(sc, sc->mly_status_mailbox + 5), MLY_GET_REG(sc, sc->mly_status_mailbox + 6), MLY_GET_REG(sc, sc->mly_status_mailbox + 7)); mly_printf(sc, " %04x %08x\n", MLY_GET_REG2(sc, sc->mly_status_mailbox), MLY_GET_REG4(sc, sc->mly_status_mailbox + 4)); } struct mly_softc *mly_softc0 = NULL; void mly_printstate0(void) { if (mly_softc0 != NULL) mly_printstate(mly_softc0); } /******************************************************************************** * Print a command */ static void mly_print_command(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; mly_printf(sc, "COMMAND @ %p\n", mc); mly_printf(sc, " slot %d\n", mc->mc_slot); mly_printf(sc, " status 0x%x\n", mc->mc_status); mly_printf(sc, " sense len %d\n", mc->mc_sense); mly_printf(sc, " resid %d\n", mc->mc_resid); mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys); if (mc->mc_packet != NULL) mly_print_packet(mc); mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length); mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n"); mly_printf(sc, " complete %p\n", mc->mc_complete); mly_printf(sc, " private %p\n", mc->mc_private); } /******************************************************************************** * Print a command packet */ static void mly_print_packet(struct mly_command *mc) { struct mly_softc *sc = mc->mc_sc; struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet; struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet; struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet; struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet; int transfer; mly_printf(sc, " command_id %d\n", ge->command_id); mly_printf(sc, " opcode %d\n", ge->opcode); mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n", ge->command_control.force_unit_access, ge->command_control.disable_page_out, ge->command_control.extended_sg_table, (ge->command_control.data_direction == MLY_CCB_WRITE) ? 
"WRITE" : "READ", ge->command_control.no_auto_sense, ge->command_control.disable_disconnect); mly_printf(sc, " data_size %d\n", ge->data_size); mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address); mly_printf(sc, " lun %d\n", ge->addr.phys.lun); mly_printf(sc, " target %d\n", ge->addr.phys.target); mly_printf(sc, " channel %d\n", ge->addr.phys.channel); mly_printf(sc, " logical device %d\n", ge->addr.log.logdev); mly_printf(sc, " controller %d\n", ge->addr.phys.controller); mly_printf(sc, " timeout %d %s\n", ge->timeout.value, (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" : ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours")); mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size); switch(ge->opcode) { case MDACMD_SCSIPT: case MDACMD_SCSI: mly_printf(sc, " cdb length %d\n", ss->cdb_length); mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " "); transfer = 1; break; case MDACMD_SCSILC: case MDACMD_SCSILCPT: mly_printf(sc, " cdb length %d\n", sl->cdb_length); mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr); transfer = 1; break; case MDACMD_IOCTL: mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl); switch(io->sub_ioctl) { case MDACIOCTL_SETMEMORYMAILBOX: mly_printf(sc, " health_buffer_size %d\n", io->param.setmemorymailbox.health_buffer_size); mly_printf(sc, " health_buffer_phys 0x%llx\n", io->param.setmemorymailbox.health_buffer_physaddr); mly_printf(sc, " command_mailbox 0x%llx\n", io->param.setmemorymailbox.command_mailbox_physaddr); mly_printf(sc, " status_mailbox 0x%llx\n", io->param.setmemorymailbox.status_mailbox_physaddr); transfer = 0; break; case MDACIOCTL_SETREALTIMECLOCK: case MDACIOCTL_GETHEALTHSTATUS: case MDACIOCTL_GETCONTROLLERINFO: case MDACIOCTL_GETLOGDEVINFOVALID: case MDACIOCTL_GETPHYSDEVINFOVALID: case MDACIOCTL_GETPHYSDEVSTATISTICS: case MDACIOCTL_GETLOGDEVSTATISTICS: case MDACIOCTL_GETCONTROLLERSTATISTICS: case MDACIOCTL_GETBDT_FOR_SYSDRIVE: case MDACIOCTL_CREATENEWCONF: case MDACIOCTL_ADDNEWCONF: case MDACIOCTL_GETDEVCONFINFO: case MDACIOCTL_GETFREESPACELIST: case MDACIOCTL_MORE: case MDACIOCTL_SETPHYSDEVPARAMETER: case MDACIOCTL_GETPHYSDEVPARAMETER: case MDACIOCTL_GETLOGDEVPARAMETER: case MDACIOCTL_SETLOGDEVPARAMETER: mly_printf(sc, " param %10D\n", io->param.data.param, " "); transfer = 1; break; case MDACIOCTL_GETEVENT: mly_printf(sc, " event %d\n", io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16)); transfer = 1; break; case MDACIOCTL_SETRAIDDEVSTATE: mly_printf(sc, " state %d\n", io->param.setraiddevstate.state); transfer = 0; break; case MDACIOCTL_XLATEPHYSDEVTORAIDDEV: mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device); mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller); mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel); mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target); mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun); transfer = 0; break; case MDACIOCTL_GETGROUPCONFINFO: mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group); transfer = 1; break; case MDACIOCTL_GET_SUBSYSTEM_DATA: case MDACIOCTL_SET_SUBSYSTEM_DATA: case MDACIOCTL_STARTDISOCVERY: case MDACIOCTL_INITPHYSDEVSTART: case MDACIOCTL_INITPHYSDEVSTOP: case MDACIOCTL_INITRAIDDEVSTART: case MDACIOCTL_INITRAIDDEVSTOP: case MDACIOCTL_REBUILDRAIDDEVSTART: case MDACIOCTL_REBUILDRAIDDEVSTOP: case MDACIOCTL_MAKECONSISTENTDATASTART: case MDACIOCTL_MAKECONSISTENTDATASTOP: case 
MDACIOCTL_CONSISTENCYCHECKSTART: case MDACIOCTL_CONSISTENCYCHECKSTOP: case MDACIOCTL_RESETDEVICE: case MDACIOCTL_FLUSHDEVICEDATA: case MDACIOCTL_PAUSEDEVICE: case MDACIOCTL_UNPAUSEDEVICE: case MDACIOCTL_LOCATEDEVICE: case MDACIOCTL_SETMASTERSLAVEMODE: case MDACIOCTL_DELETERAIDDEV: case MDACIOCTL_REPLACEINTERNALDEV: case MDACIOCTL_CLEARCONF: case MDACIOCTL_GETCONTROLLERPARAMETER: case MDACIOCTL_SETCONTRLLERPARAMETER: case MDACIOCTL_CLEARCONFSUSPMODE: case MDACIOCTL_STOREIMAGE: case MDACIOCTL_READIMAGE: case MDACIOCTL_FLASHIMAGES: case MDACIOCTL_RENAMERAIDDEV: default: /* no idea what to print */ transfer = 0; break; } break; case MDACMD_IOCTLCHECK: case MDACMD_MEMCOPY: default: transfer = 0; break; /* print nothing */ } if (transfer) { if (ge->command_control.extended_sg_table) { mly_printf(sc, " sg table 0x%llx/%d\n", ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]); } else { mly_printf(sc, " 0000 0x%llx/%lld\n", ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length); mly_printf(sc, " 0001 0x%llx/%lld\n", ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length); } } } /******************************************************************************** * Panic in a slightly informative fashion */ static void mly_panic(struct mly_softc *sc, char *reason) { mly_printstate(sc); panic(reason); } /******************************************************************************** * Print queue statistics, callable from DDB. */ void mly_print_controller(int controller) { struct mly_softc *sc; if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { printf("mly: controller %d invalid\n", controller); } else { device_printf(sc->mly_dev, "queue curr max\n"); device_printf(sc->mly_dev, "free %04d/%04d\n", sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); device_printf(sc->mly_dev, "busy %04d/%04d\n", sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); device_printf(sc->mly_dev, "complete %04d/%04d\n", sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); } } #endif /******************************************************************************** ******************************************************************************** Control device interface ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Accept an open operation on the control device. */ static int mly_user_open(dev_t dev, int flags, int fmt, struct thread *td) { int unit = minor(dev); struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); sc->mly_state |= MLY_STATE_OPEN; return(0); } /******************************************************************************** * Accept the last close on the control device. */ static int mly_user_close(dev_t dev, int flags, int fmt, struct thread *td) { int unit = minor(dev); struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); sc->mly_state &= ~MLY_STATE_OPEN; return (0); } /******************************************************************************** * Handle controller-specific control operations. 
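 * MLYIO_COMMAND passes a raw controller command through from userspace;
 * MLYIO_HEALTH blocks waiting for a change in the controller's health
 * status. Anything else is rejected with ENOIOCTL.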
*/ static int mly_user_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; struct mly_user_command *uc = (struct mly_user_command *)addr; struct mly_user_health *uh = (struct mly_user_health *)addr; switch(cmd) { case MLYIO_COMMAND: return(mly_user_command(sc, uc)); case MLYIO_HEALTH: return(mly_user_health(sc, uh)); default: return(ENOIOCTL); } } /******************************************************************************** * Execute a command passed in from userspace. * * The control structure contains the actual command for the controller, as well * as the user-space data pointer and data size, and an optional sense buffer * size/pointer. On completion, the data size is adjusted to the command * residual, and the sense buffer size to the size of the returned sense data. * */ static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) { struct mly_command *mc = NULL; int error, s; /* allocate a command */ if (mly_alloc_command(sc, &mc)) { error = ENOMEM; goto out; /* XXX Linux version will wait for a command */ } /* handle data size/direction */ mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength; if (mc->mc_length > 0) { if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) { error = ENOMEM; goto out; } } if (uc->DataTransferLength > 0) { mc->mc_flags |= MLY_CMD_DATAIN; bzero(mc->mc_data, mc->mc_length); } if (uc->DataTransferLength < 0) { mc->mc_flags |= MLY_CMD_DATAOUT; if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) goto out; } /* copy the controller command */ bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); /* clear command completion handler so that we get woken up */ mc->mc_complete = NULL; /* execute the command */ if ((error = mly_start(mc)) != 0) goto out; s = splcam(); while (!(mc->mc_flags & MLY_CMD_COMPLETE)) tsleep(mc, PRIBIO, "mlyioctl", 0); splx(s); /* return the data to userspace */ if (uc->DataTransferLength > 0) if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) goto out; /* return the sense buffer to userspace */ if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, min(uc->RequestSenseLength, mc->mc_sense))) != 0) goto out; } /* return command results to userspace (caller will copy out) */ uc->DataTransferLength = mc->mc_resid; uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); uc->CommandStatus = mc->mc_status; error = 0; out: /* mc is NULL if command allocation failed above */ if (mc != NULL) { if (mc->mc_data != NULL) free(mc->mc_data, M_DEVBUF); mly_release_command(mc); } return(error); } /******************************************************************************** * Return health status to userspace. If the health change index in the user * structure does not match that currently exported by the controller, we * return the current status immediately. Otherwise, we block until either * interrupted or new status is delivered.
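 * (The wait uses tsleep() with PCATCH, so delivery of a signal to the
 * caller terminates the wait and the ioctl returns.)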
*/ static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) { struct mly_health_status mh; int error, s; /* fetch the current health status from userspace */ if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) return(error); /* block waiting for a status update or a signal */ s = splcam(); error = 0; while ((error == 0) && (sc->mly_event_change == mh.change_counter)) error = tsleep(&sc->mly_event_change, PRIBIO | PCATCH, "mlyhealth", 0); splx(s); if (error != 0) return(error); /* copy the controller's health status buffer out (there is a race here if it changes again) */ error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, sizeof(sc->mly_mmbox->mmm_health.status)); return(error); } Index: head/sys/kern/subr_taskqueue.c =================================================================== --- head/sys/kern/subr_taskqueue.c (revision 85520) +++ head/sys/kern/subr_taskqueue.c (revision 85521) @@ -1,206 +1,252 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ #include -#include -#include #include +#include #include -#include +#include #include #include +#include +#include static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues"); static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues; static void *taskqueue_ih; +static struct mtx taskqueue_queues_mutex; struct taskqueue { STAILQ_ENTRY(taskqueue) tq_link; STAILQ_HEAD(, task) tq_queue; const char *tq_name; taskqueue_enqueue_fn tq_enqueue; void *tq_context; int tq_draining; + struct mtx tq_mutex; }; +static void init_taskqueue_list(void *data); + +static void +init_taskqueue_list(void *data __unused) +{ + + mtx_init(&taskqueue_queues_mutex, "taskqueue list", MTX_DEF); + STAILQ_INIT(&taskqueue_queues); +} +SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list, + NULL); + +void +task_init(struct task *task, int priority, task_fn_t *func, void *context) +{ + + KASSERT(task != NULL, ("task == NULL")); + + mtx_init(&task->ta_mutex, "task", MTX_DEF); + mtx_lock(&task->ta_mutex); + task->ta_pending = 0; + task->ta_priority = priority; + task->ta_func = func; + task->ta_context = context; + mtx_unlock(&task->ta_mutex); +} + +void +task_destroy(struct task *task) +{ + + mtx_destroy(&task->ta_mutex); +} + struct taskqueue * taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { struct taskqueue *queue; - static int once = 1; - int s; - queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags); + queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO); if (!queue) return 0; + STAILQ_INIT(&queue->tq_queue); queue->tq_name = name; queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_draining = 0; + mtx_init(&queue->tq_mutex, "taskqueue", MTX_DEF); - s = splhigh(); - if (once) { - STAILQ_INIT(&taskqueue_queues); - once = 0; - } + mtx_lock(&taskqueue_queues_mutex); STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link); - splx(s); + mtx_unlock(&taskqueue_queues_mutex); return queue; } void taskqueue_free(struct taskqueue *queue) { - int s = splhigh(); + + mtx_lock(&queue->tq_mutex); queue->tq_draining = 1; - splx(s); + mtx_unlock(&queue->tq_mutex); taskqueue_run(queue); - s = splhigh(); + mtx_lock(&taskqueue_queues_mutex); STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link); - splx(s); + mtx_unlock(&taskqueue_queues_mutex); + mtx_destroy(&queue->tq_mutex); free(queue, M_TASKQUEUE); } +/* + * Returns with the taskqueue locked. + */ struct taskqueue * taskqueue_find(const char *name) { struct taskqueue *queue; - int s; - s = splhigh(); - STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) + mtx_lock(&taskqueue_queues_mutex); + STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) { + mtx_lock(&queue->tq_mutex); if (!strcmp(queue->tq_name, name)) { - splx(s); + mtx_unlock(&taskqueue_queues_mutex); return queue; } - splx(s); + mtx_unlock(&queue->tq_mutex); + } + mtx_unlock(&taskqueue_queues_mutex); return 0; } int taskqueue_enqueue(struct taskqueue *queue, struct task *task) { struct task *ins; struct task *prev; - int s = splhigh(); - /* * Don't allow new tasks on a queue which is being freed. */ + mtx_lock(&queue->tq_mutex); if (queue->tq_draining) { - splx(s); + mtx_unlock(&queue->tq_mutex); return EPIPE; } /* * Count multiple enqueues. */ + mtx_lock(&task->ta_mutex); if (task->ta_pending) { task->ta_pending++; - splx(s); + mtx_unlock(&task->ta_mutex); + mtx_unlock(&queue->tq_mutex); return 0; } /* * Optimise the case when all tasks have the same priority. 
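 * The queue is kept in non-increasing priority order, so if the new task
 * does not have a higher priority than the tail we can append in O(1);
 * otherwise we walk from the head and insert in front of the first entry
 * whose priority is lower.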
*/ prev = STAILQ_LAST(&queue->tq_queue, task, ta_link); if (!prev || prev->ta_priority >= task->ta_priority) { STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link); } else { prev = 0; for (ins = STAILQ_FIRST(&queue->tq_queue); ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link)) if (ins->ta_priority < task->ta_priority) break; if (prev) STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link); else STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link); } task->ta_pending = 1; + mtx_unlock(&task->ta_mutex); + if (queue->tq_enqueue) queue->tq_enqueue(queue->tq_context); - - splx(s); - + mtx_unlock(&queue->tq_mutex); return 0; } void taskqueue_run(struct taskqueue *queue) { - int s; struct task *task; + task_fn_t *saved_func; + void *arg; int pending; - s = splhigh(); + mtx_lock(&queue->tq_mutex); while (STAILQ_FIRST(&queue->tq_queue)) { /* * Carefully remove the first task from the queue and * zero its pending count. */ task = STAILQ_FIRST(&queue->tq_queue); + mtx_lock(&task->ta_mutex); STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); + mtx_unlock(&queue->tq_mutex); pending = task->ta_pending; task->ta_pending = 0; - splx(s); + saved_func = task->ta_func; + arg = task->ta_context; + mtx_unlock(&task->ta_mutex); - task->ta_func(task->ta_context, pending); + saved_func(arg, pending); - s = splhigh(); + mtx_lock(&queue->tq_mutex); } - splx(s); + mtx_unlock(&queue->tq_mutex); } static void taskqueue_swi_enqueue(void *context) { swi_sched(taskqueue_ih, SWI_NOSWITCH); } static void taskqueue_swi_run(void *dummy) { taskqueue_run(taskqueue_swi); } TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0, swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 0, &taskqueue_ih)); Index: head/sys/sys/taskqueue.h =================================================================== --- head/sys/sys/taskqueue.h (revision 85520) +++ head/sys/sys/taskqueue.h (revision 85521) @@ -1,115 +1,123 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_TASKQUEUE_H_ #define _SYS_TASKQUEUE_H_ #ifndef _KERNEL #error "no user-servicable parts inside" #endif #include +#include +#include struct taskqueue; /* * Each task includes a function which is called from * taskqueue_run(). 
The first argument is taken from the 'ta_context' * field of struct task and the second argument is a count of how many * times the task was enqueued before the call to taskqueue_run(). */ typedef void task_fn_t(void *context, int pending); /* * A notification callback function which is called from * taskqueue_enqueue(). The context argument is given in the call to * taskqueue_create(). This function would normally be used to allow the * queue to arrange to run itself later (e.g., by scheduling a software * interrupt or waking a kernel thread). */ typedef void (*taskqueue_enqueue_fn)(void *context); struct task { STAILQ_ENTRY(task) ta_link; /* link for queue */ - int ta_pending; /* count times queued */ - int ta_priority; /* priority of task in queue */ - task_fn_t *ta_func; /* task handler */ - void *ta_context; /* argument for handler */ + int ta_pending; /* count times queued */ + int ta_priority; /* priority of task in queue */ + task_fn_t *ta_func; /* task handler */ + void *ta_context; /* argument for handler */ + struct mtx ta_mutex; /* lock for each task */ }; +void task_init(struct task *task, int priority, task_fn_t *func, + void *context); +void task_destroy(struct task *task); struct taskqueue *taskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context); int taskqueue_enqueue(struct taskqueue *queue, struct task *task); struct taskqueue *taskqueue_find(const char *name); void taskqueue_free(struct taskqueue *queue); void taskqueue_run(struct taskqueue *queue); /* * Initialise a task structure. */ -#define TASK_INIT(task, priority, func, context) do { \ - (task)->ta_pending = 0; \ - (task)->ta_priority = (priority); \ - (task)->ta_func = (func); \ - (task)->ta_context = (context); \ -} while (0) +#define TASK_INIT(task, priority, func, context) \ + task_init((task), (priority), (func), (context)) + +/* + * Destroy a task structure. + */ +#define TASK_DESTROY(task) \ + task_destroy((task)) /* * Declare a reference to a taskqueue. */ #define TASKQUEUE_DECLARE(name) \ extern struct taskqueue *taskqueue_##name /* * Define and initialise a taskqueue. */ #define TASKQUEUE_DEFINE(name, enqueue, context, init) \ \ struct taskqueue *taskqueue_##name; \ \ static void \ taskqueue_define_##name(void *arg) \ { \ taskqueue_##name = \ taskqueue_create(#name, M_NOWAIT, (enqueue), (context)); \ init; \ } \ \ SYSINIT(taskqueue_##name, SI_SUB_CONFIGURE, SI_ORDER_SECOND, \ taskqueue_define_##name, NULL) \ \ struct __hack /* * This queue is serviced by a software interrupt handler. To enqueue * a task, call taskqueue_enqueue(taskqueue_swi, &task). */ TASKQUEUE_DECLARE(swi); #endif /* !_SYS_TASKQUEUE_H_ */
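For reference, a minimal usage sketch of the revised interface (not part of the change itself). The foo_softc structure and the foo_* routines below are hypothetical, invented purely for illustration; only struct task, task_fn_t, TASK_INIT, TASK_DESTROY, taskqueue_enqueue() and taskqueue_swi come from the header above. The behavioural point to absorb is that TASK_INIT is no longer a plain structure initialiser: it now calls task_init(), which creates the per-task mutex, so every task must be paired with a TASK_DESTROY before its storage is freed or reused.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>

struct foo_softc {				/* hypothetical driver state */
	struct task	fs_task;		/* deferred-work handle */
	int		fs_events;		/* work accumulated so far */
};

static void
foo_task_fn(void *context, int pending)
{
	struct foo_softc *sc = context;

	/* 'pending' counts how many enqueues were coalesced into this run */
	sc->fs_events += pending;
}

static void
foo_attach(struct foo_softc *sc)
{
	/* priority 0; now backed by task_init(), which sets up ta_mutex */
	TASK_INIT(&sc->fs_task, 0, foo_task_fn, sc);
}

static void
foo_intr(struct foo_softc *sc)
{
	/* repeated enqueues before the task runs just bump ta_pending */
	taskqueue_enqueue(taskqueue_swi, &sc->fs_task);
}

static void
foo_detach(struct foo_softc *sc)
{
	/* required now that each task owns a mutex */
	TASK_DESTROY(&sc->fs_task);
}

Note that taskqueue_enqueue() can now return EPIPE when the queue is being drained by taskqueue_free(); a real consumer would check for that rather than ignoring the return value as this sketch does.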