Index: head/sys/dev/tws/tws.c
===================================================================
--- head/sys/dev/tws/tws.c	(revision 340417)
+++ head/sys/dev/tws/tws.c	(revision 340418)
@@ -1,908 +1,906 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver"); int tws_queue_depth = TWS_MAX_REQS; int tws_enable_msi = 0; int tws_enable_msix = 0; /* externs */ extern int tws_cam_attach(struct tws_softc *sc); extern void tws_cam_detach(struct tws_softc *sc); extern int tws_init_ctlr(struct tws_softc *sc); extern boolean tws_ctlr_ready(struct tws_softc *sc); extern void tws_turn_off_interrupts(struct tws_softc *sc); extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_request(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_head(struct tws_softc *sc, u_int8_t q_type ); extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id); extern boolean tws_ctlr_reset(struct tws_softc *sc); extern void tws_intr(void *arg); extern int tws_use_32bit_sgls; struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); int tws_init_connect(struct tws_softc *sc, u_int16_t mc); void tws_send_event(struct tws_softc *sc, u_int8_t event); uint8_t tws_get_state(struct tws_softc *sc); void tws_release_request(struct tws_request *req); /* Function prototypes */ static d_open_t tws_open; static d_close_t tws_close; static d_read_t tws_read; static d_write_t tws_write; extern d_ioctl_t tws_ioctl; static int tws_init(struct tws_softc *sc); static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size); static int tws_init_aen_q(struct tws_softc *sc); static int tws_init_trace_q(struct tws_softc *sc); static int tws_setup_irq(struct tws_softc *sc); int tws_setup_intr(struct tws_softc *sc, int irqs); int tws_teardown_intr(struct tws_softc *sc); /* Character device entry points */ static struct cdevsw tws_cdevsw = { .d_version = D_VERSION, .d_open = tws_open, .d_close = tws_close, .d_read = tws_read, .d_write = tws_write, .d_ioctl = tws_ioctl, .d_name = "tws", }; /* * In the cdevsw routines, we find our softc by using the si_drv1 member * of struct cdev. We set this variable to point to our softc in our * attach routine when we create the /dev entry. */ int tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, oflags); return (0); } int tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, fflag); return (0); } int tws_read(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } int tws_write(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } /* PCI Support Functions */ /* * Compare the device ID of this device against the IDs that this driver * supports. If there is a match, set the description and return success. 
*/ static int tws_probe(device_t dev) { static u_int8_t first_ctlr = 1; if ((pci_get_vendor(dev) == TWS_VENDOR_ID) && (pci_get_device(dev) == TWS_DEVICE_ID)) { device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller"); if (first_ctlr) { printf("LSI 3ware device driver for SAS/SATA storage " "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING); first_ctlr = 0; } return(BUS_PROBE_DEFAULT); } return (ENXIO); } /* Attach function is only called if the probe is successful. */ static int tws_attach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); u_int32_t bar; int error=0,i; /* no tracing yet */ /* Look up our softc and initialize its fields. */ sc->tws_dev = dev; sc->device_id = pci_get_device(dev); sc->subvendor_id = pci_get_subvendor(dev); sc->subdevice_id = pci_get_subdevice(dev); /* Intialize mutexes */ mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF); mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF); mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF); mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE); callout_init(&sc->stats_timer, 1); if ( tws_init_trace_q(sc) == FAILURE ) printf("trace init failure\n"); /* send init event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_START); mtx_unlock(&sc->gen_lock); #if _BYTE_ORDER == _BIG_ENDIAN TWS_TRACE(sc, "BIG endian", 0, 0); #endif /* sysctl context setup */ sysctl_ctx_init(&sc->tws_clist); sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, ""); if ( sc->tws_oidp == NULL ) { tws_log(sc, SYSCTL_TREE_NODE_ADD); goto attach_fail_1; } SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp), OID_AUTO, "driver_version", CTLFLAG_RD, TWS_DRIVER_VERSION_STRING, 0, "TWS driver version"); pci_enable_busmaster(dev); bar = pci_read_config(dev, TWS_PCI_BAR0, 4); TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0); bar = pci_read_config(dev, TWS_PCI_BAR1, 4); bar = bar & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0); /* MFA base address is BAR2 register used for * push mode. Firmware will evatualy move to * pull mode during witch this needs to change */ #ifndef TWS_PULL_MODE_ENABLE sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4); sc->mfa_base = sc->mfa_base & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0); #endif /* allocate MMIO register space */ sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */ if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_1; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); #ifndef TWS_PULL_MODE_ENABLE /* Allocate bus space for inbound mfa */ sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */ if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->mfa_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_2; } sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res); sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res); #endif /* Allocate and register our interrupt. */ sc->intr_type = TWS_INTx; /* default */ if ( tws_enable_msi ) sc->intr_type = TWS_MSI; if ( tws_setup_irq(sc) == FAILURE ) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_3; } /* * Create a /dev entry for this device. The kernel will assign us * a major number automatically. We use the unit number of this * device as the minor number and name the character device * "tws". 
*/ sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u", device_get_unit(dev)); sc->tws_cdev->si_drv1 = sc; if ( tws_init(sc) == FAILURE ) { tws_log(sc, TWS_INIT_FAILURE); goto attach_fail_4; } if ( tws_init_ctlr(sc) == FAILURE ) { tws_log(sc, TWS_CTLR_INIT_FAILURE); goto attach_fail_4; } if ((error = tws_cam_attach(sc))) { tws_log(sc, TWS_CAM_ATTACH); goto attach_fail_4; } /* send init complete event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_COMPLETE); mtx_unlock(&sc->gen_lock); TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id); return(0); attach_fail_4: tws_teardown_intr(sc); destroy_dev(sc->tws_cdev); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); attach_fail_3: for(i=0;iirqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus irq res", 0, 0); } } #ifndef TWS_PULL_MODE_ENABLE attach_fail_2: #endif if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id); } attach_fail_1: mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); sysctl_ctx_free(&sc->tws_clist); return (ENXIO); } /* Detach device. */ static int tws_detach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); int i; u_int32_t reg; TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_UNINIT_START); mtx_unlock(&sc->gen_lock); /* needs to disable interrupt before detaching from cam */ tws_turn_off_interrupts(sc); /* clear door bell */ tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4); TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0); sc->obfl_q_overrun = false; tws_init_connect(sc, 1); /* Teardown the state in our softc created in our attach routine. */ /* Disconnect the interrupt handler. 
*/ tws_teardown_intr(sc); /* Release irq resource */ for(i=0;iirqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus release irq resource", i, sc->irq_res_id[i]); } } if ( sc->intr_type == TWS_MSI ) { pci_release_msi(sc->tws_dev); } tws_cam_detach(sc); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); /* Release memory resource */ if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id); } for ( i=0; i< tws_queue_depth; i++) { if (sc->reqs[i].dma_map) bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map); callout_drain(&sc->reqs[i].timeout); } callout_drain(&sc->stats_timer); free(sc->reqs, M_TWS); free(sc->sense_bufs, M_TWS); free(sc->scan_ccb, M_TWS); if (sc->ioctl_data_mem) bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map); if (sc->data_tag) bus_dma_tag_destroy(sc->data_tag); free(sc->aen_q.q, M_TWS); free(sc->trace_q.q, M_TWS); mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); destroy_dev(sc->tws_cdev); sysctl_ctx_free(&sc->tws_clist); return (0); } int tws_setup_intr(struct tws_softc *sc, int irqs) { int i, error; for(i=0;iintr_handle[i])) { if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i], INTR_TYPE_CAM | INTR_MPSAFE, -#if (__FreeBSD_version >= 700000) NULL, -#endif tws_intr, sc, &sc->intr_handle[i]))) { tws_log(sc, SETUP_INTR_RES); return(FAILURE); } } } return(SUCCESS); } int tws_teardown_intr(struct tws_softc *sc) { int i, error; for(i=0;iirqs;i++) { if (sc->intr_handle[i]) { error = bus_teardown_intr(sc->tws_dev, sc->irq_res[i], sc->intr_handle[i]); sc->intr_handle[i] = NULL; } } return(SUCCESS); } static int tws_setup_irq(struct tws_softc *sc) { int messages; switch(sc->intr_type) { case TWS_INTx : sc->irqs = 1; sc->irq_res_id[0] = 0; sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( ! sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using legacy INTx\n"); break; case TWS_MSI : sc->irqs = 1; sc->irq_res_id[0] = 1; messages = 1; if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) { TWS_TRACE(sc, "pci alloc msi fail", 0, messages); return(FAILURE); } sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( !sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using MSI\n"); break; } return(SUCCESS); } static int tws_init(struct tws_softc *sc) { u_int32_t max_sg_elements; u_int32_t dma_mem_size; int error; u_int32_t reg; sc->seq_id = 0; if ( tws_queue_depth > TWS_MAX_REQS ) tws_queue_depth = TWS_MAX_REQS; if (tws_queue_depth < TWS_RESERVED_REQS+1) tws_queue_depth = TWS_RESERVED_REQS+1; sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false; max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ? 
TWS_MAX_64BIT_SG_ELEMENTS : TWS_MAX_32BIT_SG_ELEMENTS; dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) + (TWS_SECTOR_SIZE) ; if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ max_sg_elements, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->parent_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* In bound message frame requires 16byte alignment. * Outbound MF's can live with 4byte alignment - for now just * use 16 for both. */ if ( bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_IN_MF_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem_size, /* maxsize */ 1, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->cmd_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */ sc->dma_mem_phys=0; error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, dma_mem_size, tws_dmamap_cmds_load_cbfn, &sc->dma_mem_phys, 0); /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TWS_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TWS_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->io_lock, /* lockfuncarg */ &sc->data_tag /* tag */)) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO); if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) { device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n"); return(ENOMEM); } if ( !tws_ctlr_ready(sc) ) if( !tws_ctlr_reset(sc) ) return(FAILURE); bzero(&sc->stats, sizeof(struct tws_stats)); tws_init_qs(sc); tws_turn_off_interrupts(sc); /* * enable pull mode by setting bit1 . * setting bit0 to 1 will enable interrupt coalesing * will revisit. 
*/ #ifdef TWS_PULL_MODE_ENABLE reg = tws_read_reg(sc, TWS_I2O0_CTL, 4); TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL); tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4); #endif TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL); if ( tws_init_reqs(sc, dma_mem_size) == FAILURE ) return(FAILURE); if ( tws_init_aen_q(sc) == FAILURE ) return(FAILURE); return(SUCCESS); } static int tws_init_aen_q(struct tws_softc *sc) { sc->aen_q.head=0; sc->aen_q.tail=0; sc->aen_q.depth=256; sc->aen_q.overflow=0; sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth, M_TWS, M_WAITOK | M_ZERO); return(SUCCESS); } static int tws_init_trace_q(struct tws_softc *sc) { sc->trace_q.head=0; sc->trace_q.tail=0; sc->trace_q.depth=256; sc->trace_q.overflow=0; sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth, M_TWS, M_WAITOK | M_ZERO); return(SUCCESS); } static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size) { struct tws_command_packet *cmd_buf; cmd_buf = (struct tws_command_packet *)sc->dma_mem; int i; bzero(cmd_buf, dma_mem_size); TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0); mtx_lock(&sc->q_lock); for ( i=0; i< tws_queue_depth; i++) { if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) { /* log a ENOMEM failure msg here */ mtx_unlock(&sc->q_lock); return(FAILURE); } sc->reqs[i].cmd_pkt = &cmd_buf[i]; sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ; sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys + (i * sizeof(struct tws_command_packet)); sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys + sizeof(struct tws_command_header) + (i * sizeof(struct tws_command_packet)); sc->reqs[i].request_id = i; sc->reqs[i].sc = sc; sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128; callout_init(&sc->reqs[i].timeout, 1); sc->reqs[i].state = TWS_REQ_STATE_FREE; if ( i >= TWS_RESERVED_REQS ) tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q); } mtx_unlock(&sc->q_lock); return(SUCCESS); } static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error) { /* printf("command load done \n"); */ *((bus_addr_t *)arg) = segs[0].ds_addr; } void tws_send_event(struct tws_softc *sc, u_int8_t event) { mtx_assert(&sc->gen_lock, MA_OWNED); TWS_TRACE_DEBUG(sc, "received event ", 0, event); switch (event) { case TWS_INIT_START: sc->tws_state = TWS_INIT; break; case TWS_INIT_COMPLETE: if (sc->tws_state != TWS_INIT) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state); } else { sc->tws_state = TWS_ONLINE; } break; case TWS_RESET_START: /* We can transition to reset state from any state except reset*/ if (sc->tws_state != TWS_RESET) { sc->tws_prev_state = sc->tws_state; sc->tws_state = TWS_RESET; } break; case TWS_RESET_COMPLETE: if (sc->tws_state != TWS_RESET) { device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state); } else { sc->tws_state = sc->tws_prev_state; } break; case TWS_SCAN_FAILURE: if (sc->tws_state != TWS_ONLINE) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state); } else { sc->tws_state = TWS_OFFLINE; } break; case TWS_UNINIT_START: if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state); } else { sc->tws_state = TWS_UNINIT; } break; } } uint8_t tws_get_state(struct tws_softc *sc) { return((u_int8_t)sc->tws_state); } /* Called during system shutdown after sync. 
*/ static int tws_shutdown(device_t dev) { struct tws_softc *sc = device_get_softc(dev); TWS_TRACE_DEBUG(sc, "entry", 0, 0); tws_turn_off_interrupts(sc); tws_init_connect(sc, 1); return (0); } /* * Device suspend routine. */ static int tws_suspend(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } /* * Device resume routine. */ static int tws_resume(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } struct tws_request * tws_get_request(struct tws_softc *sc, u_int16_t type) { struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock); struct tws_request *r = NULL; mtx_lock(my_mutex); if (type == TWS_REQ_TYPE_SCSI_IO) { r = tws_q_remove_head(sc, TWS_FREE_Q); } else { if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) { r = &sc->reqs[type]; } } if ( r ) { bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache)); r->data = NULL; r->length = 0; r->type = type; r->flags = TWS_DIR_UNKNOWN; r->error_code = TWS_REQ_RET_INVALID; r->cb = NULL; r->ccb_ptr = NULL; callout_stop(&r->timeout); r->next = r->prev = NULL; r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY); } mtx_unlock(my_mutex); return(r); } void tws_release_request(struct tws_request *req) { struct tws_softc *sc = req->sc; TWS_TRACE_DEBUG(sc, "entry", sc, 0); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } static device_method_t tws_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tws_probe), DEVMETHOD(device_attach, tws_attach), DEVMETHOD(device_detach, tws_detach), DEVMETHOD(device_shutdown, tws_shutdown), DEVMETHOD(device_suspend, tws_suspend), DEVMETHOD(device_resume, tws_resume), DEVMETHOD_END }; static driver_t tws_driver = { "tws", tws_methods, sizeof(struct tws_softc) }; static devclass_t tws_devclass; /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */ DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0); MODULE_DEPEND(tws, cam, 1, 1, 1); MODULE_DEPEND(tws, pci, 1, 1, 1); TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth); TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi); Index: head/sys/dev/tws/tws_cam.c =================================================================== --- head/sys/dev/tws/tws_cam.c (revision 340417) +++ head/sys/dev/tws/tws_cam.c (revision 340418) @@ -1,1317 +1,1306 @@ /* * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS); static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"}; static void tws_action(struct cam_sim *sim, union ccb *ccb); static void tws_poll(struct cam_sim *sim); static void tws_scsi_complete(struct tws_request *req); void tws_unmap_request(struct tws_softc *sc, struct tws_request *req); int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req); int tws_bus_scan(struct tws_softc *sc); int tws_cam_attach(struct tws_softc *sc); void tws_cam_detach(struct tws_softc *sc); void tws_reset(void *arg); static void tws_reset_cb(void *arg); static void tws_reinit(void *arg); static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb); static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req); static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest, u_int16_t num_sgl_entries); static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa); static void tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr); static void tws_passthru_err_complete(struct tws_request *req, struct tws_command_header *hdr); void tws_timeout(void *arg); static void tws_intr_attn_aen(struct tws_softc *sc); static void tws_intr_attn_error(struct tws_softc *sc); static void tws_intr_resp(struct tws_softc *sc); void tws_intr(void *arg); void tws_cmd_complete(struct tws_request *req); void tws_aen_complete(struct tws_request *req); int tws_send_scsi_cmd(struct tws_softc *sc, int cmd); void tws_getset_param_complete(struct tws_request *req); int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data); int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data); extern struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); extern void *tws_release_request(struct tws_request *req); extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req); extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id, u_int64_t *mfa); extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request * tws_q_remove_request(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern void tws_send_event(struct tws_softc *sc, u_int8_t event); extern struct tws_sense * tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa); extern void tws_fetch_aen(void *arg); extern void tws_disable_db_intr(struct tws_softc *sc); extern void tws_enable_db_intr(struct tws_softc *sc); extern void tws_passthru_complete(struct tws_request *req); extern void tws_aen_synctime_with_host(struct tws_softc *sc); extern void tws_circular_aenq_insert(struct tws_softc 
*sc, struct tws_circular_q *cq, struct tws_event_packet *aen); extern int tws_use_32bit_sgls; extern boolean tws_ctlr_reset(struct tws_softc *sc); extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc, u_int8_t q_type ); extern void tws_turn_off_interrupts(struct tws_softc *sc); extern void tws_turn_on_interrupts(struct tws_softc *sc); extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc); extern void tws_init_obfl_q(struct tws_softc *sc); extern uint8_t tws_get_state(struct tws_softc *sc); extern void tws_assert_soft_reset(struct tws_softc *sc); extern boolean tws_ctlr_ready(struct tws_softc *sc); extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa); extern int tws_setup_intr(struct tws_softc *sc, int irqs); extern int tws_teardown_intr(struct tws_softc *sc); int tws_cam_attach(struct tws_softc *sc) { struct cam_devq *devq; TWS_TRACE_DEBUG(sc, "entry", 0, sc); /* Create a device queue for sim */ /* * if the user sets cam depth to less than 1 * cam may get confused */ if ( tws_cam_depth < 1 ) tws_cam_depth = 1; if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) ) tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS; TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth); if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) { tws_log(sc, CAM_SIMQ_ALLOC); return(ENOMEM); } /* * Create a SIM entry. Though we can support tws_cam_depth * simultaneous requests, we claim to be able to handle only * (tws_cam_depth), so that we always have reserved requests * packet available to service ioctls and internal commands. */ sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc, device_get_unit(sc->tws_dev), -#if (__FreeBSD_version >= 700000) &sc->sim_lock, -#endif tws_cam_depth, 1, devq); /* 1, 1, devq); */ if (sc->sim == NULL) { cam_simq_free(devq); tws_log(sc, CAM_SIM_ALLOC); } /* Register the bus. */ mtx_lock(&sc->sim_lock); if (xpt_bus_register(sc->sim, -#if (__FreeBSD_version >= 700000) sc->tws_dev, -#endif 0) != CAM_SUCCESS) { cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */ sc->sim = NULL; /* so cam_detach will not try to free it */ mtx_unlock(&sc->sim_lock); tws_log(sc, TWS_XPT_BUS_REGISTER); return(ENXIO); } if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->sim)); /* Passing TRUE to cam_sim_free will free the devq as well. 
*/ cam_sim_free(sc->sim, TRUE); tws_log(sc, TWS_XPT_CREATE_PATH); mtx_unlock(&sc->sim_lock); return(ENXIO); } mtx_unlock(&sc->sim_lock); return(0); } void tws_cam_detach(struct tws_softc *sc) { TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->sim_lock); if (sc->path) xpt_free_path(sc->path); if (sc->sim) { xpt_bus_deregister(cam_sim_path(sc->sim)); cam_sim_free(sc->sim, TRUE); } mtx_unlock(&sc->sim_lock); } int tws_bus_scan(struct tws_softc *sc) { union ccb *ccb; TWS_TRACE_DEBUG(sc, "entry", sc, 0); if (!(sc->sim)) return(ENXIO); ccb = xpt_alloc_ccb(); mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock(&sc->sim_lock); xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); mtx_unlock(&sc->sim_lock); return(0); } static void tws_action(struct cam_sim *sim, union ccb *ccb) { struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim); switch( ccb->ccb_h.func_code ) { case XPT_SCSI_IO: { if ( tws_execute_scsi(sc, ccb) ) TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0); break; } case XPT_ABORT: { TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0); ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; } case XPT_RESET_BUS: { TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb); break; } case XPT_SET_TRAN_SETTINGS: { TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb); -#if (__FreeBSD_version >= 700000 ) ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SPI; ccb->cts.transport_version = 2; ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC; ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; -#else - ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID); - ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); -#endif ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb, ccb->ccg.block_size); cam_calc_geometry(&ccb->ccg, 1/* extended */); xpt_done(ccb); break; } case XPT_PATH_INQ: { TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb); ccb->cpi.version_num = 1; ccb->cpi.hba_inquiry = 0; ccb->cpi.target_sprt = 0; ccb->cpi.hba_misc = 0; ccb->cpi.hba_eng_cnt = 0; ccb->cpi.max_target = TWS_MAX_NUM_UNITS; ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1; ccb->cpi.unit_number = cam_sim_unit(sim); ccb->cpi.bus_id = cam_sim_bus(sim); ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID; ccb->cpi.base_transfer_speed = 6000000; strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN); strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN); -#if (__FreeBSD_version >= 700000 ) ccb->cpi.transport = XPORT_SPI; ccb->cpi.transport_version = 2; ccb->cpi.protocol = PROTO_SCSI; ccb->cpi.protocol_version = SCSI_REV_2; ccb->cpi.maxio = TWS_MAX_IO_SIZE; -#endif ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: TWS_TRACE_DEBUG(sc, "default", sim, ccb); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void tws_scsi_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; mtx_lock(&sc->q_lock); tws_q_remove_request(sc, req, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); callout_stop(&req->timeout); tws_unmap_request(req->sc, req); req->ccb_ptr->ccb_h.status = 
CAM_REQ_CMP; mtx_lock(&sc->sim_lock); xpt_done(req->ccb_ptr); mtx_unlock(&sc->sim_lock); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } void tws_getset_param_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id); callout_stop(&req->timeout); tws_unmap_request(sc, req); free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; } void tws_aen_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; struct tws_command_header *sense; struct tws_event_packet event; u_int16_t aen_code=0; TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id); callout_stop(&req->timeout); tws_unmap_request(sc, req); sense = (struct tws_command_header *)req->data; TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0], sense->sense_data[2]); TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id, sense->status_block.res__severity); TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum, sense->status_block.error); TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header, sense->header_desc.size_sense); aen_code = sense->status_block.error; switch ( aen_code ) { case TWS_AEN_SYNC_TIME_WITH_HOST : tws_aen_synctime_with_host(sc); break; case TWS_AEN_QUEUE_EMPTY : break; default : bzero(&event, sizeof(struct tws_event_packet)); event.sequence_id = sc->seq_id; event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME; event.aen_code = sense->status_block.error; event.severity = sense->status_block.res__severity & 0x7; event.event_src = TWS_SRC_CTRL_EVENT; strcpy(event.severity_str, tws_sev_str[event.severity]); event.retrieved = TWS_AEN_NOT_RETRIEVED; bcopy(sense->err_specific_desc, event.parameter_data, TWS_ERROR_SPECIFIC_DESC_LEN); event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0'; event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1; if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) { event.parameter_len += ((u_int8_t)strlen(event.parameter_data + event.parameter_len) + 1); } device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n", event.severity_str, event.event_src, event.aen_code, event.parameter_data + (strlen(event.parameter_data) + 1), event.parameter_data); mtx_lock(&sc->gen_lock); tws_circular_aenq_insert(sc, &sc->aen_q, &event); sc->seq_id++; mtx_unlock(&sc->gen_lock); break; } free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; if ( aen_code != TWS_AEN_QUEUE_EMPTY ) { /* timeout(tws_fetch_aen, sc, 1);*/ sc->stats.num_aens++; tws_fetch_aen((void *)sc); } } void tws_cmd_complete(struct tws_request *req) { struct tws_softc *sc = req->sc; callout_stop(&req->timeout); tws_unmap_request(sc, req); } static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa) { struct tws_command_header *hdr; struct tws_sense *sen; struct tws_request *req; u_int16_t req_id; u_int32_t reg, status; if ( !mfa ) { TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa); return; } else { /* lookup the sense */ sen = tws_find_sense_from_mfa(sc, mfa); if ( sen == NULL ) { TWS_TRACE_DEBUG(sc, "found null req", 0, mfa); return; } hdr = sen->hdr; TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr); req_id = hdr->header_desc.request_id; req = &sc->reqs[req_id]; TWS_TRACE_DEBUG(sc, "req, id", req, req_id); if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS ) TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code); } switch (req->type) { case TWS_REQ_TYPE_PASSTHRU : tws_passthru_err_complete(req, hdr); break; case TWS_REQ_TYPE_GETSET_PARAM : 
tws_getset_param_complete(req); break; case TWS_REQ_TYPE_SCSI_IO : tws_scsi_err_complete(req, hdr); break; } mtx_lock(&sc->io_lock); hdr->header_desc.size_header = 128; reg = (u_int32_t)( mfa>>32); tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4); reg = (u_int32_t)(mfa); tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4); status = tws_read_reg(sc, TWS_I2O0_STATUS, 4); if ( status & TWS_BIT13 ) { device_printf(sc->tws_dev, "OBFL Overrun\n"); sc->obfl_q_overrun = true; } mtx_unlock(&sc->io_lock); } static void tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr) { u_int8_t *sense_data; struct tws_softc *sc = req->sc; union ccb *ccb = req->ccb_ptr; TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error, req->cmd_pkt->cmd.pkt_a.status); if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED || hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) { if ( ccb->ccb_h.target_lun ) { TWS_TRACE_DEBUG(sc, "invalid lun error",0,0); ccb->ccb_h.status |= CAM_DEV_NOT_THERE; } else { TWS_TRACE_DEBUG(sc, "invalid target error",0,0); ccb->ccb_h.status |= CAM_SEL_TIMEOUT; } } else { TWS_TRACE_DEBUG(sc, "scsi status error",0,0); ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) && (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; TWS_TRACE_DEBUG(sc, "page mode not supported",0,0); } } /* if there were no error simply mark complete error */ if (ccb->ccb_h.status == 0) ccb->ccb_h.status = CAM_REQ_CMP_ERR; sense_data = (u_int8_t *)&ccb->csio.sense_data; if (sense_data) { memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH ); ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mtx_lock(&sc->sim_lock); xpt_done(ccb); mtx_unlock(&sc->sim_lock); callout_stop(&req->timeout); tws_unmap_request(req->sc, req); mtx_lock(&sc->q_lock); tws_q_remove_request(sc, req, TWS_BUSY_Q); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } static void tws_passthru_err_complete(struct tws_request *req, struct tws_command_header *hdr) { TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id); req->error_code = hdr->status_block.error; memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header)); tws_passthru_complete(req); } static void tws_drain_busy_queue(struct tws_softc *sc) { struct tws_request *req; union ccb *ccb; TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->q_lock); req = tws_q_remove_tail(sc, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); while ( req ) { TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id); callout_stop(&req->timeout); req->error_code = TWS_REQ_RET_RESET; ccb = (union ccb *)(req->ccb_ptr); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; ccb->ccb_h.status |= CAM_SCSI_BUS_RESET; tws_unmap_request(req->sc, req); mtx_lock(&sc->sim_lock); xpt_done(req->ccb_ptr); mtx_unlock(&sc->sim_lock); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); req = tws_q_remove_tail(sc, TWS_BUSY_Q); mtx_unlock(&sc->q_lock); } } static void tws_drain_reserved_reqs(struct tws_softc *sc) { struct tws_request *r; r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH]; if ( r->state != TWS_REQ_STATE_FREE ) { TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0); callout_stop(&r->timeout); tws_unmap_request(sc, r); free(r->data, M_TWS); r->state = TWS_REQ_STATE_FREE; r->error_code = TWS_REQ_RET_RESET; } r = 
&sc->reqs[TWS_REQ_TYPE_PASSTHRU]; if ( r->state == TWS_REQ_STATE_BUSY ) { TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0); r->error_code = TWS_REQ_RET_RESET; } r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM]; if ( r->state != TWS_REQ_STATE_FREE ) { TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0); callout_stop(&r->timeout); tws_unmap_request(sc, r); free(r->data, M_TWS); r->state = TWS_REQ_STATE_FREE; r->error_code = TWS_REQ_RET_RESET; } } static void tws_drain_response_queue(struct tws_softc *sc) { u_int16_t req_id; u_int64_t mfa; while ( tws_get_response(sc, &req_id, &mfa) ); } static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb) { struct tws_command_packet *cmd_pkt; struct tws_request *req; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); int error; u_int16_t lun; mtx_assert(&sc->sim_lock, MA_OWNED); if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) { TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_TID_INVALID; xpt_done(ccb); return(0); } if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) { TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_LUN_INVALID; xpt_done(ccb); return(0); } if(ccb_h->flags & CAM_CDB_PHYS) { TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun); ccb_h->status = CAM_REQ_INVALID; xpt_done(ccb); return(0); } /* * We are going to work on this request. Mark it as enqueued (though * we don't actually queue it...) */ ccb_h->status |= CAM_SIM_QUEUED; req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO); if ( !req ) { TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun); ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); return(0); } if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(ccb_h->flags & CAM_DIR_IN) req->flags |= TWS_DIR_IN; if(ccb_h->flags & CAM_DIR_OUT) req->flags |= TWS_DIR_OUT; } else { req->flags = TWS_DIR_NONE; /* no data */ } req->type = TWS_REQ_TYPE_SCSI_IO; req->cb = tws_scsi_complete; cmd_pkt = req->cmd_pkt; /* cmd_pkt->hdr.header_desc.size_header = 128; */ cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id; cmd_pkt->cmd.pkt_a.status = 0; cmd_pkt->cmd.pkt_a.sgl_offset = 16; /* lower nibble */ lun = ccb_h->target_lun & 0XF; lun = lun << 12; cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id; /* upper nibble */ lun = ccb_h->target_lun & 0XF0; lun = lun << 8; cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun; #ifdef TWS_DEBUG if ( csio->cdb_len > 16 ) TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len); #endif if(ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len); req->data = ccb; req->flags |= TWS_DATA_CCB; /* save ccb ptr */ req->ccb_ptr = ccb; /* * tws_map_load_data_callback will fill in the SGL, * and submit the I/O. 
*/ sc->stats.scsi_ios++; callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, tws_timeout, req, 0); error = tws_map_request(sc, req); return(error); } int tws_send_scsi_cmd(struct tws_softc *sc, int cmd) { struct tws_request *req; struct tws_command_packet *cmd_pkt; int error; TWS_TRACE_DEBUG(sc, "entry",sc, cmd); req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH); if ( req == NULL ) return(ENOMEM); req->cb = tws_aen_complete; cmd_pkt = req->cmd_pkt; cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI; cmd_pkt->cmd.pkt_a.status = 0; cmd_pkt->cmd.pkt_a.unit = 0; cmd_pkt->cmd.pkt_a.sgl_offset = 16; cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id; cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd; cmd_pkt->cmd.pkt_a.cdb[4] = 128; req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(ENOMEM); bzero(req->data, TWS_SECTOR_SIZE); req->flags = TWS_DIR_IN; callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req); error = tws_map_request(sc, req); return(error); } int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data) { struct tws_request *req; struct tws_command_packet *cmd_pkt; union tws_command_giga *cmd; struct tws_getset_param *param; int error; req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM); if ( req == NULL ) { TWS_TRACE_DEBUG(sc, "null req", 0, 0); return(ENOMEM); } req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(ENOMEM); bzero(req->data, TWS_SECTOR_SIZE); param = (struct tws_getset_param *)req->data; req->cb = tws_getset_param_complete; req->flags = TWS_DIR_OUT; cmd_pkt = req->cmd_pkt; cmd = &cmd_pkt->cmd.pkt_g; cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM); cmd->param.request_id = (u_int8_t)req->request_id; cmd->param.host_id__unit = 0; cmd->param.param_count = 1; cmd->param.size = 2; /* map routine will add sgls */ /* Specify which parameter we want to set. */ param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR); param->parameter_id = (u_int8_t)(param_id); param->parameter_size_bytes = (u_int16_t)param_size; memcpy(param->data, data, param_size); callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req); error = tws_map_request(sc, req); return(error); } int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id, u_int32_t param_size, void *data) { struct tws_request *req; struct tws_command_packet *cmd_pkt; union tws_command_giga *cmd; struct tws_getset_param *param; u_int16_t reqid; u_int64_t mfa; int error = SUCCESS; req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM); if ( req == NULL ) { TWS_TRACE_DEBUG(sc, "null req", 0, 0); return(FAILURE); } req->length = TWS_SECTOR_SIZE; req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT); if ( req->data == NULL ) return(FAILURE); bzero(req->data, TWS_SECTOR_SIZE); param = (struct tws_getset_param *)req->data; req->cb = NULL; req->flags = TWS_DIR_IN; cmd_pkt = req->cmd_pkt; cmd = &cmd_pkt->cmd.pkt_g; cmd->param.sgl_off__opcode = BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM); cmd->param.request_id = (u_int8_t)req->request_id; cmd->param.host_id__unit = 0; cmd->param.param_count = 1; cmd->param.size = 2; /* map routine will add sgls */ /* Specify which parameter we want to set. 
*/ param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR); param->parameter_id = (u_int8_t)(param_id); param->parameter_size_bytes = (u_int16_t)param_size; error = tws_map_request(sc, req); if (!error) { reqid = tws_poll4_response(sc, &mfa); tws_unmap_request(sc, req); if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) { memcpy(data, param->data, param_size); } else { error = FAILURE; } } free(req->data, M_TWS); req->state = TWS_REQ_STATE_FREE; return(error); } void tws_unmap_request(struct tws_softc *sc, struct tws_request *req) { if (req->data != NULL) { if ( req->flags & TWS_DIR_IN ) bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTREAD); if ( req->flags & TWS_DIR_OUT ) bus_dmamap_sync(sc->data_tag, req->dma_map, BUS_DMASYNC_POSTWRITE); mtx_lock(&sc->io_lock); bus_dmamap_unload(sc->data_tag, req->dma_map); mtx_unlock(&sc->io_lock); } } int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req) { int32_t error = 0; /* If the command involves data, map that too. */ if (req->data != NULL) { int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT); /* * Map the data buffer into bus space and build the SG list. */ mtx_lock(&sc->io_lock); if (req->flags & TWS_DATA_CCB) error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map, req->data, tws_dmamap_data_load_cbfn, req, my_flags); else error = bus_dmamap_load(sc->data_tag, req->dma_map, req->data, req->length, tws_dmamap_data_load_cbfn, req, my_flags); mtx_unlock(&sc->io_lock); if (error == EINPROGRESS) { TWS_TRACE(sc, "in progress", 0, error); tws_freeze_simq(sc, req); error = 0; // EINPROGRESS is not a fatal error. } } else { /* no data involved */ error = tws_submit_command(sc, req); } return(error); } static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct tws_request *req = (struct tws_request *)arg; struct tws_softc *sc = req->sc; u_int16_t sgls = nseg; void *sgl_ptr; struct tws_cmd_generic *gcmd; if ( error ) { TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0); } if ( error == EFBIG ) { TWS_TRACE(sc, "not enough data segs", 0, nseg); req->error_code = error; req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG; return; } if ( req->flags & TWS_DIR_IN ) bus_dmamap_sync(req->sc->data_tag, req->dma_map, BUS_DMASYNC_PREREAD); if ( req->flags & TWS_DIR_OUT ) bus_dmamap_sync(req->sc->data_tag, req->dma_map, BUS_DMASYNC_PREWRITE); if ( segs ) { if ( (req->type == TWS_REQ_TYPE_PASSTHRU && GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) != TWS_FW_CMD_EXECUTE_SCSI) || req->type == TWS_REQ_TYPE_GETSET_PARAM) { gcmd = &req->cmd_pkt->cmd.pkt_g.generic; sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size; gcmd->size += sgls * ((req->sc->is64bit && !tws_use_32bit_sgls) ? 
4 : 2 ); tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls); } else { tws_fill_sg_list(req->sc, (void *)segs, (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls); req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ; } } req->error_code = tws_submit_command(req->sc, req); } static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest, u_int16_t num_sgl_entries) { int i; if ( sc->is64bit ) { struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src; if ( !tws_use_32bit_sgls ) { struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS ) TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s->address; sgl_d[i].length = sgl_s->length; sgl_d[i].flag = 0; sgl_d[i].reserved = 0; sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + sizeof(bus_dma_segment_t)); } } else { struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS ) TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s->address; sgl_d[i].length = sgl_s->length; sgl_d[i].flag = 0; sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) + sizeof(bus_dma_segment_t)); } } } else { struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src; struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest; if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS ) TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0); for (i = 0; i < num_sgl_entries; i++) { sgl_d[i].address = sgl_s[i].address; sgl_d[i].length = sgl_s[i].length; sgl_d[i].flag = 0; } } } void tws_intr(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; u_int32_t histat=0, db=0; if (!(sc)) { device_printf(sc->tws_dev, "null softc!!!\n"); return; } if ( tws_get_state(sc) == TWS_RESET ) { return; } if ( tws_get_state(sc) != TWS_ONLINE ) { return; } sc->stats.num_intrs++; histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4); if ( histat & TWS_BIT2 ) { TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); if ( db & TWS_BIT21 ) { tws_intr_attn_error(sc); return; } if ( db & TWS_BIT18 ) { tws_intr_attn_aen(sc); } } if ( histat & TWS_BIT3 ) { tws_intr_resp(sc); } } static void tws_intr_attn_aen(struct tws_softc *sc) { u_int32_t db=0; /* maskoff db intrs until all the aens are fetched */ /* tws_disable_db_intr(sc); */ tws_fetch_aen((void *)sc); tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); } static void tws_intr_attn_error(struct tws_softc *sc) { u_int32_t db=0; TWS_TRACE(sc, "attn error", 0, 0); tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4); device_printf(sc->tws_dev, "Micro controller error.\n"); tws_reset(sc); } static void tws_intr_resp(struct tws_softc *sc) { u_int16_t req_id; u_int64_t mfa; while ( tws_get_response(sc, &req_id, &mfa) ) { sc->stats.reqs_out++; if ( req_id == TWS_INVALID_REQID ) { TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id); sc->stats.reqs_errored++; tws_err_complete(sc, mfa); continue; } sc->reqs[req_id].cb(&sc->reqs[req_id]); } } static void tws_poll(struct cam_sim *sim) { struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim); TWS_TRACE_DEBUG(sc, "entry", 0, 0); tws_intr((void *) sc); } void tws_timeout(void *arg) { struct tws_request *req = (struct tws_request *)arg; struct tws_softc *sc = req->sc; if ( req->error_code == TWS_REQ_RET_RESET ) { return; } 
mtx_lock(&sc->gen_lock); if ( req->error_code == TWS_REQ_RET_RESET ) { mtx_unlock(&sc->gen_lock); return; } if ( tws_get_state(sc) == TWS_RESET ) { mtx_unlock(&sc->gen_lock); return; } xpt_freeze_simq(sc->sim, 1); tws_send_event(sc, TWS_RESET_START); if (req->type == TWS_REQ_TYPE_SCSI_IO) { device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n"); } else if (req->type == TWS_REQ_TYPE_PASSTHRU) { device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n"); } else { device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n"); } tws_assert_soft_reset(sc); tws_turn_off_interrupts(sc); tws_reset_cb( (void*) sc ); tws_reinit( (void*) sc ); // device_printf(sc->tws_dev, "Controller Reset complete!\n"); tws_send_event(sc, TWS_RESET_COMPLETE); mtx_unlock(&sc->gen_lock); xpt_release_simq(sc->sim, 1); } void tws_reset(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; mtx_lock(&sc->gen_lock); if ( tws_get_state(sc) == TWS_RESET ) { mtx_unlock(&sc->gen_lock); return; } xpt_freeze_simq(sc->sim, 1); tws_send_event(sc, TWS_RESET_START); device_printf(sc->tws_dev, "Resetting controller\n"); tws_assert_soft_reset(sc); tws_turn_off_interrupts(sc); tws_reset_cb( (void*) sc ); tws_reinit( (void*) sc ); // device_printf(sc->tws_dev, "Controller Reset complete!\n"); tws_send_event(sc, TWS_RESET_COMPLETE); mtx_unlock(&sc->gen_lock); xpt_release_simq(sc->sim, 1); } static void tws_reset_cb(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; time_t endt; int found = 0; u_int32_t reg; if ( tws_get_state(sc) != TWS_RESET ) { return; } // device_printf(sc->tws_dev, "Draining Busy Queue\n"); tws_drain_busy_queue(sc); // device_printf(sc->tws_dev, "Draining Reserved Reqs\n"); tws_drain_reserved_reqs(sc); // device_printf(sc->tws_dev, "Draining Response Queue\n"); tws_drain_response_queue(sc); // device_printf(sc->tws_dev, "Looking for controller ready flag...\n"); endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT; while ((TWS_LOCAL_TIME <= endt) && (!found)) { reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4); if ( reg & TWS_BIT13 ) { found = 1; // device_printf(sc->tws_dev, " ... Got it!\n"); } } if ( !found ) device_printf(sc->tws_dev, " ... 
Controller ready flag NOT found!\n"); } static void tws_reinit(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; int timeout_val=0; int try=2; int done=0; // device_printf(sc->tws_dev, "Waiting for Controller Ready\n"); while ( !done && try ) { if ( tws_ctlr_ready(sc) ) { done = 1; break; } else { timeout_val += 5; if ( timeout_val >= TWS_RESET_TIMEOUT ) { timeout_val = 0; if ( try ) tws_assert_soft_reset(sc); try--; } mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz); } } if (!done) { device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n"); return; } sc->obfl_q_overrun = false; // device_printf(sc->tws_dev, "Sending initConnect\n"); if ( tws_init_connect(sc, tws_queue_depth) ) { TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit); } tws_init_obfl_q(sc); tws_turn_on_interrupts(sc); wakeup_one(sc); } static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req) { /* Only for IO commands */ if (req->type == TWS_REQ_TYPE_SCSI_IO) { union ccb *ccb = (union ccb *)(req->ccb_ptr); xpt_freeze_simq(sc->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ccb->ccb_h.status |= CAM_REQUEUE_REQ; } } TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth); Index: head/sys/dev/tws/tws_services.h =================================================================== --- head/sys/dev/tws/tws_services.h (revision 340417) +++ head/sys/dev/tws/tws_services.h (revision 340418) @@ -1,142 +1,135 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2010, LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * Support: freebsdraid@lsi.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of the nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

/* #define TWS_DEBUG on */

void tws_trace(const char *file, const char *fun, int linenum,
               struct tws_softc *sc, char *desc, u_int64_t val1,
               u_int64_t val2);
void tws_log(struct tws_softc *sc, int index);
u_int32_t tws_read_reg(struct tws_softc *sc, int offset, int size);
void tws_write_reg(struct tws_softc *sc, int offset, u_int32_t value, int size);
u_int16_t tws_swap16(u_int16_t val);
u_int32_t tws_swap32(u_int32_t val);
u_int64_t tws_swap64(u_int64_t val);
void tws_init_qs(struct tws_softc *sc);

/* ----------------- trace ----------------- */
#define TWS_TRACE_ON on /* Always on - use wisely to trace errors */

#ifdef TWS_DEBUG
#define TWS_TRACE_DEBUG_ON on
#endif

#ifdef TWS_TRACE_DEBUG_ON
#define TWS_TRACE_DEBUG(sc, desc, val1, val2) \
        tws_trace(__FILE__, __func__, __LINE__, sc, desc, \
                  (u_int64_t)val1, (u_int64_t)val2)
#else
#define TWS_TRACE_DEBUG(sc, desc, val1, val2)
#endif

#ifdef TWS_TRACE_ON
#define TWS_TRACE(sc, desc, val1, val2) \
        tws_trace(__FILE__, __func__, __LINE__, sc, desc, \
                  (u_int64_t)val1, (u_int64_t)val2)
#else
#define TWS_TRACE(sc, desc, val1, val2)
#endif

/* ---------------- logging ---------------- */
/* ---------------- logging ---------------- */
enum error_index {
    SYSCTL_TREE_NODE_ADD,
    PCI_COMMAND_READ,
    ALLOC_MEMORY_RES,
    ALLOC_IRQ_RES,
    SETUP_INTR_RES,
    TWS_CAM_ATTACH,
    CAM_SIMQ_ALLOC,
    CAM_SIM_ALLOC,
    TWS_XPT_BUS_REGISTER,
    TWS_XPT_CREATE_PATH,
    TWS_BUS_SCAN_REQ,
    TWS_INIT_FAILURE,
    TWS_CTLR_INIT_FAILURE,
};

enum severity {
    ERROR = 1,
    WARNING,
    INFO,
#if 0
    DEBUG,
#endif
};

struct error_desc {
    char desc[256];
    u_int32_t error_code;
    int severity_level;
    char *fmt;
    char *error_str;
};

/* ----------- q services ------------- */
#define TWS_FREE_Q      0
#define TWS_PENDING_Q   1
#define TWS_BUSY_Q      2
#define TWS_COMPLETE_Q  3

/* req return codes */
#define TWS_REQ_RET_SUBMIT_SUCCESS  0
#define TWS_REQ_RET_PEND_NOMFA      1
#define TWS_REQ_RET_RESET           2
#define TWS_REQ_RET_INVALID         0xdead

/* ------------------------ */
-#if (__FreeBSD_version >= 700000)
 #include
 #define TWS_LOCAL_TIME (time_second - utc_offset())
-#else
-#include
-#define TWS_LOCAL_TIME (time_second - (tz_minuteswest * 60) - \
-                        (wall_cmos_clock ? adjkerntz : 0))
-#endif
-
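
The guards removed throughout this commit all have the same shape: #if (__FreeBSD_version >= 700000) selecting the newer KPI form, with a pre-7.x fallback no supported release can take. The following is a minimal sketch, not part of this change, restating the call shapes the driver now uses unconditionally; it mirrors tws_setup_intr() and tws_cam_attach() above, and assumes the driver's own headers, softc fields, and the static tws_action/tws_poll/tws_cam_depth symbols exactly as they appear in the files being patched.

/*
 * Sketch only: post-7.x interrupt and CAM registration as the driver
 * now calls them unconditionally.  Error handling trimmed.
 */
static int
example_register(struct tws_softc *sc, struct cam_devq *devq)
{
        int error;

        /* The 4th argument is the filter; the removed guards only hid this NULL. */
        error = bus_setup_intr(sc->tws_dev, sc->irq_res[0],
            INTR_TYPE_CAM | INTR_MPSAFE, NULL, tws_intr, sc,
            &sc->intr_handle[0]);
        if (error)
                return (error);

        /* The SIM lock pointer is likewise always passed now. */
        sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
            device_get_unit(sc->tws_dev), &sc->sim_lock,
            tws_cam_depth, 1, devq);
        if (sc->sim == NULL)
                return (ENOMEM);

        /* ... as is the parent device in xpt_bus_register(). */
        mtx_lock(&sc->sim_lock);
        if (xpt_bus_register(sc->sim, sc->tws_dev, 0) != CAM_SUCCESS)
                error = ENXIO;
        mtx_unlock(&sc->sim_lock);
        return (error);
}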
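
The tws_services.h hunk is the same cleanup: the surviving definition, TWS_LOCAL_TIME (time_second - utc_offset()), already does what the removed pre-7.x branch open-coded, since utc_offset() on current kernels folds in tz_minuteswest, wall_cmos_clock and adjkerntz. A short sketch, not part of this change, of how the driver consumes the macro; the fields mirror what tws_aen_complete() fills in when it stamps a controller event.

/*
 * Sketch only: TWS_LOCAL_TIME yields local wall-clock seconds from the
 * kernel's UTC time_second, the same expression tws_aen_complete()
 * uses for AEN timestamps.
 */
static void
example_stamp_event(struct tws_softc *sc, struct tws_event_packet *ev)
{
        bzero(ev, sizeof(*ev));
        ev->sequence_id = sc->seq_id;
        ev->time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
        ev->retrieved = TWS_AEN_NOT_RETRIEVED;
}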
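
Unrelated to the guard removal but visible in the context above: the driver registers three loader tunables via TUNABLE_INT(). A usage sketch for /boot/loader.conf follows; the values are placeholders only, since tws_init() clamps queue_depth into the (TWS_RESERVED_REQS + 1) .. TWS_MAX_REQS range, tws_cam_attach() clamps cam_depth against it, and enable_msi=1 simply makes tws_attach() select MSI instead of legacy INTx.

# /boot/loader.conf (placeholder values)
hw.tws.queue_depth="128"
hw.tws.enable_msi="1"
hw.tws.cam_depth="120"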