Index: head/sys/dev/arcmsr/arcmsr.c
===================================================================
--- head/sys/dev/arcmsr/arcmsr.c	(revision 227911)
+++ head/sys/dev/arcmsr/arcmsr.c	(revision 227912)
@@ -1,3943 +1,3948 @@
/*
*****************************************************************************************
** O.S         : FreeBSD
** FILE NAME   : arcmsr.c
** BY          : Erich Chen, Ching Huang
** Description : SCSI RAID Device Driver for
**               ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
**               ARCMSR RAID Host adapter
**               [RAID controller: INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
******************************************************************************************
************************************************************************
**
** Copyright (c) 2004-2010 ARECA Co. Ltd.
**        Erich Chen, Taipei Taiwan All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**************************************************************************
** History
**
**    REV#        DATE        NAME          DESCRIPTION
** 1.00.00.00  03/31/2004  Erich Chen    First release
** 1.20.00.02  11/29/2004  Erich Chen    bug fix with arcmsr_bus_reset when PHY error
** 1.20.00.03  04/19/2005  Erich Chen    add SATA 24 Ports adapter type support
**                                       clean unused function
** 1.20.00.12  09/12/2005  Erich Chen    bug fix with abort command handling,
**                                       firmware version check
**                                       and firmware update notify for hardware bug fix
**                                       handling non-zero high part physical address
**                                       of srb resource
** 1.20.00.13  08/18/2006  Erich Chen    remove pending srb and report busy
**                                       add iop message xfer
**                                       with scsi pass-through command
**                                       add new device id of sas raid adapters
**                                       code fit for SPARC64 & PPC
** 1.20.00.14  02/05/2007  Erich Chen    bug fix for incorrect ccb_h.status report
**                                       which caused g_vfs_done() read/write errors
** 1.20.00.15  10/10/2007  Erich Chen    support new RAID adapter type ARC120x
** 1.20.00.16  10/10/2009  Erich Chen    Bug fix for RAID adapter type ARC120x
**                                       bus_dmamem_alloc() with BUS_DMA_ZERO
** 1.20.00.17  07/15/2010  Ching Huang   Added support ARC1880
**                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
**                                       prevent cam_periph_error removing all LUN devices of one Target id
**                                       when any one LUN device fails
** 1.20.00.18  10/14/2010  Ching Huang   Fixed "inquiry data fails comparison at DV1 step"
**             10/25/2010  Ching Huang   Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
** 1.20.00.19  11/11/2010  Ching Huang   Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
** 1.20.00.20  12/08/2010  Ching Huang   Avoid calling atomic_set_int function
** 1.20.00.21  02/08/2011  Ching Huang   Implement I/O request timeout
**             02/14/2011  Ching Huang   Modified pktRequestCount
** 1.20.00.21  03/03/2011  Ching Huang   if a command times out, wait for its ccb to come back before freeing it
** 1.20.00.22  07/04/2011  Ching Huang   Fixed multiple MTX panic
******************************************************************************************
*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if 0
#define ARCMSR_DEBUG1			1
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/devicestat.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/ioccom.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <sys/conf.h>
#include <sys/rman.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
/*
**************************************************************************
**************************************************************************
*/
#if __FreeBSD_version >= 500005
    #include <sys/selinfo.h>
    #include <sys/mutex.h>
    #include <sys/condvar.h>
    #include <dev/pci/pcivar.h>
    #include <dev/pci/pcireg.h>
    #define ARCMSR_LOCK_INIT(l, s)	mtx_init(l, s, NULL, MTX_DEF)
    #define ARCMSR_LOCK_DESTROY(l)	mtx_destroy(l)
    #define ARCMSR_LOCK_ACQUIRE(l)	mtx_lock(l)
    #define ARCMSR_LOCK_RELEASE(l)	mtx_unlock(l)
    #define ARCMSR_LOCK_TRY(l)		mtx_trylock(l)
    #define arcmsr_htole32(x)		htole32(x)
    typedef struct mtx		arcmsr_lock_t;
#else
    #include <machine/clock.h>
    #include <pci/pcivar.h>
    #include <pci/pcireg.h>
    #define ARCMSR_LOCK_INIT(l, s)	simple_lock_init(l)
    #define ARCMSR_LOCK_DESTROY(l)
    #define ARCMSR_LOCK_ACQUIRE(l)	simple_lock(l)
    #define ARCMSR_LOCK_RELEASE(l)	simple_unlock(l)
    #define ARCMSR_LOCK_TRY(l)		simple_lock_try(l)
    #define arcmsr_htole32(x)		(x)
    typedef struct simplelock	arcmsr_lock_t;
#endif

#if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025
#define CAM_NEW_TRAN_CODE	1
#endif

#if __FreeBSD_version > 500000
#define arcmsr_callout_init(a)	callout_init(a, /*mpsafe*/1);
#else
#define arcmsr_callout_init(a)	callout_init(a);
#endif

#define ARCMSR_DRIVER_VERSION	"Driver Version 1.20.00.22 2011-07-04"
#include <dev/arcmsr/arcmsr.h>
#define SRB_SIZE		((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
#define ARCMSR_SRBS_POOL_SIZE	(SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
/*
**************************************************************************
**************************************************************************
*/
#define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
#define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_free_srb(struct CommandControlBlock *srb);
static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
static int arcmsr_probe(device_t dev);
static int arcmsr_attach(device_t dev);
static int arcmsr_detach(device_t dev);
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
static int arcmsr_shutdown(device_t dev);
static void arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
static void arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
static void arcmsr_polling_devmap(void* arg);
static void arcmsr_srb_timeout(void* arg);
#ifdef ARCMSR_DEBUG1
static void arcmsr_dump_data(struct AdapterControlBlock *acb);
#endif
/*
**************************************************************************
**************************************************************************
*/
static void UDELAY(u_int32_t us) { DELAY(us); }
/*
**************************************************************************
**************************************************************************
*/
static bus_dmamap_callback_t arcmsr_map_free_srb;
static bus_dmamap_callback_t arcmsr_execute_srb;
/*
**************************************************************************
**************************************************************************
*/
static d_open_t	arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;

static device_method_t arcmsr_methods[]={
	DEVMETHOD(device_probe,		arcmsr_probe),
	DEVMETHOD(device_attach,
arcmsr_attach), DEVMETHOD(device_detach, arcmsr_detach), DEVMETHOD(device_shutdown, arcmsr_shutdown), DEVMETHOD(device_suspend, arcmsr_suspend), DEVMETHOD(device_resume, arcmsr_resume), DEVMETHOD_END }; static driver_t arcmsr_driver={ "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) }; static devclass_t arcmsr_devclass; DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); MODULE_DEPEND(arcmsr, pci, 1, 1, 1); MODULE_DEPEND(arcmsr, cam, 1, 1, 1); #ifndef BUS_DMA_COHERENT #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #endif #if __FreeBSD_version >= 501000 static struct cdevsw arcmsr_cdevsw={ #if __FreeBSD_version >= 503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif .d_open = arcmsr_open, /* open */ .d_close = arcmsr_close, /* close */ .d_ioctl = arcmsr_ioctl, /* ioctl */ .d_name = "arcmsr", /* name */ }; #else #define ARCMSR_CDEV_MAJOR 180 static struct cdevsw arcmsr_cdevsw = { arcmsr_open, /* open */ arcmsr_close, /* close */ noread, /* read */ nowrite, /* write */ arcmsr_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "arcmsr", /* name */ ARCMSR_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0 /* flags */ }; #endif /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return 0; } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return 0; } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #else static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb=dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb==NULL) { return ENXIO; } return(arcmsr_iop_ioctlcmd(acb, 
ioctl_cmd, arg)); } /* ********************************************************************** ********************************************************************** */ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) { u_int32_t intmask_org=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_B: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */ } break; case ACB_ADAPTER_TYPE_C: { /* disable all outbound interrupt */ intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); } break; } return(intmask_org); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) { u_int32_t mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f; } break; } return; } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return FALSE; } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { 
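
All three *_wait_msgint_ready() helpers share one bounded-poll shape: an inner loop of 100 register reads spaced 10ms apart (about one second), retried up to 20 times, so a handshake the firmware never acknowledges is abandoned after roughly 20 seconds. Below is a minimal userland sketch of that pattern; reg_read() and reg_ack() are hypothetical stand-ins for the CHIP_REG_READ32/CHIP_REG_WRITE32 pairs, and the timing constants simply mirror the loops in this driver.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for CHIP_REG_READ32/CHIP_REG_WRITE32. */
static unsigned fake_reg;
static unsigned reg_read(void) { return fake_reg; }
static void reg_ack(void) { fake_reg = 0; }

/* Bounded poll in the style of arcmsr_hba_wait_msgint_ready(): 100
** polls at 10ms each (about 1 second), retried up to 20 times, for a
** worst case near 20 seconds before giving up. */
static int wait_msg_done(unsigned done_bit)
{
    int retries, i;

    for (retries = 0; retries < 20; retries++) {
        for (i = 0; i < 100; i++) {
            if (reg_read() & done_bit) {
                reg_ack();      /* clear the interrupt source */
                return 1;       /* TRUE */
            }
            usleep(10000);      /* models UDELAY(10000) */
        }
    }
    return 0;                   /* FALSE: firmware never answered */
}

int main(void)
{
    fake_reg = 0x1;
    printf("msg done: %d\n", wait_msg_done(0x1));
    return 0;
}
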
if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return FALSE; } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries=0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return FALSE; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hba_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); do { if(arcmsr_hbb_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) { int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); do { if(arcmsr_hbc_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count!=0); return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } break; } return; } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_suspend(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); /* flush controller */ arcmsr_iop_parking(acb); /* disable all outbound interrupt */ arcmsr_disable_allintr(acb); return(0); } /* ******************************************************************************* 
******************************************************************************* */ static int arcmsr_resume(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); arcmsr_iop_init(acb); return(0); } /* ********************************************************************************* ********************************************************************************* */ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) { struct AdapterControlBlock *acb; u_int8_t target_id, target_lun; struct cam_sim * sim; sim=(struct cam_sim *) cb_arg; acb =(struct AdapterControlBlock *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: target_id=xpt_path_target_id(path); target_lun=xpt_path_lun_id(path); if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { break; } printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); break; default: break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_report_sense_info(struct CommandControlBlock *srb) { union ccb * pccb=srb->pccb; pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; if(&pccb->csio.sense_data) { memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ pccb->ccb_h.status |= CAM_AUTOSNS_VALID; } return; } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } return; } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } return; } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } return; } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_abort_hbb_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_abort_hbc_allcmd(acb); } break; } return; } /* 
********************************************************************** ********************************************************************** */ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) { struct AdapterControlBlock *acb=srb->acb; union ccb * pccb=srb->pccb; if(srb->srb_flags & SRB_FLAG_TIMER_START) callout_stop(&srb->ccb_callout); if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } if(stand_flag==1) { atomic_subtract_int(&acb->srboutstandingcount, 1); if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) { acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } if(srb->srb_state != ARCMSR_SRB_TIMEOUT) arcmsr_free_srb(srb); #ifdef ARCMSR_DEBUG1 acb->pktReturnCount++; #endif xpt_done(pccb); return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) { int target, lun; target=srb->pccb->ccb_h.target_id; lun=srb->pccb->ccb_h.target_lun; if(error == FALSE) { if(acb->devstate[target][lun]==ARECA_RAID_GONE) { acb->devstate[target][lun]=ARECA_RAID_GOOD; } srb->pccb->ccb_h.status |= CAM_REQ_CMP; arcmsr_srb_complete(srb, 1); } else { switch(srb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { if(acb->devstate[target][lun]==ARECA_RAID_GOOD) { printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); } acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case SCSISTAT_CHECK_CONDITION: { acb->devstate[target][lun]=ARECA_RAID_GOOD; arcmsr_report_sense_info(srb); arcmsr_srb_complete(srb, 1); } break; default: printf("arcmsr%d: scsi id=%d lun=%d isr got command error done,but got unknow DeviceStatus=0x%x \n" , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus); acb->devstate[target][lun]=ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; /*unknow error or crc error just for retry*/ arcmsr_srb_complete(srb, 1); break; } } return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) { struct CommandControlBlock *srb; /* check if command done with no error*/ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_C: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ break; case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: default: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ break; } if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { arcmsr_free_srb(srb); printf("arcmsr%d: srb='%p' return srb has 
been timed out\n", acb->pci_unit, srb);
			return;
		}
		printf("arcmsr%d: return srb has been completed\n"
			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
		return;
	}
	arcmsr_report_srb_state(acb, srb, error);
	return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_srb_timeout(void* arg)
{
	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
	struct AdapterControlBlock *acb;
	int target, lun;
	u_int8_t cmd;

	target = srb->pccb->ccb_h.target_id;
	lun = srb->pccb->ccb_h.target_lun;
	acb = srb->acb;
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	if(srb->srb_state == ARCMSR_SRB_START) {
		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
		srb->srb_state = ARCMSR_SRB_TIMEOUT;
		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		arcmsr_srb_complete(srb, 1);
		printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
			acb->pci_unit, target, lun, cmd, srb);
	}
	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
#ifdef ARCMSR_DEBUG1
	arcmsr_dump_data(acb);
#endif
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i=0;
	u_int32_t flag_srb;
	u_int16_t error;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		u_int32_t outbound_intstatus;

		/* clear and abort all outbound posted Q */
		outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
		CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/
		while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
			arcmsr_drain_donequeue(acb, flag_srb, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;

		/* clear all outbound posted Q */
		CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
		for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
				phbbmu->done_qbuffer[i]=0;
				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
				arcmsr_drain_donequeue(acb, flag_srb, error);
			}
			phbbmu->post_qbuffer[i]=0;
		}	/*drain reply FIFO*/
		phbbmu->doneq_index=0;
		phbbmu->postq_index=0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
			error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
			arcmsr_drain_donequeue(acb, flag_srb, error);
		}
		}
		break;
	}
	return;
}
/*
****************************************************************************
****************************************************************************
*/
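
Every completion path, including the abort drain above, funnels through arcmsr_drain_donequeue(), which must turn a 32-bit reply token back into an SRB pointer: types A and B post the SRB's physical address shifted right by five (SRBs are 32-byte aligned), while type C returns the low physical address with status packed into the low five bits. A small standalone model of both decodings; vir2phy_offset and the token values here are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Model of how arcmsr_drain_donequeue() recovers an SRB pointer from a
** 32-bit reply token.  vir2phy_offset is the (virtual - physical) delta
** of the SRB pool; the values are illustrative only. */
#define SRB_ALIGN_MASK  0xFFFFFFE0u     /* SRBs are 32-byte aligned */

static uintptr_t vir2phy_offset = 0x1000;       /* hypothetical */

static uintptr_t srb_from_reply_ab(uint32_t flag_srb)
{
    /* Types A/B: the token is the SRB physical address >> 5. */
    return vir2phy_offset + ((uintptr_t)flag_srb << 5);
}

static uintptr_t srb_from_reply_c(uint32_t flag_srb)
{
    /* Type C: the low 5 bits carry status, so mask them off. */
    return vir2phy_offset + (flag_srb & SRB_ALIGN_MASK);
}

int main(void)
{
    uint32_t token_ab = 0x200;          /* phys 0x4000 >> 5 */
    uint32_t token_c  = 0x4007;         /* phys 0x4000 | status bits */

    printf("A/B srb at %#lx\n", (unsigned long)srb_from_reply_ab(token_ab));
    printf("C   srb at %#lx\n", (unsigned long)srb_from_reply_c(token_c));
    return 0;
}
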
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *srb;
	u_int32_t intmask_org;
	u_int32_t i=0;

	if(acb->srboutstandingcount>0) {
		/* disable all outbound interrupt */
		intmask_org=arcmsr_disable_allintr(acb);
		/* clear and abort all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* talk to iop 331: outstanding command aborted */
		arcmsr_abort_allcmd(acb);
		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
			srb=acb->psrb_pool[i];
			if(srb->srb_state==ARCMSR_SRB_START) {
				srb->srb_state=ARCMSR_SRB_ABORTED;
				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
				arcmsr_srb_complete(srb, 1);
				printf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n",
					acb->pci_unit, srb->pccb->ccb_h.target_id,
					srb->pccb->ccb_h.target_lun, srb);
			}
		}
		/* enable all outbound interrupt */
		arcmsr_enable_allintr(acb, intmask_org);
	}
	acb->srboutstandingcount=0;
	acb->workingsrb_doneindex=0;
	acb->workingsrb_startindex=0;
#ifdef ARCMSR_DEBUG1
	acb->pktRequestCount = 0;
	acb->pktReturnCount = 0;
#endif
	return;
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_build_srb(struct CommandControlBlock *srb,
	bus_dma_segment_t *dm_segs, u_int32_t nseg)
{
	struct ARCMSR_CDB * arcmsr_cdb = &srb->arcmsr_cdb;
	u_int8_t * psge = (u_int8_t *)&arcmsr_cdb->u;
	u_int32_t address_lo, address_hi;
	union ccb * pccb = srb->pccb;
	struct ccb_scsiio * pcsio = &pccb->csio;
	u_int32_t arccdbsize = 0x30;

	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
	arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
	arcmsr_cdb->Context = 0;
	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
	if(nseg != 0) {
		struct AdapterControlBlock *acb = srb->acb;
		bus_dmasync_op_t op;
		u_int32_t length, i, cdb_sgcount = 0;

		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_PREREAD;
		} else {
			op = BUS_DMASYNC_PREWRITE;
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
			srb->srb_flags |= SRB_FLAG_WRITE;
		}
		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
		for(i=0; i < nseg; i++) {
			/* Get the physical address of the current data pointer */
			length = arcmsr_htole32((u_int32_t)dm_segs[i].ds_len);
			address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
			address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
			if(address_hi == 0) {
				struct SG32ENTRY * pdma_sg = (struct SG32ENTRY *)psge;

				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof(struct SG32ENTRY);
				arccdbsize += sizeof(struct SG32ENTRY);
			} else {
				u_int32_t sg64s_size = 0, tmplength = length;

				while(1) {
					u_int64_t span4G, length0;
					struct SG64ENTRY * pdma_sg = (struct SG64ENTRY *)psge;

					span4G = (u_int64_t)address_lo + tmplength;
					pdma_sg->addresshigh = address_hi;
					pdma_sg->address = address_lo;
					if(span4G > 0x100000000) {
						/* see if we cross a 4G boundary */
						length0 = 0x100000000 - address_lo;
						pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
						address_hi = address_hi + 1;
						address_lo = 0;
						tmplength = tmplength - (u_int32_t)length0;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						cdb_sgcount++;
					} else {
						pdma_sg->length = tmplength | IS_SG64_ADDR;
						sg64s_size += sizeof(struct SG64ENTRY);
						psge += sizeof(struct SG64ENTRY);
						break;
					}
				}
				arccdbsize += sg64s_size;
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = pcsio->dxfer_len;
		if( arccdbsize > 256) {
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
		}
	} else {
		arcmsr_cdb->DataLength = 0;
	}
	srb->arc_cdb_size = arccdbsize;
	return;
}
/*
**************************************************************************
**************************************************************************
*/
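
The while(1) loop inside arcmsr_build_srb() above keeps every SG64 entry from wrapping a 32-bit address: if the low address plus the remaining length crosses a 4GB line, it emits one entry up to the boundary, increments the high address, and restarts at low address zero with the remainder. A standalone sketch of just that arithmetic; the IS_SG64_ADDR flag and the real entry layout are omitted.

#include <stdio.h>
#include <stdint.h>

/* Model of the 4GB-boundary split in arcmsr_build_srb(). */
static void split_sg64(uint32_t addr_hi, uint32_t addr_lo, uint32_t len)
{
    while (1) {
        uint64_t span4G = (uint64_t)addr_lo + len;

        if (span4G > 0x100000000ULL) {  /* crosses a 4GB boundary */
            uint32_t len0 = (uint32_t)(0x100000000ULL - addr_lo);

            printf("entry: hi=%#x lo=%#x len=%#x\n",
                (unsigned)addr_hi, (unsigned)addr_lo, (unsigned)len0);
            addr_hi += 1;       /* continue in the next 4GB window */
            addr_lo = 0;
            len -= len0;
        } else {
            printf("entry: hi=%#x lo=%#x len=%#x\n",
                (unsigned)addr_hi, (unsigned)addr_lo, (unsigned)len);
            break;
        }
    }
}

int main(void)
{
    /* A segment straddling the 4GB line: starts 0x100 bytes below it. */
    split_sg64(0, 0xFFFFFF00u, 0x200);
    return 0;
}
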
static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
{
	u_int32_t cdb_shifted_phyaddr = (u_int32_t) srb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB * arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;

	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
		(srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
	atomic_add_int(&acb->srboutstandingcount, 1);
	srb->srb_state = ARCMSR_SRB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport,
				cdb_shifted_phyaddr | ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
		} else {
			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
		int ending_index, index;

		index = phbbmu->postq_index;
		ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
		phbbmu->post_qbuffer[ending_index] = 0;
		if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
		} else {
			phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* if last index number, set it to 0 */
		phbbmu->postq_index = index;
		CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;

		arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
		ccb_post_stamp = (cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
		cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
		if(cdb_phyaddr_hi32) {
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
		} else {
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
		}
		}
		break;
	}
	return;
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
{
	struct QBUFFER *qbuffer=NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
		}
		break;
	}
	return(qbuffer);
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
{
	struct QBUFFER *qbuffer=NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

		qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
		}
		break;
	}
	return(qbuffer);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		/* let IOP know data has been read */
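
On type B adapters arcmsr_post_srb() above has no hardware post register to write; it places the descriptor into a small software ring shared with the firmware, zeroing the slot after the one being filled so the consumer always sees a terminator, then advances postq_index with wraparound and rings the doorbell. A userland model of that index discipline follows; the depth of 8 is illustrative, standing in for ARCMSR_MAX_HBB_POSTQUEUE.

#include <stdio.h>
#include <stdint.h>

#define POSTQUEUE_DEPTH 8   /* stands in for ARCMSR_MAX_HBB_POSTQUEUE */

static uint32_t post_qbuffer[POSTQUEUE_DEPTH];
static int postq_index;

/* Model of the type-B post path: zero the slot after the one being
** filled (so the reader sees a terminator), store the descriptor,
** then advance the index with wraparound. */
static void post_descriptor(uint32_t desc)
{
    int index = postq_index;
    int ending_index = (index + 1) % POSTQUEUE_DEPTH;

    post_qbuffer[ending_index] = 0;     /* terminate the ring for the reader */
    post_qbuffer[index] = desc;
    index++;
    index %= POSTQUEUE_DEPTH;           /* if last index number, set it to 0 */
    postq_index = index;
    /* the real driver now rings ARCMSR_DRV2IOP_CDB_POSTED */
}

int main(void)
{
    uint32_t d;

    for (d = 1; d <= 10; d++)           /* wraps past the end twice */
        post_descriptor(d);
    for (d = 0; d < POSTQUEUE_DEPTH; d++)
        printf("slot %u: %u\n", (unsigned)d, (unsigned)post_qbuffer[d]);
    return 0;
}
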
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_C: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } } return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_B: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK); } break; case ACB_ADAPTER_TYPE_C: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t * iop_data; int32_t allxfer_len=0; pwbuffer=arcmsr_get_iop_wqbuffer(acb); iop_data=(u_int8_t *)pwbuffer->data; if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; memcpy(iop_data, pQbuffer, 1); acb->wqbuf_firstindex++; acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; allxfer_len++; } pwbuffer->data_len=allxfer_len; /* ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post */ arcmsr_iop_message_wrote(acb); } return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &=~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n" , acb->pci_unit); } return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags &= ~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" , acb->pci_unit); } return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_hbc_bgrb(struct 
AdapterControlBlock *acb) { acb->acb_flags &=~ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit); } return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_stop_hba_bgrb(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_stop_hbb_bgrb(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_stop_hbc_bgrb(acb); } break; } return; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_poll(struct cam_sim * psim) { struct AdapterControlBlock *acb; int mutex; acb = (struct AdapterControlBlock *)cam_sim_softc(psim); mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); arcmsr_interrupt(acb); if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { struct QBUFFER *prbuffer; u_int8_t *pQbuffer; u_int8_t *iop_data; int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; /*check this iop data if overflow my rqbuffer*/ rqbuf_lastindex=acb->rqbuf_lastindex; rqbuf_firstindex=acb->rqbuf_firstindex; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data=(u_int8_t *)prbuffer->data; iop_len=prbuffer->data_len; my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); if(my_empty_len>=iop_len) { while(iop_len > 0) { pQbuffer=&acb->rqbuffer[rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); rqbuf_lastindex++; rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */ iop_data++; iop_len--; } acb->rqbuf_lastindex=rqbuf_lastindex; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } else { acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW; } return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; /* ***************************************************************** ** check if there are any mail packages from user space program ** in my post bag, now is the time to send them into Areca's firmware ***************************************************************** */ if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *iop_data; int allxfer_len=0; acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer=arcmsr_get_iop_wqbuffer(acb); iop_data=(u_int8_t *)pwbuffer->data; while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex) && (allxfer_len<124)) { pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex]; memcpy(iop_data, pQbuffer, 1); acb->wqbuf_firstindex++; acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; allxfer_len++; } 
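
The messaging paths size their copies with the classic power-of-two ring test seen in arcmsr_iop2drv_data_wrote_handle() above: (first - last - 1) & (ARCMSR_MAX_QBUFFER - 1) yields the writable space while always leaving one slot unused, which is what distinguishes a full ring from an empty one. The mask trick only works when the ring size is a power of two. A tiny model with an 8-slot ring:

#include <stdio.h>

#define QBUF_SIZE   8   /* must be a power of two, like ARCMSR_MAX_QBUFFER */

/* Free space in a ring where 'first' is the read index and 'last' is
** the write index, keeping one slot unused so that full and empty
** remain distinguishable. */
static int ring_free(int first, int last)
{
    return (first - last - 1) & (QBUF_SIZE - 1);
}

int main(void)
{
    printf("empty ring : %d free\n", ring_free(0, 0));  /* 7 */
    printf("one queued : %d free\n", ring_free(0, 1));  /* 6 */
    printf("full ring  : %d free\n", ring_free(0, 7));  /* 0 */
    return 0;
}
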
pwbuffer->data_len=allxfer_len; /* ** push inbound doorbell tell iop driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ arcmsr_iop_message_wrote(acb); } if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; } return; } static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) { /* if (ccb->ccb_h.status != CAM_REQ_CMP) printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status); else printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); */ xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) { struct cam_path *path; union ccb *ccb; if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } /* printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); return; } static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun) { struct CommandControlBlock *srb; u_int32_t intmask_org; int i; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); /* disable all outbound interrupts */ intmask_org = arcmsr_disable_allintr(acb); for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if (srb->srb_state == ARCMSR_SRB_START) { if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { u_int32_t devicemap; u_int32_t target, lun; u_int32_t deviceMapCurrent[4]={0}; u_int8_t *pDevMap; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_B: devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_C: devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target= 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; } if(acb->acb_flags & ACB_F_BUS_HANG_ON) { acb->acb_flags &= ~ACB_F_BUS_HANG_ON; } /* ** adapter posted CONFIG message ** copy the new map, note if there are differences with the current map */ pDevMap = (u_int8_t *)&deviceMapCurrent[0]; for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++) { if (*pDevMap != 
acb->device_map[target]) { u_int8_t difference, bit_check; difference= *pDevMap ^ acb->device_map[target]; for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) { bit_check=(1 << lun); /*check bit from 0....31*/ if(difference & bit_check) { if(acb->device_map[target] & bit_check) {/* unit departed */ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); arcmsr_abort_dr_ccbs(acb, target, lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GONE; } else {/* unit arrived */ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GOOD; } } } /* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ acb->device_map[target]= *pDevMap; } pDevMap++; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; /* clear interrupts */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! 
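
arcmsr_dr_handle() above detects hot-plug events by XORing each target's new LUN bitmap against the cached copy: a set bit in the XOR marks a changed LUN, and the old value of that bit says whether the unit departed or arrived. A standalone model of that diff; the 8-LUN width and the sample bitmaps are illustrative.

#include <stdio.h>
#include <stdint.h>

#define MAX_LUN 8

/* Model of the hot-plug diff in arcmsr_dr_handle(): XOR the new LUN
** bitmap against the cached one; for every changed bit, the old value
** says whether the unit departed (was 1) or arrived (was 0). */
static void diff_device_map(int target, uint8_t old_map, uint8_t new_map)
{
    uint8_t difference = old_map ^ new_map;
    int lun;

    for (lun = 0; lun < MAX_LUN; lun++) {
        uint8_t bit_check = 1 << lun;   /* check bit from 0 upward */

        if (difference & bit_check) {
            if (old_map & bit_check)
                printf("target %d lun %d: GONE\n", target, lun);
            else
                printf("target %d lun %d: plugged in\n", target, lun);
        }
    }
}

int main(void)
{
    /* LUN 1 removed, LUN 3 added. */
    diff_device_map(0, 0x02, 0x08);
    return 0;
}
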
** check if there are any mail need to pack from firmware ******************************************************************* */ outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */ if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */ if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ return; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index=phbbmu->doneq_index; while((flag_srb=phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index]=0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index=index; /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ return; } /* ************************************************************************** 
************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling=0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); break; } throttling++; } /*drain reply FIFO*/ return; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intstatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intstatus) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } return; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell); CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } return; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus 
********************************************* */ host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); if(!host_interrupt_status) { /*it must be share irq*/ return; } /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } return; }
/* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); }
/* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void* arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } }
/* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb!=NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } }
/* *********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD * pcmdmessagefld; u_int32_t retvalue=EINVAL; pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t * pQbuffer; u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex) && (allxfer_len<1031)) {
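/*
** note: the 1031-byte cap mirrors the size of the messagedatabuffer
** field in struct CMD_MESSAGE_FIELD (an assumption recorded here; the
** struct is defined in the header), so one reply can never overrun the
** buffer handed back to the management tool
*/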
/*copy READ QBUFFER to srb*/ pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex]; memcpy(ptmpQbuffer, pQbuffer, 1); acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER * prbuffer; u_int8_t * iop_data; u_int32_t iop_len; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data=(u_int8_t *)prbuffer->data; iop_len=(u_int32_t)prbuffer->data_len; /*this IOP data has no chance to overflow the buffer again here, so just copy it*/ while(iop_len>0) { pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); acb->rqbuf_lastindex++; acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ iop_data++; iop_len--; } arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } pcmdmessagefld->cmdmessage.Length=allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t * pQbuffer; u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; user_len=pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex=acb->wqbuf_lastindex; wqbuf_firstindex=acb->wqbuf_firstindex; if(wqbuf_lastindex!=wqbuf_firstindex) { arcmsr_post_ioctldata2iop(acb); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; } else { my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1); if(my_empty_len>=user_len) { while(user_len>0) { /*copy srb data to wqbuffer*/ pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex]; memcpy(pQbuffer, ptmpuserbuffer, 1); acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpuserbuffer++; user_len--; } /*post first Qbuffer*/ if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_post_ioctldata2iop(acb); } pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; } else { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; } } retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t * pQbuffer=acb->rqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex=0; acb->rqbuf_lastindex=0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t * pQbuffer=acb->wqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex=0; acb->wqbuf_lastindex=0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t * pQbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |ACB_F_MESSAGE_RQBUFFER_CLEARED |ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex=0; acb->rqbuf_lastindex=0; acb->wqbuf_firstindex=0; acb->wqbuf_lastindex=0; pQbuffer=acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pQbuffer=acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_SAY_HELLO: { u_int8_t * hello_string="Hello! I am ARCMSR"; u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return ENOIOCTL; } pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_SAY_GOODBYE: { arcmsr_iop_parking(acb); retvalue=ARCMSR_MESSAGE_SUCCESS; } break;
case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { arcmsr_flush_adapter_cache(acb); retvalue=ARCMSR_MESSAGE_SUCCESS; } break; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return retvalue; }
/* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb) { struct AdapterControlBlock *acb; int mutex; acb = srb->acb; mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); srb->srb_state=ARCMSR_SRB_DONE; srb->srb_flags=0; acb->srbworkingQ[acb->workingsrb_doneindex]=srb; acb->workingsrb_doneindex++; acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); }
/* ************************************************************************** ************************************************************************** */ struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb=NULL; u_int32_t workingsrb_startindex, workingsrb_doneindex; int mutex; mutex = mtx_owned(&acb->qbuffer_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); workingsrb_doneindex=acb->workingsrb_doneindex; workingsrb_startindex=acb->workingsrb_startindex; srb=acb->srbworkingQ[workingsrb_startindex]; workingsrb_startindex++; workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; if(workingsrb_doneindex!=workingsrb_startindex) { acb->workingsrb_startindex=workingsrb_startindex; } else { srb=NULL; } if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return(srb); }
/* ************************************************************************** ************************************************************************** */ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) { struct CMD_MESSAGE_FIELD * pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; /* 4 bytes: Areca io control code */ if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { buffer = pccb->csio.data_ptr;
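/*
** Illustrative sketch only (#if 0, never compiled): the 32-bit Areca
** control code assembled from CDB bytes 5..8 above is a plain
** big-endian load; the helper name below is invented for this sketch.
*/
#if 0
static u_int32_t example_cdb_controlcode(const u_int8_t *cdb)
{
	/* bytes 5..8 of the vendor-specific CDB, most significant first */
	return ((u_int32_t)cdb[5] << 24) | ((u_int32_t)cdb[6] << 16) |
	       ((u_int32_t)cdb[7] << 8) | (u_int32_t)cdb[8];
}
#endif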
transfer_len = pccb->csio.dxfer_len; } else { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; switch(controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; int32_t allxfer_len = 0; while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; memcpy(ptmpQbuffer, pQbuffer, 1); acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; ptmpQbuffer++; allxfer_len++; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; u_int8_t *iop_data; int32_t iop_len; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer=arcmsr_get_iop_rqbuffer(acb); iop_data = (u_int8_t *)prbuffer->data; iop_len =(u_int32_t)prbuffer->data_len; while (iop_len > 0) { pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; memcpy(pQbuffer, iop_data, 1); acb->rqbuf_lastindex++; acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } arcmsr_iop_message_read(acb); } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue=ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_post_ioctldata2iop(acb); /* has error report sensedata */ if(&pccb->csio.sense_data) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; memcpy(pQbuffer, ptmpuserbuffer, 1); acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_post_ioctldata2iop(acb); } } else { /* has error report sensedata */ if(&pccb->csio.sense_data) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case 
ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t * hello_string = "Hello! I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return retvalue; } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; union ccb * pccb; int target, lun; pccb=srb->pccb; target=pccb->ccb_h.target_id; lun=pccb->ccb_h.target_lun; #ifdef ARCMSR_DEBUG1 acb->pktRequestCount++; #endif if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun]==ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; cmd = pccb->csio.cdb_io.cdb_bytes[0]; block_cmd= cmd & 0x0f; if(block_cmd==0x08 || block_cmd==0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status = CAM_REQUEUE_REQ; acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; 
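/*
** the simq was frozen above, so CAM holds further I/O for this bus
** until it is released; CAM_REQUEUE_REQ tells CAM to resubmit this
** ccb instead of failing it
*/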
arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz ) / 1000, arcmsr_srb_timeout, srb); srb->srb_flags |= SRB_FLAG_TIMER_START; } return; }
/* ***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; u_int32_t intmask_org; int i=0; acb->num_aborts++; /* *************************************************************************** ** The upper layer acquires this lock just prior to calling us to abort a command. ** First determine if we currently own this command. ** Start by searching the device queue. If not found ** at all, and the system wanted us to just abort the ** command return success. *************************************************************************** */ if(acb->srboutstandingcount!=0) { /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { srb=acb->psrb_pool[i]; if(srb->srb_state==ARCMSR_SRB_START) { if(srb->pccb==abortccb) { srb->srb_state=ARCMSR_SRB_ABORTED; printf("arcmsr%d: scsi id=%d lun=%d abort srb '%p' " "outstanding command \n" , acb->pci_unit, abortccb->ccb_h.target_id , abortccb->ccb_h.target_lun, srb); arcmsr_polling_srbdone(acb, srb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); return (TRUE); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } return(FALSE); }
/* **************************************************************************** **************************************************************************** */ static void arcmsr_bus_reset(struct AdapterControlBlock *acb) { int retry=0; acb->num_resets++; acb->acb_flags |=ACB_F_BUS_RESET; while(acb->srboutstandingcount!=0 && retry < 400) { arcmsr_interrupt(acb); UDELAY(25000); retry++; } arcmsr_iop_reset(acb); acb->acb_flags &= ~ACB_F_BUS_RESET; return; }
/* ************************************************************************** ************************************************************************** */ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb * pccb) { pccb->ccb_h.status |= CAM_REQ_CMP; switch (pccb->csio.cdb_io.cdb_bytes[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer=pccb->csio.data_ptr; if (pccb->ccb_h.target_lun) { pccb->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done(pccb); return; } inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[3] = 0; inqdata[4] = 31; /* length of additional data */ inqdata[5] = 0; inqdata[6] = 0; inqdata[7] = 0; strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */ memcpy(buffer, inqdata, sizeof(inqdata)); xpt_done(pccb); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, pccb)) { pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
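/* report CHECK CONDITION so the peripheral driver sees that the
** ioctl pass-through transfer failed */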
pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } xpt_done(pccb); } break; default: xpt_done(pccb); } }
/* ********************************************************************* ********************************************************************* */ static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) { struct AdapterControlBlock * acb; acb=(struct AdapterControlBlock *) cam_sim_softc(psim); if(acb==NULL) { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); return; } switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct CommandControlBlock *srb; int target=pccb->ccb_h.target_id; if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb=arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; pccb->ccb_h.arcmsr_ccbacb_ptr=acb; srb->pccb=pccb; if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { /* Single buffer */ if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { /* Buffer is virtual */ u_int32_t error, s; s=splsoftvm(); error = bus_dmamap_load(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb->csio.data_ptr , pccb->csio.dxfer_len , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { /* Buffer is physical */
#ifdef PAE
panic("arcmsr: CAM_DATA_PHYS not supported");
#else
struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr; seg.ds_len = pccb->csio.dxfer_len; arcmsr_execute_srb(srb, &seg, 1, 0);
#endif
} } else { /* Scatter/gather list */ struct bus_dma_segment *segs; if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { pccb->ccb_h.status |= CAM_PROVIDE_FAIL; xpt_done(pccb); free(srb, M_DEVBUF); return; } segs=(struct bus_dma_segment *)pccb->csio.data_ptr; arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0); } } else { arcmsr_execute_srb(srb, NULL, 0, 0); } break; } case XPT_TARGET_IO: { /* target mode does not yet support vendor specific commands.
*/ pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi= &pccb->cpi; cpi->version_num=1; cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt=0; cpi->hba_misc=0; cpi->hba_eng_cnt=0; cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id=cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number=cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb=pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts= &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->flags = CTS_SPI_FLAGS_DISC_ENB; spi->sync_period=3; spi->sync_offset=32; spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; scsi->valid = CTS_SCSI_VALID_TQ; } #else { cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); cts->sync_period=3; cts->sync_offset=32; cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT; cts->valid=CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } - case XPT_CALC_GEOMETRY: { - struct ccb_calc_geometry *ccg; - u_int32_t size_mb; - u_int32_t secs_per_cylinder; - + case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } +#if __FreeBSD_version >= 500000 + cam_calc_geometry(&pccb->ccg, 1); +#else + { + struct ccb_calc_geometry *ccg; + u_int32_t size_mb; + u_int32_t secs_per_cylinder; + ccg= &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb=ccg->volume_size/((1024L * 
1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads=255; ccg->secs_per_track=63; } else { ccg->heads=64; ccg->secs_per_track=32; } secs_per_cylinder=ccg->heads * ccg->secs_per_track; ccg->cylinders=ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; + } +#endif xpt_done(pccb); break; - } default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; } return; }
/* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport))==0xFFFFFFFF) { if(poll_srb_done) { break;/*no more completed srbs in the chip FIFO*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check if command done with no error*/ srb=(struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error=(flag_srb &
ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb==poll_srb) ? 1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p' " "polled command aborted successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p', " "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ return; }
/* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index=phbbmu->doneq_index; if((flag_srb=phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*no more completed srbs in the chip FIFO*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index]=0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index=index; /* check if command done with no error*/ srb=(struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb==poll_srb) ? 1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p' " "polled command aborted successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p', " "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ return; }
/* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*no more completed srbs in the chip FIFO*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb==poll_srb) ? 1:0; if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) { if(srb->srb_state==ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%d srb='%p' polled command aborted successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p', srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; } }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int
i; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait " "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ return; }
/*
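**********************************************************************
** The GET_CONFIG reply layout is shared by all three adapter types:
** msgcode_rwbuffer[1..4] carry request length, queue depth, SDRAM size
** and channel count, the model string sits at word offset 15 (bytes
** 60-67) and the firmware version at word offset 17 (bytes 68-83).
**********************************************************************
*/
/*
** Illustrative sketch only (#if 0, never compiled): pulling one of the
** byte-packed firmware strings out of the message unit BAR, as the
** readers above do. The helper name and arguments are invented here.
*/
#if 0
static void example_read_fw_string(struct AdapterControlBlock *acb,
	size_t reg_off, char *dst, int len)
{
	int i;

	/* firmware strings are stored as raw bytes in the BAR */
	for (i = 0; i < len; i++)
		dst[i] = bus_space_read_1(acb->btag[0], acb->bhandle[0],
		    reg_off + i);
}
#endif
/*
**********************************************************************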
********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model=acb->firm_model; char *acb_firm_version=acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i=0; while(i<8) { *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i<16) { *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i<16) { *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); printf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ return; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; } return; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_B: { while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 
milli-seconds */ } CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty doorbell Qbuffer if the doorbell rang */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty doorbell Qbuffer if the doorbell rang */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } break; } return; }
/* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr; // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16); srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32!=0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu=(struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index=0; phbbmu->doneq_index=0; CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32!=0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; } return TRUE; }
/* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: break; case ACB_ADAPTER_TYPE_B: { CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if the doorbell rang */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |=ACB_F_IOP_INITED; return; }
/* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb=arg; struct CommandControlBlock *srb_tmp; u_int8_t * dma_memptr; u_int32_t i; unsigned long srb_phyaddr=(unsigned long)segs->ds_addr; dma_memptr=acb->uncacheptr; acb->srb_phyaddr.phyaddr=srb_phyaddr; srb_tmp=(struct CommandControlBlock *)dma_memptr; for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) { acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n", acb->pci_unit); return; } srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5); srb_tmp->acb=acb; acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp; srb_phyaddr=srb_phyaddr+SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE); } acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr; return; }
/* ************************************************************************ ** ** ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); return; }
/* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb=device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; switch (pci_get_devid(dev)) { case PCIDevVenIDARC1880: { acb->adapter_type=ACB_ADAPTER_TYPE_C; max_coherent_size=ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type=ACB_ADAPTER_TYPE_B; max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type=ACB_ADAPTER_TYPE_A; max_coherent_size=ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } if(bus_dma_tag_create( /*parent*/ NULL, /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0,
#if __FreeBSD_version >= 501102
/*lockfunc*/ NULL, /*lockarg*/ NULL,
#endif
&acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need.
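*/
/*
** Three nested DMA tags are used by arcmsr_initialize: parent_dmat
** (created above) spans the device as a whole, dm_segs_dmat (created
** next) constrains per-I/O scatter/gather maps to ARCMSR_MAX_SG_ENTRIES
** segments, and srb_dmat further below carves out the coherent,
** 32-byte aligned srb pool that the IOP addresses directly.
*/
/*
** the dm_segs tag creation follows: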
*/ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->qbuffer_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures.... Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command=pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster/Mem */ pci_command |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0=PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu=(struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; for(i=0; i<2; i++) { if(i==0) { acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, sizeof(struct HBB_DOORBELL), 
RF_ACTIVE); } else { acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i]==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb=(struct CommandControlBlock *)acb->uncacheptr; // acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM]; acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu=(struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1]; } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0=PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0==0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu=(struct MessageUnit_UNION *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* ******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0;i<ARCMSR_MAX_TARGETID;i++) { for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) { acb->devstate[i][j]=ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); }
/* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock"); if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid=0; irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL ||
#if __FreeBSD_version >= 700025
bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres=irqres; acb->pci_dev=dev; acb->pci_unit=unit; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus * start queue to reset to the idle loop. * * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq=cam_simq_alloc(ARCMSR_MAX_START_JOB); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code=XPT_SASYNC_CB; csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback=arcmsr_async; csa.callback_arg=acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); /* Create the control device. 
*/ acb->ioctl_dev=make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1=acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return 0; } /* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; static char buf[256]; char x_type[]={"X-TYPE"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } switch(id=pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: type = "SAS 6G"; break; default: type = x_type; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n", type, raid6 ? "(RAID6 capable)" : ""); device_set_desc_copy(dev, buf); return 0; } /* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); /* disable all outbound interrupt */ intmask_org=arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount!=0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { srb=acb->psrb_pool[i]; if(srb->srb_state==ARCMSR_SRB_START) { srb->srb_state=ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount=0; acb->workingsrb_doneindex=0; acb->workingsrb_startindex=0; #ifdef ARCMSR_DEBUG1 acb->pktRequestCount = 0; acb->pktReturnCount = 0; #endif ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
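/*
** Illustrative sketch, not driver code: arcmsr_probe() above leans on C
** switch fall-through, so the non-RAID6 device IDs first clear the raid6
** flag and then share the SATA classification with the IDs listed after
** them. The same shape in miniature, with made-up IDs and a hypothetical
** helper name; compiled out with #if 0 since it is illustrative only.
*/
#if 0
static const char *
classify_sketch(u_int32_t devid, int *raid6)
{
	*raid6 = 1;
	switch (devid) {
	case 0x1110:	/* hypothetical non-RAID6 part */
		*raid6 = 0;
		/* FALLTHRU into the shared SATA branch */
	case 0x1120:	/* hypothetical RAID6-capable part */
		return "SATA";
	default:
		return NULL;	/* caller maps NULL to ENXIO, as probe does */
	}
}
#endif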
ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: head/sys/dev/hptiop/hptiop.c =================================================================== --- head/sys/dev/hptiop/hptiop.c (revision 227911) +++ head/sys/dev/hptiop/hptiop.c (revision 227912) @@ -1,2209 +1,2213 @@ /* * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #if (__FreeBSD_version >= 500000) #include #include #else #include #endif #include #include #include #include #include #if (__FreeBSD_version >= 500000) #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #if (__FreeBSD_version >= 500000) #include #include #else #include #include #endif #if (__FreeBSD_version <= 500043) #include #endif #include #include #include #include #include #include #include #include #if (__FreeBSD_version < 500043) #include #endif #include static char driver_name[] = "hptiop"; static char driver_version[] = "v1.3 (010208)"; static devclass_t hptiop_devclass; static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec); static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_rescan_bus(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); static int hptiop_get_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); static int hptiop_probe(device_t dev); static int hptiop_attach(device_t dev); static int hptiop_detach(device_t dev); static int hptiop_shutdown(device_t dev); static void hptiop_action(struct cam_sim *sim, union ccb *ccb); static void hptiop_poll(struct cam_sim *sim); static void hptiop_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hptiop_pci_intr(void *arg); static void hptiop_release_resource(struct hpt_iop_hba *hba); static int hptiop_reset_adapter(struct hpt_iop_hba *hba); static d_open_t 
hptiop_open; static d_close_t hptiop_close; static d_ioctl_t hptiop_ioctl; static struct cdevsw hptiop_cdevsw = { .d_open = hptiop_open, .d_close = hptiop_close, .d_ioctl = hptiop_ioctl, .d_name = driver_name, #if __FreeBSD_version>=503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif #if __FreeBSD_version<600034 #if __FreeBSD_version>=501000 .d_maj = MAJOR_AUTO, #else .d_maj = HPT_DEV_MAJOR, #endif #endif }; #if __FreeBSD_version < 503000 #define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1) #else #define hba_from_dev(dev) \ ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) #endif #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) static int hptiop_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); if (hba==NULL) return ENXIO; if (hba->flag & HPT_IOCTL_FLAG_OPEN) return EBUSY; hba->flag |= HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int flags, ioctl_thread_t proc) { int ret = EFAULT; struct hpt_iop_hba *hba = hba_from_dev(dev); #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif switch (cmd) { case HPT_DO_IOCONTROL: ret = hba->ops->do_ioctl(hba, (struct hpt_iop_ioctl_param *)data); break; case HPT_SCAN_BUS: ret = hptiop_rescan_bus(hba); break; } #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif return ret; } static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) { u_int64_t p; u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); if (outbound_tail != outbound_head) { bus_space_read_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, outbound_q[outbound_tail]), (u_int32_t *)&p, 2); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); return p; } else return 0; } static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) { u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); u_int32_t head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; bus_space_write_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), (u_int32_t *)&p, 2); BUS_SPACE_WRT4_MV2(inbound_head, head); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); } static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t 
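/*
** Illustrative sketch, not driver code: hptiop_mv_outbound_read() and
** hptiop_mv_inbound_write() above treat inbound_q/outbound_q as rings in
** which the producer advances head, the consumer advances tail, and both
** wrap at MVIOP_QUEUE_LEN. The index arithmetic in isolation; QLEN, q,
** head and tail are stand-ins for the hardware queue state.
*/
#if 0
#define QLEN 64
static u_int64_t q[QLEN];
static u_int32_t head, tail;

static void
ring_put(u_int64_t v)		/* mirrors hptiop_mv_inbound_write() */
{
	u_int32_t next = head + 1;
	if (next == QLEN)
		next = 0;
	q[head] = v;
	head = next;		/* publish new head, then ring the doorbell */
}

static int
ring_get(u_int64_t *v)		/* mirrors hptiop_mv_outbound_read() */
{
	if (tail == head)
		return 0;	/* empty, the 0 the driver returns */
	*v = q[tail];
	if (++tail == QLEN)
		tail = 0;
	return 1;
}
#endif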
msg) { BUS_SPACE_WRT4_MV2(inbound_msg, msg); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); BUS_SPACE_RD4_MV0(outbound_intmask); } static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) { u_int32_t req=0; int i; for (i = 0; i < millisec; i++) { req = BUS_SPACE_RD4_ITL(inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; DELAY(1000); } if (req!=IOPMU_QUEUE_EMPTY) { BUS_SPACE_WRT4_ITL(outbound_queue, req); BUS_SPACE_RD4_ITL(outbound_intstatus); return 0; } return -1; } static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, u_int32_t index) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req=0; union ccb *ccb; u_int8_t *cdb; u_int32_t result, temp, dxfer; u_int64_t temp64; if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { srb = hba->srb[index & ~(u_int32_t) (IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; req = (struct hpt_iop_request_scsi_command *)srb; if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) result = IOP_RESULT_SUCCESS; else result = req->header.result; } else { srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; result = req->header.result; } dxfer = req->dataxfer_length; goto srb_complete; } /*iop req*/ temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, type)); result = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, result)); switch(temp) { case IOP_REQUEST_TYPE_IOCTL_COMMAND: { temp64 = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); wakeup((void *)((unsigned long)hba->u.itl.mu + index)); break; } case IOP_REQUEST_TYPE_SCSI_COMMAND: bus_space_read_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); srb = (struct hpt_iop_srb *)(unsigned long)temp64; dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, dataxfer_length)); srb_complete: ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } switch (result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (dxfer < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - dxfer; else ccb->csio.sense_resid = 0; if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ bus_space_read_region_1(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, sg_list), (u_int8_t *)&ccb->csio.sense_data, MIN(dxfer, sizeof(ccb->csio.sense_data))); } else { memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(dxfer, sizeof(ccb->csio.sense_data))); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) BUS_SPACE_WRT4_ITL(outbound_queue, index); ccb->csio.resid = ccb->csio.dxfer_len - dxfer; hptiop_free_srb(hba, srb); xpt_done(ccb); break; } } static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) { u_int32_t req, temp; while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header *p; p = (struct hpt_iop_request_header *) ((char *)hba->u.itl.mu + req); temp = bus_space_read_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, flags)); if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { u_int64_t temp64; bus_space_read_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) { hptiop_request_callback_itl(hba, req); } else { temp64 = 1; bus_space_write_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); } } else hptiop_request_callback_itl(hba, req); } } } static int hptiop_intr_itl(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_ITL(outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); KdPrint(("hptiop: received outbound msg %x\n", msg)); BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, u_int64_t _tag) { u_int32_t context = (u_int32_t)_tag; if (context & MVIOP_CMD_TYPE_SCSI) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb 
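/*
** Illustrative sketch, not driver code: both request callbacks above
** translate IOP result codes into CAM status the same way; factored here
** as a hypothetical helper for clarity. The check-condition case, which
** additionally copies sense data and sets CAM_AUTOSNS_VALID, is left out.
*/
#if 0
static u_int32_t
hpt_result_to_cam_sketch(u_int32_t result)
{
	switch (result) {
	case IOP_RESULT_SUCCESS:	 return CAM_REQ_CMP;
	case IOP_RESULT_BAD_TARGET:	 return CAM_DEV_NOT_THERE;
	case IOP_RESULT_BUSY:		 return CAM_BUSY;
	case IOP_RESULT_RESET:		 return CAM_BUSY;
	case IOP_RESULT_INVALID_REQUEST: return CAM_REQ_INVALID;
	default:			 return CAM_SCSI_STATUS_ERROR;
	}
}
#endif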
*)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); } else if (context & MVIOP_CMD_TYPE_IOCTL) { struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup(req); } else if (context & (MVIOP_CMD_TYPE_SET_CONFIG | MVIOP_CMD_TYPE_GET_CONFIG)) hba->config_done = 1; else { device_printf(hba->pcidev, "wrong callback type\n"); } } static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) { u_int64_t req; while ((req = hptiop_mv_outbound_read(hba))) { if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { hptiop_request_callback_mv(hba, req); } } } } static int hptiop_intr_mv(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_MV0(outbound_doorbell); if (status) BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); KdPrint(("hptiop: received outbound msg %x\n", msg)); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_mv(hba); ret = 1; } return ret; } static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, u_int32_t req32, u_int32_t millisec) { u_int32_t i; u_int64_t temp64; BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); for (i = 0; i < millisec; i++) { hptiop_intr_itl(hba); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i; u_int64_t phy_addr; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy | 
(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; ((struct hpt_iop_request_get_config *)req)->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT; hptiop_mv_inbound_write(phy_addr, hba); BUS_SPACE_RD4_MV0(outbound_intmask); for (i = 0; i < millisec; i++) { hptiop_intr_mv(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec) { u_int32_t i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i=0; i<millisec; i++) { hba->ops->iop_intr(hba); if (hba->msg_done) break; DELAY(1000); } return hba->msg_done? 0 : -1; } static int hptiop_get_config_itl(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { u_int32_t req32; config->header.size = sizeof(struct hpt_iop_request_get_config); config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_header) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } bus_space_read_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_get_config) >> 2); BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_get_config_mv(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_get_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } *config = *req; return 0; } static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { u_int32_t req32; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; config->header.size = sizeof(struct hpt_iop_request_set_config); config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_set_config) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int
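/*
** Illustrative sketch, not driver code: hptiop_send_sync_msg() and both
** send_sync_request paths above share one pattern - post the request,
** then poll the interrupt path once per millisecond until a done flag
** flips or the budget expires. Generic form; poll and done are stand-ins
** for hba->ops->iop_intr and hba->msg_done/config_done.
*/
#if 0
static int
wait_done_sketch(void (*poll)(void *), volatile int *done, void *arg,
		 u_int32_t millisec)
{
	u_int32_t i;

	for (i = 0; i < millisec; i++) {
		poll(arg);	/* e.g. hba->ops->iop_intr(hba) */
		if (*done)
			return 0;
		DELAY(1000);	/* busy-wait one millisecond */
	}
	return -1;		/* timed out */
}
#endif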
hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams) { u_int64_t temp64; struct hpt_iop_request_ioctl_command req; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; req.header.result = IOP_RESULT_PENDING; req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req.inbuf_size = pParams->nInBufferSize; req.outbuf_size = pParams->nOutBufferSize; req.bytes_returned = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); hptiop_lock_adapter(hba); BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); while (temp64) { if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); } hptiop_unlock_adapter(hba); return 0; } static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { if (copyin((u_int8_t *)user + i, &byte, 1)) return -1; bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); } return 0; } static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); if (copyout(&byte, (u_int8_t *)user + i, 1)) return -1; } return 0; } static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams) { u_int32_t req32; u_int32_t result; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return EFAULT; if (pParams->nInBufferSize) if (hptiop_bus_space_copyin(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf), (void *)pParams->lpInBuffer, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) goto invalid; result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.result)); if (result == IOP_RESULT_SUCCESS) { if (pParams->nOutBufferSize) if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf) + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) { if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), (void *)pParams->lpBytesReturned, sizeof(unsigned long))) goto invalid; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } else{ invalid: BUS_SPACE_WRT4_ITL(outbound_queue, req32); return EFAULT; } } static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t req_phy; int size =
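/*
** Illustrative sketch, not driver code: the ioctl paths above pack the
** input buffer, rounded up to a 4-byte boundary, ahead of the output
** buffer in one request frame; both the size check and the output offset
** fall out of that layout. The helper name is hypothetical.
*/
#if 0
static int
ioctl_fits_sketch(u_int32_t in_sz, u_int32_t out_sz, u_int32_t max_request,
		  u_int32_t hdr_sz)
{
	u_int32_t in_rounded = (in_sz + 3) & ~3;	/* dword align */

	/* output data starts at hdr_sz + in_rounded within the frame */
	return (in_rounded + out_sz <= max_request - hdr_sz);
}
#endif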
0; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; size = size > 3 ? 3 : size; req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; hptiop_mv_inbound_write(req_phy, hba); BUS_SPACE_RD4_MV0(outbound_intmask); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mv(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_rescan_bus(struct hpt_iop_hba * hba) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); return(0); } static bus_dmamap_callback_t hptiop_map_srb; static bus_dmamap_callback_t hptiop_post_scsi_command; static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop base address.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.itl.mu = (struct hpt_iopmu_itl *) rman_get_virtual(hba->bar0_res); if (!hba->u.itl.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc mem res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mv.regs = (struct hpt_iopmv_regs *)
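/*
** Illustrative sketch, not driver code: both alloc_pci_res routines above
** use the same BAR-mapping idiom - allocate the memory resource by its
** config-space rid (0x10 is BAR0, 0x18 is BAR2), then pull the bus tag,
** bus handle and KVA out of the rman resource. Error unwinding elided;
** dev and res are stand-ins.
*/
#if 0
int rid = 0x10;			/* BAR0 */
struct resource *res;

res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (res != NULL) {
	bus_space_tag_t    t  = rman_get_bustag(res);
	bus_space_handle_t h  = rman_get_bushandle(res);
	void              *va = rman_get_virtual(res);
	/* a NULL va must release the resource again, as done above */
}
#endif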
rman_get_virtual(hba->bar0_res); if (!hba->u.mv.regs) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); if (!hba->u.mv.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); } static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) { if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 0x800 - 0x8, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, #if __FreeBSD_version > 502000 NULL, NULL, #endif &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, #if __FreeBSD_version>501000 BUS_DMA_WAITOK | BUS_DMA_COHERENT, #else BUS_DMA_WAITOK, #endif &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, MVIOP_IOCTLCFG_SIZE, hptiop_mv_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } return 0; } static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hptiop_probe), DEVMETHOD(device_attach, hptiop_attach), DEVMETHOD(device_detach, hptiop_detach), DEVMETHOD(device_shutdown, hptiop_shutdown), { 0, 0 } }; static struct hptiop_adapter_ops hptiop_itl_ops = { .iop_wait_ready = hptiop_wait_ready_itl, .internal_memalloc = 0, .internal_memfree = 0, .alloc_pci_res = hptiop_alloc_pci_res_itl, .release_pci_res = hptiop_release_pci_res_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = hptiop_get_config_itl, .set_config = hptiop_set_config_itl, .iop_intr = hptiop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .do_ioctl = hptiop_do_ioctl_itl, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .iop_wait_ready = hptiop_wait_ready_mv, 
.internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .alloc_pci_res = hptiop_alloc_pci_res_mv, .release_pci_res = hptiop_release_pci_res_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = hptiop_get_config_mv, .set_config = hptiop_set_config_mv, .iop_intr = hptiop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .do_ioctl = hptiop_do_ioctl_mv, }; static driver_t hptiop_pci_driver = { driver_name, driver_methods, sizeof(struct hpt_iop_hba) }; DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); static int hptiop_probe(device_t dev) { struct hpt_iop_hba *hba; u_int32_t id; static char buf[256]; int sas = 0; struct hptiop_adapter_ops *ops; if (pci_get_vendor(dev) != 0x1103) return (ENXIO); id = pci_get_device(dev); switch (id) { case 0x4322: case 0x4321: case 0x4320: sas = 1; case 0x3220: case 0x3320: case 0x3410: case 0x3520: case 0x3510: case 0x3511: case 0x3521: case 0x3522: case 0x3540: ops = &hptiop_itl_ops; break; case 0x3120: case 0x3122: case 0x3020: ops = &hptiop_mv_ops; break; default: return (ENXIO); } device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)); sprintf(buf, "RocketRAID %x %s Controller\n", id, sas ? "SAS" : "SATA"); device_set_desc_copy(dev, buf); hba = (struct hpt_iop_hba *)device_get_softc(dev); bzero(hba, sizeof(struct hpt_iop_hba)); hba->ops = ops; KdPrint(("hba->ops=%p\n", hba->ops)); return 0; } static int hptiop_attach(device_t dev) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; int rid = 0; struct cam_devq *devq; struct ccb_setasync ccb; u_int32_t unit = device_get_unit(dev); device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", unit, driver_version); KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), hba->ops)); #if __FreeBSD_version >=440000 pci_enable_busmaster(dev); #endif hba->pcidev = dev; hba->pciunit = unit; if (hba->ops->alloc_pci_res(hba)) return ENXIO; if (hba->ops->iop_wait_ready(hba, 2000)) { device_printf(dev, "adapter is not ready\n"); goto release_pci_res; } #if (__FreeBSD_version >= 500000) mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); #endif if (bus_dma_tag_create(NULL,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ #if __FreeBSD_version>502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &hba->parent_dmat /* tag */)) { device_printf(dev, "alloc parent_dmat failed\n"); goto release_pci_res; } if (hba->ops->internal_memalloc) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } } if (hba->ops->get_config(hba, &iop_config)) { device_printf(dev, "get iop config failed.\n"); goto get_config_failed; } hba->firmware_version = iop_config.firmware_version; hba->interface_version = iop_config.interface_version; hba->max_requests = iop_config.max_requests; hba->max_devices = iop_config.max_devices; hba->max_request_size = iop_config.request_size; hba->max_sg_count = iop_config.max_sg_count; if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 
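/*
** Illustrative sketch, not driver code: attach() above builds three DMA
** tags - a permissive parent, io_dmat for scatter/gather data and
** srb_dmat for the uncached request pool. Deriving one child tag from a
** parent looks like this (FreeBSD >= 5.x signature, matching the
** surrounding #ifdefs; all values here are placeholders, not the
** driver's):
*/
#if 0
bus_dma_tag_t child;

if (bus_dma_tag_create(parent_tag,
	4,			/* alignment */
	0,			/* boundary */
	BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
	BUS_SPACE_MAXADDR,	/* highaddr */
	NULL, NULL,		/* filter, filterarg */
	65536,			/* maxsize */
	16,			/* nsegments */
	65536,			/* maxsegsize */
	0,			/* flags */
	NULL, NULL,		/* lockfunc, lockfuncarg */
	&child) != 0)
	return ENXIO;
#endif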
4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ hba->max_sg_count, /* nsegments */ 0x20000, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &hba->lock, /* lockfuncarg */ #endif &hba->io_dmat /* tag */)) { device_printf(dev, "alloc io_dmat failed\n"); goto get_config_failed; } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ #if __FreeBSD_version>502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &hba->srb_dmat /* tag */)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_io_dmat; } if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, #if __FreeBSD_version>501000 BUS_DMA_WAITOK | BUS_DMA_COHERENT, #else BUS_DMA_WAITOK, #endif &hba->srb_dmamap) != 0) { device_printf(dev, "srb bus_dmamem_alloc failed!\n"); goto destroy_srb_dmat; } if (bus_dmamap_load(hba->srb_dmat, hba->srb_dmamap, hba->uncached_ptr, (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, hptiop_map_srb, hba, 0)) { device_printf(dev, "bus_dmamap_load failed!\n"); goto srb_dmamem_free; } if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { device_printf(dev, "cam_simq_alloc failed\n"); goto srb_dmamap_unload; } #if __FreeBSD_version <700000 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, hba->max_requests - 1, 1, devq); #else hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, &Giant, hba->max_requests - 1, 1, devq); #endif if (!hba->sim) { device_printf(dev, "cam_sim_alloc failed\n"); cam_simq_free(devq); goto srb_dmamap_unload; } #if __FreeBSD_version <700000 if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS) #else if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) #endif { device_printf(dev, "xpt_bus_register failed\n"); goto free_cam_sim; } if (xpt_create_path(&hba->path, /*periph */ NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt_create_path failed\n"); goto deregister_xpt_bus; } bzero(&set_config, sizeof(set_config)); set_config.iop_id = unit; set_config.vbus_id = cam_sim_path(hba->sim); set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; if (hba->ops->set_config(hba, &set_config)) { device_printf(dev, "set iop config failed.\n"); goto free_hba_path; } xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } #if __FreeBSD_version <700000 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, hptiop_pci_intr, hba, &hba->irq_handle)) #else if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, NULL, hptiop_pci_intr, hba, &hba->irq_handle)) #endif { device_printf(dev, "allocate intr function failed!\n"); goto free_irq_resource; } if (hptiop_send_sync_msg(hba, 
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { device_printf(dev, "failed to start background task\n"); goto teardown_irq_resource; } hba->ops->enable_intr(hba); hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit, UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); #if __FreeBSD_version < 503000 hba->ioctl_dev->si_drv1 = hba; #endif return 0; teardown_irq_resource: bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); free_irq_resource: bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); free_hba_path: xpt_free_path(hba->path); deregister_xpt_bus: xpt_bus_deregister(cam_sim_path(hba->sim)); free_cam_sim: cam_sim_free(hba->sim, /*free devq*/ TRUE); srb_dmamap_unload: if (hba->uncached_ptr) bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); srb_dmamem_free: if (hba->uncached_ptr) bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap); destroy_srb_dmat: if (hba->srb_dmat) bus_dma_tag_destroy(hba->srb_dmat); destroy_io_dmat: if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); get_config_failed: if (hba->ops->internal_memfree) hba->ops->internal_memfree(hba); destroy_parent_tag: if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); release_pci_res: if (hba->ops->release_pci_res) hba->ops->release_pci_res(hba); return ENXIO; } static int hptiop_detach(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int i; int error = EBUSY; hptiop_lock_adapter(hba); for (i = 0; i < hba->max_devices; i++) if (hptiop_os_query_remove_device(hba, i)) { device_printf(dev, "%d file system is busy. id=%d", hba->pciunit, i); goto out; } if ((error = hptiop_shutdown(dev)) != 0) goto out; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) goto out; hptiop_release_resource(hba); error = 0; out: hptiop_unlock_adapter(hba); return error; } static int hptiop_shutdown(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int error = 0; if (hba->flag & HPT_IOCTL_FLAG_OPEN) { device_printf(dev, "%d device is busy", hba->pciunit); return EBUSY; } hba->ops->disable_intr(hba); if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) error = EBUSY; return error; } static void hptiop_pci_intr(void *arg) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; hptiop_lock_adapter(hba); hba->ops->iop_intr(hba); hptiop_unlock_adapter(hba); } static void hptiop_poll(struct cam_sim *sim) { hptiop_pci_intr(cam_sim_softc(sim)); } static void hptiop_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { } static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_ITL(outbound_intmask, ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); } static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG; BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); } static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
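/*
** Illustrative sketch, not driver code: the attach() failure ladder above
** acquires resources in order and releases them in exact reverse order,
** one label per acquired resource, so a failure at any step unwinds only
** what was actually taken. The shape in miniature; stepN/undoN are
** hypothetical.
*/
#if 0
static int
attach_sketch(void)
{
	if (step1() != 0)
		goto fail0;
	if (step2() != 0)
		goto fail1;
	if (step3() != 0)
		goto fail2;
	return 0;
fail2:	undo2();		/* undo step2 */
fail1:	undo1();		/* undo step1 */
fail0:	return ENXIO;		/* nothing to undo */
}
#endif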
BUS_SPACE_RD4_MV0(outbound_intmask); } static int hptiop_reset_adapter(struct hpt_iop_hba * hba) { return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } static void *hptiop_get_srb(struct hpt_iop_hba * hba) { struct hpt_iop_srb * srb; if (hba->srb_list) { srb = hba->srb_list; hba->srb_list = srb->next; return srb; } return NULL; } static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) { srb->next = hba->srb_list; hba->srb_list = srb; } static void hptiop_action(struct cam_sim *sim, union ccb *ccb) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hptiop_lock_adapter(hba); if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= hba->max_devices || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); goto scsi_done; } if ((srb = hptiop_get_srb(hba)) == NULL) { device_printf(hba->pcidev, "srb allocation failed"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); goto scsi_done; } srb->ccb = ccb; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) hptiop_post_scsi_command(srb, NULL, 0, 0); else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { int error; error = bus_dmamap_load(hba->io_dmat, srb->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hptiop_post_scsi_command, srb, 0); if (error && error != EINPROGRESS) { device_printf(hba->pcidev, "%d bus_dmamap_load error %d", hba->pciunit, error); xpt_freeze_simq(hba->sim, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR; invalid: hptiop_free_srb(hba, srb); xpt_done(ccb); goto scsi_done; } } else { device_printf(hba->pcidev, "CAM_DATA_PHYS not supported"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; goto invalid; } } else { struct bus_dma_segment *segs; if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { device_printf(hba->pcidev, "SCSI cmd failed"); ccb->ccb_h.status=CAM_PROVIDE_FAIL; goto invalid; } segs = (struct bus_dma_segment *)ccb->csio.data_ptr; hptiop_post_scsi_command(srb, segs, ccb->csio.sglist_cnt, 0); } scsi_done: hptiop_unlock_adapter(hba); return; case XPT_RESET_BUS: device_printf(hba->pcidev, "reset adapter"); hptiop_lock_adapter(hba); hba->msg_done = 0; hptiop_reset_adapter(hba); hptiop_unlock_adapter(hba); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: +#if __FreeBSD_version >= 500000 + cam_calc_geometry(&ccb->ccg, 1); +#else ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; +#endif break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = hba->max_devices; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = hba->max_devices; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void
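/*
** Illustrative sketch, not driver code: hptiop_get_srb()/hptiop_free_srb()
** above keep spare SRBs on a LIFO singly-linked list threaded through the
** SRBs' own next pointers, so both operations are O(1) and allocation-free:
*/
#if 0
struct node { struct node *next; };
static struct node *free_list;

static struct node *
get_node(void)
{
	struct node *n = free_list;

	if (n != NULL)
		free_list = n->next;	/* pop */
	return n;			/* NULL when the pool is exhausted */
}

static void
put_node(struct node *n)
{
	n->next = free_list;		/* push */
	free_list = n;
}
#endif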
hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx; union ccb *ccb = srb->ccb; u_int8_t *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("ccb=%p %x-%x-%x\n", ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { u_int32_t iop_req32; struct hpt_iop_request_scsi_command req; iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (iop_req32 == IOPMU_QUEUE_EMPTY) { device_printf(hba->pcidev, "invalid req offset\n"); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req.sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req.cdb, ccb->csio.cdb_len); req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req.header.flags = 0; req.header.result = IOP_RESULT_PENDING; req.header.context = (u_int64_t)(unsigned long)srb; req.dataxfer_length = ccb->csio.dxfer_len; req.channel = 0; req.target = ccb->ccb_h.target_id; req.lun = ccb->ccb_h.target_lun; bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, (u_int8_t *)&req, req.header.size); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); } else { struct hpt_iop_request_scsi_command *req; req = (struct hpt_iop_request_scsi_command *)srb; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req->header.context = (u_int64_t)srb->index | IOPMU_QUEUE_ADDR_HOST_BIT; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { u_int32_t size_bits; if (req->header.size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (req->header.size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr | size_bits); } else BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr |IOPMU_QUEUE_ADDR_HOST_BIT); } } static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, size; union ccb *ccb = srb->ccb;
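/*
** Illustrative sketch, not driver code: every post_req path above builds
** its scatter/gather table the same way - copy each bus_dma segment, then
** flag the final entry as end-of-table via psg[-1], which is valid only
** because the loop leaves psg one past the last element written. sg_table
** is a stand-in for req->sg_list:
*/
#if 0
if (nsegs > 0) {
	struct hpt_iopsg *psg = sg_table;
	int idx;

	for (idx = 0; idx < nsegs; idx++, psg++) {
		psg->pci_address = (u_int64_t)segs[idx].ds_addr;
		psg->size = segs[idx].ds_len;
		psg->eot = 0;
	}
	psg[-1].eot = 1;	/* mark the last segment */
}
#endif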
u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.context = (u_int64_t)srb->index << MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; hptiop_mv_inbound_write(req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | (size > 3 ? 3 : size), hba); } static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; union ccb *ccb = srb->ccb; struct hpt_iop_hba *hba = srb->hba; if (error || nsegs > hba->max_sg_count) { KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n", ccb->ccb_h.func_code, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, nsegs)); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } hba->ops->post_req(hba, srb, segs, nsegs); } static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); } static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; struct hpt_iop_srb *srb, *tmp_srb; int i; if (error || nsegs == 0) { device_printf(hba->pcidev, "hptiop_map_srb error"); return; } /* map srb */ srb = (struct hpt_iop_srb *) (((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F); for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { tmp_srb = (struct hpt_iop_srb *) ((char *)srb + i * HPT_SRB_MAX_SIZE); if (((unsigned long)tmp_srb & 0x1F) == 0) { if (bus_dmamap_create(hba->io_dmat, 0, &tmp_srb->dma_map)) { device_printf(hba->pcidev, "dmamap create failed"); return; } bzero(tmp_srb, sizeof(struct hpt_iop_srb)); tmp_srb->hba = hba; tmp_srb->index = i; if (hba->ctlcfg_ptr == 0) {/*itl iop*/ tmp_srb->phy_addr = (u_int64_t)(u_int32_t) (phy_addr >> 5); if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS; } else { tmp_srb->phy_addr = phy_addr; } hptiop_free_srb(hba, tmp_srb); hba->srb[i] = tmp_srb; phy_addr += HPT_SRB_MAX_SIZE; } else { device_printf(hba->pcidev, "invalid alignment"); return; } } } static void hptiop_os_message_callback(struct hpt_iop_hba * 
hba, u_int32_t msg) { hba->msg_done = 1; } static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, int target_id) { struct cam_periph *periph = NULL; struct cam_path *path; int status, retval = 0; status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); if (status == CAM_REQ_CMP) { if ((periph = cam_periph_find(path, "da")) != NULL) { if (periph->refcount >= 1) { device_printf(hba->pcidev, "%d ," "target_id=0x%x," "refcount=%d", hba->pciunit, target_id, periph->refcount); retval = -1; } } xpt_free_path(path); } return retval; } static void hptiop_release_resource(struct hpt_iop_hba *hba) { int i; if (hba->path) { struct ccb_setasync ccb; xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = 0; ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); xpt_free_path(hba->path); } if (hba->sim) { xpt_bus_deregister(cam_sim_path(hba->sim)); cam_sim_free(hba->sim, TRUE); } if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { struct hpt_iop_srb *srb = hba->srb[i]; if (srb->dma_map) bus_dmamap_destroy(hba->io_dmat, srb->dma_map); } if (hba->srb_dmat) { bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); bus_dma_tag_destroy(hba->srb_dmat); } if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); if (hba->irq_handle) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); if (hba->irq_res) bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res); if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); if (hba->ioctl_dev) destroy_dev(hba->ioctl_dev); } Index: head/sys/dev/hptmv/entry.c =================================================================== --- head/sys/dev/hptmv/entry.c (revision 227911) +++ head/sys/dev/hptmv/entry.c (revision 227912) @@ -1,3115 +1,3120 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $FreeBSD$ */ + +#include +__FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #if (__FreeBSD_version >= 500000) #include #include #endif #if (__FreeBSD_version >= 500000) #include #include #else #include #include #include #include #endif #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #ifdef DEBUG #ifdef DEBUG_LEVEL int hpt_dbg_level = DEBUG_LEVEL; #else int hpt_dbg_level = 0; #endif #endif #define MV_ERROR printf /* * CAM SIM entry points */ static int hpt_probe (device_t dev); static void launch_worker_thread(void); static int hpt_attach(device_t dev); static int hpt_detach(device_t dev); static int hpt_shutdown(device_t dev); static void hpt_poll(struct cam_sim *sim); static void hpt_intr(void *arg); static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hpt_action(struct cam_sim *sim, union ccb *ccb); static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), - { 0, 0 } + DEVMETHOD_END }; static driver_t hpt_pci_driver = { __str(PROC_DIR_NAME), driver_methods, sizeof(IAL_ADAPTER_T) }; static devclass_t hpt_devclass; #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) __DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev); static void HPTLIBAPI OsSendCommand (_VBUS_ARG union ccb * ccb); static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd); static void ccb_done(union ccb *ccb); static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb); static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void handleEdmaError(_VBUS_ARG PCommand pCmd); static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static int fResetActiveCommands(PVBus _vbus_p); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter); static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_handle_event_disconnect(void *data); static void hptmv_handle_event_connect(void *data); static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel); static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel); static int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical); static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct); static 
MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 IAL_ADAPTER_T *gIal_Adapter = 0; IAL_ADAPTER_T *pCurAdapter = 0; static MV_SATA_CHANNEL gMvSataChannels[MAX_VBUS][MV_SATA_CHANNELS_NUM]; typedef struct st_HPT_DPC { IAL_ADAPTER_T *pAdapter; void (*dpc)(IAL_ADAPTER_T *, void *, UCHAR); void *arg; UCHAR flags; } ST_HPT_DPC; #define MAX_DPC 16 UCHAR DPC_Request_Nums = 0; static ST_HPT_DPC DpcQueue[MAX_DPC]; static int DpcQueue_First=0; static int DpcQueue_Last = 0; char DRIVER_VERSION[] = "v1.16"; #if (__FreeBSD_version >= 500000) static struct mtx driver_lock; intrmask_t lock_driver() { intrmask_t spl = 0; mtx_lock(&driver_lock); return spl; } void unlock_driver(intrmask_t spl) { mtx_unlock(&driver_lock); } #else static int driver_locked = 0; intrmask_t lock_driver() { intrmask_t spl = splcam(); loop: while (driver_locked) tsleep(&driver_locked, PRIBIO, "hptlck", hz); atomic_add_int(&driver_locked, 1); if (driver_locked>1) { atomic_subtract_int(&driver_locked, 1); goto loop; } return spl; } void unlock_driver(intrmask_t spl) { atomic_subtract_int(&driver_locked, 1); if (driver_locked==0) { wakeup(&driver_locked); } splx(spl); } #endif /******************************************************************************* * Name: hptmv_free_channel * * Description: free allocated queues for the given channel * * Parameters: pMvSataAdapter - pointer to the RR18xx controller this * channel is connected to. * channelNum - channel number. * ******************************************************************************/ static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { HPT_ASSERT(channelNum < MV_SATA_CHANNELS_NUM); pAdapter->mvSataAdapter.sataChannel[channelNum] = NULL; } static void failDevice(PVDevice pVDev) { PVBus _vbus_p = pVDev->pVBus; IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; pVDev->u.disk.df_on_line = 0; pVDev->vf_online = 0; if (pVDev->pfnDeviceFailed) CallWhenIdle(_VBUS_P (DPC_PROC)pVDev->pfnDeviceFailed, pVDev); fNotifyGUI(ET_DEVICE_REMOVED, pVDev); #ifndef FOR_DEMO if (pAdapter->ver_601==2 && !pAdapter->beeping) { pAdapter->beeping = 1; BeepOn(pAdapter->mvSataAdapter.adapterIoBaseAddress); set_fail_led(&pAdapter->mvSataAdapter, pVDev->u.disk.mv->channelNumber, 1); } #endif } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel); static void handleEdmaError(_VBUS_ARG PCommand pCmd) { PDevice pDevice = &pCmd->pVDevice->u.disk; MV_SATA_ADAPTER * pSataAdapter = pDevice->mv->mvSataAdapter; if (!pDevice->df_on_line) { KdPrint(("Device is offline")); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } if (pCmd->RetryCount++>5) { hpt_printk(("too many retries on channel(%d)\n", pDevice->mv->channelNumber)); failed: failDevice(pCmd->pVDevice); pCmd->Result = RETURN_IDE_ERROR; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* reset the channel and retry the command */ if (MvSataResetChannel(pSataAdapter, pDevice->mv->channelNumber)) goto failed; fNotifyGUI(ET_DEVICE_ERROR, Map2pVDevice(pDevice)); hpt_printk(("Retry on channel(%d)\n", pDevice->mv->channelNumber)); fDeviceSendCommand(_VBUS_P pCmd); }
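/*
 * handleEdmaError() above is a bounded retry-with-reset loop: a failed
 * command is retried at most five times, the channel is reset before each
 * retry, and the device is failed once the budget is exhausted or the
 * reset itself fails.  The control flow, reduced to a sketch with
 * hypothetical names (not part of this driver):
 */
#if 0
static void
retry_or_fail(struct cmd *cmd)
{
	if (cmd->retries++ > MAX_RETRIES ||
	    reset_channel(cmd->channel) != 0) {
		fail_device(cmd->dev);		/* give up on the device */
		complete_with_error(cmd);
		return;
	}
	resend(cmd);	/* channel is clean again; try the command once more */
}
#endif
/**************************************************************** * Name: hptmv_init_channel * * Description: allocate request and response queues for the EDMA of the * given channel and sets other fields.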
* * Parameters: * pAdapter - pointer to the emulated adapter data structure * channelNum - channel number. * Return: 0 on success, -1 on failure ****************************************************************/ static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_CHANNEL *pMvSataChannel; dma_addr_t req_dma_addr; dma_addr_t rsp_dma_addr; if (channelNum >= MV_SATA_CHANNELS_NUM) { MV_ERROR("RR18xx[%d]: Bad channelNum=%d", pAdapter->mvSataAdapter.adapterId, channelNum); return -1; } pMvSataChannel = &gMvSataChannels[pAdapter->mvSataAdapter.adapterId][channelNum]; pAdapter->mvSataAdapter.sataChannel[channelNum] = pMvSataChannel; pMvSataChannel->channelNumber = channelNum; pMvSataChannel->lba48Address = MV_FALSE; pMvSataChannel->maxReadTransfer = MV_FALSE; pMvSataChannel->requestQueue = (struct mvDmaRequestQueueEntry *) (pAdapter->requestsArrayBaseAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE)); req_dma_addr = pAdapter->requestsArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE); KdPrint(("requestQueue addr is 0x%llX", (HPT_U64)(ULONG_PTR)req_dma_addr)); /* check the 1K alignment of the request queue*/ if (req_dma_addr & 0x3ff) { MV_ERROR("RR18xx[%d]: request queue allocated isn't 1 K aligned," " dma_addr=%llx channel=%d\n", pAdapter->mvSataAdapter.adapterId, (HPT_U64)(ULONG_PTR)req_dma_addr, channelNum); return -1; } pMvSataChannel->requestQueuePciLowAddress = req_dma_addr; pMvSataChannel->requestQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: request queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->requestQueue)); pMvSataChannel->responseQueue = (struct mvDmaResponseQueueEntry *) (pAdapter->responsesArrayBaseAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE)); rsp_dma_addr = pAdapter->responsesArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE); /* check the 256 alignment of the response queue*/ if (rsp_dma_addr & 0xff) { MV_ERROR("RR18xx[%d,%d]: response queue allocated isn't 256 byte " "aligned, dma_addr=%llx\n", pAdapter->mvSataAdapter.adapterId, channelNum, (HPT_U64)(ULONG_PTR)rsp_dma_addr); return -1; } pMvSataChannel->responseQueuePciLowAddress = rsp_dma_addr; pMvSataChannel->responseQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: response queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->responseQueue)); pAdapter->mvChannel[channelNum].online = MV_TRUE; return 0; } /****************************************************************************** * Name: hptmv_parse_identify_results * * Description: this function parses the identify command results, checks * that the connected devices can be accessed by RR18xx EDMA, * and updates the channel structure accordingly. * * Parameters: pMvSataChannel, pointer to the channel data structure. * * Returns: 0 on success, < 0 on failure. * ******************************************************************************/ static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel) { MV_U16 *iden = pMvSataChannel->identifyDevice; /*LBA addressing*/ if (! (iden[IDEN_CAPACITY_1_OFFSET] & 0x200)) { KdPrint(("IAL Error in IDENTIFY info: LBA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "LBA supported")); }
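/*
 * Each check in this function tests one bit of the ATA IDENTIFY DEVICE
 * data: word 49 bit 9 = LBA, word 49 bit 8 = DMA, word 53 bits 1/2 mark
 * the PIO/UDMA fields as valid, and word 88 lists the UDMA modes.  The
 * bit-test pattern, as a sketch (word/bit numbers quoted from the ATA
 * spec, not from this driver's headers):
 */
#if 0
static int
iden_has(const MV_U16 *iden, int word, int bit)
{
	return ((iden[word] >> bit) & 1);	/* 1 if capability advertised */
}
/* e.g. LBA support is iden_has(iden, 49, 9), DMA is iden_has(iden, 49, 8) */
#endif
/*DMA support*/ if (!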
(iden[IDEN_CAPACITY_1_OFFSET] & 0x100)) { KdPrint(("IAL Error in IDENTIFY info: DMA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "DMA supported")); } /* PIO */ if ((iden[IDEN_VALID] & 2) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find PIO mode\n")); return -1; } KdPrint(("%25s - 0x%02x\n", "PIO modes supported", iden[IDEN_PIO_MODE_SPPORTED] & 0xff)); /*UDMA*/ if ((iden[IDEN_VALID] & 4) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find UDMA mode\n")); return -1; } /* 48 bit address */ if ((iden[IDEN_SUPPORTED_COMMANDS2] & 0x400)) { KdPrint(("%25s - %s\n", "LBA48 addressing", "supported")); pMvSataChannel->lba48Address = MV_TRUE; } else { KdPrint(("%25s - %s\n", "LBA48 addressing", "Not supported")); pMvSataChannel->lba48Address = MV_FALSE; } return 0; } static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel) { PVDevice pVDev = &pAdapter->VDevices[channel]; MV_SATA_CHANNEL *pMvSataChannel = pAdapter->mvSataAdapter.sataChannel[channel]; MV_U16_PTR IdentifyData = pMvSataChannel->identifyDevice; pMvSataChannel->outstandingCommands = 0; pVDev->u.disk.mv = pMvSataChannel; pVDev->u.disk.df_on_line = 1; pVDev->u.disk.pVBus = &pAdapter->VBus; pVDev->pVBus = &pAdapter->VBus; #ifdef SUPPORT_48BIT_LBA if (pMvSataChannel->lba48Address == MV_TRUE) pVDev->u.disk.dDeRealCapacity = ((IdentifyData[101]<<16) | IdentifyData[100]) - 1; else #endif if(IdentifyData[53] & 1) { pVDev->u.disk.dDeRealCapacity = (((IdentifyData[58]<<16 | IdentifyData[57]) < (IdentifyData[61]<<16 | IdentifyData[60])) ? (IdentifyData[61]<<16 | IdentifyData[60]) : (IdentifyData[58]<<16 | IdentifyData[57])) - 1; } else pVDev->u.disk.dDeRealCapacity = (IdentifyData[61]<<16 | IdentifyData[60]) - 1; pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxPioModeSupported - MV_ATA_TRANSFER_PIO_0; if (pAdapter->mvChannel[channel].maxUltraDmaModeSupported!=0xFF) { pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxUltraDmaModeSupported - MV_ATA_TRANSFER_UDMA_0 + 8; } } static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plugged) { PVDevice pVDev; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelIndex]; if (!pMvSataChannel) return; if (plugged) { pVDev = &(pAdapter->VDevices[channelIndex]); init_vdev_params(pAdapter, channelIndex); pVDev->VDeviceType = pVDev->u.disk.df_atapi? VD_ATAPI : pVDev->u.disk.df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->VDeviceCapacity = pVDev->u.disk.dDeRealCapacity-SAVE_FOR_RAID_INFO; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; pVDev->vf_online = 1; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { int iMember; for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; } #endif fNotifyGUI(ET_DEVICE_PLUGGED,pVDev); fCheckBootable(pVDev); RegisterVDevice(pVDev); #ifndef FOR_DEMO if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } #endif } else { pVDev = &(pAdapter->VDevices[channelIndex]); failDevice(pVDev); } } static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelNum]; MV_CHANNEL *pChannelInfo = &(pAdapter->mvChannel[channelNum]); MV_U32 udmaMode,pioMode; KdPrint(("RR18xx [%d]: start channel (%d)", pMvSataAdapter->adapterId, channelNum)); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { /* If failed, try again - this is when trying to hardreset a channel */ /* when drive is just spinning up */ StallExec(5000000); /* wait 5 sec before trying again */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Hard reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* identify device*/ if (mvStorageDevATAIdentifyDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform ATA Identify command\n" , pMvSataAdapter->adapterId, channelNum); return -1; } if (hptmv_parse_identify_results(pMvSataChannel)) { MV_ERROR("RR18xx [%d,%d]: Error in parsing ATA Identify message\n" , pMvSataAdapter->adapterId, channelNum); return -1; } /* mvStorageDevATASetFeatures */ /* Disable 8 bit PIO in case CFA enabled */ if (pMvSataChannel->identifyDevice[86] & 4) { KdPrint(("RR18xx [%d]: Disable 8 bit PIO (CFA enabled) \n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_8_BIT_PIO, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures" " failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Write cache */ #ifdef ENABLE_WRITE_CACHE if (pMvSataChannel->identifyDevice[82] & 0x20) { if (!(pMvSataChannel->identifyDevice[85] & 0x20)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel %d, write cache enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, write cache not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else /* disable write cache */ { if (pMvSataChannel->identifyDevice[85] & 0x20) { KdPrint(("RR18xx [%d]: channel =%d, disable write cache\n", pMvSataAdapter->adapterId, channelNum)); 
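/*
 * Note the word pairing in these feature checks: IDENTIFY word 82
 * advertises that a feature is *supported*, while word 85 reports whether
 * it is *currently enabled* (bit 0x20 = write cache, bit 0x40 = read
 * look-ahead), so SET FEATURES is only issued when the current state
 * differs from the desired one.  A minimal sketch of that test, with
 * hypothetical names (word/bit values quoted from the ATA spec):
 */
#if 0
static int
wcache_needs_toggle(const MV_U16 *iden, int want_on)
{
	int supported = (iden[82] & 0x20) != 0;	/* word 82: supported */
	int enabled = (iden[85] & 0x20) != 0;	/* word 85: enabled now */

	return (supported && enabled != want_on);
}
#endif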
if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, write cache disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif /* Set transfer mode */ KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_SLOW\n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 1) { pioMode = MV_ATA_TRANSFER_PIO_4; } else if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 2) { pioMode = MV_ATA_TRANSFER_PIO_3; } else { MV_ERROR("IAL Error in IDENTIFY info: PIO modes 3 and 4 not supported\n"); pioMode = MV_ATA_TRANSFER_PIO_SLOW; } KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_4\n", pMvSataAdapter->adapterId)); pAdapter->mvChannel[channelNum].maxPioModeSupported = pioMode; if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pioMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } udmaMode = MV_ATA_TRANSFER_UDMA_0; if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x40) { udmaMode = MV_ATA_TRANSFER_UDMA_6; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x20) { udmaMode = MV_ATA_TRANSFER_UDMA_5; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x10) { udmaMode = MV_ATA_TRANSFER_UDMA_4; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 8) { udmaMode = MV_ATA_TRANSFER_UDMA_3; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 4) { udmaMode = MV_ATA_TRANSFER_UDMA_2; } KdPrint(("RR18xx [%d] Set transfer mode XFER_UDMA_%d\n", pMvSataAdapter->adapterId, udmaMode & 0xf)); pChannelInfo->maxUltraDmaModeSupported = udmaMode; /*if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, udmaMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; }*/ if (pChannelInfo->maxUltraDmaModeSupported == 0xFF) return TRUE; else do { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pChannelInfo->maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) { if (pChannelInfo->maxUltraDmaModeSupported > MV_ATA_TRANSFER_UDMA_0) { if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_REG_WRITE_BYTE(pMvSataAdapter->adapterIoBaseAddress, pMvSataChannel->eDmaRegsOffset + 0x11c, /* command reg */ MV_ATA_COMMAND_IDLE_IMMEDIATE); mvMicroSecondsDelay(10000); mvSataChannelHardReset(pMvSataAdapter, channelNum); if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; } if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; pChannelInfo->maxUltraDmaModeSupported--; continue; } else return FALSE; } break; }while (1); /* Read look ahead */ #ifdef ENABLE_READ_AHEAD if (pMvSataChannel->identifyDevice[82] & 0x40) { if (!(pMvSataChannel->identifyDevice[85] & 0x40)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] 
channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, read look ahead enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, Read Look Ahead not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else { if (pMvSataChannel->identifyDevice[86] & 0x20) { KdPrint(("RR18xx [%d]:channel %d, disable read look ahead\n", pMvSataAdapter->adapterId, channelNum)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]:channel %d: ATA Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]:channel %d, read look ahead disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif { KdPrint(("RR18xx [%d]: channel %d config EDMA, Non Queued Mode\n", pMvSataAdapter->adapterId, channelNum)); if (mvSataConfigEdmaMode(pMvSataAdapter, channelNum, MV_EDMA_MODE_NOT_QUEUED, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d Error: mvSataConfigEdmaMode failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d] Failed to enable DMA, channel=%d\n", pMvSataAdapter->adapterId, channelNum); return -1; } MV_ERROR("RR18xx [%d,%d]: channel started successfully\n", pMvSataAdapter->adapterId, channelNum); #ifndef FOR_DEMO set_fail_led(pMvSataAdapter, channelNum, 0); #endif return 0; } static void hptmv_handle_event(void * data, int flag) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)data; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_U8 channelIndex; /* mvOsSemTake(&pMvSataAdapter->semaphore); */ for (channelIndex = 0; channelIndex < MV_SATA_CHANNELS_NUM; channelIndex++) { switch(pAdapter->sataEvents[channelIndex]) { case SATA_EVENT_CHANNEL_CONNECTED: /* Handle only connects */ if (flag == 1) break; KdPrint(("RR18xx [%d,%d]: new device connected\n", pMvSataAdapter->adapterId, channelIndex)); hptmv_init_channel(pAdapter, channelIndex); if (mvSataConfigureChannel( pMvSataAdapter, channelIndex) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to configure\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { /*mvSataChannelHardReset(pMvSataAdapter, channel);*/ if (start_channel( pAdapter, channelIndex)) { MV_ERROR("RR18xx [%d,%d]Failed to start channel\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { device_change(pAdapter, channelIndex, TRUE); } } pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_CHANNEL_DISCONNECTED: /* Handle only disconnects */ if (flag == 0) break; KdPrint(("RR18xx [%d,%d]: device disconnected\n", pMvSataAdapter->adapterId, channelIndex)); /* Flush pending commands */ if(pMvSataAdapter->sataChannel[channelIndex]) { _VBUS_INST(&pAdapter->VBus) mvSataFlushDmaQueue (pMvSataAdapter, channelIndex, MV_FLUSH_TYPE_CALLBACK); CheckPendingCall(_VBUS_P0); mvSataRemoveChannel(pMvSataAdapter,channelIndex); hptmv_free_channel(pAdapter, channelIndex); pMvSataAdapter->sataChannel[channelIndex] = NULL; KdPrint(("RR18xx [%d,%d]: channel removed\n", pMvSataAdapter->adapterId, channelIndex)); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); } else { KdPrint(("RR18xx [%d,%d]: channel already removed!!\n", pMvSataAdapter->adapterId, channelIndex)); } pAdapter->sataEvents[channelIndex] = 
SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_NO_CHANGE: break; default: break; } } /* mvOsSemRelease(&pMvSataAdapter->semaphore); */ } #define EVENT_CONNECT 1 #define EVENT_DISCONNECT 0 static void hptmv_handle_event_connect(void *data) { hptmv_handle_event (data, 0); } static void hptmv_handle_event_disconnect(void *data) { hptmv_handle_event (data, 1); } static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2) { IAL_ADAPTER_T *pAdapter = pMvSataAdapter->IALData; switch (eventType) { case MV_EVENT_TYPE_SATA_CABLE: { MV_U8 channel = param2; if (param1 == EVENT_CONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_CONNECTED; KdPrint(("RR18xx [%d,%d]: device connected event received\n", pMvSataAdapter->adapterId, channel)); /* Delete previous timers (if multiple drives are connected at the same time) */ pAdapter->event_timer_connect = timeout(hptmv_handle_event_connect, pAdapter, 10*hz); } else if (param1 == EVENT_DISCONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_DISCONNECTED; KdPrint(("RR18xx [%d,%d]: device disconnected event received\n", pMvSataAdapter->adapterId, channel)); device_change(pAdapter, channel, FALSE); /* Delete previous timers (if multiple drives are disconnected at the same time) */ /*pAdapter->event_timer_disconnect = timeout(hptmv_handle_event_disconnect, pAdapter, 10*hz); */ /*It is not necessary to wait, handle it directly*/ hptmv_handle_event_disconnect(pAdapter); } else { MV_ERROR("RR18xx: illegal value for param1(%d) at " "connect/disconnect event, host=%d\n", param1, pMvSataAdapter->adapterId); } } break; case MV_EVENT_TYPE_ADAPTER_ERROR: KdPrint(("RR18xx: DEVICE error event received, pci cause " "reg=%x, don't know how to handle this\n", param1)); return MV_TRUE; default: MV_ERROR("RR18xx[%d]: unknown event type (%d)\n", pMvSataAdapter->adapterId, eventType); return MV_FALSE; } return MV_TRUE; } static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter) { pAdapter->requestsArrayBaseAddr = (MV_U8 *)contigmalloc(REQUESTS_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->requestsArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA request" " queues\n", pAdapter->mvSataAdapter.adapterId); return -1; } pAdapter->requestsArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->requestsArrayBaseAddr); pAdapter->requestsArrayBaseAlignedAddr = pAdapter->requestsArrayBaseAddr; pAdapter->requestsArrayBaseAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->requestsArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1)); pAdapter->requestsArrayBaseDmaAlignedAddr = pAdapter->requestsArrayBaseDmaAddr; pAdapter->requestsArrayBaseDmaAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1); if ((pAdapter->requestsArrayBaseDmaAlignedAddr - pAdapter->requestsArrayBaseDmaAddr) != (pAdapter->requestsArrayBaseAlignedAddr - pAdapter->requestsArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Request Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); return -1; }
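/*
 * The queue setup above over-allocates the contiguous buffer by one queue
 * size and then rounds the base up with a power-of-two mask; the same
 * offset is applied to both the virtual and the DMA address, and the two
 * offsets are cross-checked afterwards.  The round-up arithmetic, as a
 * sketch (adding the full 'align' rather than 'align - 1' always advances
 * past the base, which is what the over-allocation pays for):
 */
#if 0
/* Round addr up to the next multiple of align; align must be 2^n. */
static ULONG_PTR
align_up(ULONG_PTR addr, ULONG_PTR align)
{
	return ((addr + align) & ~(align - 1));
}
#endif
/* response queues */ pAdapter->responsesArrayBaseAddr = (MV_U8 *)contigmalloc(RESPONSES_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->responsesArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response" "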
queues\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr); pAdapter->responsesArrayBaseAlignedAddr = pAdapter->responsesArrayBaseAddr; pAdapter->responsesArrayBaseAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->responsesArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1)); pAdapter->responsesArrayBaseDmaAlignedAddr = pAdapter->responsesArrayBaseDmaAddr; pAdapter->responsesArrayBaseDmaAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1); if ((pAdapter->responsesArrayBaseDmaAlignedAddr - pAdapter->responsesArrayBaseDmaAddr) != (pAdapter->responsesArrayBaseAlignedAddr - pAdapter->responsesArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Response Quueues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } return 0; } static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter) { contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); } static PVOID AllocatePRDTable(IAL_ADAPTER_T *pAdapter) { PVOID ret; if (pAdapter->pFreePRDLink) { KdPrint(("pAdapter->pFreePRDLink:%p\n",pAdapter->pFreePRDLink)); ret = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = *(void**)ret; return ret; } return NULL; } static void FreePRDTable(IAL_ADAPTER_T *pAdapter, PVOID PRDTable) { *(void**)PRDTable = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = PRDTable; } extern PVDevice fGetFirstChild(PVDevice pLogical); extern void fResetBootMark(PVDevice pLogical); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter) { PVDevice pPhysical, pLogical; PVBus pVBus; int i,j; for(i=0;iVDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; if (pLogical->vf_online==0) { pPhysical->vf_bootmark = pLogical->vf_bootmark = 0; continue; } if (pLogical->VDeviceType==VD_SPARE || pPhysical!=fGetFirstChild(pLogical)) continue; pVBus = &pAdapter->VBus; if(pVBus) { j=0; while(jpVDevice[j]) j++; if(jpVDevice[j] = pLogical; pLogical->pVBus = pVBus; if (j>0 && pLogical->vf_bootmark) { if (pVBus->pVDevice[0]->vf_bootmark) { fResetBootMark(pLogical); } else { do { pVBus->pVDevice[j] = pVBus->pVDevice[j-1]; } while (--j); pVBus->pVDevice[0] = pLogical; } } } } } } PVDevice GetSpareDisk(_VBUS_ARG PVDevice pArray) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pArray->pVBus->OsExt; LBA_T capacity = LongDiv(pArray->VDeviceCapacity, pArray->u.array.bArnMember-1); LBA_T thiscap, maxcap = MAX_LBA_T; PVDevice pVDevice, pFind = NULL; int i; for(i=0;iVDevices[i]; if(!pVDevice) continue; thiscap = pArray->vf_format_v2? 
pVDevice->u.disk.dDeRealCapacity : pVDevice->VDeviceCapacity; /* find the smallest usable spare disk */ if (pVDevice->VDeviceType==VD_SPARE && pVDevice->u.disk.df_on_line && thiscap < maxcap && thiscap >= capacity) { maxcap = pVDevice->VDeviceCapacity; pFind = pVDevice; } } return pFind; } /****************************************************************** * IO ATA Command *******************************************************************/ int HPTLIBAPI fDeReadWrite(PDevice pDev, ULONG Lba, UCHAR Cmd, void *tmpBuffer) { return mvReadWrite(pDev->mv, Lba, Cmd, tmpBuffer); } void HPTLIBAPI fDeSelectMode(PDevice pDev, UCHAR NewMode) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; UCHAR mvMode; /* 508x don't use MW-DMA? */ if (NewMode>4 && NewMode<8) NewMode = 4; pDev->bDeModeSetting = NewMode; if (NewMode<=4) mvMode = MV_ATA_TRANSFER_PIO_0 + NewMode; else mvMode = MV_ATA_TRANSFER_UDMA_0 + (NewMode-8); /*To fix 88i8030 bug*/ if (mvMode > MV_ATA_TRANSFER_UDMA_0 && mvMode < MV_ATA_TRANSFER_UDMA_4) mvMode = MV_ATA_TRANSFER_UDMA_0; mvSataDisableChannelDma(pSataAdapter, channelIndex); /* Flush pending commands */ mvSataFlushDmaQueue (pSataAdapter, channelIndex, MV_FLUSH_TYPE_NONE); if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_TRANSFER, mvMode, 0, 0, 0) == MV_FALSE) { KdPrint(("channel %d: Set Features failed\n", channelIndex)); } /* Enable EDMA */ if (mvSataEnableChannelDma(pSataAdapter, channelIndex) == MV_FALSE) KdPrint(("Failed to enable DMA, channel=%d", channelIndex)); } int HPTLIBAPI fDeSetTCQ(PDevice pDev, int enable, int depth) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if (enable) { if (pSataChannel->queuedDMA == MV_EDMA_MODE_NOT_QUEUED && (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2))) { UCHAR depth = ((pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH]) & 0x1f) + 1; channelInfo->queueDepth = (depth==32)? 
31 : depth; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_QUEUED, depth); ret = 1; } } else { if (pSataChannel->queuedDMA != MV_EDMA_MODE_NOT_QUEUED) { channelInfo->queueDepth = 2; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_NOT_QUEUED, 0); ret = 1; } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetNCQ(PDevice pDev, int enable, int depth) { return 0; } int HPTLIBAPI fDeSetWriteCache(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x20))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetReadAhead(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x40))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } #ifdef SUPPORT_ARRAY #define IdeRegisterVDevice fCheckArray #else void IdeRegisterVDevice(PDevice pDev) { PVDevice pVDev = Map2pVDevice(pDev); pVDev->VDeviceType = pDev->df_atapi? VD_ATAPI : pDev->df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->vf_online = 1; pVDev->VDeviceCapacity = pDev->dDeRealCapacity; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; } #endif static __inline PBUS_DMAMAP dmamap_get(struct IALAdapter * pAdapter) { PBUS_DMAMAP p = pAdapter->pbus_dmamap_list; if (p) pAdapter->pbus_dmamap_list = p->next; return p; } static __inline void dmamap_put(PBUS_DMAMAP p) { p->next = p->pAdapter->pbus_dmamap_list; p->pAdapter->pbus_dmamap_list = p; } /* Since an mtx cannot be initialized statically at its declaration, do the final initialization of the global mtx here. */ #if __FreeBSD_version >= 500000 #define override_kernel_driver() static void hpt_init(void *dummy) { override_kernel_driver(); mtx_init(&driver_lock, "hptsleeplock", NULL, MTX_DEF); } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); #endif static int num_adapters = 0; static int init_adapter(IAL_ADAPTER_T *pAdapter) { PVBus _vbus_p = &pAdapter->VBus; MV_SATA_ADAPTER *pMvSataAdapter; int i, channel, rid; PVDevice pVDev; intrmask_t oldspl = lock_driver(); pAdapter->next = 0; if(gIal_Adapter == 0){ gIal_Adapter = pAdapter; pCurAdapter = gIal_Adapter; } else { pCurAdapter->next = pAdapter; pCurAdapter = pAdapter; } pAdapter->outstandingCommands = 0; pMvSataAdapter = &(pAdapter->mvSataAdapter); _vbus_p->OsExt = (void *)pAdapter; pMvSataAdapter->IALData = pAdapter; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (MAX_SG_DESCRIPTORS-1), /* maxsize */ MAX_SG_DESCRIPTORS, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &driver_lock, /* lockfuncarg */ #endif &pAdapter->io_dma_parent /* tag */)) { unlock_driver(oldspl); return ENXIO; } if (hptmv_allocate_edma_queues(pAdapter)) { MV_ERROR("RR18xx: Failed to allocate memory for EDMA queues\n"); unlock_driver(oldspl); return ENOMEM; } /* also map EPROM address */ rid = 0x10; if (!(pAdapter->mem_res = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, 0, ~0, MV_SATA_PCI_BAR0_SPACE_SIZE+0x40000, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { MV_ERROR("RR18xx: Failed to remap memory space\n"); hptmv_free_edma_queues(pAdapter); unlock_driver(oldspl); return ENXIO; } else { KdPrint(("RR18xx: io base address 0x%p\n", pMvSataAdapter->adapterIoBaseAddress)); } pMvSataAdapter->adapterId = num_adapters++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = pci_read_config(pAdapter->hpt_dev, PCIR_REVID, 1); pMvSataAdapter->pciConfigDeviceId = pci_get_device(pAdapter->hpt_dev); /* init RR18xx */ pMvSataAdapter->intCoalThre[0] = 1; pMvSataAdapter->intCoalThre[1] = 1; pMvSataAdapter->intTimeThre[0] = 1; pMvSataAdapter->intTimeThre[1] = 1; pMvSataAdapter->pciCommand = 0x0107E371; pMvSataAdapter->pciSerrMask = 0xd77fe6ul; pMvSataAdapter->pciInterruptMask = 0xd77fe6ul; pMvSataAdapter->mvSataEventNotify = hptmv_event_notify; if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { MV_ERROR("RR18xx[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); unregister: bus_release_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, rid, pAdapter->mem_res); hptmv_free_edma_queues(pAdapter); unlock_driver(oldspl); return ENXIO; } pAdapter->ver_601 = pMvSataAdapter->pcbVersion; #ifndef FOR_DEMO set_fail_leds(pMvSataAdapter, 0); #endif
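/*
 * Everything allocated below (command blocks, bus_dma maps, PRD tables)
 * is handed out later from simple free lists that are filled once here
 * at attach time: carve one large allocation into fixed-size pieces and
 * push each piece onto its list.  For the PRD tables the link pointer is
 * stored inside the free block itself (see FreePRDTable() above), so a
 * free table costs no extra memory.  A sketch of the pre-population loop:
 */
#if 0
static void
prepopulate_prd_pool(IAL_ADAPTER_T *pAdapter, PUCHAR base, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		FreePRDTable(pAdapter, base);	/* push onto pFreePRDLink */
		base += PRD_ENTRIES_SIZE;
	}
}
#endif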
/* setup command blocks */ KdPrint(("Allocate command blocks\n")); _vbus_(pFreeCommands) = 0; pAdapter->pCommandBlocks = malloc(sizeof(struct _Command) * MAX_COMMAND_BLOCKS_FOR_EACH_VBUS, M_DEVBUF, M_NOWAIT); KdPrint(("pCommandBlocks:%p\n",pAdapter->pCommandBlocks)); if (!pAdapter->pCommandBlocks) { MV_ERROR("insufficient memory\n"); goto unregister; } for (i=0; i<MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) { FreeCommand(_VBUS_P &(pAdapter->pCommandBlocks[i])); } /*Set up the bus_dmamap*/ pAdapter->pbus_dmamap = (PBUS_DMAMAP)malloc (sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM, M_DEVBUF, M_NOWAIT); if(!pAdapter->pbus_dmamap) { MV_ERROR("insufficient memory\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); goto unregister; } memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; for (i=0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); if(bus_dmamap_create(pAdapter->io_dma_parent, 0, &pmap->dma_map)) { MV_ERROR("Cannot allocate dma map\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); free(pAdapter->pbus_dmamap, M_DEVBUF); goto unregister; } } /* setup PRD Tables */ KdPrint(("Allocate PRD Tables\n")); pAdapter->pFreePRDLink = 0; pAdapter->prdTableAddr = (PUCHAR)contigmalloc( (PRD_ENTRIES_SIZE*PRD_TABLES_FOR_VBUS + 32), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); KdPrint(("prdTableAddr:%p\n",pAdapter->prdTableAddr)); if (!pAdapter->prdTableAddr) { MV_ERROR("insufficient PRD Tables\n"); goto unregister; } pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; for (i=0; i<PRD_TABLES_FOR_VBUS; i++) { /* KdPrint(("i=%d, pAdapter->pFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); PRDTable += PRD_ENTRIES_SIZE; } } /* enable the adapter interrupts */ /* configure and start the connected channels*/ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pAdapter->mvChannel[channel].online = MV_FALSE; if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_TRUE) { KdPrint(("RR18xx[%d]: channel %d is connected\n", pMvSataAdapter->adapterId, channel)); if (hptmv_init_channel(pAdapter, channel) == 0) { if (mvSataConfigureChannel(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx[%d]: Failed to configure channel" " %d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } else { if (start_channel(pAdapter, channel)) { MV_ERROR("RR18xx[%d]: Failed to start channel," " channel=%d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } pAdapter->mvChannel[channel].online = MV_TRUE; /* mvSataChannelSetEdmaLoopBackMode(pMvSataAdapter, channel, MV_TRUE);*/ } } } KdPrint(("pAdapter->mvChannel[channel].online:%x, channel:%d\n", pAdapter->mvChannel[channel].online, channel)); } #ifdef SUPPORT_ARRAY for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } #endif KdPrint(("Initialize Devices\n")); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel) { init_vdev_params(pAdapter, channel); IdeRegisterVDevice(&pAdapter->VDevices[channel].u.disk); } } #ifdef SUPPORT_ARRAY CheckArrayCritical(_VBUS_P0); #endif _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); for (channel=0;channel<MV_SATA_CHANNELS_NUM;channel++) { pVDev = _vbus_p->pVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); } #if defined(SUPPORT_ARRAY) && defined(_RAID5N_) init_raid5_memory(_VBUS_P0); _vbus_(r5).enable_write_back = 1;
printf("RR18xx: RAID5 write-back %s\n", _vbus_(r5).enable_write_back? "enabled" : "disabled"); #endif mvSataUnmaskAdapterInterrupt(pMvSataAdapter); unlock_driver(oldspl); return 0; } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pMvSataAdapter->IALData; mvSataDisableChannelDma(pMvSataAdapter, channel); /* Flush pending commands */ mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channel)== MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Hard reser the SATA channel\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Connect Device\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; }else { MV_ERROR("channel %d: perform recalibrate command", channel); if (!mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, MV_NON_UDMA_PROTOCOL_NON_DATA, MV_FALSE, NULL, /* pBuffer*/ 0, /* count */ 0, /*features*/ /* sectorCount */ 0, 0, /* lbaLow */ 0, /* lbaMid */ /* lbaHigh */ 0, 0, /* device */ /* command */ 0x10)) MV_ERROR("channel %d: recalibrate failed", channel); /* Set transfer mode */ if((mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxPioModeSupported, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) ) { MV_ERROR("channel %d: Set Features failed", channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("Failed to enable DMA, channel=%d", channel); hptmv_free_channel(pAdapter, channel); return -1; } } return 0; } static int fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } return 0; } void fCompleteAllCommandsSynchronously(PVBus _vbus_p) { UINT cont; ULONG ticks = 0; MV_U8 channel; MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; do { check_cmds: cont = 0; CheckPendingCall(_VBUS_P0); #ifdef _RAID5N_ dataxfer_poll(); xor_poll(); #endif for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { while (pMvSataChannel->outstandingCommands) { if (!mvSataInterruptServiceRoutine(pMvSataAdapter)) { StallExec(1000); if (ticks++ > 3000) { MvSataResetChannel(pMvSataAdapter,channel); goto check_cmds; } } else ticks = 0; } cont = 1; } } } while (cont); } void fResetVBus(_VBUS_ARG0) { 
KdPrint(("fMvResetBus(%p)", _vbus_p)); /* some commands may already finished. */ CheckPendingCall(_VBUS_P0); fResetActiveCommands(_vbus_p); /* * the other pending commands may still be finished successfully. */ fCompleteAllCommandsSynchronously(_vbus_p); /* Now there should be no pending commands. No more action needed. */ CheckIdleCall(_VBUS_P0); KdPrint(("fMvResetBus() done")); } /*No rescan function*/ void fRescanAllDevice(_VBUS_ARG0) { } static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct) { PCommand pCmd = (PCommand) commandId; _VBUS_INST(pCmd->pVDevice->pVBus) if (pCmd->uScratch.sata_param.prdAddr) FreePRDTable(pMvSataAdapter->IALData,pCmd->uScratch.sata_param.prdAddr); switch (comp_type) { case MV_COMPLETION_TYPE_NORMAL: pCmd->Result = RETURN_SUCCESS; break; case MV_COMPLETION_TYPE_ABORT: pCmd->Result = RETURN_BUS_RESET; break; case MV_COMPLETION_TYPE_ERROR: MV_ERROR("IAL: COMPLETION ERROR, adapter %d, channel %d, flags=%x\n", pMvSataAdapter->adapterId, channelNum, responseFlags); if (responseFlags & 4) { MV_ERROR("ATA regs: error %x, sector count %x, LBA low %x, LBA mid %x," " LBA high %x, device %x, status %x\n", registerStruct->errorRegister, registerStruct->sectorCountRegister, registerStruct->lbaLowRegister, registerStruct->lbaMidRegister, registerStruct->lbaHighRegister, registerStruct->deviceRegister, registerStruct->statusRegister); } /*We can't do handleEdmaError directly here, because CommandCompletionCB is called by * mv's ISR, if we retry the command, than the internel data structure may be destroyed*/ pCmd->uScratch.sata_param.responseFlags = responseFlags; pCmd->uScratch.sata_param.bIdeStatus = registerStruct->statusRegister; pCmd->uScratch.sata_param.errorRegister = registerStruct->errorRegister; pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)handleEdmaError,pCmd); return TRUE; default: MV_ERROR(" Unknown completion type (%d)\n", comp_type); return MV_FALSE; } if (pCmd->uCmd.Ide.Command == IDE_COMMAND_VERIFY && pCmd->uScratch.sata_param.cmd_priv > 1) { pCmd->uScratch.sata_param.cmd_priv --; return TRUE; } pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return TRUE; } void fDeviceSendCommand(_VBUS_ARG PCommand pCmd) { MV_SATA_EDMA_PRD_ENTRY *pPRDTable = 0; MV_SATA_ADAPTER *pMvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; PVDevice pVDevice = pCmd->pVDevice; PDevice pDevice = &pVDevice->u.disk; LBA_T Lba = pCmd->uCmd.Ide.Lba; USHORT nSector = pCmd->uCmd.Ide.nSectors; MV_QUEUE_COMMAND_RESULT result; MV_QUEUE_COMMAND_INFO commandInfo; MV_UDMA_COMMAND_PARAMS *pUdmaParams = &commandInfo.commandParams.udmaCommand; MV_NONE_UDMA_COMMAND_PARAMS *pNoUdmaParams = &commandInfo.commandParams.NoneUdmaCommand; MV_BOOLEAN is48bit; MV_U8 channel; int i=0; DECLARE_BUFFER(FPSCAT_GATH, tmpSg); if (!pDevice->df_on_line) { MV_ERROR("Device is offline"); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } pDevice->HeadPosition = pCmd->uCmd.Ide.Lba + pCmd->uCmd.Ide.nSectors; pMvSataChannel = pDevice->mv; pMvSataAdapter = pMvSataChannel->mvSataAdapter; channel = pMvSataChannel->channelNumber; /* old RAID0 has hidden lba. Remember to clear dDeHiddenLba when delete array! 
*/ Lba += pDevice->dDeHiddenLba; /* check LBA */ if (Lba+nSector-1 > pDevice->dDeRealCapacity) { pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* * always use 48bit LBA if drive supports it. * Some Seagate drives report error if you use a 28-bit command * to access sector 0xfffffff. */ is48bit = pMvSataChannel->lba48Address; switch (pCmd->uCmd.Ide.Command) { case IDE_COMMAND_READ: case IDE_COMMAND_WRITE: if (pDevice->bDeModeSetting<8) goto pio; commandInfo.type = MV_QUEUED_COMMAND_TYPE_UDMA; pUdmaParams->isEXT = is48bit; pUdmaParams->numOfSectors = nSector; pUdmaParams->lowLBAAddress = Lba; pUdmaParams->highLBAAddress = 0; pUdmaParams->prdHighAddr = 0; pUdmaParams->callBack = CommandCompletionCB; pUdmaParams->commandId = (MV_VOID_PTR )pCmd; if(pCmd->uCmd.Ide.Command == IDE_COMMAND_READ) pUdmaParams->readWrite = MV_UDMA_TYPE_READ; else pUdmaParams->readWrite = MV_UDMA_TYPE_WRITE; if (pCmd->pSgTable && pCmd->cf_physical_sg) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 0)) { pio: mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); if (pCmd->pSgTable && pCmd->cf_physical_sg==0) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 1)) { pCmd->Result = RETURN_NEED_LOGICAL_SG; goto finish_cmd; } } do { ULONG size = tmpSg->wSgSize? tmpSg->wSgSize : 0x10000; ULONG_PTR addr = tmpSg->dSgAddress; if (size & 0x1ff) { pCmd->Result = RETURN_INVALID_REQUEST; goto finish_cmd; } if (mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, (pCmd->cf_data_out)?MV_NON_UDMA_PROTOCOL_PIO_DATA_OUT:MV_NON_UDMA_PROTOCOL_PIO_DATA_IN, is48bit, (MV_U16_PTR)addr, size >> 1, /* count */ 0, /* features N/A */ (MV_U16)(size>>9), /*sector count*/ (MV_U16)( (is48bit? (MV_U16)((Lba >> 16) & 0xFF00) : 0 ) | (UCHAR)(Lba & 0xFF) ), /*lbalow*/ (MV_U16)((Lba >> 8) & 0xFF), /* lbaMid */ (MV_U16)((Lba >> 16) & 0xFF),/* lbaHigh */ (MV_U8)(0x40 | (is48bit ? 0 : (UCHAR)(Lba >> 24) & 0xFF )),/* device */ (MV_U8)(is48bit ? (pCmd->cf_data_in?IDE_COMMAND_READ_EXT:IDE_COMMAND_WRITE_EXT):pCmd->uCmd.Ide.Command) )==MV_FALSE) { pCmd->Result = RETURN_IDE_ERROR; goto finish_cmd; } Lba += size>>9; if(Lba & 0xF0000000) is48bit = MV_TRUE; } while ((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pCmd->Result = RETURN_SUCCESS; finish_cmd: mvSataEnableChannelDma(pMvSataAdapter,channel); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } } pPRDTable = (MV_SATA_EDMA_PRD_ENTRY *) AllocatePRDTable(pMvSataAdapter->IALData); KdPrint(("pPRDTable:%p\n",pPRDTable)); if (!pPRDTable) { pCmd->Result = RETURN_DEVICE_BUSY; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); HPT_ASSERT(0); return; } do{ pPRDTable[i].highBaseAddr = (sizeof(tmpSg->dSgAddress)>4 ? 
(MV_U32)(tmpSg->dSgAddress>>32) : 0); pPRDTable[i].flags = (MV_U16)tmpSg->wSgFlag; pPRDTable[i].byteCount = (MV_U16)tmpSg->wSgSize; pPRDTable[i].lowBaseAddr = (MV_U32)tmpSg->dSgAddress; pPRDTable[i].reserved = 0; i++; }while((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pUdmaParams->prdLowAddr = (ULONG)fOsPhysicalAddress(pPRDTable); if ((pUdmaParams->numOfSectors == 256) && (pMvSataChannel->lba48Address == MV_FALSE)) { pUdmaParams->numOfSectors = 0; } pCmd->uScratch.sata_param.prdAddr = (PVOID)pPRDTable; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK) { queue_failed: switch (result) { case MV_QUEUE_COMMAND_RESULT_BAD_LBA_ADDRESS: MV_ERROR("IAL Error: Edma Queue command failed. Bad LBA " "LBA[31:0](0x%08x)\n", pUdmaParams->lowLBAAddress); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_QUEUED_MODE_DISABLED: MV_ERROR("IAL Error: Edma Queue command failed. EDMA" " disabled adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); mvSataEnableChannelDma(pMvSataAdapter,channel); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_FULL: MV_ERROR("IAL Error: Edma Queue command failed. Queue is" " Full adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); pCmd->Result = RETURN_DEVICE_BUSY; break; case MV_QUEUE_COMMAND_RESULT_BAD_PARAMS: MV_ERROR("IAL Error: Edma Queue command failed. (Bad " "Params), pMvSataAdapter: %p, pSataChannel: %p.\n", pMvSataAdapter, pMvSataAdapter->sataChannel[channel]); pCmd->Result = RETURN_IDE_ERROR; break; default: MV_ERROR("IAL Error: Bad result value (%d) from queue" " command\n", result); pCmd->Result = RETURN_IDE_ERROR; } if(pPRDTable) FreePRDTable(pMvSataAdapter->IALData,pPRDTable); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); } pDevice->QueueLength++; return; case IDE_COMMAND_VERIFY: commandInfo.type = MV_QUEUED_COMMAND_TYPE_NONE_UDMA; pNoUdmaParams->bufPtr = NULL; pNoUdmaParams->callBack = CommandCompletionCB; pNoUdmaParams->commandId = (MV_VOID_PTR)pCmd; pNoUdmaParams->count = 0; pNoUdmaParams->features = 0; pNoUdmaParams->protocolType = MV_NON_UDMA_PROTOCOL_NON_DATA; pCmd->uScratch.sata_param.cmd_priv = 1; if (pMvSataChannel->lba48Address == MV_TRUE){ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS_EXT; pNoUdmaParams->isEXT = MV_TRUE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(((Lba & 0xff000000) >> 16)| (Lba & 0xff)); pNoUdmaParams->sectorCount = nSector; pNoUdmaParams->device = 0x40; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } return; } else{ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS; pNoUdmaParams->isEXT = MV_FALSE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(Lba & 0xff); pNoUdmaParams->sectorCount = 0xff & nSector; pNoUdmaParams->device = (MV_U8)(0x40 | ((Lba & 0xf000000) >> 24)); pNoUdmaParams->callBack = CommandCompletionCB; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); /*FIXME: how about the commands already queued? 
but Marvell also forgets to consider this*/ if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } } break; default: pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); break; } } /********************************************************** * * Probe the host adapter. * **********************************************************/ static int hpt_probe(device_t dev) { if ((pci_get_vendor(dev) == MV_SATA_VENDOR_ID) && (pci_get_device(dev) == MV_SATA_DEVICE_ID_5081 #ifdef FOR_DEMO || pci_get_device(dev) == MV_SATA_DEVICE_ID_5080 #endif )) { KdPrintI((CONTROLLER_NAME " found\n")); device_set_desc(dev, CONTROLLER_NAME); return 0; } else return(ENXIO); } /*********************************************************** * * Auto configuration: attach and init a host adapter. * ***********************************************************/ static int hpt_attach(device_t dev) { IAL_ADAPTER_T * pAdapter = device_get_softc(dev); int rid; union ccb *ccb; struct cam_devq *devq; struct cam_sim *hpt_vsim; printf("%s Version %s \n", DRIVER_NAME, DRIVER_VERSION); if (!pAdapter) { pAdapter = (IAL_ADAPTER_T *)malloc(sizeof (IAL_ADAPTER_T), M_DEVBUF, M_NOWAIT); #if __FreeBSD_version > 410000 device_set_softc(dev, (void *)pAdapter); #else device_set_driver(dev, (driver_t *)pAdapter); #endif } if (!pAdapter) return (ENOMEM); bzero(pAdapter, sizeof(IAL_ADAPTER_T)); pAdapter->hpt_dev = dev; rid = init_adapter(pAdapter); if (rid) return rid; rid = 0; if ((pAdapter->hpt_irq = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { hpt_printk(("can't allocate interrupt\n")); return(ENXIO); } #if __FreeBSD_version <700000 if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq, INTR_TYPE_CAM, hpt_intr, pAdapter, &pAdapter->hpt_intr)) #else if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq, INTR_TYPE_CAM, NULL, hpt_intr, pAdapter, &pAdapter->hpt_intr)) #endif { hpt_printk(("can't set up interrupt\n")); free(pAdapter, M_DEVBUF); return(ENXIO); } if((ccb = (union ccb *)malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK)) != (union ccb*)NULL) { bzero(ccb, sizeof(*ccb)); ccb->ccb_h.pinfo.priority = 1; ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; } else { return ENOMEM; } /* * Create the device queue for our SIM(s).
*/ if((devq = cam_simq_alloc(8/*MAX_QUEUE_COMM*/)) == NULL) { KdPrint(("ENXIO\n")); return ENOMEM; } /* * Construct our SIM entry */ #if __FreeBSD_version <700000 hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME), pAdapter, device_get_unit(pAdapter->hpt_dev), 1, 8, devq); #else hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME), pAdapter, device_get_unit(pAdapter->hpt_dev), &Giant, 1, 8, devq); #endif if (hpt_vsim == NULL) { cam_simq_free(devq); return ENOMEM; } #if __FreeBSD_version <700000 if (xpt_bus_register(hpt_vsim, 0) != CAM_SUCCESS) #else if (xpt_bus_register(hpt_vsim, dev, 0) != CAM_SUCCESS) #endif { cam_sim_free(hpt_vsim, /*free devq*/ TRUE); hpt_vsim = NULL; return ENXIO; } if(xpt_create_path(&pAdapter->path, /*periph */ NULL, cam_sim_path(hpt_vsim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(hpt_vsim)); cam_sim_free(hpt_vsim, /*free_devq*/TRUE); hpt_vsim = NULL; return ENXIO; } xpt_setup_ccb(&(ccb->ccb_h), pAdapter->path, /*priority*/5); ccb->ccb_h.func_code = XPT_SASYNC_CB; ccb->csa.event_enable = AC_LOST_DEVICE; ccb->csa.callback = hpt_async; ccb->csa.callback_arg = hpt_vsim; xpt_action((union ccb *)ccb); free(ccb, M_DEVBUF); if (device_get_unit(dev) == 0) { /* Start the work thread. XXX */ launch_worker_thread(); } return 0; } static int hpt_detach(device_t dev) { return (EBUSY); } /*************************************************************** * The poll function is used to simulate the interrupt when * the interrupt subsystem is not functioning. * ***************************************************************/ static void hpt_poll(struct cam_sim *sim) { hpt_intr((void *)cam_sim_softc(sim)); } /**************************************************************** * Name: hpt_intr * Description: Interrupt handler. 
****************************************************************/ static void hpt_intr(void *arg) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)arg; intrmask_t oldspl = lock_driver(); /* KdPrintI(("----- Entering Isr() -----\n")); */ if (mvSataInterruptServiceRoutine(&pAdapter->mvSataAdapter) == MV_TRUE) { _VBUS_INST(&pAdapter->VBus) CheckPendingCall(_VBUS_P0); } /* KdPrintI(("----- Leaving Isr() -----\n")); */ unlock_driver(oldspl); } /********************************************************** * Asynchronous Events *********************************************************/ #if (!defined(UNREFERENCED_PARAMETER)) #define UNREFERENCED_PARAMETER(x) (void)(x) #endif static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { /* debug XXXX */ panic("Here"); UNREFERENCED_PARAMETER(callback_arg); UNREFERENCED_PARAMETER(code); UNREFERENCED_PARAMETER(path); UNREFERENCED_PARAMETER(arg); } static void FlushAdapter(IAL_ADAPTER_T *pAdapter) { int i; hpt_printk(("flush all devices\n")); /* flush all devices */ for (i=0; i<MAX_VDEVICE_PER_VBUS; i++) { PVDevice pVDev = pAdapter->VBus.pVDevice[i]; if(pVDev) fFlushVDev(pVDev); } } static int hpt_shutdown(device_t dev) { IAL_ADAPTER_T *pAdapter; pAdapter = device_get_softc(dev); if (pAdapter == NULL) return (EINVAL); EVENTHANDLER_DEREGISTER(shutdown_final, pAdapter->eh); FlushAdapter(pAdapter); /* give the flush some time to happen, * otherwise "shutdown -p now" may corrupt the file system */ DELAY(1000 * 1000 * 5); return 0; } void Check_Idle_Call(IAL_ADAPTER_T *pAdapter) { _VBUS_INST(&pAdapter->VBus) if (mWaitingForIdle(_VBUS_P0)) { CheckIdleCall(_VBUS_P0); #ifdef SUPPORT_ARRAY { int i; PVDevice pArray; for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){ if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else if (pArray->u.array.rf_auto_rebuild) { KdPrint(("auto rebuild.\n")); pArray->u.array.rf_auto_rebuild = 0; hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE); } } } #endif } /* launch the awaiting commands blocked by mWaitingForIdle */ while(pAdapter->pending_Q!= NULL) { _VBUS_INST(&pAdapter->VBus) union ccb *ccb = (union ccb *)pAdapter->pending_Q->ccb_h.ccb_ccb_ptr; hpt_free_ccb(&pAdapter->pending_Q, ccb); CallAfterReturn(_VBUS_P (DPC_PROC)OsSendCommand, ccb); } } static void ccb_done(union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T * pAdapter = pmap->pAdapter; KdPrintI(("ccb_done: ccb %p status %x\n", ccb, ccb->ccb_h.status)); dmamap_put(pmap); xpt_done(ccb); pAdapter->outstandingCommands--; if (pAdapter->outstandingCommands == 0) { if(DPC_Request_Nums == 0) Check_Idle_Call(pAdapter); } } /**************************************************************** * Name: hpt_action * Description: Process a queued command from the CAM layer. * Parameters: sim - Pointer to SIM object * ccb - Pointer to SCSI command structure.
****************************************************************/ void hpt_action(struct cam_sim *sim, union ccb *ccb) { intrmask_t oldspl; IAL_ADAPTER_T * pAdapter = (IAL_ADAPTER_T *) cam_sim_softc(sim); PBUS_DMAMAP pmap; _VBUS_INST(&pAdapter->VBus) CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("hpt_action\n")); KdPrint(("hpt_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ { /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.target_id >= MAX_VDEVICE_PER_VBUS || pAdapter->VBus.pVDevice[ccb->ccb_h.target_id]==0) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } oldspl = lock_driver(); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); pmap = dmamap_get(pAdapter); HPT_ASSERT(pmap); ccb->ccb_adapter = pmap; memset((void *)pmap->psg, 0, sizeof(pmap->psg)); if (mWaitingForIdle(_VBUS_P0)) hpt_queue_ccb(&pAdapter->pending_Q, ccb); else OsSendCommand(_VBUS_P ccb); unlock_driver(oldspl); /* KdPrint(("leave scsiio\n")); */ break; } case XPT_RESET_BUS: KdPrint(("reset bus\n")); oldspl = lock_driver(); fResetVBus(_VBUS_P0); unlock_driver(oldspl); xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: - { +#if __FreeBSD_version >= 500000 + cam_calc_geometry(&ccb->ccg, 1); +#else + { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; + } +#endif xpt_done(ccb); break; - } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; /* Not necessary to reset bus */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = MAX_VDEVICE_PER_VBUS; cpi->max_lun = 0; cpi->initiator_id = MAX_VDEVICE_PER_VBUS; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: KdPrint(("invalid cmd\n")); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } /* KdPrint(("leave hpt_action..............\n")); */ } /* shall be called at lock_driver() */ static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb) { if(*ccb_Q == NULL) ccb->ccb_h.ccb_ccb_ptr = ccb; else { ccb->ccb_h.ccb_ccb_ptr = (*ccb_Q)->ccb_h.ccb_ccb_ptr; (*ccb_Q)->ccb_h.ccb_ccb_ptr = (char *)ccb; } *ccb_Q = ccb; } /* shall be called at lock_driver() */ static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) { union ccb *TempCCB; TempCCB = *ccb_Q; if(ccb->ccb_h.ccb_ccb_ptr == ccb) /*it means SCpnt is the last one in CURRCMDs*/ *ccb_Q = NULL; else { while(TempCCB->ccb_h.ccb_ccb_ptr != (char *)ccb) TempCCB = (union ccb *)TempCCB->ccb_h.ccb_ccb_ptr; TempCCB->ccb_h.ccb_ccb_ptr = ccb->ccb_h.ccb_ccb_ptr; if(*ccb_Q == ccb) *ccb_Q = TempCCB; } } #ifdef SUPPORT_ARRAY /*************************************************************************** * Function: hpt_worker_thread * Description: Do background rebuilding. Execute in kernel thread context. * Returns: None ***************************************************************************/ static void hpt_worker_thread(void) { intrmask_t oldspl; for(;;) { while (DpcQueue_First!=DpcQueue_Last) { ST_HPT_DPC p; oldspl = lock_driver(); p = DpcQueue[DpcQueue_First]; DpcQueue_First++; DpcQueue_First %= MAX_DPC; DPC_Request_Nums++; unlock_driver(oldspl); p.dpc(p.pAdapter, p.arg, p.flags); oldspl = lock_driver(); DPC_Request_Nums--; /* since we may have prevented Check_Idle_Call, do it here */ if (DPC_Request_Nums==0) { if (p.pAdapter->outstandingCommands == 0) { _VBUS_INST(&p.pAdapter->VBus); Check_Idle_Call(p.pAdapter); CheckPendingCall(_VBUS_P0); } } unlock_driver(oldspl); /*Schedule out*/ #if (__FreeBSD_version < 500000) YIELD_THREAD; #else #if (__FreeBSD_version > 700033) pause("sched", 1); #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "sched", 1); #endif #endif if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) { /* abort rebuilding process. 
*/ IAL_ADAPTER_T *pAdapter; PVDevice pArray; PVBus _vbus_p; int i; pAdapter = gIal_Adapter; while(pAdapter != 0){ _vbus_p = &pAdapter->VBus; for (i=0;i<MAX_ARRAY_PER_VBUS;i++) { if ((pArray=ArrayTables(i))->u.array.dArStamp==0) continue; else if (pArray->u.array.rf_rebuilding || pArray->u.array.rf_verifying || pArray->u.array.rf_initializing) { pArray->u.array.rf_abort_rebuild = 1; } } pAdapter = pAdapter->next; } } } /* Removed debug option */ /* #ifdef DEBUG if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) #if (__FreeBSD_version > 700033) pause("hptrdy", 2*hz); #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "hptrdy", 2*hz); #endif #endif */ #if (__FreeBSD_version >= 800002) kproc_suspend_check(curproc); #elif (__FreeBSD_version >= 500043) kthread_suspend_check(curproc); #else kproc_suspend_loop(curproc); #endif #if (__FreeBSD_version > 700033) pause("hptrdy", 2*hz); /* wait for something to do */ #else tsleep((caddr_t)hpt_worker_thread, PPAUSE, "hptrdy", 2*hz); /* wait for something to do */ #endif } } static struct proc *hptdaemonproc; static struct kproc_desc hpt_kp = { "hpt_wt", hpt_worker_thread, &hptdaemonproc }; /* Start this thread from hpt_attach, so the kernel does not launch it when no controller is present. */ static void launch_worker_thread(void) { IAL_ADAPTER_T *pAdapTemp; kproc_start(&hpt_kp); for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) { _VBUS_INST(&pAdapTemp->VBus) int i; PVDevice pVDev; for(i = 0; i < MAX_ARRAY_PER_VBUS; i++) if ((pVDev=ArrayTables(i))->u.array.dArStamp==0) continue; else{ if (pVDev->u.array.rf_need_rebuild && !pVDev->u.array.rf_rebuilding) hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapTemp, pVDev, (UCHAR)((pVDev->u.array.CriticalMembers || pVDev->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY)); } } /* * hpt_worker_thread needs to be suspended after shutdown sync, once the fs sync has finished. */ #if (__FreeBSD_version < 500043) EVENTHANDLER_REGISTER(shutdown_post_sync, shutdown_kproc, hptdaemonproc, SHUTDOWN_PRI_FIRST); #else EVENTHANDLER_REGISTER(shutdown_post_sync, kproc_shutdown, hptdaemonproc, SHUTDOWN_PRI_FIRST); #endif } /* *SYSINIT(hptwt, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, launch_worker_thread, NULL); */ #endif /********************************************************************************/ int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical) { union ccb *ccb = (union ccb *)pCmd->pOrgCommand; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pSg[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr; pSg[idx].wSgSize = sgList[idx].ds_len; pSg[idx].wSgFlag = (idx==ccb->csio.sglist_cnt-1)?
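/*
 * [Editor's note: illustrative sketch, not part of this commit.] The
 * FPSCAT_GATH scatter/gather lists used throughout this driver are not
 * counted arrays; the last element carries an SG_FLAG_EOT bit, which is why
 * fOsBuildSgl() and the send paths walk them with do/while loops. A minimal
 * standalone model of the same convention (types and the flag value are
 * made up for the example):
 */
#include <stddef.h>
#include <stdint.h>

#define EX_SG_FLAG_EOT 0x8000	/* hypothetical end-of-table bit */

struct ex_sg_entry {
	uint64_t addr;
	uint16_t size;		/* 0 is read as 0x10000, as in the PIO path */
	uint16_t flags;
};

static size_t ex_sg_total_bytes(const struct ex_sg_entry *sg)
{
	size_t total = 0;

	do {
		total += sg->size ? sg->size : 0x10000;
	} while ((sg++->flags & EX_SG_FLAG_EOT) == 0);	/* same idiom as above */
	return total;
}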
SG_FLAG_EOT : 0; } } else { pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr; pSg->wSgSize = ccb->csio.dxfer_len; pSg->wSgFlag = SG_FLAG_EOT; } return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } /*******************************************************************************/ ULONG HPTLIBAPI GetStamp(void) { /* * the system variable, ticks, can't be used since it hasn't yet been active * when our driver starts (ticks==0, it's a invalid stamp value) */ ULONG stamp; do { stamp = random(); } while (stamp==0); return stamp; } static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev) { int i; IDENTIFY_DATA2 *pIdentify = (IDENTIFY_DATA2*)pVDev->u.disk.mv->identifyDevice; inquiryData->DeviceType = T_DIRECT; /*DIRECT_ACCESS_DEVICE*/ inquiryData->AdditionalLength = (UCHAR)(sizeof(INQUIRYDATA) - 5); #ifndef SERIAL_CMDS inquiryData->CommandQueue = 1; #endif switch(pVDev->VDeviceType) { case VD_SINGLE_DISK: case VD_ATAPI: case VD_REMOVABLE: /* Set the removable bit, if applicable. */ if ((pVDev->u.disk.df_removable_drive) || (pIdentify->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; /* Fill in vendor identification fields. */ for (i = 0; i < 20; i += 2) { inquiryData->VendorId[i] = ((PUCHAR)pIdentify->ModelNumber)[i + 1]; inquiryData->VendorId[i+1] = ((PUCHAR)pIdentify->ModelNumber)[i]; } /* Initialize unused portion of product id. */ for (i = 0; i < 4; i++) inquiryData->ProductId[12+i] = ' '; /* firmware revision */ for (i = 0; i < 4; i += 2) { inquiryData->ProductRevisionLevel[i] = ((PUCHAR)pIdentify->FirmwareRevision)[i+1]; inquiryData->ProductRevisionLevel[i+1] = ((PUCHAR)pIdentify->FirmwareRevision)[i]; } break; default: memcpy(&inquiryData->VendorId, "RR18xx ", 8); #ifdef SUPPORT_ARRAY switch(pVDev->VDeviceType){ case VD_RAID_0: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 1/0 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 0 Array ", 16); break; case VD_RAID_1: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 0/1 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 1 Array ", 16); break; case VD_RAID_5: memcpy(&inquiryData->ProductId, "RAID 5 Array ", 16); break; case VD_JBOD: memcpy(&inquiryData->ProductId, "JBOD Array ", 16); break; } #endif memcpy(&inquiryData->ProductRevisionLevel, "3.00", 4); break; } } static void hpt_timeout(void *arg) { _VBUS_INST(&((PBUS_DMAMAP)((union ccb *)arg)->ccb_adapter)->pAdapter->VBus) intrmask_t oldspl = lock_driver(); fResetVBus(_VBUS_P0); unlock_driver(oldspl); } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCommand pCmd = (PCommand)arg; union ccb *ccb = pCmd->pOrgCommand; struct ccb_hdr *ccb_h = &ccb->ccb_h; PBUS_DMAMAP pmap = (PBUS_DMAMAP) ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; FPSCAT_GATH psg = pCmd->pSgTable; int idx; _VBUS_INST(pVDev->pVBus) HPT_ASSERT(pCmd->cf_physical_sg); if (error || nsegs == 0) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); for (idx = 0; idx < nsegs; idx++, psg++) { psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; psg->wSgSize = segs[idx].ds_len; psg->wSgFlag = (idx == nsegs-1)? 
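/*
 * [Editor's note: illustrative sketch, not part of this commit.] ATA
 * IDENTIFY strings (model number, firmware revision) are stored as 16-bit
 * words whose two characters are swapped, which is why SetInquiryData()
 * above copies bytes pairwise as [i+1] then [i]. The same unswap as a
 * standalone helper (len assumed even; dst must hold len+1 bytes):
 */
#include <stddef.h>

static void ata_string_unswap(char *dst, const unsigned char *id, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {
		dst[i]     = (char)id[i + 1];	/* high byte of the word first */
		dst[i + 1] = (char)id[i];
	}
	dst[len] = '\0';
}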
SG_FLAG_EOT: 0; /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ } /* psg[-1].wSgFlag = SG_FLAG_EOT; */ if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz); pVDev->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); } static void HPTLIBAPI OsSendCommand(_VBUS_ARG union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; struct ccb_hdr *ccb_h = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; KdPrintI(("OsSendCommand: ccb %p cdb %x-%x-%x\n", ccb, *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[0], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[4], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[8] )); pAdapter->outstandingCommands++; if (pVDev == NULL || pVDev->vf_online == 0) { ccb->ccb_h.status = CAM_REQ_INVALID; ccb_done(ccb); goto Command_Complished; } switch(ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: /* FALLTHROUGH */ ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: ZeroMemory(ccb->csio.data_ptr, ccb->csio.dxfer_len); SetInquiryData((PINQUIRYDATA)ccb->csio.data_ptr, pVDev); ccb_h->status = CAM_REQ_CMP; break; case READ_CAPACITY: { UCHAR *rbuf=csio->data_ptr; unsigned int cap; if (pVDev->VDeviceCapacity > 0xfffffffful) { cap = 0xfffffffful; } else { cap = pVDev->VDeviceCapacity - 1; } rbuf[0] = (UCHAR)(cap>>24); rbuf[1] = (UCHAR)(cap>>16); rbuf[2] = (UCHAR)(cap>>8); rbuf[3] = (UCHAR)cap; /* Claim 512 byte blocks (big-endian). 
*/ rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb_h->status = CAM_REQ_CMP; break; } case 0x9e: /*SERVICE_ACTION_IN*/ { UCHAR *rbuf = csio->data_ptr; LBA_T cap = pVDev->VDeviceCapacity - 1; rbuf[0] = (UCHAR)(cap>>56); rbuf[1] = (UCHAR)(cap>>48); rbuf[2] = (UCHAR)(cap>>40); rbuf[3] = (UCHAR)(cap>>32); rbuf[4] = (UCHAR)(cap>>24); rbuf[5] = (UCHAR)(cap>>16); rbuf[6] = (UCHAR)(cap>>8); rbuf[7] = (UCHAR)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb_h->status = CAM_REQ_CMP; break; } case READ_6: case WRITE_6: case READ_10: case WRITE_10: case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ case 0x13: case 0x2f: { UCHAR Cdb[16]; UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, Cdb, CdbLength); } else { KdPrintE(("ERROR!!!\n")); ccb->ccb_h.status = CAM_REQ_INVALID; break; } } else { bcopy(csio->cdb_io.cdb_bytes, Cdb, CdbLength); } pCmd->pOrgCommand = ccb; pCmd->pVDevice = pVDev; pCmd->pfnCompletion = fOsCommandDone; pCmd->pfnBuildSgl = fOsBuildSgl; pCmd->pSgTable = pmap->psg; switch (Cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((ULONG)Cdb[1] << 16) | ((ULONG)Cdb[2] << 8) | (ULONG)Cdb[3]; pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[4]; break; case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Lba = (HPT_U64)Cdb[2] << 56 | (HPT_U64)Cdb[3] << 48 | (HPT_U64)Cdb[4] << 40 | (HPT_U64)Cdb[5] << 32 | (HPT_U64)Cdb[6] << 24 | (HPT_U64)Cdb[7] << 16 | (HPT_U64)Cdb[8] << 8 | (HPT_U64)Cdb[9]; pCmd->uCmd.Ide.nSectors = (USHORT)Cdb[12] << 8 | (USHORT)Cdb[13]; break; default: pCmd->uCmd.Ide.Lba = (ULONG)Cdb[5] | ((ULONG)Cdb[4] << 8) | ((ULONG)Cdb[3] << 16) | ((ULONG)Cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[8] | ((USHORT)Cdb[7]<<8); break; } switch (Cdb[0]) { case READ_6: case READ_10: case 0x88: /* READ_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_READ; pCmd->cf_data_in = 1; break; case WRITE_6: case WRITE_10: case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_WRITE; pCmd->cf_data_out = 1; break; case 0x13: case 0x2f: pCmd->uCmd.Ide.Command = IDE_COMMAND_VERIFY; break; } /*///////////////////////// */ if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->cf_physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->pSgTable[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr; pCmd->pSgTable[idx].wSgSize = sgList[idx].ds_len; pCmd->pSgTable[idx].wSgFlag= (idx==ccb->csio.sglist_cnt-1)?SG_FLAG_EOT: 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz); pVDev->pfnSendCommand(_VBUS_P pCmd); } else { int error; pCmd->cf_physical_sg = 1; error = bus_dmamap_load(pAdapter->io_dma_parent, pmap->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d\n", error)); if (error && error!=EINPROGRESS) { hpt_printk(("bus_dmamap_load error %d\n", error)); FreeCommand(_VBUS_P pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; dmamap_put(pmap); pAdapter->outstandingCommands--; xpt_done(ccb); } } goto Command_Complished; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ccb_done(ccb); Command_Complished: CheckPendingCall(_VBUS_P0); return; } static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd) { union ccb *ccb = 
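/*
 * [Editor's note: illustrative sketch, not part of this commit.] READ
 * CAPACITY(10), handled above, returns two big-endian 32-bit fields: the
 * last addressable LBA (clamped to 0xffffffff when the volume really needs
 * the 16-byte service action) and the block length; storing 2 in byte 6 is
 * simply 512 (0x200) in big-endian. A standalone encoder of the same layout
 * (assumes capacity_sectors >= 1):
 */
#include <stdint.h>

static void encode_read_capacity10(uint8_t buf[8], uint64_t capacity_sectors)
{
	uint32_t last_lba = (capacity_sectors > 0xffffffffULL) ?
	    0xffffffffUL : (uint32_t)(capacity_sectors - 1);

	buf[0] = (uint8_t)(last_lba >> 24);
	buf[1] = (uint8_t)(last_lba >> 16);
	buf[2] = (uint8_t)(last_lba >> 8);
	buf[3] = (uint8_t)last_lba;
	buf[4] = 0;	/* block length: 0x00000200 = 512 bytes */
	buf[5] = 0;
	buf[6] = 2;
	buf[7] = 0;
}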
pCmd->pOrgCommand; PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; KdPrint(("fOsCommandDone(pcmd=%p, result=%d)\n", pCmd, pCmd->Result)); untimeout(hpt_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pAdapter->io_dma_parent, pmap->dma_map); FreeCommand(_VBUS_P pCmd); ccb_done(ccb); } int hpt_queue_dpc(HPT_DPC dpc, IAL_ADAPTER_T * pAdapter, void *arg, UCHAR flags) { int p; p = (DpcQueue_Last + 1) % MAX_DPC; if (p==DpcQueue_First) { KdPrint(("DPC Queue full!\n")); return -1; } DpcQueue[DpcQueue_Last].dpc = dpc; DpcQueue[DpcQueue_Last].pAdapter = pAdapter; DpcQueue[DpcQueue_Last].arg = arg; DpcQueue[DpcQueue_Last].flags = flags; DpcQueue_Last = p; return 0; } #ifdef _RAID5N_ /* * Allocate memory above 16M, otherwise we may eat all low memory for ISA devices. * How about the memory for 5081 request/response array and PRD table? */ void *os_alloc_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void *os_alloc_dma_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void os_free_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void os_free_dma_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void DoXor1(ULONG *p0, ULONG *p1, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ = *p1++ ^ *p2++; } void DoXor2(ULONG *p0, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ ^= *p2++; } #endif Index: head/sys/dev/hptrr/hptrr_osm_bsd.c =================================================================== --- head/sys/dev/hptrr/hptrr_osm_bsd.c (revision 227911) +++ head/sys/dev/hptrr/hptrr_osm_bsd.c (revision 227912) @@ -1,1408 +1,1414 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * - * $FreeBSD$ */ + +#include +__FBSDID("$FreeBSD$"); + #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. */ #include #include static int attach_generic = 1; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static int hpt_probe(device_t dev) { PCI_ID pci_id; HIM *him; int i; PHBA hba; /* Some of supported chips are used not only by HPT. */ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (ENXIO); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); hba = (PHBA)device_get_softc(dev); memset(hba, 0, sizeof(HBA)); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; return 0; } } } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him = hba->ldm_adapter.him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); #if __FreeBSD_version >=440000 pci_enable_busmaster(dev); #endif pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return -1; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); } static __inline void free_pages(void *p, int order) { contigfree(p, PAGE_SIZE<<order, M_DEVBUF); } static int hpt_alloc_mem(PVBUS_EXT vbus_ext) { PHBA hba; struct freelist *f; HPT_UINT i; void **p; for (hba = vbus_ext->hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; i<f->count; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; i<f->count;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && i<f->count; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; i<os_max_cache_pages; i++) { p = (void **)__get_free_pages(0); if (!p) return -1; dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; i<os_max_cache_pages; i++) { p = dmapool_get_page_at((PVBUS)vbus_ext->vbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry).
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flusing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; ihba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result)); untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, 
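/*
 * [Editor's note: illustrative sketch, not part of this commit.]
 * hpt_flush_vdev() above uses a classic kernel handshake: the issuer parks
 * an on-stack 'done' flag in pCmd->priv, queues the command, and sleeps
 * until the completion callback (hpt_flush_done -> wakeup) fires. A userland
 * model of the same pattern with pthreads (all names are made up):
 */
#include <pthread.h>

struct ex_waiter {
	pthread_mutex_t mtx;	/* init with PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t  cv;	/* init with PTHREAD_COND_INITIALIZER  */
	int done;
};

/* runs in completion context - the hpt_flush_done()/wakeup() analogue */
static void ex_complete(struct ex_waiter *w)
{
	pthread_mutex_lock(&w->mtx);
	w->done = 1;
	pthread_cond_signal(&w->cv);
	pthread_mutex_unlock(&w->mtx);
}

/* runs in the issuer - the hpt_sleep() loop analogue */
static void ex_wait(struct ex_waiter *w)
{
	pthread_mutex_lock(&w->mtx);
	while (!w->done)
		pthread_cond_wait(&w->cv, &w->mtx);
	pthread_mutex_unlock(&w->mtx);
}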
ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); pSg[idx].size = sgList[idx].ds_len; pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } } else { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; } return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error || nsegs == 0) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; 
rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 
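/*
 * [Editor's note: illustrative sketch, not part of this commit.] The
 * READ_16/WRITE_16 case above assembles a big-endian 64-bit LBA from CDB
 * bytes 2..9 by hand. The SCSI transfer length is a 32-bit field in bytes
 * 10..13, of which the driver keeps only the low 16 bits (cdb[12..13])
 * because nSectors is a 16-bit field. The same decode, written generically:
 */
#include <stdint.h>

static uint64_t be_bytes_to_u64(const uint8_t *p, int n)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < n; i++)
		v = (v << 8) | p[i];
	return v;
}

/*
 * Usage for a READ(16) CDB:
 *   lba      = be_bytes_to_u64(&cdb[2], 8);
 *   nsectors = (uint16_t)be_bytes_to_u64(&cdb[12], 2);
 */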
1 : 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } else { int error; pCmd->flags.physical_sg = 1; error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: +#if __FreeBSD_version >= 500000 + cam_calc_geometry(&ccb->ccg, 1); +#else ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; +#endif break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { hpt_pci_intr(cam_sim_softc(sim)); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. 
*/ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; i=503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif #if __FreeBSD_version<600034 #if __FreeBSD_version>501000 .d_maj = MAJOR_AUTO, #else .d_maj = HPT_DEV_MAJOR, #endif #endif }; static struct intr_config_hook hpt_ich; /* * hpt_final_init will be called after all hpt_attach. 
*/ static void hpt_final_init(void *dummy) { int i; PVBUS_EXT vbus_ext; PVBUS vbus; PHBA hba; /* Clear the config hook */ config_intrhook_disestablish(&hpt_ich); /* allocate memory */ i = 0; ldm_for_each_vbus(vbus, vbus_ext) { if (hpt_alloc_mem(vbus_ext)) { os_printk("out of memory"); return; } i++; } if (!i) { if (bootverbose) os_printk("no controller detected."); return; } /* initializing hardware */ ldm_for_each_vbus(vbus, vbus_ext) { /* make timer available here */ callout_handle_init(&vbus_ext->timer); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; #if (__FreeBSD_version >= 500000) mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF); #endif if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ #if __FreeBSD_version>502000 busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ #endif &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if __FreeBSD_version > 700025 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &Giant, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, os_max_queue_comm, /*tagged*/8, devq); #endif if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } #if __FreeBSD_version > 700044 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { #else if (xpt_bus_register(vbus_ext->sim, 0) != CAM_SUCCESS) { #endif os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #if __FreeBSD_version > 700025 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) #else hpt_pci_intr, vbus_ext, &hba->irq_handle)) #endif { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event 
registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) && (__FreeBSD_version >= 503000) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), - { 0, 0 } + DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); #if __FreeBSD_version>503000 typedef struct cdev * ioctl_dev_t; #else typedef dev_t ioctl_dev_t; #endif #if __FreeBSD_version >= 500000 typedef struct thread * ioctl_thread_t; #else typedef struct proc * ioctl_thread_t; #endif static int hpt_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t td) { return 0; } static int hpt_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int fflag, ioctl_thread_t td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = 
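/*
 * [Editor's note: illustrative sketch, not part of this commit.] The
 * HPT_DO_IOCTL macro earlier in this file relies on the GCC
 * statement-expression extension: ({ stmt; ...; expr; }) is an expression
 * whose value is the final expr, which lets the macro both populate an
 * IOCTL_ARG and yield arg.result in a single use. A minimal standalone
 * example of the construct:
 */
#define EX_CLAMP_COUNTED(x, lo, hi, nclamped) ({		\
	int ex_v = (x);						\
	if (ex_v < (lo)) { ex_v = (lo); (nclamped)++; }		\
	else if (ex_v > (hi)) { ex_v = (hi); (nclamped)++; }	\
	ex_v;	/* value of the whole ({ ... }) expression */	\
})
/* e.g.: int n = 0; int y = EX_CLAMP_COUNTED(42, 0, 10, n);  y == 10, n == 1 */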
piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version >= 500000) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version >= 500000) mtx_unlock(&Giant); #endif return(0); }
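/*
 * [Editor's note: a hedged observation, not part of this commit.] In
 * hpt_rescan_bus() above, the ENOMEM/EIO returns appear to skip the final
 * mtx_unlock(&Giant) on kernels where the lock was taken. A common way to
 * keep a single unlock site is a goto-based exit path, sketched here with
 * stand-in names:
 */
#include <errno.h>

extern void ex_lock(void);		/* stand-ins for the real lock pair */
extern void ex_unlock(void);
extern int  ex_rescan_one(int bus);	/* per-bus work that may fail */

static int ex_rescan_all(int nbuses)
{
	int error = 0, i;

	ex_lock();
	for (i = 0; i < nbuses; i++) {
		error = ex_rescan_one(i);
		if (error)
			goto out;	/* single exit keeps the lock balanced */
	}
out:
	ex_unlock();
	return (error);
}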