Index: head/sys/dev/arcmsr/arcmsr.c
===================================================================
--- head/sys/dev/arcmsr/arcmsr.c	(revision 295789)
+++ head/sys/dev/arcmsr/arcmsr.c	(revision 295790)
@@ -1,4562 +1,4562 @@
/*
********************************************************************************
**        OS    : FreeBSD
**   FILE NAME  : arcmsr.c
**        BY    : Erich Chen, Ching Huang
**   Description: SCSI RAID Device Driver for
**                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x)
**                SATA/SAS RAID HOST Adapter
********************************************************************************
********************************************************************************
**
** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
** History
**
** REV#         DATE        NAME         DESCRIPTION
** 1.00.00.00   03/31/2004  Erich Chen   First release
** 1.20.00.02   11/29/2004  Erich Chen   bug fix with arcmsr_bus_reset when PHY error
** 1.20.00.03   04/19/2005  Erich Chen   add SATA 24 Ports adapter type support
**                                       clean unused function
** 1.20.00.12   09/12/2005  Erich Chen   bug fix with abort command handling,
**                                       firmware version check
**                                       and firmware update notify for hardware bug fix
**                                       handling if non-zero high part physical address
**                                       of srb resource
** 1.20.00.13   08/18/2006  Erich Chen   remove pending srb and report busy
**                                       add iop message xfer
**                                       with scsi pass-through command
**                                       add new device id of sas raid adapters
**                                       code fit for SPARC64 & PPC
** 1.20.00.14   02/05/2007  Erich Chen   bug fix for incorrect ccb_h.status report
**                                       and cause g_vfs_done() read write error
** 1.20.00.15   10/10/2007  Erich Chen   support new RAID adapter type ARC120x
** 1.20.00.16   10/10/2009  Erich Chen   Bug fix for RAID adapter type ARC120x
**                                       bus_dmamem_alloc() with BUS_DMA_ZERO
** 1.20.00.17   07/15/2010  Ching Huang  Added support ARC1880
**                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
**                                       prevent cam_periph_error removing all LUN devices of one Target id
**                                       for any one LUN device failed
** 1.20.00.18   10/14/2010  Ching Huang  Fixed "inquiry data fails comparison at DV1 step"
**              10/25/2010  Ching Huang  Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
** 1.20.00.19   11/11/2010  Ching Huang  Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
** 1.20.00.20   12/08/2010  Ching Huang  Avoid calling atomic_set_int function
** 1.20.00.21   02/08/2011  Ching Huang  Implement I/O request timeout
**              02/14/2011  Ching Huang  Modified pktRequestCount
** 1.20.00.21   03/03/2011  Ching Huang  if a command timeout, then wait its ccb back before free it
** 1.20.00.22   07/04/2011  Ching Huang  Fixed multiple MTX panic
** 1.20.00.23   10/28/2011  Ching Huang  Added TIMEOUT_DELAY in case of too many HDDs need to start
** 1.20.00.23   11/08/2011  Ching Huang  Added report device transfer speed
** 1.20.00.23   01/30/2012  Ching Huang  Fixed Request requeued and Retrying command
** 1.20.00.24   06/11/2012  Ching Huang  Fixed return sense data condition
** 1.20.00.25   08/17/2012  Ching Huang  Fixed hotplug device no function on type A adapter
** 1.20.00.26   12/14/2012  Ching Huang  Added support ARC1214,1224,1264,1284
** 1.20.00.27   05/06/2013  Ching Huang  Fixed outstanding cmd full on ARC-12x4
** 1.20.00.28   09/13/2013  Ching Huang  Removed recursive mutex in arcmsr_abort_dr_ccbs
** 1.20.00.29   12/18/2013  Ching Huang  Change simq allocation number, support ARC1883
** 1.30.00.00   11/30/2015  Ching Huang  Added support ARC1203
******************************************************************************************
*/
#include
__FBSDID("$FreeBSD$");

#if 0
#define ARCMSR_DEBUG1	1
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
**************************************************************************
**************************************************************************
*/
#if __FreeBSD_version >= 500005
    #include
    #include
    #include
    #include
    #include
#else
    #include
    #include
    #include
#endif
#if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025
#define CAM_NEW_TRAN_CODE	1
#endif
#if __FreeBSD_version > 500000
    #define arcmsr_callout_init(a)	callout_init(a, /*mpsafe*/1);
#else
    #define arcmsr_callout_init(a)	callout_init(a);
#endif
#define ARCMSR_DRIVER_VERSION	"arcmsr version 1.30.00.00 2015-11-30"
#include
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_free_srb(struct CommandControlBlock *srb);
static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
static int arcmsr_probe(device_t dev);
static int arcmsr_attach(device_t dev);
static int arcmsr_detach(device_t dev);
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
static int arcmsr_shutdown(device_t dev);
static void arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
static void arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer);
static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
static void arcmsr_polling_devmap(void *arg);
static void arcmsr_srb_timeout(void *arg);
static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
#ifdef ARCMSR_DEBUG1
static void arcmsr_dump_data(struct AdapterControlBlock *acb);
#endif
/*
**************************************************************************
**************************************************************************
*/
static void UDELAY(u_int32_t us) { DELAY(us); }
/*
**************************************************************************
**************************************************************************
*/
static bus_dmamap_callback_t arcmsr_map_free_srb;
static bus_dmamap_callback_t arcmsr_execute_srb;
/*
**************************************************************************
**************************************************************************
*/
static d_open_t	arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;

static device_method_t arcmsr_methods[]={
    DEVMETHOD(device_probe,	arcmsr_probe),
    DEVMETHOD(device_attach,	arcmsr_attach),
    DEVMETHOD(device_detach,	arcmsr_detach),
    DEVMETHOD(device_shutdown,	arcmsr_shutdown),
    DEVMETHOD(device_suspend,	arcmsr_suspend),
    DEVMETHOD(device_resume,	arcmsr_resume),
#if __FreeBSD_version >= 803000
    DEVMETHOD_END
#else
    { 0, 0 }
#endif
};

static driver_t arcmsr_driver={
    "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};

static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
#ifndef BUS_DMA_COHERENT
    #define BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
#endif
#if __FreeBSD_version >= 501000
static struct cdevsw arcmsr_cdevsw={
#if __FreeBSD_version >= 503000
    .d_version = D_VERSION,
#endif
#if (__FreeBSD_version>=503000 && __FreeBSD_version<600034)
    .d_flags = D_NEEDGIANT,
#endif
    .d_open  = arcmsr_open,	/* open  */
    .d_close = arcmsr_close,	/* close */
    .d_ioctl = arcmsr_ioctl,	/* ioctl */
    .d_name  = "arcmsr",	/* name  */
};
#else
#define ARCMSR_CDEV_MAJOR	180

static struct cdevsw arcmsr_cdevsw = {
    arcmsr_open,	/* open     */
    arcmsr_close,	/* close    */
    noread,		/* read     */
    nowrite,		/* write    */
    arcmsr_ioctl,	/* ioctl    */
    nopoll,		/* poll     */
    nommap,		/* mmap     */
    nostrategy,		/* strategy */
    "arcmsr",		/* name     */
    ARCMSR_CDEV_MAJOR,	/* major    */
    nodump,		/* dump     */
    nopsize,		/* psize    */
    0			/* flags    */
};
#endif
/*
**************************************************************************
**************************************************************************
*/
#if __FreeBSD_version < 500005
static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
    struct AdapterControlBlock *acb = dev->si_drv1;
#else
    int unit = dev2unit(dev);
    struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif
    if(acb == NULL) {
        return ENXIO;
    }
    return (0);
}
/*
**************************************************************************
**************************************************************************
*/
#if __FreeBSD_version < 500005
static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc)
#else
static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
    struct AdapterControlBlock *acb = dev->si_drv1;
#else
    int unit = dev2unit(dev);
    struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif
    if(acb == NULL) {
        return ENXIO;
    }
    return 0;
}
/*
**************************************************************************
**************************************************************************
*/
#if __FreeBSD_version < 500005
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc)
#else
#if __FreeBSD_version < 503000
static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
#else
static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc)
#endif
#endif
{
#if __FreeBSD_version < 503000
    struct AdapterControlBlock *acb = dev->si_drv1;
#else
    int unit = dev2unit(dev);
    struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit);
#endif
    if(acb == NULL) {
        return ENXIO;
    }
    return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
}
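/*
** Illustrative sketch (not compiled, hence the #if 0): how a userland tool
** reaches the three entry points above.  The cdev created at attach time
** appears as /dev/arcmsr<unit>; opening it resolves the softc via
** dev2unit(), and every request funnels through arcmsr_ioctl() into
** arcmsr_iop_ioctlcmd().  ARCMSR_EXAMPLE_CMD is a placeholder name, not a
** real command code; the real codes live in the driver header.
*/
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
arcmsr_user_example(void)
{
    char buf[1032];	/* message buffer; real layout per driver header */
    int fd = open("/dev/arcmsr0", O_RDWR);	/* adapter unit 0 */

    if (fd < 0)
        return (-1);
    if (ioctl(fd, ARCMSR_EXAMPLE_CMD, buf) < 0) {	/* placeholder code */
        close(fd);
        return (-1);
    }
    close(fd);
    return (0);
}
#endif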
/*
**********************************************************************
**********************************************************************
*/
static u_int32_t arcmsr_disable_allintr(struct AdapterControlBlock *acb)
{
    u_int32_t intmask_org = 0;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            /* disable all outbound interrupt */
            intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask);
            /* disable outbound message0 int */
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
            /* disable all outbound interrupt */
            intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
            /* disable outbound message0 int */
            WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            /* disable all outbound interrupt */
            intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask);
            /* disable outbound message0 int */
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            /* disable all outbound interrupt */
            intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
            /* disable outbound message0 int */
            CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE);
        }
        break;
    }
    return (intmask_org);
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_enable_allintr(struct AdapterControlBlock *acb, u_int32_t intmask_org)
{
    u_int32_t mask;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            /* enable outbound Post Queue, outbound doorbell Interrupt */
            mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
            acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
            /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
            mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
            WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
            acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            /* enable outbound Post Queue, outbound doorbell Interrupt */
            mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
            acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            /* enable outbound Post Queue, outbound doorbell Interrupt */
            mask = ARCMSR_HBDMU_ALL_INT_ENABLE;
            CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask);
            CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable);
            acb->outbound_int_enable = mask;
        }
        break;
    }
}
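/*
** Usage note: callers treat the two routines above as a save/restore
** pair.  A minimal sketch of the pattern, as used later in this file by
** arcmsr_iop_reset() and arcmsr_abort_dr_ccbs():
*/
#if 0
    u_int32_t intmask_org;

    intmask_org = arcmsr_disable_allintr(acb);	/* save current mask, mask all */
    /* ... touch adapter state with interrupts quiesced ... */
    arcmsr_enable_allintr(acb, intmask_org);	/* restore the saved mask */
#endif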
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index=0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
                CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
                return TRUE;
            }
            UDELAY(10000);
        }/*max 1 second*/
    }while(Retries++ < 20);/*max 20 seconds*/
    return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;
    struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

    do {
        for(Index=0; Index < 100; Index++) {
            if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
                WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
                WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
                return TRUE;
            }
            UDELAY(10000);
        }/*max 1 second*/
    }while(Retries++ < 20);/*max 20 seconds*/
    return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index=0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
                CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
                return TRUE;
            }
            UDELAY(10000);
        }/*max 1 second*/
    }while(Retries++ < 20);/*max 20 seconds*/
    return (FALSE);
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index=0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) {
                CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/
                return TRUE;
            }
            UDELAY(10000);
        }/*max 1 second*/
    }while(Retries++ < 20);/*max 20 seconds*/
    return (FALSE);
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
    do {
        if(arcmsr_hba_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    }while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minutes */
    struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

    WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
    do {
        if(arcmsr_hbb_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    }while(retry_count != 0);
}
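/*
** Timing note, derived from the constants above: each wait routine polls
** 100 times with a 10 ms delay (up to 1 second per pass) and retries 20
** passes, i.e. up to 20 seconds per call.  With retry_count = 30, the
** flush routines therefore wait up to 30 * 20 s = 600 s, which is the
** "10 minutes" mentioned in their comments.
*/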
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    do {
        if(arcmsr_hbc_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    }while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
    do {
        if(arcmsr_hbd_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    }while(retry_count != 0);
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            arcmsr_flush_hba_cache(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            arcmsr_flush_hbb_cache(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            arcmsr_flush_hbc_cache(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            arcmsr_flush_hbd_cache(acb);
        }
        break;
    }
}
/*
*******************************************************************************
*******************************************************************************
*/
static int arcmsr_suspend(device_t dev)
{
    struct AdapterControlBlock *acb = device_get_softc(dev);

    /* flush controller */
    arcmsr_iop_parking(acb);
    /* disable all outbound interrupt */
    arcmsr_disable_allintr(acb);
    return(0);
}
/*
*******************************************************************************
*******************************************************************************
*/
static int arcmsr_resume(device_t dev)
{
    struct AdapterControlBlock *acb = device_get_softc(dev);

    arcmsr_iop_init(acb);
    return(0);
}
/*
*********************************************************************************
*********************************************************************************
*/
static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
{
    struct AdapterControlBlock *acb;
    u_int8_t target_id, target_lun;
    struct cam_sim *sim;

    sim = (struct cam_sim *) cb_arg;
    acb = (struct AdapterControlBlock *) cam_sim_softc(sim);
    switch (code) {
    case AC_LOST_DEVICE:
        target_id = xpt_path_target_id(path);
        target_lun = xpt_path_lun_id(path);
        if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
            break;
        }
        // printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
        break;
    default:
        break;
    }
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
{
    union ccb *pccb = srb->pccb;

    pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
    pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
    if(pccb->csio.sense_len) {
        memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
        memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
            get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
        /* (0x1 << 7 | 0x70) = 0xF0: valid bit plus current, fixed-format error code */
        ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
        pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
    if(!arcmsr_hba_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
    struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

    WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
    if(!arcmsr_hbb_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    if(!arcmsr_hbc_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
    if(!arcmsr_hbd_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            arcmsr_abort_hba_allcmd(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            arcmsr_abort_hbb_allcmd(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            arcmsr_abort_hbc_allcmd(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            arcmsr_abort_hbd_allcmd(acb);
        }
        break;
    }
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
{
    struct AdapterControlBlock *acb = srb->acb;
    union ccb *pccb = srb->pccb;

    if(srb->srb_flags & SRB_FLAG_TIMER_START)
        callout_stop(&srb->ccb_callout);
    if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        bus_dmasync_op_t op;

        if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            op = BUS_DMASYNC_POSTREAD;
        } else {
            op = BUS_DMASYNC_POSTWRITE;
        }
        bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
        bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
    }
    if(stand_flag == 1) {
        atomic_subtract_int(&acb->srboutstandingcount, 1);
        if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
        acb->srboutstandingcount < (acb->maxOutstanding -10))) {
            acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
            pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        }
    }
    if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
        arcmsr_free_srb(srb);
    acb->pktReturnCount++;
    xpt_done(pccb);
}
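/*
** Completion-path note (summarizing the logic above): a frozen SIM queue
** (ACB_F_CAM_DEV_QFRZN) is only released once srboutstandingcount has
** drained below maxOutstanding - 10; e.g. with an assumed maxOutstanding
** of 256, CAM_RELEASE_SIMQ is set once fewer than 246 SRBs remain posted.
** The margin keeps CAM from immediately refilling a just-congested
** adapter.
*/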
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
{
    int target, lun;

    target = srb->pccb->ccb_h.target_id;
    lun = srb->pccb->ccb_h.target_lun;
    if(error == FALSE) {
        if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
            acb->devstate[target][lun] = ARECA_RAID_GOOD;
        }
        srb->pccb->ccb_h.status |= CAM_REQ_CMP;
        arcmsr_srb_complete(srb, 1);
    } else {
        switch(srb->arcmsr_cdb.DeviceStatus) {
        case ARCMSR_DEV_SELECT_TIMEOUT: {
                if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
                    printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
                }
                acb->devstate[target][lun] = ARECA_RAID_GONE;
                srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                arcmsr_srb_complete(srb, 1);
            }
            break;
        case ARCMSR_DEV_ABORTED:
        case ARCMSR_DEV_INIT_FAIL: {
                acb->devstate[target][lun] = ARECA_RAID_GONE;
                srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                arcmsr_srb_complete(srb, 1);
            }
            break;
        case SCSISTAT_CHECK_CONDITION: {
                acb->devstate[target][lun] = ARECA_RAID_GOOD;
                arcmsr_report_sense_info(srb);
                arcmsr_srb_complete(srb, 1);
            }
            break;
        default:
            printf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
                , acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
            acb->devstate[target][lun] = ARECA_RAID_GONE;
            srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
            /*unknown error or crc error just for retry*/
            arcmsr_srb_complete(srb, 1);
            break;
        }
    }
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
{
    struct CommandControlBlock *srb;

    /* check if command done with no error*/
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_C:
    case ACB_ADAPTER_TYPE_D:
        srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
        break;
    case ACB_ADAPTER_TYPE_A:
    case ACB_ADAPTER_TYPE_B:
    default:
        srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
        break;
    }
    if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) {
        if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
            arcmsr_free_srb(srb);
            printf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb);
            return;
        }
        printf("arcmsr%d: return srb has been completed\n"
            "srb='%p' srb_state=0x%x outstanding srb count=%d \n",
            acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
        return;
    }
    arcmsr_report_srb_state(acb, srb, error);
}
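/*
** Illustration of the reply-token decode above (the token value is an
** assumed example, not taken from hardware): SRB frames are 32-byte
** aligned, so the low bits of a reply token are free for status.  For a
** type A/B reply of flag_srb = 0x00001234 the SRB virtual address is
** recovered as
**     vir2phy_offset + (0x00001234 << 5)
** while type C/D replies already carry a byte offset and are instead
** masked with 0xFFFFFFE0 to strip the five low status bits.
*/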
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_srb_timeout(void *arg)
{
    struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
    struct AdapterControlBlock *acb;
    int target, lun;
    u_int8_t cmd;

    target = srb->pccb->ccb_h.target_id;
    lun = srb->pccb->ccb_h.target_lun;
    acb = srb->acb;
    ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
    if(srb->srb_state == ARCMSR_SRB_START) {
        cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
        srb->srb_state = ARCMSR_SRB_TIMEOUT;
        srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
        arcmsr_srb_complete(srb, 1);
        printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
            acb->pci_unit, target, lun, cmd, srb);
    }
    ARCMSR_LOCK_RELEASE(&acb->isr_lock);
#ifdef ARCMSR_DEBUG1
    arcmsr_dump_data(acb);
#endif
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
    int i=0;
    u_int32_t flag_srb;
    u_int16_t error;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            u_int32_t outbound_intstatus;

            /*clear and abort all outbound posted Q*/
            outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
            while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF)
                && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
                error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
                arcmsr_drain_donequeue(acb, flag_srb, error);
            }
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;

            /*clear all outbound posted Q*/
            WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
            for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
                if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
                    phbbmu->done_qbuffer[i] = 0;
                    error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
                    arcmsr_drain_donequeue(acb, flag_srb, error);
                }
                phbbmu->post_qbuffer[i] = 0;
            }/*drain reply FIFO*/
            phbbmu->doneq_index = 0;
            phbbmu->postq_index = 0;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
                && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
                flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
                error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
                arcmsr_drain_donequeue(acb, flag_srb, error);
            }
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            arcmsr_hbd_postqueue_isr(acb);
        }
        break;
    }
}
/*
****************************************************************************
****************************************************************************
*/
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
    struct CommandControlBlock *srb;
    u_int32_t intmask_org;
    u_int32_t i=0;

    if(acb->srboutstandingcount>0) {
        /* disable all outbound interrupt */
        intmask_org = arcmsr_disable_allintr(acb);
        /*clear and abort all outbound posted Q*/
        arcmsr_done4abort_postqueue(acb);
        /* talk to iop 331 outstanding command aborted*/
        arcmsr_abort_allcmd(acb);
        for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
            srb = acb->psrb_pool[i];
            if(srb->srb_state == ARCMSR_SRB_START) {
                srb->srb_state = ARCMSR_SRB_ABORTED;
                srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
                arcmsr_srb_complete(srb, 1);
                printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n"
                    , acb->pci_unit, srb->pccb->ccb_h.target_id
                    , (uintmax_t)srb->pccb->ccb_h.target_lun, srb);
            }
        }
        /* enable all outbound interrupt */
        arcmsr_enable_allintr(acb, intmask_org);
    }
    acb->srboutstandingcount = 0;
    acb->workingsrb_doneindex = 0;
    acb->workingsrb_startindex = 0;
    acb->pktRequestCount = 0;
    acb->pktReturnCount = 0;
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg)
{
    struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
    u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
    u_int32_t address_lo, address_hi;
    union ccb *pccb = srb->pccb;
    struct ccb_scsiio *pcsio = &pccb->csio;
    u_int32_t arccdbsize = 0x30;

    memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
    arcmsr_cdb->Bus = 0;
    arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
    arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
    arcmsr_cdb->Function = 1;
    arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
    bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
    if(nseg != 0) {
        struct AdapterControlBlock *acb = srb->acb;
        bus_dmasync_op_t op;
        u_int32_t length, i, cdb_sgcount = 0;

        if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            op = BUS_DMASYNC_PREREAD;
        } else {
            op = BUS_DMASYNC_PREWRITE;
            arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
            srb->srb_flags |= SRB_FLAG_WRITE;
        }
        bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
        for(i=0; i < nseg; i++) {
            /* Get the physical address of the current data pointer */
            length = arcmsr_htole32(dm_segs[i].ds_len);
            address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
            address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
            if(address_hi == 0) {
                struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

                pdma_sg->address = address_lo;
                pdma_sg->length = length;
                psge += sizeof(struct SG32ENTRY);
                arccdbsize += sizeof(struct SG32ENTRY);
            } else {
                u_int32_t sg64s_size = 0, tmplength = length;

                while(1) {
                    u_int64_t span4G, length0;
                    struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

                    span4G = (u_int64_t)address_lo + tmplength;
                    pdma_sg->addresshigh = address_hi;
                    pdma_sg->address = address_lo;
                    if(span4G > 0x100000000) {
                        /*see if cross 4G boundary*/
                        length0 = 0x100000000-address_lo;
                        pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
                        address_hi = address_hi+1;
                        address_lo = 0;
                        tmplength = tmplength - (u_int32_t)length0;
                        sg64s_size += sizeof(struct SG64ENTRY);
                        psge += sizeof(struct SG64ENTRY);
                        cdb_sgcount++;
                    } else {
                        pdma_sg->length = tmplength | IS_SG64_ADDR;
                        sg64s_size += sizeof(struct SG64ENTRY);
                        psge += sizeof(struct SG64ENTRY);
                        break;
                    }
                }
                arccdbsize += sg64s_size;
            }
            cdb_sgcount++;
        }
        arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
        arcmsr_cdb->DataLength = pcsio->dxfer_len;
        if( arccdbsize > 256) {
            arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
        }
    } else {
        arcmsr_cdb->DataLength = 0;
    }
    srb->arc_cdb_size = arccdbsize;
    arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0);
}
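/*
** Worked example for the 4G split above (addresses assumed for
** illustration): a segment at address_lo = 0xFFFFF000, address_hi = 0,
** length = 0x2000 crosses the 4 GB line, since
** 0xFFFFF000 + 0x2000 = 0x100001000 > 0x100000000.  The loop emits one
** SG64 entry of length0 = 0x100000000 - 0xFFFFF000 = 0x1000 at
** (hi = 0, lo = 0xFFFFF000), then a second entry for the remaining
** 0x1000 bytes at (hi = 1, lo = 0).
*/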
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
{
    u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low;
    struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;

    bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
        (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
    atomic_add_int(&acb->srboutstandingcount, 1);
    srb->srb_state = ARCMSR_SRB_START;
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
                CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
            } else {
                CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low);
            }
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
            int ending_index, index;

            index = phbbmu->postq_index;
            ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
            phbbmu->post_qbuffer[ending_index] = 0;
            if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
                phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
            } else {
                phbbmu->post_qbuffer[index] = cdb_phyaddr_low;
            }
            index++;
            index %= ARCMSR_MAX_HBB_POSTQUEUE;	/*if last index number set it to 0 */
            phbbmu->postq_index = index;
            WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;

            arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
            ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1);
            cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
            if(cdb_phyaddr_hi32) {
                CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
                CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
            } else {
                CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
            }
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
            u_int16_t index_stripped;
            u_int16_t postq_index;
            struct InBound_SRB *pinbound_srb;

            ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock);
            postq_index = phbdmu->postq_index;
            pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF];
            pinbound_srb->addressHigh = srb->cdb_phyaddr_high;
            pinbound_srb->addressLow = srb->cdb_phyaddr_low;
            pinbound_srb->length = srb->arc_cdb_size >> 2;
            arcmsr_cdb->Context = srb->cdb_phyaddr_low;
            if (postq_index & 0x4000) {
                index_stripped = postq_index & 0xFF;
                index_stripped += 1;
                index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
                phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped;
            } else {
                index_stripped = postq_index;
                index_stripped += 1;
                index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE;
                phbdmu->postq_index = index_stripped ? index_stripped : (index_stripped | 0x4000);
            }
            CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index);
            ARCMSR_LOCK_RELEASE(&acb->postDone_lock);
        }
        break;
    }
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
{
    struct QBUFFER *qbuffer=NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer;
        }
        break;
    }
    return(qbuffer);
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
{
    struct QBUFFER *qbuffer = NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;

            qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer;
        }
        break;
    }
    return(qbuffer);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            /* let IOP know data has been read */
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

            /* let IOP know data has been read */
            WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            /* let IOP know data has been read */
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            /* let IOP know data has been read */
            CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
        }
        break;
    }
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            /*
            ** push inbound doorbell tell iop, driver data write ok
            ** and wait reply on next hwinterrupt for next Qbuffer post
            */
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

            /*
            ** push inbound doorbell tell iop, driver data write ok
            ** and wait reply on next hwinterrupt for next Qbuffer post
            */
            WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            /*
            ** push inbound doorbell tell iop, driver data write ok
            ** and wait reply on next hwinterrupt for next Qbuffer post
            */
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            /*
            ** push inbound doorbell tell iop, driver data write ok
            ** and wait reply on next hwinterrupt for next Qbuffer post
            */
            CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
        }
        break;
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
    if(!arcmsr_hba_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
            , acb->pci_unit);
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
    struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
    if(!arcmsr_hbb_wait_msgint_ready(acb)) {
        printf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
            , acb->pci_unit);
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    if(!arcmsr_hbc_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
    if(!arcmsr_hbd_wait_msgint_ready(acb)) {
        printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
            arcmsr_stop_hba_bgrb(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
            arcmsr_stop_hbb_bgrb(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
            arcmsr_stop_hbc_bgrb(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_D: {
            arcmsr_stop_hbd_bgrb(acb);
        }
        break;
    }
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_poll(struct cam_sim *psim)
{
    struct AdapterControlBlock *acb;
    int mutex;

    acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
    mutex = mtx_owned(&acb->isr_lock);
    if( mutex == 0 )
        ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
    arcmsr_interrupt(acb);
    if( mutex == 0 )
        ARCMSR_LOCK_RELEASE(&acb->isr_lock);
}
/*
**************************************************************************
**************************************************************************
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer)
{
    u_int8_t *pQbuffer;
    u_int8_t *buf1 = 0;
    u_int32_t *iop_data, *buf2 = 0;
    u_int32_t iop_len, data_len;

    iop_data = (u_int32_t *)prbuffer->data;
    iop_len = (u_int32_t)prbuffer->data_len;
    if ( iop_len > 0 ) {
        buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
        buf2 = (u_int32_t *)buf1;
        if( buf1 == NULL)
            return (0);
        data_len = iop_len;
        while(data_len >= 4) {
            *buf2++ = *iop_data++;
            data_len -= 4;
        }
        if(data_len)
            *buf2 = *iop_data;
        buf2 = (u_int32_t *)buf1;
    }
    while (iop_len > 0) {
        pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
        *pQbuffer = *buf1;
        acb->rqbuf_lastindex++;
        /* if last, index number set it to 0 */
        acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
        buf1++;
        iop_len--;
    }
    if(buf2)
        free( (u_int8_t *)buf2, M_DEVBUF);
    /* let IOP know data has been read */
    arcmsr_iop_message_read(acb);
    return (1);
}
/*
**************************************************************************
**************************************************************************
*/
static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer)
{
    u_int8_t *pQbuffer;
    u_int8_t *iop_data;
    u_int32_t iop_len;

    if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
        return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
    }
    iop_data = (u_int8_t *)prbuffer->data;
    iop_len = (u_int32_t)prbuffer->data_len;
    while (iop_len > 0) {
        pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
        *pQbuffer = *iop_data;
        acb->rqbuf_lastindex++;
        /* if last, index number set it to 0 */
        acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
        iop_data++;
        iop_len--;
    }
    /* let IOP know data has been read */
    arcmsr_iop_message_read(acb);
    return (1);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
    struct QBUFFER *prbuffer;
    int my_empty_len;

    /*check this iop data if overflow my rqbuffer*/
    ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
    prbuffer = arcmsr_get_iop_rqbuffer(acb);
    my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & (ARCMSR_MAX_QBUFFER-1);
    if(my_empty_len >= prbuffer->data_len) {
        if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
            acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
    } else {
        acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
    }
    ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
}
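/*
** Ring arithmetic above, with assumed example values: ARCMSR_MAX_QBUFFER
** must be a power of two for the mask to work (4096 is assumed here for
** illustration).  The free space in rqbuffer is
**     (lastindex - firstindex - 1) & (ARCMSR_MAX_QBUFFER - 1)
** e.g. firstindex = 10, lastindex = 10 (empty ring) gives
** (10 - 10 - 1) & 4095 = 4095 free bytes: one slot is always kept unused
** so that the full and empty states remain distinguishable.
*/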
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb)
{
    u_int8_t *pQbuffer;
    struct QBUFFER *pwbuffer;
    u_int8_t *buf1 = 0;
    u_int32_t *iop_data, *buf2 = 0;
    u_int32_t allxfer_len = 0, data_len;

    if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
        buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO);
        buf2 = (u_int32_t *)buf1;
        if( buf1 == NULL)
            return;
        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
        iop_data = (u_int32_t *)pwbuffer->data;
        while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
            && (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
            *buf1 = *pQbuffer;
            acb->wqbuf_firstindex++;
            acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
            buf1++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        data_len = allxfer_len;
        buf1 = (u_int8_t *)buf2;
        while(data_len >= 4) {
            *iop_data++ = *buf2++;
            data_len -= 4;
        }
        if(data_len)
            *iop_data = *buf2;
        free( buf1, M_DEVBUF);
        arcmsr_iop_message_wrote(acb);
    }
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
{
    u_int8_t *pQbuffer;
    struct QBUFFER *pwbuffer;
    u_int8_t *iop_data;
    int32_t allxfer_len=0;

    if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
        arcmsr_Write_data_2iop_wqbuffer_D(acb);
        return;
    }
    if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
        iop_data = (u_int8_t *)pwbuffer->data;
        while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
            && (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
            *iop_data = *pQbuffer;
            acb->wqbuf_firstindex++;
            acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
            iop_data++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        arcmsr_iop_message_wrote(acb);
    }
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
    ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
    acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
    /*
    *****************************************************************
    ** check if there are any mail packages from user space program
    ** in my post bag, now is the time to send them into Areca's firmware
    *****************************************************************
    */
    if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
        arcmsr_Write_data_2iop_wqbuffer(acb);
    }
    if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
    }
    ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
{
/*
    if (ccb->ccb_h.status != CAM_REQ_CMP)
        printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x,"
            "failure status=%x\n", ccb->ccb_h.target_id,
            ccb->ccb_h.target_lun, ccb->ccb_h.status);
    else
        printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
*/
    xpt_free_path(ccb->ccb_h.path);
    xpt_free_ccb(ccb);
}

static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
{
    struct cam_path *path;
    union ccb *ccb;

    if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL)
        return;
    if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) {
        xpt_free_ccb(ccb);
        return;
    }
/*  printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
    bzero(ccb, sizeof(union ccb));
    xpt_setup_ccb(&ccb->ccb_h, path, 5);
    ccb->ccb_h.func_code = XPT_SCAN_LUN;
    ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb;
    ccb->crcn.flags = CAM_FLAG_NONE;
    xpt_action(ccb);
}
static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
{
    struct CommandControlBlock *srb;
    u_int32_t intmask_org;
    int i;

    /* disable all outbound interrupts */
    intmask_org = arcmsr_disable_allintr(acb);
    for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
        srb = acb->psrb_pool[i];
        if (srb->srb_state == ARCMSR_SRB_START) {
            if((target == srb->pccb->ccb_h.target_id)
                && (lun == srb->pccb->ccb_h.target_lun)) {
                srb->srb_state = ARCMSR_SRB_ABORTED;
                srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
                arcmsr_srb_complete(srb, 1);
                printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
            }
        }
    }
    /* enable outbound Post Queue, outbound doorbell Interrupt */
    arcmsr_enable_allintr(acb, intmask_org);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_dr_handle(struct AdapterControlBlock *acb)
{
    u_int32_t devicemap;
    u_int32_t target, lun;
    u_int32_t deviceMapCurrent[4]={0};
    u_int8_t *pDevMap;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
            devicemap += 4;
        }
        break;
    case ACB_ADAPTER_TYPE_B:
        devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
            devicemap += 4;
        }
        break;
    case ACB_ADAPTER_TYPE_C:
        devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
            devicemap += 4;
        }
        break;
    case ACB_ADAPTER_TYPE_D:
        devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
            devicemap += 4;
        }
        break;
    }
    if(acb->acb_flags & ACB_F_BUS_HANG_ON) {
        acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
    }
    /*
    ** adapter posted CONFIG message
    ** copy the new map, note if there are differences with the current map
    */
    pDevMap = (u_int8_t *)&deviceMapCurrent[0];
    for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
        if (*pDevMap != acb->device_map[target]) {
            u_int8_t difference, bit_check;

            difference = *pDevMap ^ acb->device_map[target];
            for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
                bit_check = (1 << lun);	/*check bit from 0....31*/
                if(difference & bit_check) {
                    if(acb->device_map[target] & bit_check) {/* unit departed */
                        printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
                        arcmsr_abort_dr_ccbs(acb, target, lun);
                        arcmsr_rescan_lun(acb, target, lun);
                        acb->devstate[target][lun] = ARECA_RAID_GONE;
                    } else {/* unit arrived */
                        printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
                        arcmsr_rescan_lun(acb, target, lun);
                        acb->devstate[target][lun] = ARECA_RAID_GOOD;
                    }
                }
            }
/*          printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
            acb->device_map[target] = *pDevMap;
        }
        pDevMap++;
    }
}
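/*
** Illustration of the device-map diff above (example bytes assumed):
** old map byte 0x03 (LUN 0 and 1 present) vs. new map byte 0x05
** (LUN 0 and 2 present) gives
**     difference = 0x03 ^ 0x05 = 0x06
** bit 1 set with the old bit set -> LUN 1 departed (abort + rescan),
** bit 2 set with the new bit set -> LUN 2 arrived (rescan only).
*/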
CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* clear interrupts */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** We may need to check here whether wrqbuffer_lock is held ** DOORBELL: din! don! ** check if there is any mail we need to fetch from the firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** We may need to check here whether wrqbuffer_lock is held ** DOORBELL: din! don!
** check if there is any mail we need to fetch from the firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** We may need to check here whether wrqbuffer_lock is held ** DOORBELL: din! don! ** check if there is any mail we need to fetch from the firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) { if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */ } doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ?
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index = phbbmu->doneq_index; while((flag_srb = phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling = 0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); do { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); if (flag_srb == 0xFFFFFFFF) break; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); throttling++; if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); throttling = 0; } } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR); } /* ********************************************************************** ** ********************************************************************** */ static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu) { uint16_t doneq_index, index_stripped; doneq_index = phbdmu->doneq_index; if (doneq_index & 0x4000) { index_stripped = doneq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = doneq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } return (phbdmu->doneq_index); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int32_t outbound_write_pointer; u_int32_t addressLow; uint16_t doneq_index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) & ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0) return; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) { doneq_index = arcmsr_get_doneq_index(phbdmu); addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; } CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR); CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intStatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intStatus) { /*it must be a shared irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be a shared irq*/ return; } WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell); WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell &
ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); if(!host_interrupt_status) { /*it must be a shared irq*/ return; } do { /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; u_int32_t intmask_org; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable; if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) { /*it must be a shared irq*/ return; } /* disable outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) { arcmsr_hbd_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hbd_postqueue_isr(acb); } /* enable all outbound interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE); // CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_handle_hbd_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb = (struct
AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void *arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); } break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; case ACB_ADAPTER_TYPE_D: CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } } /* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb != NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } } /* *********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; u_int32_t retvalue = EINVAL; pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { /*copy READ QBUFFER to srb*/ pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; 
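/*
**************************************************************************
** Editor's illustrative sketch, not compiled (guarded by #if 0): the
** circular-queue free-space test used by the write path just below.
** Assuming ARCMSR_MAX_QBUFFER is a power of two, "& (size - 1)" is a
** cheap modulo, and one slot is always kept empty so that
** first == last can only mean "queue empty", never "queue full".
** qbuffer_free_space() is a hypothetical helper name.
**************************************************************************
*/
#if 0
static u_int32_t
qbuffer_free_space(u_int32_t first, u_int32_t last, u_int32_t size)
{
	/* size must be a power of two, e.g. ARCMSR_MAX_QBUFFER */
	return ((first - last - 1) & (size - 1));
}
/* e.g. with size 4096 and first == last, 4095 bytes are writable */
#endif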
if(wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } else { my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); if(my_empty_len >= user_len) { while(user_len > 0) { /*copy srb data to wqbuffer*/ pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpuserbuffer++; user_len--; } /*post first Qbuffer*/ if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } else { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; } } retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); /*signature, let IOP know data has been read */ } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |ACB_F_MESSAGE_RQBUFFER_CLEARED |ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof(struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_HELLO: { u_int8_t *hello_string = "Hello!
I am ARCMSR"; u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer; if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return ENOIOCTL; } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: { arcmsr_iop_parking(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { arcmsr_flush_adapter_cache(acb); retvalue = ARCMSR_MESSAGE_SUCCESS; } break; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); return (retvalue); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb) { struct AdapterControlBlock *acb; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); srb->srb_state = ARCMSR_SRB_DONE; srb->srb_flags = 0; acb->srbworkingQ[acb->workingsrb_doneindex] = srb; acb->workingsrb_doneindex++; acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; ARCMSR_LOCK_RELEASE(&acb->srb_lock); } /* ************************************************************************** ************************************************************************** */ struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb = NULL; u_int32_t workingsrb_startindex, workingsrb_doneindex; ARCMSR_LOCK_ACQUIRE(&acb->srb_lock); workingsrb_doneindex = acb->workingsrb_doneindex; workingsrb_startindex = acb->workingsrb_startindex; srb = acb->srbworkingQ[workingsrb_startindex]; workingsrb_startindex++; workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; if(workingsrb_doneindex != workingsrb_startindex) { acb->workingsrb_startindex = workingsrb_startindex; } else { srb = NULL; } ARCMSR_LOCK_RELEASE(&acb->srb_lock); return(srb); } /* ************************************************************************** ************************************************************************** */ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; int retvalue = 0, transfer_len = 0; char *buffer; u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; /* 4 bytes: Areca io control code */ if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { buffer = pccb->csio.data_ptr; transfer_len = pccb->csio.dxfer_len; } else { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; switch(controlcode) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; int32_t allxfer_len = 0; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; ptmpQbuffer++; allxfer_len++; } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = 
arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); /* on error, report sense data */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } } else { /* on error, report sense data */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
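/*
**************************************************************************
** Editor's illustrative sketch, not compiled (guarded by #if 0): the
** fixed-format sense data hand-built by the ARCMSR_MESSAGE_WRITE_WQBUFFER
** error paths above. Byte 0 is the valid bit (0x80) plus response code
** 0x70 (current error); sense key 0x05 is ILLEGAL REQUEST; byte 7 gives
** the additional sense length; ASC 0x20 is "invalid command operation
** code". fill_illegal_request_sense() is a hypothetical helper name.
**************************************************************************
*/
#if 0
static void
fill_illegal_request_sense(u_int8_t *sense)
{
	memset(sense, 0, 18); /* 18-byte fixed-format sense buffer */
	sense[0] = (0x1 << 7) | 0x70; /* Valid | current-error response code */
	sense[2] = 0x05;  /* sense key: ILLEGAL REQUEST */
	sense[7] = 0x0A;  /* additional sense length */
	sense[12] = 0x20; /* ASC: invalid command operation code */
}
#endif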
acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return (retvalue); } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb; union ccb *pccb; int target, lun; pccb = srb->pccb; target = pccb->ccb_h.target_id; lun = pccb->ccb_h.target_lun; acb->pktRequestCount++; if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun] == ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; cmd = pccb->csio.cdb_io.cdb_bytes[0]; block_cmd = cmd & 0x0f; if(block_cmd == 0x08 || block_cmd == 0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount >= acb->maxOutstanding) { if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0) { xpt_freeze_simq(acb->psim, 1); acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; } pccb->ccb_h.status &= ~CAM_SIM_QUEUED; pccb->ccb_h.status |= CAM_REQUEUE_REQ; arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset_sbt(&srb->ccb_callout, SBT_1MS * (pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)), 0, arcmsr_srb_timeout, srb, 0); srb->srb_flags |= SRB_FLAG_TIMER_START; } } /* ***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb 
*abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb = (struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; u_int32_t intmask_org; int i = 0; acb->num_aborts++; /* *************************************************************************** ** The upper layer issues the abort command and holds this lock just prior to calling us. ** First determine if we currently own this command. ** Start by searching the device queue. If it is not found ** at all, and the system wanted us to just abort the ** command, return success. *************************************************************************** */ if(acb->srboutstandingcount != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { if(srb->pccb == abortccb) { srb->srb_state = ARCMSR_SRB_ABORTED; printf("arcmsr%d:scsi id=%d lun=%jx abort srb '%p'" "outstanding command \n" , acb->pci_unit, abortccb->ccb_h.target_id , (uintmax_t)abortccb->ccb_h.target_lun, srb); arcmsr_polling_srbdone(acb, srb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); return (TRUE); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } return(FALSE); } /* **************************************************************************** **************************************************************************** */ static void arcmsr_bus_reset(struct AdapterControlBlock *acb) { int retry = 0; acb->num_resets++; acb->acb_flags |= ACB_F_BUS_RESET; while(acb->srboutstandingcount != 0 && retry < 400) { arcmsr_interrupt(acb); UDELAY(25000); retry++; } arcmsr_iop_reset(acb); acb->acb_flags &= ~ACB_F_BUS_RESET; } /* ************************************************************************** ************************************************************************** */ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb *pccb) { if (pccb->ccb_h.target_lun) { pccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(pccb); return; } pccb->ccb_h.status |= CAM_REQ_CMP; switch (pccb->csio.cdb_io.cdb_bytes[0]) { case INQUIRY: { unsigned char inqdata[36]; char *buffer = pccb->csio.data_ptr; inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ inqdata[3] = 0; inqdata[4] = 31; /* length of additional data */ inqdata[5] = 0; inqdata[6] = 0; inqdata[7] = 0; strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ strncpy(&inqdata[32], "R001", 4); /* Product Revision */ memcpy(buffer, inqdata, sizeof(inqdata)); xpt_done(pccb); } break; case WRITE_BUFFER: case READ_BUFFER: { if (arcmsr_iop_message_xfer(acb, pccb)) { pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } xpt_done(pccb); } break; default: xpt_done(pccb); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb) { struct AdapterControlBlock *acb; acb = (struct AdapterControlBlock *) cam_sim_softc(psim); if(acb == NULL) { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); return; } switch (pccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct
CommandControlBlock *srb; int target = pccb->ccb_h.target_id; int error; if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb = arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr = srb; pccb->ccb_h.arcmsr_ccbacb_ptr = acb; srb->pccb = pccb; error = bus_dmamap_load_ccb(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } case XPT_TARGET_IO: { /* target mode does not yet support vendor-specific commands. */ pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id = cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G) cpi->base_transfer_speed = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cpi->base_transfer_speed = 600000; else cpi->base_transfer_speed = 300000; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } cpi->protocol = PROTO_SCSI; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb = pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts = &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_sas *sas; scsi = &cts->proto_specific.scsi; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; scsi->valid = CTS_SCSI_VALID_TQ; cts->protocol = PROTO_SCSI; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cts->protocol_version = SCSI_REV_SPC2; cts->transport_version = 0; cts->transport =
XPORT_SAS; sas = &cts->xport_specific.sas; sas->valid = CTS_SAS_VALID_SPEED; if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G) sas->bitrate = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) sas->bitrate = 600000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G) sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport_version = 2; cts->transport = XPORT_SPI; spi = &cts->xport_specific.spi; spi->flags = CTS_SPI_FLAGS_DISC_ENB; if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) spi->sync_period = 1; else spi->sync_period = 2; spi->sync_offset = 32; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; } } #else { cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cts->sync_period = 1; else cts->sync_period = 2; cts->sync_offset = 32; cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } #if __FreeBSD_version >= 500000 cam_calc_geometry(&pccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; } #endif xpt_done(pccb); break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags |= ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0,
ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_start_hbd_bgrb(acb); break; } } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) == 0xFFFFFFFF) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ?
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index = phbbmu->doneq_index; if((flag_srb = phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'" "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'" "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int32_t outbound_write_pointer; u_int16_t error, doneq_index; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } doneq_index = arcmsr_get_doneq_index(phbdmu); flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ? 
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p'poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_polling_hbd_srbdone(acb, poll_srb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** 
********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait " "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/
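/*
**************************************************************************
** Editor's illustrative sketch, not compiled (guarded by #if 0): every
** arcmsr_get_hbX_config() variant ends the same way -- clamp the
** firmware-reported queue depth to the per-chip driver limit
** (ARCMSR_MAX_OUTSTANDING_CMD, ARCMSR_MAX_HBB_POSTQUEUE or
** ARCMSR_MAX_HBD_POSTQUEUE) and reserve one slot, i.e.
** maxOutstanding = min(firm_numbers_queue, driver_limit) - 1.
** clamp_outstanding() is a hypothetical helper name.
**************************************************************************
*/
#if 0
static int
clamp_outstanding(int firm_numbers_queue, int driver_limit)
{
	if (firm_numbers_queue > driver_limit)
		return (driver_limit - 1);
	return (firm_numbers_queue - 1);
}
#endif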
acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* 
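** The four arcmsr_get_hbX_config() routines above differ only in how the
** message unit is addressed: each reads the 8-byte firmware model, the
** 16-byte firmware version and the 16-byte device map, then clamps
** acb->maxOutstanding to one less than the smaller of the firmware-reported
** queue depth and the per-family post-queue limit.
** arcmsr_get_firmware_spec() below just dispatches on acb->adapter_type.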
********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_get_hbd_config(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; case ACB_ADAPTER_TYPE_D: { while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milli-seconds */ } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 
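			/*
			** The two dummy reads that follow are the usual idiom
			** for flushing posted PCI writes: reading back from the
			** same BAR guarantees the doorbell writes above have
			** reached the adapter before the driver continues.
			*/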
CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */ CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */ } break; case ACB_ADAPTER_TYPE_D: { /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; u_int32_t srb_phyaddr_lo32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr; srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index = 0; phbbmu->doneq_index = 0; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 
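			/*
			** msgcode_rwbuffer[1] carries the upper 32 bits of the
			** SRB pool base; this branch runs only when
			** srb_phyaddr_hi32 is non-zero, i.e. when the pool sits
			** above the 4GB boundary.
			*/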
CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; case ACB_ADAPTER_TYPE_D: { u_int32_t post_queue_phyaddr, done_queue_phyaddr; struct HBD_MessageUnit0 *phbdmu; phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->postq_index = 0; phbdmu->doneq_index = 0x40FF; post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, post_qbuffer); done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, done_qbuffer); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } break; } return (TRUE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb = arg; struct CommandControlBlock *srb_tmp; u_int32_t i; unsigned long srb_phyaddr = (unsigned long)segs->ds_addr; acb->srb_phyaddr.phyaddr = srb_phyaddr; srb_tmp = (struct CommandControlBlock *)acb->uncacheptr; for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) { 
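			/*
			** Map creation failed: latch ACB_F_MAPFREESRB_FAILD so
			** that arcmsr_initialize() can notice it after
			** bus_dmamap_load() returns and fail the attach cleanly.
			*/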
acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n", acb->pci_unit); return; } if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D)) { srb_tmp->cdb_phyaddr_low = srb_phyaddr; srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16); } else srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; srb_tmp->acb = acb; acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp; srb_phyaddr = srb_phyaddr + SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE); } acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_init(struct AdapterControlBlock *acb) { ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock"); ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock"); ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock"); ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock"); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb) { ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->postDone_lock); ARCMSR_LOCK_DESTROY(&acb->srb_lock); ARCMSR_LOCK_DESTROY(&acb->isr_lock); } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; u_int32_t vendor_dev_id; vendor_dev_id = pci_get_devid(dev); acb->vendor_device_id = vendor_dev_id; acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch (vendor_dev_id) { case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: { acb->adapter_type = ACB_ADAPTER_TYPE_C; if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883) acb->adapter_bus_speed = ACB_BUS_SPEED_12G; else acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1214: { acb->adapter_type = ACB_ADAPTER_TYPE_D; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0)); } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1203: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: 
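	/*
	** The remaining ARC11xx/12xx SATA and ARC1212/1222/13x0/16x0 SAS IDs
	** below all share the type A settings: message unit at BAR(0) and a
	** 3Gb/s adapter bus speed.
	*/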
case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type = ACB_ADAPTER_TYPE_A; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } #if __FreeBSD_version >= 700000 if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev), #else if(bus_dma_tag_create( /*PCI parent*/ NULL, #endif /*alignemnt*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->isr_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures.... 
Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command = pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster */ pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; u_long size; if (vendor_dev_id == PCIDevVenIDARC1203) size = sizeof(struct HBB_DOORBELL_1203); else size = sizeof(struct HBB_DOORBELL); for(i=0; i < 2; i++) { if(i == 0) { acb->sys_res_arcmsr[i] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, size, RF_ACTIVE); } else { acb->sys_res_arcmsr[i] = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i], 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i] == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: 
rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb = (struct CommandControlBlock *)acb->uncacheptr; acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1]; if (vendor_dev_id == PCIDevVenIDARC1203) { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask); } else { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask); } } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0 = PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu; u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBD_MessageUnit), RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE); phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* 
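** Every devstate[][] slot starts out as ARECA_RAID_GONE; the devmap
** polling callout set up in arcmsr_attach() is what later tracks which
** targets the firmware actually reports.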
******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0; i < ARCMSR_MAX_TARGETID; i++) { for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) { acb->devstate[i][j] = ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } arcmsr_mutex_init(acb); acb->pci_dev = dev; acb->pci_unit = unit; if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); arcmsr_mutex_destroy(acb); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid = 0; - irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); + irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); arcmsr_mutex_destroy(acb); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres = irqres; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus * start queue to reset to the idle loop. 
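	 * (The simq is sized from acb->maxOutstanding, which the per-family
	 * get_config routines derived from the firmware-reported queue depth.)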
* * Create device queue of SIM(s) * (MAX_START_JOB - 1) : * max_sim_transactions */ devq = cam_simq_alloc(acb->maxOutstanding); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = arcmsr_async; csa.callback_arg = acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->isr_lock); /* Create the control device. 
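	 * The node appears as /dev/arcmsr<unit>, with a /dev/arc<unit>
	 * alias added via make_dev_alias() just below.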
*/ acb->ioctl_dev = make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1 = acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; u_int16_t sub_device_id; static char buf[256]; char x_type[]={"unknown"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch(id = pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA 3G"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: if (sub_device_id == ARECA_SUB_DEV_ID_1883) type = "SAS 12G"; else type = "SAS 6G"; break; case PCIDevVenIDARC1214: case PCIDevVenIDARC1203: type = "SATA 6G"; break; default: type = x_type; raid6 = 0; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n", type, raid6 ? 
"(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount != 0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; ARCMSR_LOCK_RELEASE(&acb->isr_lock); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->isr_lock); arcmsr_mutex_destroy(acb); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: head/sys/dev/cy/cy_isa.c =================================================================== --- head/sys/dev/cy/cy_isa.c (revision 295789) +++ head/sys/dev/cy/cy_isa.c (revision 295790) @@ -1,150 +1,150 @@ /*- * cyclades cyclom-y serial driver * Andrew Herbert , 17 August 1993 * * Copyright (c) 1993 Andrew Herbert. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
The name Andrew Herbert may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL I BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Cyclades Y ISA serial interface driver */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include static int cy_isa_attach(device_t dev); static int cy_isa_probe(device_t dev); static device_method_t cy_isa_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, cy_isa_probe), DEVMETHOD(device_attach, cy_isa_attach), { 0, 0 } }; static driver_t cy_isa_driver = { cy_driver_name, cy_isa_methods, 0, }; DRIVER_MODULE(cy, isa, cy_isa_driver, cy_devclass, 0, 0); static int cy_isa_probe(device_t dev) { struct resource *mem_res; cy_addr iobase; int error, mem_rid; if (isa_get_logicalid(dev) != 0) /* skip PnP probes */ return (ENXIO); mem_rid = 0; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "ioport resource allocation failed\n"); return (ENXIO); } iobase = rman_get_virtual(mem_res); /* Cyclom-16Y hardware reset (Cyclom-8Ys don't care) */ cy_inb(iobase, CY16_RESET, 0); /* XXX? */ DELAY(500); /* wait for the board to get its act together */ /* this is needed to get the board out of reset */ cy_outb(iobase, CY_CLEAR_INTR, 0, 0); DELAY(500); error = (cy_units(iobase, 0) == 0 ? 
ENXIO : 0); bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res); return (error); } static int cy_isa_attach(device_t dev) { struct resource *irq_res, *mem_res; void *irq_cookie, *vaddr, *vsc; int irq_rid, mem_rid; irq_res = NULL; mem_res = NULL; mem_rid = 0; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "memory resource allocation failed\n"); goto fail; } vaddr = rman_get_virtual(mem_res); vsc = cyattach_common(vaddr, 0); if (vsc == NULL) { device_printf(dev, "no ports found!\n"); goto fail; } irq_rid = 0; - irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul, 0ul, + irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq_res == NULL) { device_printf(dev, "interrupt resource allocation failed\n"); goto fail; } if (bus_setup_intr(dev, irq_res, INTR_TYPE_TTY, cyintr, NULL, vsc, &irq_cookie) != 0) { device_printf(dev, "interrupt setup failed\n"); goto fail; } return (0); fail: if (irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res); if (mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res); return (ENXIO); } Index: head/sys/dev/cy/cy_pci.c =================================================================== --- head/sys/dev/cy/cy_pci.c (revision 295789) +++ head/sys/dev/cy/cy_pci.c (revision 295790) @@ -1,192 +1,192 @@ /*- * Copyright (c) 1996, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Cyclades Y PCI serial interface driver */ #include __FBSDID("$FreeBSD$"); #include "opt_cy_pci_fastintr.h" #include #include #include #include #include #include #include #include #include #define CY_PCI_BASE_ADDR0 0x10 #define CY_PCI_BASE_ADDR1 0x14 #define CY_PCI_BASE_ADDR2 0x18 #define CY_PLX_9050_ICS 0x4c #define CY_PLX_9060_ICS 0x68 #define CY_PLX_9050_ICS_IENABLE 0x040 #define CY_PLX_9050_ICS_LOCAL_IENABLE 0x001 #define CY_PLX_9050_ICS_LOCAL_IPOLARITY 0x002 #define CY_PLX_9060_ICS_IENABLE 0x100 #define CY_PLX_9060_ICS_LOCAL_IENABLE 0x800 /* Cyclom-Y Custom Register for PLX ID. 
*/ #define PLX_VER 0x3400 #define PLX_9050 0x0b #define PLX_9060 0x0c #define PLX_9080 0x0d static int cy_pci_attach(device_t dev); static int cy_pci_probe(device_t dev); static device_method_t cy_pci_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, cy_pci_probe), DEVMETHOD(device_attach, cy_pci_attach), { 0, 0 } }; static driver_t cy_pci_driver = { cy_driver_name, cy_pci_methods, 0, }; DRIVER_MODULE(cy, pci, cy_pci_driver, cy_devclass, 0, 0); MODULE_DEPEND(cy, pci, 1, 1, 1); static int cy_pci_probe(dev) device_t dev; { u_int32_t device_id; device_id = pci_get_devid(dev); device_id &= ~0x00060000; if (device_id != 0x0100120e && device_id != 0x0101120e) return (ENXIO); device_set_desc(dev, "Cyclades Cyclom-Y Serial Adapter"); return (BUS_PROBE_DEFAULT); } static int cy_pci_attach(dev) device_t dev; { struct resource *ioport_res, *irq_res, *mem_res; void *irq_cookie, *vaddr, *vsc; u_int32_t ioport; int irq_setup, ioport_rid, irq_rid, mem_rid; u_char plx_ver; ioport_res = NULL; irq_res = NULL; mem_res = NULL; ioport_rid = CY_PCI_BASE_ADDR1; - ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + ioport_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &ioport_rid, + RF_ACTIVE); if (ioport_res == NULL) { device_printf(dev, "ioport resource allocation failed\n"); goto fail; } ioport = rman_get_start(ioport_res); mem_rid = CY_PCI_BASE_ADDR2; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "memory resource allocation failed\n"); goto fail; } vaddr = rman_get_virtual(mem_res); vsc = cyattach_common(vaddr, 1); if (vsc == NULL) { device_printf(dev, "no ports found!\n"); goto fail; } irq_rid = 0; - irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul, 0ul, + irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq_res == NULL) { device_printf(dev, "interrupt resource allocation failed\n"); goto fail; } #ifdef CY_PCI_FASTINTR irq_setup = bus_setup_intr(dev, irq_res, INTR_TYPE_TTY, cyintr, NULL, vsc, &irq_cookie); #else irq_setup = ENXIO; #endif if (irq_setup != 0) irq_setup = bus_setup_intr(dev, irq_res, INTR_TYPE_TTY, NULL, (driver_intr_t *)cyintr, vsc, &irq_cookie); if (irq_setup != 0) { device_printf(dev, "interrupt setup failed\n"); goto fail; } /* * Enable the "local" interrupt input to generate a * PCI interrupt. */ plx_ver = *((u_char *)vaddr + PLX_VER) & 0x0f; switch (plx_ver) { case PLX_9050: outw(ioport + CY_PLX_9050_ICS, CY_PLX_9050_ICS_IENABLE | CY_PLX_9050_ICS_LOCAL_IENABLE | CY_PLX_9050_ICS_LOCAL_IPOLARITY); break; case PLX_9060: case PLX_9080: default: /* Old board, use PLX_9060 values. */ outw(ioport + CY_PLX_9060_ICS, inw(ioport + CY_PLX_9060_ICS) | CY_PLX_9060_ICS_IENABLE | CY_PLX_9060_ICS_LOCAL_IENABLE); break; } return (0); fail: if (ioport_res != NULL) bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res); if (irq_res != NULL) bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res); if (mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, mem_rid, mem_res); return (ENXIO); } Index: head/sys/dev/ed/if_ed_pccard.c =================================================================== --- head/sys/dev/ed/if_ed_pccard.c (revision 295789) +++ head/sys/dev/ed/if_ed_pccard.c (revision 295790) @@ -1,1251 +1,1251 @@ /*- * Copyright (c) 2005, M.
Warner Losh * Copyright (c) 1995, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Notes for adding media support. Each chipset is somewhat different * from the others. Linux has a table of OIDs that it uses to see what * supports the misc register of the NS83903. But a sampling of datasheets * I could dig up on cards I own paints a different picture. * * Chipset specific details: * NS 83903/902A paired * ccr base 0x1020 * id register at 0x1000: 7-3 = 0, 2-0 = 1. * (maybe this test is too week) * misc register at 0x018: * 6 WAIT_TOUTENABLE enable watchdog timeout * 3 AUI/TPI 1 AUX, 0 TPI * 2 loopback * 1 gdlink (tpi mode only) 1 tp good, 0 tp bad * 0 0-no mam, 1 mam connected * * NS83926 appears to be a NS pcmcia glue chip used on the IBM Ethernet II * and the NEC PC9801N-J12 ccr base 0x2000! * * winbond 289c926 * ccr base 0xfd0 * cfb (am 0xff2): * 0-1 PHY01 00 TPI, 01 10B2, 10 10B5, 11 TPI (reduced squ) * 2 LNKEN 0 - enable link and auto switch, 1 disable * 3 LNKSTS TPI + LNKEN=0 + link good == 1, else 0 * sr (am 0xff4) * 88 00 88 00 88 00, etc * * TMI tc3299a (cr PHY01 == 0) * ccr base 0x3f8 * cra (io 0xa) * crb (io 0xb) * 0-1 PHY01 00 auto, 01 res, 10 10B5, 11 TPI * 2 GDLINK 1 disable checking of link * 6 LINK 0 bad link, 1 good link * * EN5017A, EN5020 no data, but very popular * Other chips? * NetBSD supports RTL8019, but none have surfaced that I can see */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include "pccarddevs.h" /* * NE-2000 based PC Cards have a number of ways to get the MAC address. * Some cards encode this as a FUNCE. Others have this in the ROMs the * same way that ISA cards do. Some have it encoded in the attribute * memory somewhere that isn't in the CIS. Some new chipsets have it * in special registers in the ASIC part of the chip. * * For those cards that have the MAC adress stored in attribute memory * outside of a FUNCE entry in the CIS, nearly all of them have it at * a fixed offset (0xff0). 
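 * (Attribute memory is byte-wide at even addresses only, which is why
 * every attribute-memory read below steps by two bytes per octet of the
 * MAC address.)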
We use that offset as a source of last * resource if other offsets have failed. This is the address of the * National Semiconductor DP83903A, which is the only chip's datasheet * I've found. */ #define ED_DEFAULT_MAC_OFFSET 0xff0 static const struct ed_product { struct pccard_product prod; int flags; #define NE2000DVF_DL100XX 0x0001 /* chip is D-Link DL10019/22 */ #define NE2000DVF_AX88X90 0x0002 /* chip is ASIX AX88[17]90 */ #define NE2000DVF_TC5299J 0x0004 /* chip is Tamarack TC5299J */ #define NE2000DVF_TOSHIBA 0x0008 /* Toshiba DP83902A */ #define NE2000DVF_ENADDR 0x0100 /* Get MAC from attr mem */ #define NE2000DVF_ANYFUNC 0x0200 /* Allow any function type */ #define NE2000DVF_MODEM 0x0400 /* Has a modem/serial */ int enoff; } ed_pccard_products[] = { { PCMCIA_CARD(ACCTON, EN2212), 0}, { PCMCIA_CARD(ACCTON, EN2216), 0}, { PCMCIA_CARD(ALLIEDTELESIS, LA_PCM), 0}, { PCMCIA_CARD(AMBICOM, AMB8002), 0}, { PCMCIA_CARD(AMBICOM, AMB8002T), 0}, { PCMCIA_CARD(AMBICOM, AMB8010), 0}, { PCMCIA_CARD(AMBICOM, AMB8010_ALT), 0}, { PCMCIA_CARD(AMBICOM, AMB8610), 0}, { PCMCIA_CARD(BILLIONTON, CFLT10N), 0}, { PCMCIA_CARD(BILLIONTON, LNA100B), NE2000DVF_AX88X90}, { PCMCIA_CARD(BILLIONTON, LNT10TB), 0}, { PCMCIA_CARD(BILLIONTON, LNT10TN), 0}, { PCMCIA_CARD(BROMAX, AXNET), NE2000DVF_AX88X90}, { PCMCIA_CARD(BROMAX, IPORT), 0}, { PCMCIA_CARD(BROMAX, IPORT2), 0}, { PCMCIA_CARD(BUFFALO, LPC2_CLT), 0}, { PCMCIA_CARD(BUFFALO, LPC3_CLT), 0}, { PCMCIA_CARD(BUFFALO, LPC3_CLX), NE2000DVF_AX88X90}, { PCMCIA_CARD(BUFFALO, LPC4_TX), NE2000DVF_AX88X90}, { PCMCIA_CARD(BUFFALO, LPC4_CLX), NE2000DVF_AX88X90}, { PCMCIA_CARD(BUFFALO, LPC_CF_CLT), 0}, { PCMCIA_CARD(CNET, NE2000), 0}, { PCMCIA_CARD(COMPEX, AX88190), NE2000DVF_AX88X90}, { PCMCIA_CARD(COMPEX, LANMODEM), 0}, { PCMCIA_CARD(COMPEX, LINKPORT_ENET_B), 0}, { PCMCIA_CARD(COREGA, ETHER_II_PCC_T), 0}, { PCMCIA_CARD(COREGA, ETHER_II_PCC_TD), 0}, { PCMCIA_CARD(COREGA, ETHER_PCC_T), 0}, { PCMCIA_CARD(COREGA, ETHER_PCC_TD), 0}, { PCMCIA_CARD(COREGA, FAST_ETHER_PCC_TX), NE2000DVF_DL100XX}, { PCMCIA_CARD(COREGA, FETHER_PCC_TXD), NE2000DVF_AX88X90}, { PCMCIA_CARD(COREGA, FETHER_PCC_TXF), NE2000DVF_DL100XX}, { PCMCIA_CARD(COREGA, FETHER_II_PCC_TXD), NE2000DVF_AX88X90}, { PCMCIA_CARD(COREGA, LAPCCTXD), 0}, { PCMCIA_CARD(DAYNA, COMMUNICARD_E_1), 0}, { PCMCIA_CARD(DAYNA, COMMUNICARD_E_2), 0}, { PCMCIA_CARD(DLINK, DE650), NE2000DVF_ANYFUNC }, { PCMCIA_CARD(DLINK, DE660), 0 }, { PCMCIA_CARD(DLINK, DE660PLUS), 0}, { PCMCIA_CARD(DYNALINK, L10C), 0}, { PCMCIA_CARD(EDIMAX, EP4000A), 0}, { PCMCIA_CARD(EPSON, EEN10B), 0}, { PCMCIA_CARD(EXP, THINLANCOMBO), 0}, { PCMCIA_CARD(GLOBALVILLAGE, LANMODEM), 0}, { PCMCIA_CARD(GREY_CELL, TDK3000), 0}, { PCMCIA_CARD(GREY_CELL, DMF650TX), NE2000DVF_ANYFUNC | NE2000DVF_DL100XX | NE2000DVF_MODEM}, { PCMCIA_CARD(GVC, NIC_2000P), 0}, { PCMCIA_CARD(IBM, HOME_AND_AWAY), 0}, { PCMCIA_CARD(IBM, INFOMOVER), 0}, { PCMCIA_CARD(IODATA3, PCLAT), 0}, { PCMCIA_CARD(KINGSTON, CIO10T), 0}, { PCMCIA_CARD(KINGSTON, KNE2), 0}, { PCMCIA_CARD(LANTECH, FASTNETTX), NE2000DVF_AX88X90}, /* Same ID for many different cards, including generic NE2000 */ { PCMCIA_CARD(LINKSYS, COMBO_ECARD), NE2000DVF_DL100XX | NE2000DVF_AX88X90}, { PCMCIA_CARD(LINKSYS, ECARD_1), 0}, { PCMCIA_CARD(LINKSYS, ECARD_2), 0}, { PCMCIA_CARD(LINKSYS, ETHERFAST), NE2000DVF_DL100XX}, { PCMCIA_CARD(LINKSYS, TRUST_COMBO_ECARD), 0}, { PCMCIA_CARD(MACNICA, ME1_JEIDA), 0}, { PCMCIA_CARD(MAGICRAM, ETHER), 0}, { PCMCIA_CARD(MELCO, LPC3_CLX), NE2000DVF_AX88X90}, { PCMCIA_CARD(MELCO, LPC3_TX), NE2000DVF_AX88X90}, { 
PCMCIA_CARD(MELCO2, LPC2_T), 0}, { PCMCIA_CARD(MELCO2, LPC2_TX), 0}, { PCMCIA_CARD(MITSUBISHI, B8895), NE2000DVF_ANYFUNC}, /* NG */ { PCMCIA_CARD(MICRORESEARCH, MR10TPC), 0}, { PCMCIA_CARD(NDC, ND5100_E), 0}, { PCMCIA_CARD(NETGEAR, FA410TXC), NE2000DVF_DL100XX}, /* Same ID as DLINK DFE-670TXD. 670 has DL10022, fa411 has ax88790 */ { PCMCIA_CARD(NETGEAR, FA411), NE2000DVF_AX88X90 | NE2000DVF_DL100XX}, { PCMCIA_CARD(NEXTCOM, NEXTHAWK), 0}, { PCMCIA_CARD(NEWMEDIA, LANSURFER), NE2000DVF_ANYFUNC}, { PCMCIA_CARD(NEWMEDIA, LIVEWIRE), 0}, { PCMCIA_CARD(OEM2, 100BASE), NE2000DVF_AX88X90}, { PCMCIA_CARD(OEM2, ETHERNET), 0}, { PCMCIA_CARD(OEM2, FAST_ETHERNET), NE2000DVF_AX88X90}, { PCMCIA_CARD(OEM2, NE2000), 0}, { PCMCIA_CARD(PLANET, SMARTCOM2000), 0 }, { PCMCIA_CARD(PREMAX, PE200), 0}, { PCMCIA_CARD(PSION, LANGLOBAL), NE2000DVF_ANYFUNC | NE2000DVF_AX88X90 | NE2000DVF_MODEM}, { PCMCIA_CARD(RACORE, ETHERNET), 0}, { PCMCIA_CARD(RACORE, FASTENET), NE2000DVF_AX88X90}, { PCMCIA_CARD(RACORE, 8041TX), NE2000DVF_AX88X90 | NE2000DVF_TC5299J}, { PCMCIA_CARD(RELIA, COMBO), 0}, { PCMCIA_CARD(RIOS, PCCARD3), 0}, { PCMCIA_CARD(RPTI, EP400), 0}, { PCMCIA_CARD(RPTI, EP401), 0}, { PCMCIA_CARD(SMC, EZCARD), 0}, { PCMCIA_CARD(SOCKET, EA_ETHER), 0}, { PCMCIA_CARD(SOCKET, ES_1000), 0}, { PCMCIA_CARD(SOCKET, LP_ETHER), 0}, { PCMCIA_CARD(SOCKET, LP_ETHER_CF), 0}, { PCMCIA_CARD(SOCKET, LP_ETH_10_100_CF), NE2000DVF_DL100XX}, { PCMCIA_CARD(SVEC, COMBOCARD), 0}, { PCMCIA_CARD(SVEC, LANCARD), 0}, { PCMCIA_CARD(TAMARACK, ETHERNET), 0}, { PCMCIA_CARD(TDK, CFE_10), 0}, { PCMCIA_CARD(TDK, LAK_CD031), 0}, { PCMCIA_CARD(TDK, DFL5610WS), 0}, { PCMCIA_CARD(TELECOMDEVICE, LM5LT), 0 }, { PCMCIA_CARD(TELECOMDEVICE, TCD_HPC100), NE2000DVF_AX88X90}, { PCMCIA_CARD(TJ, PTJ_LAN_T), 0 }, { PCMCIA_CARD(TOSHIBA2, LANCT00A), NE2000DVF_ANYFUNC | NE2000DVF_TOSHIBA}, { PCMCIA_CARD(ZONET, ZEN), 0}, { { NULL } } }; /* * MII bit-bang glue */ static uint32_t ed_pccard_dl100xx_mii_bitbang_read(device_t dev); static void ed_pccard_dl100xx_mii_bitbang_write(device_t dev, uint32_t val); static const struct mii_bitbang_ops ed_pccard_dl100xx_mii_bitbang_ops = { ed_pccard_dl100xx_mii_bitbang_read, ed_pccard_dl100xx_mii_bitbang_write, { ED_DL100XX_MII_DATAOUT, /* MII_BIT_MDO */ ED_DL100XX_MII_DATAIN, /* MII_BIT_MDI */ ED_DL100XX_MII_CLK, /* MII_BIT_MDC */ ED_DL100XX_MII_DIROUT, /* MII_BIT_DIR_HOST_PHY */ 0 /* MII_BIT_DIR_PHY_HOST */ } }; static uint32_t ed_pccard_ax88x90_mii_bitbang_read(device_t dev); static void ed_pccard_ax88x90_mii_bitbang_write(device_t dev, uint32_t val); static const struct mii_bitbang_ops ed_pccard_ax88x90_mii_bitbang_ops = { ed_pccard_ax88x90_mii_bitbang_read, ed_pccard_ax88x90_mii_bitbang_write, { ED_AX88X90_MII_DATAOUT, /* MII_BIT_MDO */ ED_AX88X90_MII_DATAIN, /* MII_BIT_MDI */ ED_AX88X90_MII_CLK, /* MII_BIT_MDC */ 0, /* MII_BIT_DIR_HOST_PHY */ ED_AX88X90_MII_DIRIN /* MII_BIT_DIR_PHY_HOST */ } }; static uint32_t ed_pccard_tc5299j_mii_bitbang_read(device_t dev); static void ed_pccard_tc5299j_mii_bitbang_write(device_t dev, uint32_t val); static const struct mii_bitbang_ops ed_pccard_tc5299j_mii_bitbang_ops = { ed_pccard_tc5299j_mii_bitbang_read, ed_pccard_tc5299j_mii_bitbang_write, { ED_TC5299J_MII_DATAOUT, /* MII_BIT_MDO */ ED_TC5299J_MII_DATAIN, /* MII_BIT_MDI */ ED_TC5299J_MII_CLK, /* MII_BIT_MDC */ 0, /* MII_BIT_DIR_HOST_PHY */ ED_AX88X90_MII_DIRIN /* MII_BIT_DIR_PHY_HOST */ } }; /* * PC Card (PCMCIA) specific code. 
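 * ed_pccard_probe() matches the product table above; ed_pccard_attach()
 * then identifies the chipset and hunts for the MAC address as described
 * in the comments further below.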
*/ static int ed_pccard_probe(device_t); static int ed_pccard_attach(device_t); static void ed_pccard_tick(struct ed_softc *); static int ed_pccard_dl100xx(device_t dev, const struct ed_product *); static void ed_pccard_dl100xx_mii_reset(struct ed_softc *sc); static int ed_pccard_ax88x90(device_t dev, const struct ed_product *); static int ed_miibus_readreg(device_t dev, int phy, int reg); static int ed_ifmedia_upd(struct ifnet *); static void ed_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int ed_pccard_tc5299j(device_t dev, const struct ed_product *); static void ed_pccard_print_entry(const struct ed_product *pp) { int i; printf("Product entry: "); if (pp->prod.pp_name) printf("name='%s',", pp->prod.pp_name); printf("vendor=%#x,product=%#x", pp->prod.pp_vendor, pp->prod.pp_product); for (i = 0; i < 4; i++) if (pp->prod.pp_cis[i]) printf(",CIS%d='%s'", i, pp->prod.pp_cis[i]); printf("\n"); } static int ed_pccard_probe(device_t dev) { const struct ed_product *pp, *pp2; int error, first = 1; uint32_t fcn = PCCARD_FUNCTION_UNSPEC; /* Make sure we're a network function */ error = pccard_get_function(dev, &fcn); if (error != 0) return (error); if ((pp = (const struct ed_product *) pccard_product_lookup(dev, (const struct pccard_product *) ed_pccard_products, sizeof(ed_pccard_products[0]), NULL)) != NULL) { if (pp->prod.pp_name != NULL) device_set_desc(dev, pp->prod.pp_name); /* * Some devices don't ID themselves as network, but * that's OK if the flags say so. */ if (!(pp->flags & NE2000DVF_ANYFUNC) && fcn != PCCARD_FUNCTION_NETWORK) return (ENXIO); /* * Some devices match multiple entries. Report that * as a warning to help cull the table */ pp2 = pp; while ((pp2 = (const struct ed_product *)pccard_product_lookup( dev, (const struct pccard_product *)(pp2 + 1), sizeof(ed_pccard_products[0]), NULL)) != NULL) { if (first) { device_printf(dev, "Warning: card matches multiple entries. Report to imp@freebsd.org\n"); ed_pccard_print_entry(pp); first = 0; } ed_pccard_print_entry(pp2); } return (0); } return (ENXIO); } static int ed_pccard_rom_mac(device_t dev, uint8_t *enaddr) { struct ed_softc *sc = device_get_softc(dev); uint8_t romdata[32], sum; int i; /* * Read in the rom data at location 0. Since there are no * NE-1000 based PC Card devices, we'll assume we're 16-bit. * * In researching what format this takes, I've found that the * following appears to be true for multiple cards based on * observation as well as datasheet digging. * * Data is stored in some ROM and is copied out 8 bits at a time * into 16-bit wide locations. This means that the odd locations * of the ROM are not used (and can be either 0 or ff). * * The contents appears to be as follows: * PROM RAM * Offset Offset What * 0 0 ENETADDR 0 * 1 2 ENETADDR 1 * 2 4 ENETADDR 2 * 3 6 ENETADDR 3 * 4 8 ENETADDR 4 * 5 10 ENETADDR 5 * 6-13 12-26 Reserved (varies by manufacturer) * 14 28 0x57 * 15 30 0x57 * * Some manufacturers have another image of enetaddr from * PROM offset 0x10 to 0x15 with 0x42 in 0x1e and 0x1f, but * this doesn't appear to be universally documented in the * datasheets. Some manufactuers have a card type, card config * checksums, etc encoded into PROM offset 6-13, but deciphering it * requires more knowledge about the exact underlying chipset than * we possess (and maybe can possess). 
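	 * In short: enaddr[i] lives at romdata[i * 2], and the card is only
	 * accepted when both signature bytes (RAM offsets 28 and 30) read
	 * 0x57, which is exactly what the code below checks.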
*/ ed_pio_readmem(sc, 0, romdata, 32); if (bootverbose) device_printf(dev, "ROM DATA: %32D\n", romdata, " "); if (romdata[28] != 0x57 || romdata[30] != 0x57) return (0); for (i = 0, sum = 0; i < ETHER_ADDR_LEN; i++) sum |= romdata[i * 2]; if (sum == 0) return (0); for (i = 0; i < ETHER_ADDR_LEN; i++) enaddr[i] = romdata[i * 2]; return (1); } static int ed_pccard_add_modem(device_t dev) { device_printf(dev, "Need to write this code\n"); return 0; } static int ed_pccard_kick_phy(struct ed_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; mii = device_get_softc(sc->miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); return (mii_mediachg(mii)); } static int ed_pccard_media_ioctl(struct ed_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->miibus == NULL) return (EINVAL); mii = device_get_softc(sc->miibus); return (ifmedia_ioctl(sc->ifp, ifr, &mii->mii_media, command)); } static void ed_pccard_mediachg(struct ed_softc *sc) { struct mii_data *mii; if (sc->miibus == NULL) return; mii = device_get_softc(sc->miibus); mii_mediachg(mii); } static int ed_pccard_attach(device_t dev) { u_char sum; u_char enaddr[ETHER_ADDR_LEN]; const struct ed_product *pp; int error, i, flags, port_rid, modem_rid; struct ed_softc *sc = device_get_softc(dev); u_long size; static uint16_t *intr_vals[] = {NULL, NULL}; sc->dev = dev; if ((pp = (const struct ed_product *) pccard_product_lookup(dev, (const struct pccard_product *) ed_pccard_products, sizeof(ed_pccard_products[0]), NULL)) == NULL) { printf("Can't find\n"); return (ENXIO); } modem_rid = port_rid = -1; if (pp->flags & NE2000DVF_MODEM) { for (i = 0; i < 4; i++) { size = bus_get_resource_count(dev, SYS_RES_IOPORT, i); if (size == ED_NOVELL_IO_PORTS) port_rid = i; else if (size == 8) modem_rid = i; } if (port_rid == -1) { device_printf(dev, "Cannot locate my ports!\n"); return (ENXIO); } } else { port_rid = 0; } /* Allocate the port resource during setup. */ error = ed_alloc_port(dev, port_rid, ED_NOVELL_IO_PORTS); if (error) { printf("alloc_port failed\n"); return (error); } if (rman_get_size(sc->port_res) == ED_NOVELL_IO_PORTS / 2) { port_rid++; - sc->port_res2 = bus_alloc_resource(dev, SYS_RES_IOPORT, - &port_rid, 0ul, ~0ul, 1, RF_ACTIVE); + sc->port_res2 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, + &port_rid, RF_ACTIVE); if (sc->port_res2 == NULL || rman_get_size(sc->port_res2) != ED_NOVELL_IO_PORTS / 2) { error = ENXIO; goto bad; } } error = ed_alloc_irq(dev, 0, 0); if (error) goto bad; /* * Determine which chipset we are. Almost all the PC Card chipsets * have the Novell ASIC and NIC offsets. There are 2 known cards that * follow the WD80x3 conventions, which are handled as a special case. */ sc->asic_offset = ED_NOVELL_ASIC_OFFSET; sc->nic_offset = ED_NOVELL_NIC_OFFSET; error = ENXIO; flags = device_get_flags(dev); if (error != 0) error = ed_pccard_dl100xx(dev, pp); if (error != 0) error = ed_pccard_ax88x90(dev, pp); if (error != 0) error = ed_pccard_tc5299j(dev, pp); if (error != 0) { error = ed_probe_Novell_generic(dev, flags); printf("Novell generic probe failed: %d\n", error); } if (error != 0 && (pp->flags & NE2000DVF_TOSHIBA)) { flags |= ED_FLAGS_TOSH_ETHER; flags |= ED_FLAGS_PCCARD; sc->asic_offset = ED_WD_ASIC_OFFSET; sc->nic_offset = ED_WD_NIC_OFFSET; error = ed_probe_WD80x3_generic(dev, flags, intr_vals); } if (error) goto bad; /* * There are several ways to get the MAC address for the card. * Some of the above probe routines can fill in the enaddr.
If * not, we run through a number of 'well known' locations: * (1) From the PC Card FUNCE * (2) From offset 0 in the shared memory * (3) From a hinted offset in attribute memory * (4) From 0xff0 in attribute memory * If we can't get a non-zero MAC address from this list, we fail. */ for (i = 0, sum = 0; i < ETHER_ADDR_LEN; i++) sum |= sc->enaddr[i]; if (sum == 0) { pccard_get_ether(dev, enaddr); if (bootverbose) device_printf(dev, "CIS MAC %6D\n", enaddr, ":"); for (i = 0, sum = 0; i < ETHER_ADDR_LEN; i++) sum |= enaddr[i]; if (sum == 0 && ed_pccard_rom_mac(dev, enaddr)) { if (bootverbose) device_printf(dev, "ROM mac %6D\n", enaddr, ":"); sum++; } if (sum == 0 && pp->flags & NE2000DVF_ENADDR) { for (i = 0; i < ETHER_ADDR_LEN; i++) { pccard_attr_read_1(dev, pp->enoff + i * 2, enaddr + i); sum |= enaddr[i]; } if (bootverbose) device_printf(dev, "Hint %x MAC %6D\n", pp->enoff, enaddr, ":"); } if (sum == 0) { for (i = 0; i < ETHER_ADDR_LEN; i++) { pccard_attr_read_1(dev, ED_DEFAULT_MAC_OFFSET + i * 2, enaddr + i); sum |= enaddr[i]; } if (bootverbose) device_printf(dev, "Fallback MAC %6D\n", enaddr, ":"); } if (sum == 0) { device_printf(dev, "Cannot extract MAC address.\n"); ed_release_resources(dev); return (ENXIO); } bcopy(enaddr, sc->enaddr, ETHER_ADDR_LEN); } error = ed_attach(dev); if (error) goto bad; if (sc->chip_type == ED_CHIP_TYPE_DL10019 || sc->chip_type == ED_CHIP_TYPE_DL10022) { /* Try to attach an MII bus, but ignore errors. */ ed_pccard_dl100xx_mii_reset(sc); (void)mii_attach(dev, &sc->miibus, sc->ifp, ed_ifmedia_upd, ed_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG); } else if (sc->chip_type == ED_CHIP_TYPE_AX88190 || sc->chip_type == ED_CHIP_TYPE_AX88790 || sc->chip_type == ED_CHIP_TYPE_TC5299J) { error = mii_attach(dev, &sc->miibus, sc->ifp, ed_ifmedia_upd, ed_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto bad; } } if (sc->miibus != NULL) { sc->sc_tick = ed_pccard_tick; sc->sc_mediachg = ed_pccard_mediachg; sc->sc_media_ioctl = ed_pccard_media_ioctl; ed_pccard_kick_phy(sc); } else { ed_gen_ifmedia_init(sc); } if (modem_rid != -1) ed_pccard_add_modem(dev); error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, edintr, sc, &sc->irq_handle); if (error) { device_printf(dev, "setup intr failed %d \n", error); goto bad; } return (0); bad: ed_detach(dev); return (error); } /* * Probe the Ethernet MAC address for PCMCIA Linksys EtherFast 10/100 * and compatible cards (DL10019C Ethernet controller).
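* * A sketch of the ID check performed below (an illustrative restatement of the register map in the function body, not a datasheet quote): ASIC registers 0x04-0x09 hold PAR0-PAR5, 0x0a the card ID, 0x0b a checksum byte, and the eight registers must sum to 0xff: * * for (sum = 0, i = 0x04; i < 0x0c; i++) * sum += ed_asic_inb(sc, i); * if (sum != 0xff) * return (ENXIO); (not a DL10019C)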
*/ static int ed_pccard_dl100xx(device_t dev, const struct ed_product *pp) { struct ed_softc *sc = device_get_softc(dev); u_char sum; uint8_t id; u_int memsize; int i, error; if (!(pp->flags & NE2000DVF_DL100XX)) return (ENXIO); if (bootverbose) device_printf(dev, "Trying DL100xx\n"); error = ed_probe_Novell_generic(dev, device_get_flags(dev)); if (bootverbose && error) device_printf(dev, "Novell generic probe failed: %d\n", error); if (error != 0) return (error); /* * Linksys registers (offset from ASIC base) * * 0x04-0x09 : Physical Address Register 0-5 (PAR0-PAR5) * 0x0A : Card ID Register (CIR) * 0x0B : Check Sum Register (SR) */ for (sum = 0, i = 0x04; i < 0x0c; i++) sum += ed_asic_inb(sc, i); if (sum != 0xff) { if (bootverbose) device_printf(dev, "Bad checksum %#x\n", sum); return (ENXIO); /* invalid DL10019C */ } if (bootverbose) device_printf(dev, "CIR is %d\n", ed_asic_inb(sc, 0xa)); for (i = 0; i < ETHER_ADDR_LEN; i++) sc->enaddr[i] = ed_asic_inb(sc, 0x04 + i); ed_nic_outb(sc, ED_P0_DCR, ED_DCR_WTS | ED_DCR_FT1 | ED_DCR_LS); id = ed_asic_inb(sc, 0xf); sc->isa16bit = 1; /* * Hard code values based on the datasheet. We're an NE-2000 compatible * NIC with 24kb of packet memory starting at 24k offset. These * cards also work with 16k at 16k, but don't work with 24k at 16k * or 32k at 16k. */ sc->type = ED_TYPE_NE2000; sc->mem_start = 24 * 1024; memsize = sc->mem_size = 24 * 1024; sc->mem_end = sc->mem_start + memsize; sc->tx_page_start = memsize / ED_PAGE_SIZE; sc->txb_cnt = 3; sc->rec_page_start = sc->tx_page_start + sc->txb_cnt * ED_TXBUF_SIZE; sc->rec_page_stop = sc->tx_page_start + memsize / ED_PAGE_SIZE; sc->mem_ring = sc->mem_start + sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE; ed_nic_outb(sc, ED_P0_PSTART, sc->mem_start / ED_PAGE_SIZE); ed_nic_outb(sc, ED_P0_PSTOP, sc->mem_end / ED_PAGE_SIZE); sc->vendor = ED_VENDOR_NOVELL; sc->chip_type = (id & 0x90) == 0x90 ? ED_CHIP_TYPE_DL10022 : ED_CHIP_TYPE_DL10019; sc->type_str = ((id & 0x90) == 0x90) ?
"DL10022" : "DL10019"; sc->mii_bitbang_ops = &ed_pccard_dl100xx_mii_bitbang_ops; return (0); } /* MII bit-twiddling routines for cards using Dlink chipset */ static void ed_pccard_dl100xx_mii_reset(struct ed_softc *sc) { if (sc->chip_type != ED_CHIP_TYPE_DL10022) return; ed_asic_outb(sc, ED_DL100XX_MIIBUS, ED_DL10022_MII_RESET2); DELAY(10); ed_asic_outb(sc, ED_DL100XX_MIIBUS, ED_DL10022_MII_RESET2 | ED_DL10022_MII_RESET1); DELAY(10); ed_asic_outb(sc, ED_DL100XX_MIIBUS, ED_DL10022_MII_RESET2); DELAY(10); ed_asic_outb(sc, ED_DL100XX_MIIBUS, ED_DL10022_MII_RESET2 | ED_DL10022_MII_RESET1); DELAY(10); ed_asic_outb(sc, ED_DL100XX_MIIBUS, 0); } static void ed_pccard_dl100xx_mii_bitbang_write(device_t dev, uint32_t val) { struct ed_softc *sc; sc = device_get_softc(dev); ed_asic_outb(sc, ED_DL100XX_MIIBUS, val); ed_asic_barrier(sc, ED_DL100XX_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static uint32_t ed_pccard_dl100xx_mii_bitbang_read(device_t dev) { struct ed_softc *sc; uint32_t val; sc = device_get_softc(dev); val = ed_asic_inb(sc, ED_DL100XX_MIIBUS); ed_asic_barrier(sc, ED_DL100XX_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static void ed_pccard_ax88x90_reset(struct ed_softc *sc) { int i; /* Reset Card */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STP | ED_CR_PAGE_0); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); ed_asic_outb(sc, ED_NOVELL_RESET, ed_asic_inb(sc, ED_NOVELL_RESET)); /* Wait for the RST bit to assert, but cap it at 10ms */ for (i = 10000; !(ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) && i > 0; i--) continue; ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RST); /* ACK INTR */ if (i == 0) device_printf(sc->dev, "Reset didn't finish\n"); } /* * Probe and vendor-specific initialization routine for ax88x90 boards */ static int ed_probe_ax88x90_generic(device_t dev, int flags) { struct ed_softc *sc = device_get_softc(dev); u_int memsize; static char test_pattern[32] = "THIS is A memory TEST pattern"; char test_buffer[32]; ed_pccard_ax88x90_reset(sc); DELAY(10*1000); /* Make sure that we really have an 8390 based board */ if (!ed_probe_generic8390(sc)) return (ENXIO); sc->vendor = ED_VENDOR_NOVELL; sc->mem_shared = 0; sc->cr_proto = ED_CR_RD2; /* * This prevents packets from being stored in the NIC memory when the * readmem routine turns on the start bit in the CR. We write some * bytes in word mode and verify we can read them back. If we can't * then we don't have an AX88x90 chip here. */ sc->isa16bit = 1; ed_nic_outb(sc, ED_P0_RCR, ED_RCR_MON); ed_nic_outb(sc, ED_P0_DCR, ED_DCR_WTS | ED_DCR_FT1 | ED_DCR_LS); ed_pio_writemem(sc, test_pattern, 16384, sizeof(test_pattern)); ed_pio_readmem(sc, 16384, test_buffer, sizeof(test_pattern)); if (bcmp(test_pattern, test_buffer, sizeof(test_pattern)) != 0) return (ENXIO); /* * Hard code values based on the datasheet. We're NE-2000 compatible * NIC with 16kb of packet memory starting at 16k offset. */ sc->type = ED_TYPE_NE2000; memsize = sc->mem_size = 16*1024; sc->mem_start = 16 * 1024; if (ed_asic_inb(sc, ED_AX88X90_TEST) != 0) sc->chip_type = ED_CHIP_TYPE_AX88790; else { sc->chip_type = ED_CHIP_TYPE_AX88190; /* * The AX88190 (not A) has external 64k SRAM. Probe for this * here. Most of the cards I have either use the AX88190A * part, or have only 32k SRAM for some reason, so I don't * know if this works or not. 
*/ ed_pio_writemem(sc, test_pattern, 32768, sizeof(test_pattern)); ed_pio_readmem(sc, 32768, test_buffer, sizeof(test_pattern)); if (bcmp(test_pattern, test_buffer, sizeof(test_pattern)) == 0) { sc->mem_start = 2*1024; memsize = sc->mem_size = 62 * 1024; } } sc->mem_end = sc->mem_start + memsize; sc->tx_page_start = memsize / ED_PAGE_SIZE; if (sc->mem_size > 16 * 1024) sc->txb_cnt = 3; else sc->txb_cnt = 2; sc->rec_page_start = sc->tx_page_start + sc->txb_cnt * ED_TXBUF_SIZE; sc->rec_page_stop = sc->tx_page_start + memsize / ED_PAGE_SIZE; sc->mem_ring = sc->mem_start + sc->txb_cnt * ED_PAGE_SIZE * ED_TXBUF_SIZE; ed_nic_outb(sc, ED_P0_PSTART, sc->mem_start / ED_PAGE_SIZE); ed_nic_outb(sc, ED_P0_PSTOP, sc->mem_end / ED_PAGE_SIZE); /* Get the mac before we go -- It's just at 0x400 in "SRAM" */ ed_pio_readmem(sc, 0x400, sc->enaddr, ETHER_ADDR_LEN); /* clear any pending interrupts that might have occurred above */ ed_nic_outb(sc, ED_P0_ISR, 0xff); sc->sc_write_mbufs = ed_pio_write_mbufs; return (0); } static int ed_pccard_ax88x90_check_mii(device_t dev, struct ed_softc *sc) { int i, id; /* * All AX88x90 devices have MII and a PHY, so we use this to weed out * chips that would otherwise make it through the tests we have after * this point. */ for (i = 0; i < 32; i++) { id = ed_miibus_readreg(dev, i, MII_BMSR); if (id != 0 && id != 0xffff) break; } /* * Found one, we're good. */ if (i != 32) return (0); /* * Didn't find anything, so try to power up and try again. The PHY * may not be responding because we're in power down mode. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190) return (ENXIO); pccard_ccr_write_1(dev, PCCARD_CCR_STATUS, PCCARD_CCR_STATUS_PWRDWN); for (i = 0; i < 32; i++) { id = ed_miibus_readreg(dev, i, MII_BMSR); if (id != 0 && id != 0xffff) break; } /* * Still no joy? We're AFU, punt. */ if (i == 32) return (ENXIO); return (0); } /* * Special setup for AX88[17]90 */ static int ed_pccard_ax88x90(device_t dev, const struct ed_product *pp) { int error; int iobase; struct ed_softc *sc = device_get_softc(dev); if (!(pp->flags & NE2000DVF_AX88X90)) return (ENXIO); if (bootverbose) device_printf(dev, "Checking AX88x90\n"); /* * Set the IOBASE Register. The AX88x90 cards are potentially * multifunction cards, and thus require a slight workaround. * We write the address the card is at, on the off chance that this * card is not MFC. * XXX I'm not sure that this is still needed...
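* * An illustrative note (editorial reading of the PC Card model, not from this driver's history): on a multifunction card the per-function CCR IOBASE registers tell the card which I/O window the function decodes, while a single-function card simply ignores them, so re-writing the address we already own should be harmless. The workaround is just the two CCR writes that follow: * * pccard_ccr_write_1(dev, PCCARD_CCR_IOBASE0, iobase & 0xff); * pccard_ccr_write_1(dev, PCCARD_CCR_IOBASE1, (iobase >> 8) & 0xff);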
*/ iobase = rman_get_start(sc->port_res); pccard_ccr_write_1(dev, PCCARD_CCR_IOBASE0, iobase & 0xff); pccard_ccr_write_1(dev, PCCARD_CCR_IOBASE1, (iobase >> 8) & 0xff); error = ed_probe_ax88x90_generic(dev, device_get_flags(dev)); if (error) { if (bootverbose) device_printf(dev, "probe ax88x90 failed %d\n", error); return (error); } sc->mii_bitbang_ops = &ed_pccard_ax88x90_mii_bitbang_ops; error = ed_pccard_ax88x90_check_mii(dev, sc); if (error) return (error); sc->vendor = ED_VENDOR_NOVELL; sc->type = ED_TYPE_NE2000; if (sc->chip_type == ED_CHIP_TYPE_AX88190) sc->type_str = "AX88190"; else sc->type_str = "AX88790"; return (0); } static void ed_pccard_ax88x90_mii_bitbang_write(device_t dev, uint32_t val) { struct ed_softc *sc; sc = device_get_softc(dev); ed_asic_outb(sc, ED_AX88X90_MIIBUS, val); ed_asic_barrier(sc, ED_AX88X90_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static uint32_t ed_pccard_ax88x90_mii_bitbang_read(device_t dev) { struct ed_softc *sc; uint32_t val; sc = device_get_softc(dev); val = ed_asic_inb(sc, ED_AX88X90_MIIBUS); ed_asic_barrier(sc, ED_AX88X90_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Special setup for TC5299J */ static int ed_pccard_tc5299j(device_t dev, const struct ed_product *pp) { int error, i, id; char *ts; struct ed_softc *sc = device_get_softc(dev); if (!(pp->flags & NE2000DVF_TC5299J)) return (ENXIO); if (bootverbose) device_printf(dev, "Checking Tc5299j\n"); error = ed_probe_Novell_generic(dev, device_get_flags(dev)); if (bootverbose && error) device_printf(dev, "Novell generic probe failed: %d\n", error); if (error != 0) return (error); /* * Check to see if we have a MII PHY ID at any address. All TC5299J * devices have MII and a PHY, so we use this to weed out chips that * would otherwise make it through the tests we have after this point. */ sc->mii_bitbang_ops = &ed_pccard_tc5299j_mii_bitbang_ops; for (i = 0; i < 32; i++) { id = ed_miibus_readreg(dev, i, MII_PHYIDR1); if (id != 0 && id != 0xffff) break; } if (i == 32) return (ENXIO); ts = "TC5299J"; if (ed_pccard_rom_mac(dev, sc->enaddr) == 0) return (ENXIO); sc->vendor = ED_VENDOR_NOVELL; sc->type = ED_TYPE_NE2000; sc->chip_type = ED_CHIP_TYPE_TC5299J; sc->type_str = ts; return (0); } static void ed_pccard_tc5299j_mii_bitbang_write(device_t dev, uint32_t val) { struct ed_softc *sc; sc = device_get_softc(dev); /* We are already on page 3. */ ed_nic_outb(sc, ED_TC5299J_MIIBUS, val); ed_nic_barrier(sc, ED_TC5299J_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static uint32_t ed_pccard_tc5299j_mii_bitbang_read(device_t dev) { struct ed_softc *sc; uint32_t val; sc = device_get_softc(dev); /* We are already on page 3. */ val = ed_nic_inb(sc, ED_TC5299J_MIIBUS); ed_nic_barrier(sc, ED_TC5299J_MIIBUS, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * MII bus support routines. */ static int ed_miibus_readreg(device_t dev, int phy, int reg) { struct ed_softc *sc; int val; uint8_t cr = 0; sc = device_get_softc(dev); /* * The AX88790 has an interesting quirk. It has an internal phy that * needs a special bit set to access, but can also have additional * external PHYs set for things like HomeNET media. When accessing * the internal PHY, a bit has to be set, when accessing the external * PHYs, it must be clear. See Errata 1, page 51, in the AX88790 * datasheet for more details. * * Also, PHYs above 16 appear to be phantoms on some cards, but not * others. Registers read for this are often the same as prior values * read.
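* In sketch form, the access rule implemented below (names from this file; the GPIO write applies only to the AX88790 path): * * if (phy > 0x10) (phantom range) * return (0); * gpio = (phy == 0x10) ? ED_AX88X90_GPIO_INT_PHY : 0; * ed_asic_outb(sc, ED_AX88X90_GPIO, gpio); *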
Filter all register requests to 17-31. */ if (sc->chip_type == ED_CHIP_TYPE_AX88790) { if (phy > 0x10) return (0); if (phy == 0x10) ed_asic_outb(sc, ED_AX88X90_GPIO, ED_AX88X90_GPIO_INT_PHY); else ed_asic_outb(sc, ED_AX88X90_GPIO, 0); ed_asic_barrier(sc, ED_AX88X90_GPIO, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } else if (sc->chip_type == ED_CHIP_TYPE_TC5299J) { /* Select page 3. */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); cr = ed_nic_inb(sc, ED_P0_CR); ed_nic_outb(sc, ED_P0_CR, cr | ED_CR_PAGE_3); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } val = mii_bitbang_readreg(dev, sc->mii_bitbang_ops, phy, reg); if (sc->chip_type == ED_CHIP_TYPE_TC5299J) { /* Restore prior page. */ ed_nic_outb(sc, ED_P0_CR, cr); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } return (val); } static int ed_miibus_writereg(device_t dev, int phy, int reg, int data) { struct ed_softc *sc; uint8_t cr = 0; sc = device_get_softc(dev); /* See ed_miibus_readreg for details */ if (sc->chip_type == ED_CHIP_TYPE_AX88790) { if (phy > 0x10) return (0); if (phy == 0x10) ed_asic_outb(sc, ED_AX88X90_GPIO, ED_AX88X90_GPIO_INT_PHY); else ed_asic_outb(sc, ED_AX88X90_GPIO, 0); ed_asic_barrier(sc, ED_AX88X90_GPIO, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } else if (sc->chip_type == ED_CHIP_TYPE_TC5299J) { /* Select page 3. */ ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); cr = ed_nic_inb(sc, ED_P0_CR); ed_nic_outb(sc, ED_P0_CR, cr | ED_CR_PAGE_3); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } mii_bitbang_writereg(dev, sc->mii_bitbang_ops, phy, reg, data); if (sc->chip_type == ED_CHIP_TYPE_TC5299J) { /* Restore prior page. */ ed_nic_outb(sc, ED_P0_CR, cr); ed_nic_barrier(sc, ED_P0_CR, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } return (0); } static int ed_ifmedia_upd(struct ifnet *ifp) { struct ed_softc *sc; int error; sc = ifp->if_softc; ED_LOCK(sc); if (sc->miibus == NULL) { ED_UNLOCK(sc); return (ENXIO); } error = ed_pccard_kick_phy(sc); ED_UNLOCK(sc); return (error); } static void ed_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ed_softc *sc; struct mii_data *mii; sc = ifp->if_softc; ED_LOCK(sc); if (sc->miibus == NULL) { ED_UNLOCK(sc); return; } mii = device_get_softc(sc->miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; ED_UNLOCK(sc); } static void ed_child_detached(device_t dev, device_t child) { struct ed_softc *sc; sc = device_get_softc(dev); if (child == sc->miibus) sc->miibus = NULL; } static void ed_pccard_tick(struct ed_softc *sc) { struct mii_data *mii; int media = 0; ED_ASSERT_LOCKED(sc); if (sc->miibus != NULL) { mii = device_get_softc(sc->miibus); media = mii->mii_media_status; mii_tick(mii); if (mii->mii_media_status & IFM_ACTIVE && media != mii->mii_media_status) { if (sc->chip_type == ED_CHIP_TYPE_DL10022) { ed_asic_outb(sc, ED_DL10022_DIAG, (mii->mii_media_active & IFM_FDX) ? ED_DL10022_COLLISON_DIS : 0); #ifdef notyet } else if (sc->chip_type == ED_CHIP_TYPE_DL10019) { write_asic(sc, ED_DL10019_MAGIC, (mii->mii_media_active & IFM_FDX) ? 
DL19FDUPLX : 0); #endif } } } } static device_method_t ed_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ed_pccard_probe), DEVMETHOD(device_attach, ed_pccard_attach), DEVMETHOD(device_detach, ed_detach), /* Bus interface */ DEVMETHOD(bus_child_detached, ed_child_detached), /* MII interface */ DEVMETHOD(miibus_readreg, ed_miibus_readreg), DEVMETHOD(miibus_writereg, ed_miibus_writereg), DEVMETHOD_END }; static driver_t ed_pccard_driver = { "ed", ed_pccard_methods, sizeof(struct ed_softc) }; DRIVER_MODULE(ed, pccard, ed_pccard_driver, ed_devclass, 0, NULL); DRIVER_MODULE(miibus, ed, miibus_driver, miibus_devclass, 0, NULL); MODULE_DEPEND(ed, miibus, 1, 1, 1); MODULE_DEPEND(ed, ether, 1, 1, 1); PCCARD_PNP_INFO(ed_pccard_products); Index: head/sys/dev/fb/s3_pci.c =================================================================== --- head/sys/dev/fb/s3_pci.c (revision 295789) +++ head/sys/dev/fb/s3_pci.c (revision 295790) @@ -1,567 +1,567 @@ /*- * Copyright (c) 2000 Alcove - Nicolas Souchu * All rights reserved. * * Code based on Peter Horton patch. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* Enable LFB on S3 cards that have only VESA 1.2 BIOS */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define S3PCI_DEBUG 1 #define PCI_S3_VENDOR_ID 0x5333 #define S3_CONFIG_IO 0x3c0 /* VGA standard config io ports */ #define S3_CONFIG_IO_SIZE 0x20 #define S3_ENHANCED_IO 0x4ae8 /* Extended config register */ #define S3_ENHANCED_IO_SIZE 1 #define S3_CRTC_ADDR 0x14 #define S3_CRTC_VALUE 0x15 #define PCI_BASE_MEMORY 0x10 #define outb_p(value, offset) bus_space_write_1(sc->st, sc->sh, offset, value) #define inb_p(offset) (bus_space_read_1(sc->st, sc->sh, offset)) #define outb_enh(value, offset) bus_space_write_1(sc->enh_st, sc->enh_sh, \ offset, value) #define inb_enh(offset) (bus_space_read_1(sc->enh_st, sc->enh_sh, offset)) struct s3pci_softc { bus_space_tag_t st; bus_space_handle_t sh; bus_space_tag_t enh_st; bus_space_handle_t enh_sh; struct resource *port_res; struct resource *enh_res; struct resource *mem_res; u_long mem_base; u_long mem_size; }; static int s3lfb_error(void); static vi_probe_t s3lfb_probe; static vi_init_t s3lfb_init; static vi_get_info_t s3lfb_get_info; static vi_query_mode_t s3lfb_query_mode; static vi_set_mode_t s3lfb_set_mode; static vi_save_font_t s3lfb_save_font; static vi_load_font_t s3lfb_load_font; static vi_show_font_t s3lfb_show_font; static vi_save_palette_t s3lfb_save_palette; static vi_load_palette_t s3lfb_load_palette; static vi_set_border_t s3lfb_set_border; static vi_save_state_t s3lfb_save_state; static vi_load_state_t s3lfb_load_state; static vi_set_win_org_t s3lfb_set_origin; static vi_read_hw_cursor_t s3lfb_read_hw_cursor; static vi_set_hw_cursor_t s3lfb_set_hw_cursor; static vi_set_hw_cursor_shape_t s3lfb_set_hw_cursor_shape; static vi_blank_display_t s3lfb_blank_display; static vi_mmap_t s3lfb_mmap; static vi_ioctl_t s3lfb_ioctl; static vi_clear_t s3lfb_clear; static vi_fill_rect_t s3lfb_fill_rect; static vi_bitblt_t s3lfb_bitblt; static vi_diag_t s3lfb_diag; static video_switch_t s3lfbvidsw = { s3lfb_probe, s3lfb_init, s3lfb_get_info, s3lfb_query_mode, s3lfb_set_mode, s3lfb_save_font, s3lfb_load_font, s3lfb_show_font, s3lfb_save_palette, s3lfb_load_palette, s3lfb_set_border, s3lfb_save_state, s3lfb_load_state, s3lfb_set_origin, s3lfb_read_hw_cursor, s3lfb_set_hw_cursor, s3lfb_set_hw_cursor_shape, s3lfb_blank_display, s3lfb_mmap, s3lfb_ioctl, s3lfb_clear, s3lfb_fill_rect, s3lfb_bitblt, s3lfb_error, s3lfb_error, s3lfb_diag, }; static video_switch_t *prevvidsw; static device_t s3pci_dev = NULL; static int s3lfb_probe(int unit, video_adapter_t **adpp, void *arg, int flags) { return (*prevvidsw->probe)(unit, adpp, arg, flags); } static int s3lfb_init(int unit, video_adapter_t *adp, int flags) { return (*prevvidsw->init)(unit, adp, flags); } static int s3lfb_get_info(video_adapter_t *adp, int mode, video_info_t *info) { #if 0 device_t dev = s3pci_dev; /* XXX */ struct s3pci_softc *sc = (struct s3pci_softc *)device_get_softc(dev); #endif int error; if ((error = (*prevvidsw->get_info)(adp, mode, info))) return error; #if 0 /* Don't use linear addressing with text modes */ if ((mode > M_VESA_BASE) && (info->vi_flags & V_INFO_GRAPHICS) && !(info->vi_flags & V_INFO_LINEAR)) { info->vi_flags |= V_INFO_LINEAR; info->vi_buffer = sc->mem_base; } else { info->vi_buffer = 0; } #endif return 0; } static int s3lfb_query_mode(video_adapter_t *adp, video_info_t *info) { return
(*prevvidsw->query_mode)(adp, info); } static vm_offset_t s3lfb_map_buffer(u_int paddr, size_t size) { vm_offset_t vaddr; u_int off; off = paddr - trunc_page(paddr); vaddr = (vm_offset_t)pmap_mapdev(paddr - off, size + off); return (vaddr + off); } static int s3lfb_set_mode(video_adapter_t *adp, int mode) { device_t dev = s3pci_dev; /* XXX */ struct s3pci_softc *sc = (struct s3pci_softc *)device_get_softc(dev); #if 0 unsigned char tmp; #endif int error; /* First, set the mode as if it was a classic VESA card */ if ((error = (*prevvidsw->set_mode)(adp, mode))) return error; /* If not in a linear mode (according to s3lfb_get_info() called * by vesa_set_mode in the (*vidsw[adp->va_index]->get_info)... * sequence), return with no error */ #if 0 if (!(adp->va_info.vi_flags & V_INFO_LINEAR)) return 0; #endif if ((mode <= M_VESA_BASE) || !(adp->va_info.vi_flags & V_INFO_GRAPHICS) || (adp->va_info.vi_flags & V_INFO_LINEAR)) return 0; /* OK, now apply the configuration to the card */ outb_p(0x38, S3_CRTC_ADDR); outb_p(0x48, S3_CRTC_VALUE); outb_p(0x39, S3_CRTC_ADDR); outb_p(0xa5, S3_CRTC_VALUE); /* check that CR47 is read/write */ #if 0 outb_p(0x47, S3_CRTC_ADDR); outb_p(0xff, S3_CRTC_VALUE); tmp = inb_p(S3_CRTC_VALUE); outb_p(0x00, S3_CRTC_VALUE); if ((tmp != 0xff) || (inb_p(S3_CRTC_VALUE))) { /* lock S3 registers */ outb_p(0x39, S3_CRTC_ADDR); outb_p(0x5a, S3_CRTC_VALUE); outb_p(0x38, S3_CRTC_ADDR); outb_p(0x00, S3_CRTC_VALUE); return ENXIO; } #endif /* enable enhanced register access */ outb_p(0x40, S3_CRTC_ADDR); outb_p(inb_p(S3_CRTC_VALUE) | 1, S3_CRTC_VALUE); /* enable enhanced functions */ outb_enh(inb_enh(0) | 1, 0x0); /* enable enhanced mode memory mapping */ outb_p(0x31, S3_CRTC_ADDR); outb_p(inb_p(S3_CRTC_VALUE) | 8, S3_CRTC_VALUE); /* enable linear frame buffer and set address window to max */ outb_p(0x58, S3_CRTC_ADDR); outb_p(inb_p(S3_CRTC_VALUE) | 0x13, S3_CRTC_VALUE); /* disable enhanced register access */ outb_p(0x40, S3_CRTC_ADDR); outb_p(inb_p(S3_CRTC_VALUE) & ~1, S3_CRTC_VALUE); /* lock S3 registers */ outb_p(0x39, S3_CRTC_ADDR); outb_p(0x5a, S3_CRTC_VALUE); outb_p(0x38, S3_CRTC_ADDR); outb_p(0x00, S3_CRTC_VALUE); adp->va_info.vi_flags |= V_INFO_LINEAR; adp->va_info.vi_buffer = sc->mem_base; adp->va_buffer = s3lfb_map_buffer(adp->va_info.vi_buffer, adp->va_info.vi_buffer_size); adp->va_buffer_size = adp->va_info.vi_buffer_size; adp->va_window = adp->va_buffer; adp->va_window_size = adp->va_info.vi_buffer_size/adp->va_info.vi_planes; adp->va_window_gran = adp->va_info.vi_buffer_size/adp->va_info.vi_planes; return 0; } static int s3lfb_save_font(video_adapter_t *adp, int page, int fontsize, int fontwidth, u_char *data, int ch, int count) { return (*prevvidsw->save_font)(adp, page, fontsize, fontwidth, data, ch, count); } static int s3lfb_load_font(video_adapter_t *adp, int page, int fontsize, int fontwidth, u_char *data, int ch, int count) { return (*prevvidsw->load_font)(adp, page, fontsize, fontwidth, data, ch, count); } static int s3lfb_show_font(video_adapter_t *adp, int page) { return (*prevvidsw->show_font)(adp, page); } static int s3lfb_save_palette(video_adapter_t *adp, u_char *palette) { return (*prevvidsw->save_palette)(adp, palette); } static int s3lfb_load_palette(video_adapter_t *adp, u_char *palette) { return (*prevvidsw->load_palette)(adp, palette); } static int s3lfb_set_border(video_adapter_t *adp, int color) { return (*prevvidsw->set_border)(adp, color); } static int s3lfb_save_state(video_adapter_t *adp, void *p, size_t size) { return
(*prevvidsw->save_state)(adp, p, size); } static int s3lfb_load_state(video_adapter_t *adp, void *p) { return (*prevvidsw->load_state)(adp, p); } static int s3lfb_set_origin(video_adapter_t *adp, off_t offset) { return (*prevvidsw->set_win_org)(adp, offset); } static int s3lfb_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { return (*prevvidsw->read_hw_cursor)(adp, col, row); } static int s3lfb_set_hw_cursor(video_adapter_t *adp, int col, int row) { return (*prevvidsw->set_hw_cursor)(adp, col, row); } static int s3lfb_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { return (*prevvidsw->set_hw_cursor_shape)(adp, base, height, celsize, blink); } static int s3lfb_blank_display(video_adapter_t *adp, int mode) { return (*prevvidsw->blank_display)(adp, mode); } static int s3lfb_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { return (*prevvidsw->mmap)(adp, offset, paddr, prot, memattr); } static int s3lfb_clear(video_adapter_t *adp) { return (*prevvidsw->clear)(adp); } static int s3lfb_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { return (*prevvidsw->fill_rect)(adp, val, x, y, cx, cy); } static int s3lfb_bitblt(video_adapter_t *adp,...) { return (*prevvidsw->bitblt)(adp); /* XXX */ } static int s3lfb_ioctl(video_adapter_t *adp, u_long cmd, caddr_t arg) { return (*prevvidsw->ioctl)(adp, cmd, arg); } static int s3lfb_diag(video_adapter_t *adp, int level) { return (*prevvidsw->diag)(adp, level); } static int s3lfb_error(void) { return 1; } /***********************************/ /* PCI detection/attachment stuff */ /***********************************/ static int s3pci_probe(device_t dev) { u_int32_t vendor, class, subclass, device_id; device_id = pci_get_devid(dev); vendor = device_id & 0xffff; class = pci_get_class(dev); subclass = pci_get_subclass(dev); if ((class != PCIC_DISPLAY) || (subclass != PCIS_DISPLAY_VGA) || (vendor != PCI_S3_VENDOR_ID)) return ENXIO; device_set_desc(dev, "S3 graphic card"); bus_set_resource(dev, SYS_RES_IOPORT, 0, S3_CONFIG_IO, S3_CONFIG_IO_SIZE); bus_set_resource(dev, SYS_RES_IOPORT, 1, S3_ENHANCED_IO, S3_ENHANCED_IO_SIZE); return BUS_PROBE_DEFAULT; }; static int s3pci_attach(device_t dev) { struct s3pci_softc* sc = (struct s3pci_softc*)device_get_softc(dev); video_adapter_t *adp; #if 0 unsigned char tmp; #endif int rid, i; if (s3pci_dev) { printf("%s: driver already attached!\n", __func__); goto error; } /* Allocate resources */ rid = 0; - if (!(sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0ul, ~0ul, 0, RF_ACTIVE | RF_SHAREABLE))) { + if (!(sc->port_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE))) { printf("%s: port resource allocation failed!\n", __func__); goto error; } sc->st = rman_get_bustag(sc->port_res); sc->sh = rman_get_bushandle(sc->port_res); rid = 1; - if (!(sc->enh_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0ul, ~0ul, 0, RF_ACTIVE | RF_SHAREABLE))) { + if (!(sc->enh_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE))) { printf("%s: enhanced port resource allocation failed!\n", __func__); goto error; } sc->enh_st = rman_get_bustag(sc->enh_res); sc->enh_sh = rman_get_bushandle(sc->enh_res); rid = PCI_BASE_MEMORY; if (!(sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) { printf("%s: mem resource allocation failed!\n", __func__); goto error; } /* The memory base address will be our LFB base address */ /*
sc->mem_base = (u_long)rman_get_virtual(sc->mem_res); */ sc->mem_base = bus_get_resource_start(dev, SYS_RES_MEMORY, rid); sc->mem_size = bus_get_resource_count(dev, SYS_RES_MEMORY, rid); /* Attach the driver to the VGA/VESA framework */ for (i = 0; (adp = vid_get_adapter(i)) != NULL; ++i) { if (adp->va_type == KD_VGA) break; } /* If the VESA module hasn't been loaded, or VGA doesn't * exist, abort */ if ((adp == NULL) || !(adp->va_flags & V_ADP_VESA)) { printf("%s: VGA adapter not found or VESA module not loaded!\n", __func__); goto error; } /* Replace the VESA video switch by ours */ prevvidsw = vidsw[adp->va_index]; vidsw[adp->va_index] = &s3lfbvidsw; /* Remember who we are on the bus */ s3pci_dev = (void *)dev; /* XXX */ return 0; error: if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, PCI_BASE_MEMORY, sc->mem_res); if (sc->enh_res) bus_release_resource(dev, SYS_RES_IOPORT, 1, sc->enh_res); if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res); return ENXIO; }; static device_method_t s3pci_methods[] = { DEVMETHOD(device_probe, s3pci_probe), DEVMETHOD(device_attach, s3pci_attach), {0,0} }; static driver_t s3pci_driver = { "s3pci", s3pci_methods, sizeof(struct s3pci_softc), }; static devclass_t s3pci_devclass; DRIVER_MODULE(s3pci, pci, s3pci_driver, s3pci_devclass, 0, 0); Index: head/sys/dev/fdc/fdc_pccard.c =================================================================== --- head/sys/dev/fdc/fdc_pccard.c (revision 295789) +++ head/sys/dev/fdc/fdc_pccard.c (revision 295790) @@ -1,142 +1,141 @@ /*- * Copyright (c) 2004-2005 M. Warner Losh. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "pccarddevs.h" static int fdc_pccard_probe(device_t); static int fdc_pccard_attach(device_t); static const struct pccard_product fdc_pccard_products[] = { PCMCIA_CARD(YEDATA, EXTERNAL_FDD), }; static int fdc_pccard_alloc_resources(device_t dev, struct fdc_data *fdc) { struct resource *res; int rid, i; rid = 0; - res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, 1, - RF_ACTIVE); + res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { device_printf(dev, "cannot alloc I/O port range\n"); return (ENXIO); } for (i = 0; i < FDC_MAXREG; i++) { fdc->resio[i] = res; fdc->ridio[i] = rid; fdc->ioff[i] = i; fdc->ioh[i] = rman_get_bushandle(res); } fdc->iot = rman_get_bustag(res); fdc->rid_irq = 0; fdc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &fdc->rid_irq, RF_ACTIVE | RF_SHAREABLE); if (fdc->res_irq == NULL) { device_printf(dev, "cannot reserve interrupt line\n"); return (ENXIO); } return (0); } static int fdc_pccard_probe(device_t dev) { if (pccard_product_lookup(dev, fdc_pccard_products, sizeof(fdc_pccard_products[0]), NULL) != NULL) { device_set_desc(dev, "PC Card Floppy"); return (0); } return (ENXIO); } static int fdc_pccard_attach(device_t dev) { int error; struct fdc_data *fdc; device_t child; fdc = device_get_softc(dev); fdc->flags = FDC_NODMA | FDC_NOFAST; fdc->fdct = FDC_NE765; error = fdc_pccard_alloc_resources(dev, fdc); if (error == 0) error = fdc_attach(dev); if (error == 0) { child = fdc_add_child(dev, "fd", -1); device_set_flags(child, 0x24); error = bus_generic_attach(dev); } if (error) fdc_release_resources(fdc); return (error); } static device_method_t fdc_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fdc_pccard_probe), DEVMETHOD(device_attach, fdc_pccard_attach), DEVMETHOD(device_detach, fdc_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, fdc_print_child), DEVMETHOD(bus_read_ivar, fdc_read_ivar), DEVMETHOD(bus_write_ivar, fdc_write_ivar), /* Our children never use any other bus interface methods. */ { 0, 0 } }; static driver_t fdc_pccard_driver = { "fdc", fdc_pccard_methods, sizeof(struct fdc_data) }; DRIVER_MODULE(fdc, pccard, fdc_pccard_driver, fdc_devclass, 0, 0); PCCARD_PNP_INFO(fdc_pccard_products); Index: head/sys/dev/hpt27xx/hpt27xx_osm_bsd.c =================================================================== --- head/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 295789) +++ head/sys/dev/hpt27xx/hpt27xx_osm_bsd.c (revision 295790) @@ -1,1514 +1,1514 @@ /*- * Copyright (c) 2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); if (!hba->ldm_adapter.him_handle) return ENXIO; hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK); if (!vbus_ext) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } memset(vbus_ext, 0, sizeof(VBUS_EXT)); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better use bus_dmamem_alloc() to allocate DMA memory, * but there are some problems currently (alignment, etc.).
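* * Until that is sorted out, pages come from contigmalloc() and go back via contigfree(); order n means 2^n physically contiguous pages. An illustrative shape of the pair (arguments abbreviated): * * p = contigmalloc(PAGE_SIZE << order, M_DEVBUF, M_WAITOK, ...); * contigfree(p, PAGE_SIZE << order, M_DEVBUF);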
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0); } static __inline void free_pages(void *p, int order) { contigfree(p, PAGE_SIZE<<order, M_DEVBUF); } static int hpt_alloc_mem(PVBUS_EXT vbus_ext) { PHBA hba; struct freelist *f; HPT_UINT i; void **p; for (hba = vbus_ext->hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; i<f->count; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; i<f->count;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && i<f->count; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; i<os_max_cache_pages; i++) { p = (void **)__get_free_pages(0); if (!p) return -1; dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; i<os_max_cache_pages; i++) { p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry).
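* * A sketch of the flow implemented below: allocate a CMD_TYPE_FLUSH command with flags.hard_flush set, queue it, then sleep on the command until hpt_flush_done() wakes us; each timeout of that sleep resets the vbus and waits again: * * pCmd->type = CMD_TYPE_FLUSH; * pCmd->flags.hard_flush = 1; * ldm_queue_cmd(pCmd); * while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) * ldm_reset_vbus(vd->vbus);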
*/ static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd) { PCOMMAND pCmd; int result = 0, done; HPT_UINT count; KdPrint(("flushing dev %p", vd)); hpt_lock_vbus(vbus_ext); if (mIsArray(vd->type) && vd->u.array.transform) count = MAX(vd->u.array.transform->source->cmds_per_request, vd->u.array.transform->target->cmds_per_request); else count = vd->cmds_per_request; pCmd = ldm_alloc_cmds(vd->vbus, count); if (!pCmd) { hpt_unlock_vbus(vbus_ext); return -1; } pCmd->type = CMD_TYPE_FLUSH; pCmd->flags.hard_flush = 1; pCmd->target = vd; pCmd->done = hpt_flush_done; done = 0; pCmd->priv = &done; ldm_queue_cmd(pCmd); if (!done) { while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) { ldm_reset_vbus(vd->vbus); } } KdPrint(("flush result %d", pCmd->Result)); if (pCmd->Result!=RETURN_SUCCESS) result = -1; ldm_free_cmds(pCmd); hpt_unlock_vbus(vbus_ext); return result; } static void hpt_stop_tasks(PVBUS_EXT vbus_ext); static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PHBA hba; int i; KdPrint(("hpt_shutdown_vbus")); /* stop all ctl tasks and disable the worker taskqueue */ hpt_stop_tasks(vbus_ext); vbus_ext->worker.ta_context = 0; /* flush devices */ for (i=0; i<osm_max_targets; i++) { PVDEV vd = ldm_find_target(vbus, i); if (vd) hpt_flush_vdev(vbus_ext, vd); } for (hba=vbus_ext->hba_list; hba; hba=hba->next) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); hpt_free_mem(vbus_ext); while ((hba=vbus_ext->hba_list)) { vbus_ext->hba_list = hba->next; free(hba->ldm_adapter.him_handle, M_DEVBUF); } #if (__FreeBSD_version >= 1000510) callout_drain(&vbus_ext->timer); mtx_destroy(&vbus_ext->lock); #endif free(vbus_ext, M_DEVBUF); KdPrint(("hpt_shutdown_vbus done")); } static void __hpt_do_tasks(PVBUS_EXT vbus_ext) { OSM_TASK *tasks; tasks = vbus_ext->tasks; vbus_ext->tasks = 0; while (tasks) { OSM_TASK *t = tasks; tasks = t->next; t->next = 0; t->func(vbus_ext->vbus, t->data); } } static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending) { if(vbus_ext){ hpt_lock_vbus(vbus_ext); __hpt_do_tasks(vbus_ext); hpt_unlock_vbus(vbus_ext); } } static void hpt_action(struct cam_sim *sim, union ccb *ccb); static void hpt_poll(struct cam_sim *sim); static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg); static void hpt_pci_intr(void *arg); static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext) { POS_CMDEXT p = vbus_ext->cmdext_list; if (p) vbus_ext->cmdext_list = p->next; return p; } static __inline void cmdext_put(POS_CMDEXT p) { p->next = p->vbus_ext->cmdext_list; p->vbus_ext->cmdext_list = p; } static void hpt_timeout(void *arg) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; KdPrint(("pCmd %p timeout", pCmd)); ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus); } static void os_cmddone(PCOMMAND pCmd) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result)); #if (__FreeBSD_version >= 1000510) callout_stop(&ext->timeout); #else untimeout(hpt_timeout, pCmd, ccb->ccb_h.timeout_ch); #endif switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; #if (__FreeBSD_version >= 1000510) if(logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } #else bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; int idx; if(logical) { if (ccb->ccb_h.flags & CAM_DATA_PHYS) panic("physical address unsupported"); if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) panic("physical address unsupported"); for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr); pSg[idx].size = sgList[idx].ds_len; pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } } else { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; } return TRUE; } #endif /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } #if (__FreeBSD_version >= 1000510) callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); #else ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); #endif ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 
10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k sector size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k sector size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext
= cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; #if (__FreeBSD_version < 1000510) if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { int idx; bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr; if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) pCmd->flags.physical_sg = 1; for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) { pCmd->psg[idx].addr.bus = sgList[idx].ds_addr; pCmd->psg[idx].size = sgList[idx].ds_len; pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0; } ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT); ldm_queue_cmd(pCmd); } else #endif { int error; pCmd->flags.physical_sg = 1; #if (__FreeBSD_version >= 1000510) error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #else error = bus_dmamap_load(vbus_ext->io_dmat, ext->dma_map, ccb->csio.data_ptr, ccb->csio.dxfer_len, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); #endif KdPrint(("<8>bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); #if (__FreeBSD_version >= 1000510) hpt_assert_vbus_locked(vbus_ext); #endif switch (ccb->ccb_h.func_code) { #if (__FreeBSD_version < 1000510) case XPT_SCSI_IO: hpt_lock_vbus(vbus_ext); hpt_scsi_io(vbus_ext, ccb); hpt_unlock_vbus(vbus_ext); return; case XPT_RESET_BUS: hpt_lock_vbus(vbus_ext); ldm_reset_vbus((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); break; #else case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; #endif case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { #if (__FreeBSD_version < 1000510) hpt_pci_intr(cam_sim_softc(sim)); #else PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); #endif } static void hpt_async(void 
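/*
 * Note the error handling just above: with BUS_DMA_WAITOK,
 * bus_dmamap_load(9) (or bus_dmamap_load_ccb(9) on newer kernels) may
 * return EINPROGRESS, meaning the mapping was deferred and
 * hpt_io_dmamap_callback will fire later when resources free up; only
 * other non-zero returns are real failures that must unwind the
 * command. A reduced sketch of the idiom, with hypothetical names:
 */
static int
my_start_io(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, size_t len,
    bus_dmamap_callback_t *cb, void *ctx)
{
    int error;

    error = bus_dmamap_load(tag, map, buf, len, cb, ctx, BUS_DMA_WAITOK);
    if (error == EINPROGRESS)
        return (0);     /* callback will run later and post the I/O */
    return (error);     /* 0: callback already ran; else: failed */
}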
* callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("<8>hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); #if (__FreeBSD_version < 1000510) callout_handle_init(&vbus_ext->timer); #else callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); #endif if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } #if (__FreeBSD_version >= 1000510) callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); #endif } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } #if (__FreeBSD_version >= 1000510) vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); #else vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &Giant, os_max_queue_comm, /*tagged*/8, devq); #endif unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { 
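/*
 * HPT_DO_IOCTL above leans on the GCC/clang statement-expression
 * extension: a ({ ... }) block is an expression whose value is that of
 * its last statement, which lets the macro declare a local IOCTL_ARG,
 * fill it, issue the call, and still hand arg.result back to the
 * caller. A minimal illustration of the extension (hypothetical
 * macro):
 */
#define CLAMP_TO_BYTE(x) ({                     \
    int _v = (x);                               \
    _v < 0 ? 0 : (_v > 255 ? 255 : _v);         \
})
/* usage: int b = CLAMP_TO_BYTE(300);   b == 255 */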
hpt_unlock_vbus(vbus_ext); os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { hpt_unlock_vbus(vbus_ext); os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); vbus_ext->sim = NULL; return ; } xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); hpt_unlock_vbus(vbus_ext); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } #if (__FreeBSD_version >= 1000510) if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, #else if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM, #endif NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, 
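/*
 * The one functional change in this hunk: bus_alloc_resource_any(9) is
 * a convenience wrapper that calls bus_alloc_resource() with a
 * wide-open range (start 0, end ~0) and count 1, which is exactly what
 * an IRQ allocation wants; spelling the range out by hand, as the old
 * code did, is noisier and fragile with respect to the width of the
 * range arguments. The two forms side by side:
 */
/* before */
hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, &rid,
    0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
/* after */
hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid,
    RF_SHAREABLE | RF_ACTIVE);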
sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } #if __FreeBSD_version < 1000510 mtx_lock(&Giant); #endif hpt_do_ioctl(&ioctl_args); #if __FreeBSD_version < 1000510 mtx_unlock(&Giant); #endif if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; #if (__FreeBSD_version < 1000510) mtx_lock(&Giant); #endif ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } #if (__FreeBSD_version < 1000510) if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(vbus_ext->sim), #else if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), #endif CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } #if (__FreeBSD_version < 1000510) mtx_unlock(&Giant); #endif return(0); } Index: head/sys/dev/hptiop/hptiop.c =================================================================== --- head/sys/dev/hptiop/hptiop.c (revision 295789) +++ head/sys/dev/hptiop/hptiop.c (revision 295790) @@ -1,2854 +1,2854 @@ /* * HighPoint RR3xxx/4xxx 
RAID Driver for FreeBSD * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const char driver_name[] = "hptiop"; static const char driver_version[] = "v1.9"; static devclass_t hptiop_devclass; static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec); static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_rescan_bus(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba); static int hptiop_get_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba); static int 
hptiop_internal_memfree_itl(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba); static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); static int hptiop_probe(device_t dev); static int hptiop_attach(device_t dev); static int hptiop_detach(device_t dev); static int hptiop_shutdown(device_t dev); static void hptiop_action(struct cam_sim *sim, union ccb *ccb); static void hptiop_poll(struct cam_sim *sim); static void hptiop_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hptiop_pci_intr(void *arg); static void hptiop_release_resource(struct hpt_iop_hba *hba); static void hptiop_reset_adapter(void *argv); static d_open_t hptiop_open; static d_close_t hptiop_close; static d_ioctl_t hptiop_ioctl; static struct cdevsw hptiop_cdevsw = { .d_open = hptiop_open, .d_close = hptiop_close, .d_ioctl = hptiop_ioctl, .d_name = driver_name, .d_version = D_VERSION, }; #define hba_from_dev(dev) \ ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value) #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, 
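/*
 * The BUS_SPACE_* macros below derive register offsets from C struct
 * layout: each message unit is declared once as a struct, and
 * offsetof() turns a named register into the byte offset that
 * bus_space_read_4()/bus_space_write_4() need, so register names stay
 * compiler-checkable. The pattern, with a hypothetical register block:
 */
struct my_regs {
    uint32_t ctrl;      /* offset 0x0 */
    uint32_t status;    /* offset 0x4 */
    uint32_t doorbell;  /* offset 0x8 */
};
#define MY_RD4(sc, reg)       bus_space_read_4((sc)->tag, (sc)->handle, \
    offsetof(struct my_regs, reg))
#define MY_WRT4(sc, reg, val) bus_space_write_4((sc)->tag, (sc)->handle, \
    offsetof(struct my_regs, reg), (val))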
offsetof(struct hpt_iopmu_mvfrey, offset)) static int hptiop_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); if (hba==NULL) return ENXIO; if (hba->flag & HPT_IOCTL_FLAG_OPEN) return EBUSY; hba->flag |= HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int flags, ioctl_thread_t proc) { int ret = EFAULT; struct hpt_iop_hba *hba = hba_from_dev(dev); mtx_lock(&Giant); switch (cmd) { case HPT_DO_IOCONTROL: ret = hba->ops->do_ioctl(hba, (struct hpt_iop_ioctl_param *)data); break; case HPT_SCAN_BUS: ret = hptiop_rescan_bus(hba); break; } mtx_unlock(&Giant); return ret; } static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) { u_int64_t p; u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); if (outbound_tail != outbound_head) { bus_space_read_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, outbound_q[outbound_tail]), (u_int32_t *)&p, 2); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); return p; } else return 0; } static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) { u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); u_int32_t head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; bus_space_write_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), (u_int32_t *)&p, 2); BUS_SPACE_WRT4_MV2(inbound_head, head); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); } static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MV2(inbound_msg, msg); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg); BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a); } static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) { u_int32_t req=0; int i; for (i = 0; i < millisec; i++) { req = BUS_SPACE_RD4_ITL(inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; DELAY(1000); } if (req!=IOPMU_QUEUE_EMPTY) { BUS_SPACE_WRT4_ITL(outbound_queue, req); BUS_SPACE_RD4_ITL(outbound_intstatus); return 0; } return -1; } static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, u_int32_t index) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req=0; union ccb *ccb; u_int8_t *cdb; u_int32_t result, temp, dxfer; u_int64_t temp64; if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { srb = hba->srb[index & ~(u_int32_t) (IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; 
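/*
 * hptiop_mv_outbound_read() above is a classic single-consumer ring:
 * compare the soft tail against the hardware-owned head, read the
 * 64-bit entry, advance the tail modulo MVIOP_QUEUE_LEN, and publish
 * the new tail so the IOP can reuse the slot. The same arithmetic,
 * standalone (the caller must publish *tail back to the device):
 */
static int
ring_pop(const uint64_t *q, uint32_t *tail, uint32_t head, uint32_t len,
    uint64_t *out)
{
    if (*tail == head)
        return (0);                             /* ring empty */
    *out = q[*tail];
    *tail = (*tail + 1 == len) ? 0 : *tail + 1; /* wrap at len */
    return (1);
}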
req = (struct hpt_iop_request_scsi_command *)srb; if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) result = IOP_RESULT_SUCCESS; else result = req->header.result; } else { srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; result = req->header.result; } dxfer = req->dataxfer_length; goto srb_complete; } /*iop req*/ temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, type)); result = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, result)); switch(temp) { case IOP_REQUEST_TYPE_IOCTL_COMMAND: { temp64 = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); wakeup((void *)((unsigned long)hba->u.itl.mu + index)); break; } case IOP_REQUEST_TYPE_SCSI_COMMAND: bus_space_read_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); srb = (struct hpt_iop_srb *)(unsigned long)temp64; dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, dataxfer_length)); srb_complete: ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } switch (result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (dxfer < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - dxfer; else ccb->csio.sense_resid = 0; if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ bus_space_read_region_1(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, sg_list), (u_int8_t *)&ccb->csio.sense_data, MIN(dxfer, sizeof(ccb->csio.sense_data))); } else { memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(dxfer, sizeof(ccb->csio.sense_data))); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) BUS_SPACE_WRT4_ITL(outbound_queue, index); ccb->csio.resid = ccb->csio.dxfer_len - dxfer; hptiop_free_srb(hba, srb); xpt_done(ccb); break; } } static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) { u_int32_t req, temp; while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header *p; p = (struct hpt_iop_request_header *) ((char *)hba->u.itl.mu + req); temp = bus_space_read_4(hba->bar0t, hba->bar0h,req 
+ offsetof(struct hpt_iop_request_header, flags)); if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { u_int64_t temp64; bus_space_read_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) { hptiop_request_callback_itl(hba, req); } else { temp64 = 1; bus_space_write_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); } } else hptiop_request_callback_itl(hba, req); } } } static int hptiop_intr_itl(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_ITL(outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); KdPrint(("hptiop: received outbound msg %x\n", msg)); BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, u_int64_t _tag) { u_int32_t context = (u_int32_t)_tag; if (context & MVIOP_CMD_TYPE_SCSI) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); } else if (context & MVIOP_CMD_TYPE_IOCTL) { struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup(req); } else if (context & (MVIOP_CMD_TYPE_SET_CONFIG | MVIOP_CMD_TYPE_GET_CONFIG)) hba->config_done = 1; else { device_printf(hba->pcidev, "wrong callback type\n"); } } static void hptiop_request_callback_mvfrey(struct 
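/*
 * Each IOP_RESULT_CHECK_CONDITION arm in these callbacks implements
 * CAM autosense: the firmware hands back raw sense bytes in the data
 * area, the driver copies them into csio.sense_data, computes
 * sense_resid, and reports CAM_SCSI_STATUS_ERROR together with
 * CAM_AUTOSNS_VALID so CAM need not issue a separate REQUEST SENSE.
 * The idiom, condensed (dxfer: returned sense length; sense_src:
 * wherever the firmware left the bytes):
 */
ccb->csio.sense_resid = (dxfer < ccb->csio.sense_len) ?
    ccb->csio.sense_len - dxfer : 0;
memcpy(&ccb->csio.sense_data, sense_src,
    MIN(dxfer, sizeof(ccb->csio.sense_data)));
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;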
hpt_iop_hba * hba, u_int32_t _tag) { u_int32_t req_type = _tag & 0xf; struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->config_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: srb = hba->srb[(_tag >> 4) & 0xff]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; callout_stop(&srb->timeout); if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); break; case IOP_REQUEST_TYPE_IOCTL_COMMAND: if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr); break; default: device_printf(hba->pcidev, "wrong callback type\n"); break; } } static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) { u_int64_t req; while ((req = hptiop_mv_outbound_read(hba))) { if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { hptiop_request_callback_mv(hba, req); } } } } static int hptiop_intr_mv(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_MV0(outbound_doorbell); if (status) BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); KdPrint(("hptiop: received outbound msg %x\n", msg)); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_mv(hba); ret = 1; } return ret; } static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba) { u_int32_t status, _tag, cptr; int ret = 0; if (hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); } status = BUS_SPACE_RD4_MVFREY2(f0_doorbell); if (status) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status); if (status & 
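/*
 * MVFrey completion tags pack several fields into one 32-bit word, as
 * the masks above show: bits 0..3 carry the request type, bits 4..11
 * select the SRB slot, and MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT flags
 * inline success so the header need not be re-read. Decoder sketch:
 */
static inline unsigned tag_type(uint32_t tag) { return (tag & 0xf); }
static inline unsigned tag_slot(uint32_t tag) { return ((tag >> 4) & 0xff); }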
CPU_TO_F0_DRBL_MSG_A_BIT) { u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a); hptiop_os_message_callback(hba, msg); } ret = 1; } status = BUS_SPACE_RD4_MVFREY2(isr_cause); if (status) { BUS_SPACE_WRT4_MVFREY2(isr_cause, status); do { cptr = *hba->u.mvfrey.outlist_cptr & 0xff; while (hba->u.mvfrey.outlist_rptr != cptr) { hba->u.mvfrey.outlist_rptr++; if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) { hba->u.mvfrey.outlist_rptr = 0; } _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val; hptiop_request_callback_mvfrey(hba, _tag); ret = 2; } } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); } if (hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); } return ret; } static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, u_int32_t req32, u_int32_t millisec) { u_int32_t i; u_int64_t temp64; BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); for (i = 0; i < millisec; i++) { hptiop_intr_itl(hba); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i; u_int64_t phy_addr; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy | (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; ((struct hpt_iop_request_get_config *)req)->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT; hptiop_mv_inbound_write(phy_addr, hba); BUS_SPACE_RD4_MV0(outbound_intmask); for (i = 0; i < millisec; i++) { hptiop_intr_mv(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i, index; u_int64_t phy_addr; struct hpt_iop_request_header *reqhdr = (struct hpt_iop_request_header *)req; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy; reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); reqhdr->context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); for (i = 0; i < millisec; i++) { hptiop_intr_mvfrey(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec) { u_int32_t i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i=0; i<millisec; i++) { hba->ops->iop_intr(hba); if (hba->msg_done) break; DELAY(1000); } return hba->msg_done?
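/*
 * The inbound-list write pointer above keeps its slot index in the low
 * 14 bits and XORs CL_POINTER_TOGGLE on every wrap; comparing toggle
 * bits lets producer and consumer distinguish a completely full ring
 * from an empty one, the classic extra-bit answer to ring-pointer
 * ambiguity. The advance step, isolated (the toggle bit's position is
 * assumed here, not taken from the driver headers):
 */
#define IDX_MASK    0x3fff
#define TOGGLE_BIT  0x4000      /* assumed value of CL_POINTER_TOGGLE */

static uint32_t
inlist_advance(uint32_t wptr, uint32_t list_count)
{
    wptr++;
    if ((wptr & IDX_MASK) == list_count)        /* stepped past last slot */
        wptr = (wptr & ~IDX_MASK) ^ TOGGLE_BIT; /* index 0, flip toggle */
    return (wptr);
}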
0 : -1; } static int hptiop_get_config_itl(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { u_int32_t req32; config->header.size = sizeof(struct hpt_iop_request_get_config); config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_header) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } bus_space_read_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_get_config) >> 2); BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_get_config_mv(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_get_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } *config = *req; return 0; } static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; if (info->header.size != sizeof(struct hpt_iop_request_get_config) || info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) { KdPrint(("hptiop: header size %x/%x type %x/%x", info->header.size, (int)sizeof(struct hpt_iop_request_get_config), info->header.type, IOP_REQUEST_TYPE_GET_CONFIG)); return -1; } config->interface_version = info->interface_version; config->firmware_version = info->firmware_version; config->max_requests = info->max_requests; config->request_size = info->request_size; config->max_sg_count = info->max_sg_count; config->data_transfer_length = info->data_transfer_length; config->alignment_mask = info->alignment_mask; config->max_devices = info->max_devices; config->sdram_size = info->sdram_size; KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x", config->max_requests, config->request_size, config->data_transfer_length, config->max_devices, config->sdram_size)); return 0; } static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { u_int32_t req32; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; config->header.size = sizeof(struct hpt_iop_request_set_config); config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_set_config) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct 
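/*
 * hptiop_get_config_mvfrey() below trusts a config block the firmware
 * exposes directly through BAR0, but only after checking its size and
 * type fields against the driver's own struct definition, a cheap
 * guard against driver/firmware ABI drift. The guard, in general form:
 */
if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
    return (-1);    /* firmware layout does not match this driver */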
hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams) { u_int64_t temp64; struct hpt_iop_request_ioctl_command req; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; req.header.result = IOP_RESULT_PENDING; req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req.inbuf_size = pParams->nInBufferSize; req.outbuf_size = pParams->nOutBufferSize; req.bytes_returned = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); hptiop_lock_adapter(hba); BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); while (temp64) { if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); } hptiop_unlock_adapter(hba); return 0; } static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { if (copyin((u_int8_t *)user + i, &byte, 1)) return -1; bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); } return 0; } static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); if (copyout(&byte, (u_int8_t *)user + i, 1)) return -1; } return 0; } static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams) { u_int32_t req32; u_int32_t result; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return EFAULT; if
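/*
 * hptiop_post_ioctl_command_itl() above reuses the request's 64-bit
 * header.context as a completion flag: it is written non-zero when the
 * request is posted, and the IOCTL branch of
 * hptiop_request_callback_itl() zeroes it, so "context still non-zero"
 * doubles as "request still in flight". The wait loop re-reads it
 * after every timed-out sleep and resets the IOP before retrying
 * (wchan stands in for the sleep channel used when posting):
 */
while (temp64 != 0) {
    if (hptiop_sleep(hba, wchan, PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
        break;      /* woken by the completion callback */
    hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
    /* re-read header.context from the IOP into temp64 here */
}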
(pParams->nInBufferSize) if (hptiop_bus_space_copyin(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf), (void *)pParams->lpInBuffer, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) goto invalid; result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.result)); if (result == IOP_RESULT_SUCCESS) { if (pParams->nOutBufferSize) if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf) + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) { if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), (void *)pParams->lpBytesReturned, sizeof(unsigned long))) goto invalid; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } else{ invalid: BUS_SPACE_WRT4_ITL(outbound_queue, req32); return EFAULT; } } static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t req_phy; int size = 0; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; size = size > 3 ? 
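/*
 * All the do_ioctl variants in this file share one marshalling shape:
 * check the magic, copy the input payload into the shared request
 * buffer, post and wait, then copy out the output (which starts at the
 * input size rounded up to 4 bytes) and the returned-byte count,
 * funneling every failure through a single EFAULT exit. Skeleton with
 * condensed, hypothetical names (post_and_wait() is assumed):
 */
static int
do_ioctl_skel(struct hpt_iop_hba *hba,
    struct hpt_iop_request_ioctl_command *req,
    void *user_in, int in_size, void *user_out, int out_size)
{
    if (in_size && copyin(user_in, req->buf, in_size))
        goto invalid;
    if (post_and_wait(hba, req))
        goto invalid;
    if (out_size && copyout(req->buf + roundup(in_size, 4), user_out,
        out_size))
        goto invalid;
    return (0);
invalid:
    return (EFAULT);
}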
3 : size; req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; hptiop_mv_inbound_write(req_phy, hba); BUS_SPACE_RD4_MV0(outbound_intmask); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mv(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t phy_addr; u_int32_t index; phy_addr = hba->ctlcfgcmd_phy; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); req->header.context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if 
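/*
 * Because the MV request buffer's DMA address is well aligned, its low
 * bits are guaranteed zero and can carry metadata through the single
 * 64-bit queue word: the code above stores a clamped size hint
 * (header.size >> 8, capped at 3) plus MVIOP_MU_QUEUE_ADDR_HOST_BIT in
 * the low bits of req_phy. The encode step, isolated:
 */
size = req->header.size >> 8;
if (size > 3)
    size = 3;       /* only a 2-bit field is available */
req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;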
(copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_rescan_bus(struct hpt_iop_hba * hba) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); return(0); } static bus_dmamap_callback_t hptiop_map_srb; static bus_dmamap_callback_t hptiop_post_scsi_command; static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg; static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop base address.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.itl.mu = (struct hpt_iopmu_itl *) rman_get_virtual(hba->bar0_res); if (!hba->u.itl.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc mem res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mv.regs = (struct hpt_iopmv_regs *) rman_get_virtual(hba->bar0_res); if (!hba->u.mv.regs) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); if (!hba->u.mv.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mvfrey.config = (struct hpt_iop_request_get_config *) rman_get_virtual(hba->bar0_res); if (!hba->u.mvfrey.config) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18;
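/*
 * The rids used above are raw PCI config-space offsets: 0x10 is BAR0
 * and 0x18 is BAR2. <dev/pci/pcireg.h> provides PCIR_BAR(x), which
 * expands to 0x10 + (x) * 4, so the same assignments can be written
 * without magic numbers:
 */
hba->bar0_rid = PCIR_BAR(0);    /* == 0x10 */
hba->bar2_rid = PCIR_BAR(2);    /* == 0x18 */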
hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res); if (!hba->u.mvfrey.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); } static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) { if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 0x800 - 0x8, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, MVIOP_IOCTLCFG_SIZE, hptiop_mv_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba) { u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl); list_count >>= 16; if (list_count == 0) { return -1; } hba->u.mvfrey.list_count = list_count; hba->u.mvfrey.internal_mem_size = 0x800 + list_count * sizeof(struct mvfrey_inlist_entry) + list_count * sizeof(struct mvfrey_outlist_entry) + sizeof(int); if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, hba->u.mvfrey.internal_mem_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, hba->u.mvfrey.internal_mem_size, hptiop_mvfrey_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); 
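/*
 * Both internal_memalloc variants follow the canonical busdma triple:
 * bus_dma_tag_create() declares the constraints, bus_dmamem_alloc()
 * provides wired, DMA-able memory plus a map, and bus_dmamap_load()
 * resolves the bus address through a callback. Teardown must undo the
 * steps in reverse order, exactly as the memfree routines below do:
 */
bus_dmamap_unload(dmat, map);       /* undo bus_dmamap_load()    */
bus_dmamem_free(dmat, vaddr, map);  /* undo bus_dmamem_alloc()   */
bus_dma_tag_destroy(dmat);          /* undo bus_dma_tag_create() */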
bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) { return 0; } static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba) { u_int32_t i = 100; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) return -1; /* wait 100ms for MCU ready */ while(i--) { DELAY(1000); } BUS_SPACE_WRT4_MVFREY2(inbound_base, hba->u.mvfrey.inlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(inbound_base_high, (hba->u.mvfrey.inlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_base, hba->u.mvfrey.outlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_base_high, (hba->u.mvfrey.outlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base, hba->u.mvfrey.outlist_cptr_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high, (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16); hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1; return 0; } /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hptiop_probe), DEVMETHOD(device_attach, hptiop_attach), DEVMETHOD(device_detach, hptiop_detach), DEVMETHOD(device_shutdown, hptiop_shutdown), { 0, 0 } }; static struct hptiop_adapter_ops hptiop_itl_ops = { .family = INTEL_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_itl, .internal_memalloc = 0, .internal_memfree = hptiop_internal_memfree_itl, .alloc_pci_res = hptiop_alloc_pci_res_itl, .release_pci_res = hptiop_release_pci_res_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = hptiop_get_config_itl, .set_config = hptiop_set_config_itl, .iop_intr = hptiop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .do_ioctl = hptiop_do_ioctl_itl, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .family = MV_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .alloc_pci_res = hptiop_alloc_pci_res_mv, .release_pci_res = hptiop_release_pci_res_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = hptiop_get_config_mv, .set_config = hptiop_set_config_mv, .iop_intr = hptiop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .do_ioctl = hptiop_do_ioctl_mv, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .family = MVFREY_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mvfrey, .internal_memalloc = hptiop_internal_memalloc_mvfrey, .internal_memfree = hptiop_internal_memfree_mvfrey, .alloc_pci_res = hptiop_alloc_pci_res_mvfrey, .release_pci_res = hptiop_release_pci_res_mvfrey, .enable_intr = hptiop_enable_intr_mvfrey, .disable_intr = hptiop_disable_intr_mvfrey, .get_config = 
hptiop_get_config_mvfrey, .set_config = hptiop_set_config_mvfrey, .iop_intr = hptiop_intr_mvfrey, .post_msg = hptiop_post_msg_mvfrey, .post_req = hptiop_post_req_mvfrey, .do_ioctl = hptiop_do_ioctl_mvfrey, .reset_comm = hptiop_reset_comm_mvfrey, }; static driver_t hptiop_pci_driver = { driver_name, driver_methods, sizeof(struct hpt_iop_hba) }; DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); MODULE_DEPEND(hptiop, cam, 1, 1, 1); static int hptiop_probe(device_t dev) { struct hpt_iop_hba *hba; u_int32_t id; static char buf[256]; int sas = 0; struct hptiop_adapter_ops *ops; if (pci_get_vendor(dev) != 0x1103) return (ENXIO); id = pci_get_device(dev); switch (id) { case 0x4520: case 0x4521: case 0x4522: sas = 1; case 0x3620: case 0x3622: case 0x3640: ops = &hptiop_mvfrey_ops; break; case 0x4210: case 0x4211: case 0x4310: case 0x4311: case 0x4320: case 0x4321: case 0x4322: sas = 1; case 0x3220: case 0x3320: case 0x3410: case 0x3520: case 0x3510: case 0x3511: case 0x3521: case 0x3522: case 0x3530: case 0x3540: case 0x3560: ops = &hptiop_itl_ops; break; case 0x3020: case 0x3120: case 0x3122: ops = &hptiop_mv_ops; break; default: return (ENXIO); } device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)); sprintf(buf, "RocketRAID %x %s Controller\n", id, sas ? "SAS" : "SATA"); device_set_desc_copy(dev, buf); hba = (struct hpt_iop_hba *)device_get_softc(dev); bzero(hba, sizeof(struct hpt_iop_hba)); hba->ops = ops; KdPrint(("hba->ops=%p\n", hba->ops)); return 0; } static int hptiop_attach(device_t dev) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; int rid = 0; struct cam_devq *devq; struct ccb_setasync ccb; u_int32_t unit = device_get_unit(dev); device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", unit, driver_version); KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), hba->ops)); pci_enable_busmaster(dev); hba->pcidev = dev; hba->pciunit = unit; if (hba->ops->alloc_pci_res(hba)) return ENXIO; if (hba->ops->iop_wait_ready(hba, 2000)) { device_printf(dev, "adapter is not ready\n"); goto release_pci_res; } mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->parent_dmat /* tag */)) { device_printf(dev, "alloc parent_dmat failed\n"); goto release_pci_res; } if (hba->ops->family == MV_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } } if (hba->ops->get_config(hba, &iop_config)) { device_printf(dev, "get iop config failed.\n"); goto get_config_failed; } hba->firmware_version = iop_config.firmware_version; hba->interface_version = iop_config.interface_version; hba->max_requests = iop_config.max_requests; hba->max_devices = iop_config.max_devices; hba->max_request_size = iop_config.request_size; hba->max_sg_count = iop_config.max_sg_count; if (hba->ops->family == MVFREY_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat 
failed\n"); goto destroy_parent_tag; } if (hba->ops->reset_comm(hba)) { device_printf(dev, "reset comm failed\n"); goto get_config_failed; } } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ hba->max_sg_count, /* nsegments */ 0x20000, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &hba->lock, /* lockfuncarg */ &hba->io_dmat /* tag */)) { device_printf(dev, "alloc io_dmat failed\n"); goto get_config_failed; } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->srb_dmat /* tag */)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_io_dmat; } if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->srb_dmamap) != 0) { device_printf(dev, "srb bus_dmamem_alloc failed!\n"); goto destroy_srb_dmat; } if (bus_dmamap_load(hba->srb_dmat, hba->srb_dmamap, hba->uncached_ptr, (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, hptiop_map_srb, hba, 0)) { device_printf(dev, "bus_dmamap_load failed!\n"); goto srb_dmamem_free; } if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { device_printf(dev, "cam_simq_alloc failed\n"); goto srb_dmamap_unload; } hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, &hba->lock, hba->max_requests - 1, 1, devq); if (!hba->sim) { device_printf(dev, "cam_sim_alloc failed\n"); cam_simq_free(devq); goto srb_dmamap_unload; } hptiop_lock_adapter(hba); if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "xpt_bus_register failed\n"); goto free_cam_sim; } if (xpt_create_path(&hba->path, /*periph */ NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt_create_path failed\n"); goto deregister_xpt_bus; } hptiop_unlock_adapter(hba); bzero(&set_config, sizeof(set_config)); set_config.iop_id = unit; set_config.vbus_id = cam_sim_path(hba->sim); set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; if (hba->ops->set_config(hba, &set_config)) { device_printf(dev, "set iop config failed.\n"); goto free_hba_path; } xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, - &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, + &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hptiop_pci_intr, hba, &hba->irq_handle)) { device_printf(dev, "allocate intr function failed!\n"); goto free_irq_resource; } if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { device_printf(dev, "fail to start background task\n"); goto teartown_irq_resource; } hba->ops->enable_intr(hba); 
hba->initialized = 1; hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit, UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); return 0; teartown_irq_resource: bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); free_irq_resource: bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); hptiop_lock_adapter(hba); free_hba_path: xpt_free_path(hba->path); deregister_xpt_bus: xpt_bus_deregister(cam_sim_path(hba->sim)); free_cam_sim: cam_sim_free(hba->sim, /*free devq*/ TRUE); hptiop_unlock_adapter(hba); srb_dmamap_unload: if (hba->uncached_ptr) bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); srb_dmamem_free: if (hba->uncached_ptr) bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap); destroy_srb_dmat: if (hba->srb_dmat) bus_dma_tag_destroy(hba->srb_dmat); destroy_io_dmat: if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); get_config_failed: hba->ops->internal_memfree(hba); destroy_parent_tag: if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); release_pci_res: if (hba->ops->release_pci_res) hba->ops->release_pci_res(hba); return ENXIO; } static int hptiop_detach(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int i; int error = EBUSY; hptiop_lock_adapter(hba); for (i = 0; i < hba->max_devices; i++) if (hptiop_os_query_remove_device(hba, i)) { device_printf(dev, "%d file system is busy. id=%d", hba->pciunit, i); goto out; } if ((error = hptiop_shutdown(dev)) != 0) goto out; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) goto out; hptiop_unlock_adapter(hba); hptiop_release_resource(hba); return (0); out: hptiop_unlock_adapter(hba); return error; } static int hptiop_shutdown(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int error = 0; if (hba->flag & HPT_IOCTL_FLAG_OPEN) { device_printf(dev, "%d device is busy", hba->pciunit); return EBUSY; } hba->ops->disable_intr(hba); if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) error = EBUSY; return error; } static void hptiop_pci_intr(void *arg) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; hptiop_lock_adapter(hba); hba->ops->iop_intr(hba); hptiop_unlock_adapter(hba); } static void hptiop_poll(struct cam_sim *sim) { struct hpt_iop_hba *hba; hba = cam_sim_softc(sim); hba->ops->iop_intr(hba); } static void hptiop_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { } static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_ITL(outbound_intmask, ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); } static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG; BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); } static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void 
hptiop_disable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_reset_adapter(void *argv) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000)) return; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000); } static void *hptiop_get_srb(struct hpt_iop_hba * hba) { struct hpt_iop_srb * srb; if (hba->srb_list) { srb = hba->srb_list; hba->srb_list = srb->next; return srb; } return NULL; } static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) { srb->next = hba->srb_list; hba->srb_list = srb; } static void hptiop_action(struct cam_sim *sim, union ccb *ccb) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; int error; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= hba->max_devices || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if ((srb = hptiop_get_srb(hba)) == NULL) { device_printf(hba->pcidev, "srb allocated failed"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); return; } srb->ccb = ccb; error = bus_dmamap_load_ccb(hba->io_dmat, srb->dma_map, ccb, hptiop_post_scsi_command, srb, 0); if (error && error != EINPROGRESS) { device_printf(hba->pcidev, "%d bus_dmamap_load error %d", hba->pciunit, error); xpt_freeze_simq(hba->sim, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR; hptiop_free_srb(hba, srb); xpt_done(ccb); return; } return; case XPT_RESET_BUS: device_printf(hba->pcidev, "reset adapter"); hba->msg_done = 0; hptiop_reset_adapter(hba); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = hba->max_devices; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = hba->max_devices; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx; union ccb *ccb = srb->ccb; u_int8_t *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("ccb=%p %x-%x-%x\n", ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); if (srb->srb_flag & 
HPT_SRB_FLAG_HIGH_MEM_ACESS) { u_int32_t iop_req32; struct hpt_iop_request_scsi_command req; iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (iop_req32 == IOPMU_QUEUE_EMPTY) { device_printf(hba->pcidev, "invaild req offset\n"); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req.sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req.cdb, ccb->csio.cdb_len); req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req.header.flags = 0; req.header.result = IOP_RESULT_PENDING; req.header.context = (u_int64_t)(unsigned long)srb; req.dataxfer_length = ccb->csio.dxfer_len; req.channel = 0; req.target = ccb->ccb_h.target_id; req.lun = ccb->ccb_h.target_lun; bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, (u_int8_t *)&req, req.header.size); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); } else { struct hpt_iop_request_scsi_command *req; req = (struct hpt_iop_request_scsi_command *)srb; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req->header.context = (u_int64_t)srb->index | IOPMU_QUEUE_ADDR_HOST_BIT; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { u_int32_t size_bits; if (req->header.size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (req->header.size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr | size_bits); } else BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr |IOPMU_QUEUE_ADDR_HOST_BIT); } } static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, size; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if 
(ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.context = (u_int64_t)srb->index << MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; hptiop_mv_inbound_write(req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | (size > 3 ? 3 : size), hba); } static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, index; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((req_phy >> 16) & 0xffff0000); req->header.context = ((req_phy & 0xffffffff) << 32 ) | srb->index << 4 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = req_phy; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) { callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba); } } static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; union ccb *ccb = srb->ccb; struct hpt_iop_hba *hba = srb->hba; if (error || nsegs > hba->max_sg_count) { KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n", ccb->ccb_h.func_code, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, nsegs)); ccb->ccb_h.status = 
CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } hba->ops->post_req(hba, srb, segs, nsegs); } static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); } static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; char *p; u_int64_t phy; u_int32_t list_count = hba->u.mvfrey.list_count; phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); hba->ctlcfgcmd_phy = phy; hba->ctlcfg_ptr = p; p += 0x800; phy += 0x800; hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba->u.mvfrey.inlist_phy = phy; p += list_count * sizeof(struct mvfrey_inlist_entry); phy += list_count * sizeof(struct mvfrey_inlist_entry); hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba->u.mvfrey.outlist_phy = phy; p += list_count * sizeof(struct mvfrey_outlist_entry); phy += list_count * sizeof(struct mvfrey_outlist_entry); hba->u.mvfrey.outlist_cptr = (u_int32_t *)p; hba->u.mvfrey.outlist_cptr_phy = phy; } static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; struct hpt_iop_srb *srb, *tmp_srb; int i; if (error || nsegs == 0) { device_printf(hba->pcidev, "hptiop_map_srb error"); return; } /* map srb */ srb = (struct hpt_iop_srb *) (((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F); for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { tmp_srb = (struct hpt_iop_srb *) ((char *)srb + i * HPT_SRB_MAX_SIZE); if (((unsigned long)tmp_srb & 0x1F) == 0) { if (bus_dmamap_create(hba->io_dmat, 0, &tmp_srb->dma_map)) { device_printf(hba->pcidev, "dmamap create failed"); return; } bzero(tmp_srb, sizeof(struct hpt_iop_srb)); tmp_srb->hba = hba; tmp_srb->index = i; if (hba->ctlcfg_ptr == 0) {/*itl iop*/ tmp_srb->phy_addr = (u_int64_t)(u_int32_t) (phy_addr >> 5); if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS; } else { tmp_srb->phy_addr = phy_addr; } callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0); hptiop_free_srb(hba, tmp_srb); hba->srb[i] = tmp_srb; phy_addr += HPT_SRB_MAX_SIZE; } else { device_printf(hba->pcidev, "invalid alignment"); return; } } } static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg) { hba->msg_done = 1; } static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, int target_id) { struct cam_periph *periph = NULL; struct cam_path *path; int status, retval = 0; status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); if (status == CAM_REQ_CMP) { if ((periph = cam_periph_find(path, "da")) != NULL) { if (periph->refcount >= 1) { device_printf(hba->pcidev, "%d ," "target_id=0x%x," "refcount=%d", hba->pciunit, target_id, periph->refcount); retval = -1; } } xpt_free_path(path); } return retval; } static void hptiop_release_resource(struct hpt_iop_hba *hba) { int i; if (hba->ioctl_dev) destroy_dev(hba->ioctl_dev); if (hba->path) { struct ccb_setasync ccb; xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = 0; ccb.callback = hptiop_async; 
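/*
 * (The map callbacks above -- hptiop_mv_map_ctlcfg, hptiop_mvfrey_map_ctlcfg
 * and hptiop_map_srb -- all round addresses up to a 32-byte boundary with
 * the (x + 0x1F) & ~0x1F idiom; for a power-of-two boundary N the general
 * form is (x + (N - 1)) & ~(N - 1), which is what FreeBSD's roundup2()
 * macro from <sys/param.h> computes.)
 */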
ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); xpt_free_path(hba->path); } if (hba->irq_handle) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); if (hba->sim) { hptiop_lock_adapter(hba); xpt_bus_deregister(cam_sim_path(hba->sim)); cam_sim_free(hba->sim, TRUE); hptiop_unlock_adapter(hba); } if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { struct hpt_iop_srb *srb = hba->srb[i]; if (srb->dma_map) bus_dmamap_destroy(hba->io_dmat, srb->dma_map); callout_drain(&srb->timeout); } if (hba->srb_dmat) { bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); bus_dma_tag_destroy(hba->srb_dmat); } if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); if (hba->irq_res) bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res); if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); mtx_destroy(&hba->lock); } Index: head/sys/dev/hptmv/entry.c =================================================================== --- head/sys/dev/hptmv/entry.c (revision 295789) +++ head/sys/dev/hptmv/entry.c (revision 295790) @@ -1,2992 +1,2992 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #ifdef DEBUG #ifdef DEBUG_LEVEL int hpt_dbg_level = DEBUG_LEVEL; #else int hpt_dbg_level = 0; #endif #endif #define MV_ERROR printf /* * CAM SIM entry points */ static int hpt_probe (device_t dev); static void launch_worker_thread(void); static int hpt_attach(device_t dev); static int hpt_detach(device_t dev); static int hpt_shutdown(device_t dev); static void hpt_poll(struct cam_sim *sim); static void hpt_intr(void *arg); static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hpt_action(struct cam_sim *sim, union ccb *ccb); static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { __str(PROC_DIR_NAME), driver_methods, sizeof(IAL_ADAPTER_T) }; static devclass_t hpt_devclass; #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) __DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); MODULE_DEPEND(PROC_DIR_NAME, cam, 1, 1, 1); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev); static void HPTLIBAPI OsSendCommand (_VBUS_ARG union ccb * ccb); static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd); static void ccb_done(union ccb *ccb); static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_intr_locked(IAL_ADAPTER_T *pAdapter); static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void handleEdmaError(_VBUS_ARG PCommand pCmd); static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static int fResetActiveCommands(PVBus _vbus_p); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter); static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_handle_event_disconnect(void *data); static void hptmv_handle_event_connect(void *data); static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel); static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel); static int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical); static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct); static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static struct sx hptmv_list_lock; SX_SYSINIT(hptmv_list_lock, &hptmv_list_lock, "hptmv list"); IAL_ADAPTER_T *gIal_Adapter = 0; IAL_ADAPTER_T *pCurAdapter = 0; static MV_SATA_CHANNEL gMvSataChannels[MAX_VBUS][MV_SATA_CHANNELS_NUM]; typedef struct st_HPT_DPC { IAL_ADAPTER_T *pAdapter; void (*dpc)(IAL_ADAPTER_T *, void *, UCHAR); void *arg; UCHAR flags; } ST_HPT_DPC; #define MAX_DPC 16 
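/*
 * A minimal sketch of how a fixed-size ring of ST_HPT_DPC entries is
 * typically filled; hpt_dpc_enqueue_sketch() is illustrative only, not
 * the driver's actual queueing routine, and the caller is assumed to
 * hold whatever mutex guards the ring (DpcQueue_Lock below).
 */
static __inline int
hpt_dpc_enqueue_sketch(ST_HPT_DPC *ring, int *first, int *last,
    const ST_HPT_DPC *item)
{
	int next = (*last + 1) % MAX_DPC;

	if (next == *first)	/* ring full: caller must drop or retry */
		return (-1);
	ring[*last] = *item;
	*last = next;
	return (0);
}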
UCHAR DPC_Request_Nums = 0; static ST_HPT_DPC DpcQueue[MAX_DPC]; static int DpcQueue_First=0; static int DpcQueue_Last = 0; static struct mtx DpcQueue_Lock; MTX_SYSINIT(hpmtv_dpc_lock, &DpcQueue_Lock, "hptmv dpc", MTX_DEF); char DRIVER_VERSION[] = "v1.16"; /******************************************************************************* * Name: hptmv_free_channel * * Description: free the allocated queues for the given channel * * Parameters: pAdapter - pointer to the RR18xx controller this * channel is connected to. * channelNum - channel number. * ******************************************************************************/ static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { HPT_ASSERT(channelNum < MV_SATA_CHANNELS_NUM); pAdapter->mvSataAdapter.sataChannel[channelNum] = NULL; } static void failDevice(PVDevice pVDev) { PVBus _vbus_p = pVDev->pVBus; IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; pVDev->u.disk.df_on_line = 0; pVDev->vf_online = 0; if (pVDev->pfnDeviceFailed) CallWhenIdle(_VBUS_P (DPC_PROC)pVDev->pfnDeviceFailed, pVDev); fNotifyGUI(ET_DEVICE_REMOVED, pVDev); #ifndef FOR_DEMO if (pAdapter->ver_601==2 && !pAdapter->beeping) { pAdapter->beeping = 1; BeepOn(pAdapter->mvSataAdapter.adapterIoBaseAddress); set_fail_led(&pAdapter->mvSataAdapter, pVDev->u.disk.mv->channelNumber, 1); } #endif } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel); static void handleEdmaError(_VBUS_ARG PCommand pCmd) { PDevice pDevice = &pCmd->pVDevice->u.disk; MV_SATA_ADAPTER * pSataAdapter = pDevice->mv->mvSataAdapter; if (!pDevice->df_on_line) { KdPrint(("Device is offline")); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } if (pCmd->RetryCount++>5) { hpt_printk(("too many retries on channel(%d)\n", pDevice->mv->channelNumber)); failed: failDevice(pCmd->pVDevice); pCmd->Result = RETURN_IDE_ERROR; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* reset the channel and retry the command */ if (MvSataResetChannel(pSataAdapter, pDevice->mv->channelNumber)) goto failed; fNotifyGUI(ET_DEVICE_ERROR, Map2pVDevice(pDevice)); hpt_printk(("Retry on channel(%d)\n", pDevice->mv->channelNumber)); fDeviceSendCommand(_VBUS_P pCmd); } /**************************************************************** * Name: hptmv_init_channel * * Description: allocates request and response queues for the EDMA of the * given channel and sets other fields. * * Parameters: * pAdapter - pointer to the emulated adapter data structure * channelNum - channel number. 
* Return: 0 on success, otherwise on failure ****************************************************************/ static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_CHANNEL *pMvSataChannel; dma_addr_t req_dma_addr; dma_addr_t rsp_dma_addr; if (channelNum >= MV_SATA_CHANNELS_NUM) { MV_ERROR("RR18xx[%d]: Bad channelNum=%d", pAdapter->mvSataAdapter.adapterId, channelNum); return -1; } pMvSataChannel = &gMvSataChannels[pAdapter->mvSataAdapter.adapterId][channelNum]; pAdapter->mvSataAdapter.sataChannel[channelNum] = pMvSataChannel; pMvSataChannel->channelNumber = channelNum; pMvSataChannel->lba48Address = MV_FALSE; pMvSataChannel->maxReadTransfer = MV_FALSE; pMvSataChannel->requestQueue = (struct mvDmaRequestQueueEntry *) (pAdapter->requestsArrayBaseAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE)); req_dma_addr = pAdapter->requestsArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE); KdPrint(("requestQueue addr is 0x%llX", (HPT_U64)(ULONG_PTR)req_dma_addr)); /* check the 1K alignment of the request queue */ if (req_dma_addr & 0x3ff) { MV_ERROR("RR18xx[%d]: request queue allocated isn't 1K aligned," " dma_addr=%llx channel=%d\n", pAdapter->mvSataAdapter.adapterId, (HPT_U64)(ULONG_PTR)req_dma_addr, channelNum); return -1; } pMvSataChannel->requestQueuePciLowAddress = req_dma_addr; pMvSataChannel->requestQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: request queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->requestQueue)); pMvSataChannel->responseQueue = (struct mvDmaResponseQueueEntry *) (pAdapter->responsesArrayBaseAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE)); rsp_dma_addr = pAdapter->responsesArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE); /* check the 256-byte alignment of the response queue */ if (rsp_dma_addr & 0xff) { MV_ERROR("RR18xx[%d,%d]: response queue allocated isn't 256 byte " "aligned, dma_addr=%llx\n", pAdapter->mvSataAdapter.adapterId, channelNum, (HPT_U64)(ULONG_PTR)rsp_dma_addr); return -1; } pMvSataChannel->responseQueuePciLowAddress = rsp_dma_addr; pMvSataChannel->responseQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: response queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->responseQueue)); pAdapter->mvChannel[channelNum].online = MV_TRUE; return 0; } /****************************************************************************** * Name: hptmv_parse_identify_results * * Description: this function parses the identify command results, checks * that the connected devices can be accessed by RR18xx EDMA, * and updates the channel structure accordingly. * * Parameters: pMvSataChannel, pointer to the channel data structure. * * Returns: = 0 -> success, < 0 -> failure. * ******************************************************************************/ static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel) { MV_U16 *iden = pMvSataChannel->identifyDevice; /* LBA addressing */ if (! (iden[IDEN_CAPACITY_1_OFFSET] & 0x200)) { KdPrint(("IAL Error in IDENTIFY info: LBA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "LBA supported")); } /* DMA support */ if (! 
(iden[IDEN_CAPACITY_1_OFFSET] & 0x100)) { KdPrint(("IAL Error in IDENTIFY info: DMA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "DMA supported")); } /* PIO */ if ((iden[IDEN_VALID] & 2) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find PIO mode\n")); return -1; } KdPrint(("%25s - 0x%02x\n", "PIO modes supported", iden[IDEN_PIO_MODE_SPPORTED] & 0xff)); /*UDMA*/ if ((iden[IDEN_VALID] & 4) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find UDMA mode\n")); return -1; } /* 48 bit address */ if ((iden[IDEN_SUPPORTED_COMMANDS2] & 0x400)) { KdPrint(("%25s - %s\n", "LBA48 addressing", "supported")); pMvSataChannel->lba48Address = MV_TRUE; } else { KdPrint(("%25s - %s\n", "LBA48 addressing", "Not supported")); pMvSataChannel->lba48Address = MV_FALSE; } return 0; } static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel) { PVDevice pVDev = &pAdapter->VDevices[channel]; MV_SATA_CHANNEL *pMvSataChannel = pAdapter->mvSataAdapter.sataChannel[channel]; MV_U16_PTR IdentifyData = pMvSataChannel->identifyDevice; pMvSataChannel->outstandingCommands = 0; pVDev->u.disk.mv = pMvSataChannel; pVDev->u.disk.df_on_line = 1; pVDev->u.disk.pVBus = &pAdapter->VBus; pVDev->pVBus = &pAdapter->VBus; #ifdef SUPPORT_48BIT_LBA if (pMvSataChannel->lba48Address == MV_TRUE) pVDev->u.disk.dDeRealCapacity = ((IdentifyData[101]<<16) | IdentifyData[100]) - 1; else #endif if(IdentifyData[53] & 1) { pVDev->u.disk.dDeRealCapacity = (((IdentifyData[58]<<16 | IdentifyData[57]) < (IdentifyData[61]<<16 | IdentifyData[60])) ? (IdentifyData[61]<<16 | IdentifyData[60]) : (IdentifyData[58]<<16 | IdentifyData[57])) - 1; } else pVDev->u.disk.dDeRealCapacity = (IdentifyData[61]<<16 | IdentifyData[60]) - 1; pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxPioModeSupported - MV_ATA_TRANSFER_PIO_0; if (pAdapter->mvChannel[channel].maxUltraDmaModeSupported!=0xFF) { pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxUltraDmaModeSupported - MV_ATA_TRANSFER_UDMA_0 + 8; } } static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plugged) { PVDevice pVDev; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelIndex]; if (!pMvSataChannel) return; if (plugged) { pVDev = &(pAdapter->VDevices[channelIndex]); init_vdev_params(pAdapter, channelIndex); pVDev->VDeviceType = pVDev->u.disk.df_atapi? VD_ATAPI : pVDev->u.disk.df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->VDeviceCapacity = pVDev->u.disk.dDeRealCapacity-SAVE_FOR_RAID_INFO; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; pVDev->vf_online = 1; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { int iMember; for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; } #endif fNotifyGUI(ET_DEVICE_PLUGGED,pVDev); fCheckBootable(pVDev); RegisterVDevice(pVDev); #ifndef FOR_DEMO if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } #endif } else { pVDev = &(pAdapter->VDevices[channelIndex]); failDevice(pVDev); } } static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelNum]; MV_CHANNEL *pChannelInfo = &(pAdapter->mvChannel[channelNum]); MV_U32 udmaMode,pioMode; KdPrint(("RR18xx [%d]: start channel (%d)", pMvSataAdapter->adapterId, channelNum)); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { /* If failed, try again - this is when trying to hardreset a channel */ /* when drive is just spinning up */ StallExec(5000000); /* wait 5 sec before trying again */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Hard reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* identify device*/ if (mvStorageDevATAIdentifyDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform ATA Identify command\n" , pMvSataAdapter->adapterId, channelNum); return -1; } if (hptmv_parse_identify_results(pMvSataChannel)) { MV_ERROR("RR18xx [%d,%d]: Error in parsing ATA Identify message\n" , pMvSataAdapter->adapterId, channelNum); return -1; } /* mvStorageDevATASetFeatures */ /* Disable 8 bit PIO in case CFA enabled */ if (pMvSataChannel->identifyDevice[86] & 4) { KdPrint(("RR18xx [%d]: Disable 8 bit PIO (CFA enabled) \n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_8_BIT_PIO, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures" " failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Write cache */ #ifdef ENABLE_WRITE_CACHE if (pMvSataChannel->identifyDevice[82] & 0x20) { if (!(pMvSataChannel->identifyDevice[85] & 0x20)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel %d, write cache enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, write cache not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else /* disable write cache */ { if (pMvSataChannel->identifyDevice[85] & 0x20) { KdPrint(("RR18xx [%d]: channel =%d, disable write cache\n", pMvSataAdapter->adapterId, channelNum)); 
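/*
 * IDENTIFY word 85 bit 5 reports that the drive's write cache is
 * currently enabled; in this !ENABLE_WRITE_CACHE build the cache
 * is switched off below, so writes are not acknowledged before
 * they reach the media.
 */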
if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, write cache disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif /* Set transfer mode */ KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_SLOW\n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 1) { pioMode = MV_ATA_TRANSFER_PIO_4; } else if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 2) { pioMode = MV_ATA_TRANSFER_PIO_3; } else { MV_ERROR("IAL Error in IDENTIFY info: PIO modes 3 and 4 not supported\n"); pioMode = MV_ATA_TRANSFER_PIO_SLOW; } KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_4\n", pMvSataAdapter->adapterId)); pAdapter->mvChannel[channelNum].maxPioModeSupported = pioMode; if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pioMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } udmaMode = MV_ATA_TRANSFER_UDMA_0; if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x40) { udmaMode = MV_ATA_TRANSFER_UDMA_6; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x20) { udmaMode = MV_ATA_TRANSFER_UDMA_5; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x10) { udmaMode = MV_ATA_TRANSFER_UDMA_4; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 8) { udmaMode = MV_ATA_TRANSFER_UDMA_3; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 4) { udmaMode = MV_ATA_TRANSFER_UDMA_2; } KdPrint(("RR18xx [%d] Set transfer mode XFER_UDMA_%d\n", pMvSataAdapter->adapterId, udmaMode & 0xf)); pChannelInfo->maxUltraDmaModeSupported = udmaMode; /*if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, udmaMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; }*/ if (pChannelInfo->maxUltraDmaModeSupported == 0xFF) return TRUE; else do { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pChannelInfo->maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) { if (pChannelInfo->maxUltraDmaModeSupported > MV_ATA_TRANSFER_UDMA_0) { if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_REG_WRITE_BYTE(pMvSataAdapter->adapterIoBaseAddress, pMvSataChannel->eDmaRegsOffset + 0x11c, /* command reg */ MV_ATA_COMMAND_IDLE_IMMEDIATE); mvMicroSecondsDelay(10000); mvSataChannelHardReset(pMvSataAdapter, channelNum); if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; } if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; pChannelInfo->maxUltraDmaModeSupported--; continue; } else return FALSE; } break; }while (1); /* Read look ahead */ #ifdef ENABLE_READ_AHEAD if (pMvSataChannel->identifyDevice[82] & 0x40) { if (!(pMvSataChannel->identifyDevice[85] & 0x40)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] 
channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, read look ahead enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, Read Look Ahead not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else { if (pMvSataChannel->identifyDevice[86] & 0x20) { KdPrint(("RR18xx [%d]:channel %d, disable read look ahead\n", pMvSataAdapter->adapterId, channelNum)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]:channel %d: ATA Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]:channel %d, read look ahead disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif { KdPrint(("RR18xx [%d]: channel %d config EDMA, Non Queued Mode\n", pMvSataAdapter->adapterId, channelNum)); if (mvSataConfigEdmaMode(pMvSataAdapter, channelNum, MV_EDMA_MODE_NOT_QUEUED, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d Error: mvSataConfigEdmaMode failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d] Failed to enable DMA, channel=%d\n", pMvSataAdapter->adapterId, channelNum); return -1; } MV_ERROR("RR18xx [%d,%d]: channel started successfully\n", pMvSataAdapter->adapterId, channelNum); #ifndef FOR_DEMO set_fail_led(pMvSataAdapter, channelNum, 0); #endif return 0; } static void hptmv_handle_event(void * data, int flag) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)data; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_U8 channelIndex; mtx_assert(&pAdapter->lock, MA_OWNED); /* mvOsSemTake(&pMvSataAdapter->semaphore); */ for (channelIndex = 0; channelIndex < MV_SATA_CHANNELS_NUM; channelIndex++) { switch(pAdapter->sataEvents[channelIndex]) { case SATA_EVENT_CHANNEL_CONNECTED: /* Handle only connects */ if (flag == 1) break; KdPrint(("RR18xx [%d,%d]: new device connected\n", pMvSataAdapter->adapterId, channelIndex)); hptmv_init_channel(pAdapter, channelIndex); if (mvSataConfigureChannel( pMvSataAdapter, channelIndex) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to configure\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { /*mvSataChannelHardReset(pMvSataAdapter, channel);*/ if (start_channel( pAdapter, channelIndex)) { MV_ERROR("RR18xx [%d,%d]Failed to start channel\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { device_change(pAdapter, channelIndex, TRUE); } } pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_CHANNEL_DISCONNECTED: /* Handle only disconnects */ if (flag == 0) break; KdPrint(("RR18xx [%d,%d]: device disconnected\n", pMvSataAdapter->adapterId, channelIndex)); /* Flush pending commands */ if(pMvSataAdapter->sataChannel[channelIndex]) { _VBUS_INST(&pAdapter->VBus) mvSataFlushDmaQueue (pMvSataAdapter, channelIndex, MV_FLUSH_TYPE_CALLBACK); CheckPendingCall(_VBUS_P0); mvSataRemoveChannel(pMvSataAdapter,channelIndex); hptmv_free_channel(pAdapter, channelIndex); pMvSataAdapter->sataChannel[channelIndex] = NULL; KdPrint(("RR18xx [%d,%d]: channel removed\n", pMvSataAdapter->adapterId, channelIndex)); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); } else { KdPrint(("RR18xx [%d,%d]: channel already removed!!\n", pMvSataAdapter->adapterId, channelIndex)); } 
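/*
 * Whether the disconnect was serviced or the channel was already
 * gone, the per-channel event slot is cleared below so the next
 * pass over sataEvents[] sees SATA_EVENT_NO_CHANGE.
 */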
pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_NO_CHANGE: break; default: break; } } /* mvOsSemRelease(&pMvSataAdapter->semaphore); */ } #define EVENT_CONNECT 1 #define EVENT_DISCONNECT 0 static void hptmv_handle_event_connect(void *data) { hptmv_handle_event (data, 0); } static void hptmv_handle_event_disconnect(void *data) { hptmv_handle_event (data, 1); } static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2) { IAL_ADAPTER_T *pAdapter = pMvSataAdapter->IALData; switch (eventType) { case MV_EVENT_TYPE_SATA_CABLE: { MV_U8 channel = param2; if (param1 == EVENT_CONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_CONNECTED; KdPrint(("RR18xx [%d,%d]: device connected event received\n", pMvSataAdapter->adapterId, channel)); /* Reset the previous timer (in case multiple drives are connected at the same time) */ callout_reset(&pAdapter->event_timer_connect, 10 * hz, hptmv_handle_event_connect, pAdapter); } else if (param1 == EVENT_DISCONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_DISCONNECTED; KdPrint(("RR18xx [%d,%d]: device disconnected event received\n", pMvSataAdapter->adapterId, channel)); device_change(pAdapter, channel, FALSE); /* Reset the previous timer (in case multiple drives are disconnected at the same time) */ /*callout_reset(&pAdapter->event_timer_disconnect, 10 * hz, hptmv_handle_event_disconnect, pAdapter); */ /* It is not necessary to wait; handle it directly */ hptmv_handle_event_disconnect(pAdapter); } else { MV_ERROR("RR18xx: illegal value for param1(%d) at " "connect/disconnect event, host=%d\n", param1, pMvSataAdapter->adapterId ); } } break; case MV_EVENT_TYPE_ADAPTER_ERROR: KdPrint(("RR18xx: DEVICE error event received, pci cause " "reg=%x, don't know how to handle this\n", param1)); return MV_TRUE; default: MV_ERROR("RR18xx[%d]: unknown event type (%d)\n", pMvSataAdapter->adapterId, eventType); return MV_FALSE; } return MV_TRUE; } static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter) { pAdapter->requestsArrayBaseAddr = (MV_U8 *)contigmalloc(REQUESTS_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->requestsArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA request" " queues\n", pAdapter->mvSataAdapter.adapterId); return -1; } pAdapter->requestsArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->requestsArrayBaseAddr); pAdapter->requestsArrayBaseAlignedAddr = pAdapter->requestsArrayBaseAddr; pAdapter->requestsArrayBaseAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->requestsArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1)); pAdapter->requestsArrayBaseDmaAlignedAddr = pAdapter->requestsArrayBaseDmaAddr; pAdapter->requestsArrayBaseDmaAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1); if ((pAdapter->requestsArrayBaseDmaAlignedAddr - pAdapter->requestsArrayBaseDmaAddr) != (pAdapter->requestsArrayBaseAlignedAddr - pAdapter->requestsArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Request Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); return -1; } /* response queues */ pAdapter->responsesArrayBaseAddr = (MV_U8 *)contigmalloc(RESPONSES_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->responsesArrayBaseAddr == NULL) { 
MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response" " queues\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr); pAdapter->responsesArrayBaseAlignedAddr = pAdapter->responsesArrayBaseAddr; pAdapter->responsesArrayBaseAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->responsesArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1)); pAdapter->responsesArrayBaseDmaAlignedAddr = pAdapter->responsesArrayBaseDmaAddr; pAdapter->responsesArrayBaseDmaAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1); if ((pAdapter->responsesArrayBaseDmaAlignedAddr - pAdapter->responsesArrayBaseDmaAddr) != (pAdapter->responsesArrayBaseAlignedAddr - pAdapter->responsesArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Response Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } return 0; } static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter) { contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); } static PVOID AllocatePRDTable(IAL_ADAPTER_T *pAdapter) { PVOID ret; if (pAdapter->pFreePRDLink) { KdPrint(("pAdapter->pFreePRDLink:%p\n",pAdapter->pFreePRDLink)); ret = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = *(void**)ret; return ret; } return NULL; } static void FreePRDTable(IAL_ADAPTER_T *pAdapter, PVOID PRDTable) { *(void**)PRDTable = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = PRDTable; } extern PVDevice fGetFirstChild(PVDevice pLogical); extern void fResetBootMark(PVDevice pLogical); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter) { PVDevice pPhysical, pLogical; PVBus pVBus; int i,j; for(i=0;iVDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; if (pLogical->vf_online==0) { pPhysical->vf_bootmark = pLogical->vf_bootmark = 0; continue; } if (pLogical->VDeviceType==VD_SPARE || pPhysical!=fGetFirstChild(pLogical)) continue; pVBus = &pAdapter->VBus; if(pVBus) { j=0; while(jpVDevice[j]) j++; if(jpVDevice[j] = pLogical; pLogical->pVBus = pVBus; if (j>0 && pLogical->vf_bootmark) { if (pVBus->pVDevice[0]->vf_bootmark) { fResetBootMark(pLogical); } else { do { pVBus->pVDevice[j] = pVBus->pVDevice[j-1]; } while (--j); pVBus->pVDevice[0] = pLogical; } } } } } } PVDevice GetSpareDisk(_VBUS_ARG PVDevice pArray) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pArray->pVBus->OsExt; LBA_T capacity = LongDiv(pArray->VDeviceCapacity, pArray->u.array.bArnMember-1); LBA_T thiscap, maxcap = MAX_LBA_T; PVDevice pVDevice, pFind = NULL; int i; for(i=0;iVDevices[i]; if(!pVDevice) continue; thiscap = pArray->vf_format_v2? 
pVDevice->u.disk.dDeRealCapacity : pVDevice->VDeviceCapacity; /* find the smallest usable spare disk */ if (pVDevice->VDeviceType==VD_SPARE && pVDevice->u.disk.df_on_line && thiscap < maxcap && thiscap >= capacity) { maxcap = pVDevice->VDeviceCapacity; pFind = pVDevice; } } return pFind; } /****************************************************************** * IO ATA Command *******************************************************************/ int HPTLIBAPI fDeReadWrite(PDevice pDev, ULONG Lba, UCHAR Cmd, void *tmpBuffer) { return mvReadWrite(pDev->mv, Lba, Cmd, tmpBuffer); } void HPTLIBAPI fDeSelectMode(PDevice pDev, UCHAR NewMode) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; UCHAR mvMode; /* 508x don't use MW-DMA? */ if (NewMode>4 && NewMode<8) NewMode = 4; pDev->bDeModeSetting = NewMode; if (NewMode<=4) mvMode = MV_ATA_TRANSFER_PIO_0 + NewMode; else mvMode = MV_ATA_TRANSFER_UDMA_0 + (NewMode-8); /*To fix 88i8030 bug*/ if (mvMode > MV_ATA_TRANSFER_UDMA_0 && mvMode < MV_ATA_TRANSFER_UDMA_4) mvMode = MV_ATA_TRANSFER_UDMA_0; mvSataDisableChannelDma(pSataAdapter, channelIndex); /* Flush pending commands */ mvSataFlushDmaQueue (pSataAdapter, channelIndex, MV_FLUSH_TYPE_NONE); if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_TRANSFER, mvMode, 0, 0, 0) == MV_FALSE) { KdPrint(("channel %d: Set Features failed\n", channelIndex)); } /* Enable EDMA */ if (mvSataEnableChannelDma(pSataAdapter, channelIndex) == MV_FALSE) KdPrint(("Failed to enable DMA, channel=%d", channelIndex)); } int HPTLIBAPI fDeSetTCQ(PDevice pDev, int enable, int depth) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if (enable) { if (pSataChannel->queuedDMA == MV_EDMA_MODE_NOT_QUEUED && (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2))) { UCHAR depth = ((pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH]) & 0x1f) + 1; channelInfo->queueDepth = (depth==32)? 
31 : depth; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_QUEUED, depth); ret = 1; } } else { if (pSataChannel->queuedDMA != MV_EDMA_MODE_NOT_QUEUED) { channelInfo->queueDepth = 2; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_NOT_QUEUED, 0); ret = 1; } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetNCQ(PDevice pDev, int enable, int depth) { return 0; } int HPTLIBAPI fDeSetWriteCache(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x20))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetReadAhead(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x40))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } #ifdef SUPPORT_ARRAY #define IdeRegisterVDevice fCheckArray #else void IdeRegisterVDevice(PDevice pDev) { PVDevice pVDev = Map2pVDevice(pDev); pVDev->VDeviceType = pDev->df_atapi? VD_ATAPI : pDev->df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->vf_online = 1; pVDev->VDeviceCapacity = pDev->dDeRealCapacity; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; } #endif static __inline PBUS_DMAMAP dmamap_get(struct IALAdapter * pAdapter) { PBUS_DMAMAP p = pAdapter->pbus_dmamap_list; if (p) pAdapter->pbus_dmamap_list = p-> next; return p; } static __inline void dmamap_put(PBUS_DMAMAP p) { p->next = p->pAdapter->pbus_dmamap_list; p->pAdapter->pbus_dmamap_list = p; } static int num_adapters = 0; static int init_adapter(IAL_ADAPTER_T *pAdapter) { PVBus _vbus_p = &pAdapter->VBus; MV_SATA_ADAPTER *pMvSataAdapter; int i, channel, rid; PVDevice pVDev; mtx_init(&pAdapter->lock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&pAdapter->event_timer_connect, &pAdapter->lock, 0); callout_init_mtx(&pAdapter->event_timer_disconnect, &pAdapter->lock, 0); sx_xlock(&hptmv_list_lock); pAdapter->next = 0; if(gIal_Adapter == 0){ gIal_Adapter = pAdapter; pCurAdapter = gIal_Adapter; } else { pCurAdapter->next = pAdapter; pCurAdapter = pAdapter; } sx_xunlock(&hptmv_list_lock); pAdapter->outstandingCommands = 0; pMvSataAdapter = &(pAdapter->mvSataAdapter); _vbus_p->OsExt = (void *)pAdapter; pMvSataAdapter->IALData = pAdapter; if (bus_dma_tag_create(bus_get_dma_tag(pAdapter->hpt_dev),/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (MAX_SG_DESCRIPTORS-1), /* maxsize */ MAX_SG_DESCRIPTORS, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &pAdapter->lock, /* lockfuncarg */ &pAdapter->io_dma_parent /* tag */)) { return ENXIO; } if (hptmv_allocate_edma_queues(pAdapter)) { MV_ERROR("RR18xx: Failed to allocate memory for EDMA queues\n"); return ENOMEM; } /* also map EPROM address */ rid = 0x10; if (!(pAdapter->mem_res = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, 0, ~0, MV_SATA_PCI_BAR0_SPACE_SIZE+0x40000, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { MV_ERROR("RR18xx: Failed to remap memory space\n"); hptmv_free_edma_queues(pAdapter); return ENXIO; } else { KdPrint(("RR18xx: io base address 0x%p\n", pMvSataAdapter->adapterIoBaseAddress)); } pMvSataAdapter->adapterId = num_adapters++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = pci_read_config(pAdapter->hpt_dev, PCIR_REVID, 1); pMvSataAdapter->pciConfigDeviceId = pci_get_device(pAdapter->hpt_dev); /* init RR18xx */ pMvSataAdapter->intCoalThre[0]= 1; pMvSataAdapter->intCoalThre[1]= 1; pMvSataAdapter->intTimeThre[0] = 1; pMvSataAdapter->intTimeThre[1] = 1; pMvSataAdapter->pciCommand = 0x0107E371; pMvSataAdapter->pciSerrMask = 0xd77fe6ul; pMvSataAdapter->pciInterruptMask = 0xd77fe6ul; pMvSataAdapter->mvSataEventNotify = hptmv_event_notify; if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { MV_ERROR("RR18xx[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); unregister: bus_release_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, rid, pAdapter->mem_res); hptmv_free_edma_queues(pAdapter); return ENXIO; } pAdapter->ver_601 = pMvSataAdapter->pcbVersion; #ifndef FOR_DEMO set_fail_leds(pMvSataAdapter, 0); #endif /* setup command blocks */ KdPrint(("Allocate command blocks\n")); _vbus_(pFreeCommands) = 0; pAdapter->pCommandBlocks = malloc(sizeof(struct _Command) * MAX_COMMAND_BLOCKS_FOR_EACH_VBUS, M_DEVBUF, M_NOWAIT); 
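    /*
     * The blocks allocated above are handed to FreeCommand() one at a
     * time in the loop below, which threads them onto the vbus free
     * list that AllocateCommand() later pops.  A rough sketch of that
     * push/pop pair (both routines live in the array core, not in this
     * file; the singly-linked layout shown is an assumption made for
     * illustration only):
     *
     *    FreeCommand(pCmd):  pCmd->pNext = pFreeCommands; pFreeCommands = pCmd;
     *    AllocateCommand():  pCmd = pFreeCommands;
     *                        if (pCmd) pFreeCommands = pCmd->pNext;
     *
     * dmamap_get()/dmamap_put() above implement the same pattern for
     * the per-request bus_dma maps.
     */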
KdPrint(("pCommandBlocks:%p\n",pAdapter->pCommandBlocks)); if (!pAdapter->pCommandBlocks) { MV_ERROR("insufficient memory\n"); goto unregister; } for (i=0; ipCommandBlocks[i])); } /*Set up the bus_dmamap*/ pAdapter->pbus_dmamap = (PBUS_DMAMAP)malloc (sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM, M_DEVBUF, M_NOWAIT); if(!pAdapter->pbus_dmamap) { MV_ERROR("insufficient memory\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); goto unregister; } memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; for (i=0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); if(bus_dmamap_create(pAdapter->io_dma_parent, 0, &pmap->dma_map)) { MV_ERROR("Can not allocate dma map\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); free(pAdapter->pbus_dmamap, M_DEVBUF); goto unregister; } callout_init_mtx(&pmap->timeout, &pAdapter->lock, 0); } /* setup PRD Tables */ KdPrint(("Allocate PRD Tables\n")); pAdapter->pFreePRDLink = 0; pAdapter->prdTableAddr = (PUCHAR)contigmalloc( (PRD_ENTRIES_SIZE*PRD_TABLES_FOR_VBUS + 32), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); KdPrint(("prdTableAddr:%p\n",pAdapter->prdTableAddr)); if (!pAdapter->prdTableAddr) { MV_ERROR("insufficient PRD Tables\n"); goto unregister; } pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; for (i=0; ipFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); PRDTable += PRD_ENTRIES_SIZE; } } /* enable the adapter interrupts */ /* configure and start the connected channels*/ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pAdapter->mvChannel[channel].online = MV_FALSE; if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_TRUE) { KdPrint(("RR18xx[%d]: channel %d is connected\n", pMvSataAdapter->adapterId, channel)); if (hptmv_init_channel(pAdapter, channel) == 0) { if (mvSataConfigureChannel(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx[%d]: Failed to configure channel" " %d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } else { if (start_channel(pAdapter, channel)) { MV_ERROR("RR18xx[%d]: Failed to start channel," " channel=%d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } pAdapter->mvChannel[channel].online = MV_TRUE; /* mvSataChannelSetEdmaLoopBackMode(pMvSataAdapter, channel, MV_TRUE);*/ } } } KdPrint(("pAdapter->mvChannel[channel].online:%x, channel:%d\n", pAdapter->mvChannel[channel].online, channel)); } #ifdef SUPPORT_ARRAY for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } #endif KdPrint(("Initialize Devices\n")); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel) { init_vdev_params(pAdapter, channel); IdeRegisterVDevice(&pAdapter->VDevices[channel].u.disk); } } #ifdef SUPPORT_ARRAY CheckArrayCritical(_VBUS_P0); #endif _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); for (channel=0;channelpVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); } #if defined(SUPPORT_ARRAY) && defined(_RAID5N_) init_raid5_memory(_VBUS_P0); _vbus_(r5).enable_write_back = 1; printf("RR18xx: RAID5 write-back %s\n", _vbus_(r5).enable_write_back? 
"enabled" : "disabled"); #endif mvSataUnmaskAdapterInterrupt(pMvSataAdapter); return 0; } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pMvSataAdapter->IALData; mvSataDisableChannelDma(pMvSataAdapter, channel); /* Flush pending commands */ mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channel)== MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Hard reser the SATA channel\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Connect Device\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; }else { MV_ERROR("channel %d: perform recalibrate command", channel); if (!mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, MV_NON_UDMA_PROTOCOL_NON_DATA, MV_FALSE, NULL, /* pBuffer*/ 0, /* count */ 0, /*features*/ /* sectorCount */ 0, 0, /* lbaLow */ 0, /* lbaMid */ /* lbaHigh */ 0, 0, /* device */ /* command */ 0x10)) MV_ERROR("channel %d: recalibrate failed", channel); /* Set transfer mode */ if((mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxPioModeSupported, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) ) { MV_ERROR("channel %d: Set Features failed", channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("Failed to enable DMA, channel=%d", channel); hptmv_free_channel(pAdapter, channel); return -1; } } return 0; } static int fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } return 0; } void fCompleteAllCommandsSynchronously(PVBus _vbus_p) { UINT cont; ULONG ticks = 0; MV_U8 channel; MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; do { check_cmds: cont = 0; CheckPendingCall(_VBUS_P0); #ifdef _RAID5N_ dataxfer_poll(); xor_poll(); #endif for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { while (pMvSataChannel->outstandingCommands) { if (!mvSataInterruptServiceRoutine(pMvSataAdapter)) { StallExec(1000); if (ticks++ > 3000) { MvSataResetChannel(pMvSataAdapter,channel); goto check_cmds; } } else ticks = 0; } cont = 1; } } } while (cont); } void fResetVBus(_VBUS_ARG0) { KdPrint(("fMvResetBus(%p)", _vbus_p)); /* some commands may already finished. 
*/ CheckPendingCall(_VBUS_P0); fResetActiveCommands(_vbus_p); /* * the other pending commands may still be finished successfully. */ fCompleteAllCommandsSynchronously(_vbus_p); /* Now there should be no pending commands. No more action needed. */ CheckIdleCall(_VBUS_P0); KdPrint(("fMvResetBus() done")); } /*No rescan function*/ void fRescanAllDevice(_VBUS_ARG0) { } static MV_BOOLEAN CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct) { PCommand pCmd = (PCommand) commandId; _VBUS_INST(pCmd->pVDevice->pVBus) if (pCmd->uScratch.sata_param.prdAddr) FreePRDTable(pMvSataAdapter->IALData,pCmd->uScratch.sata_param.prdAddr); switch (comp_type) { case MV_COMPLETION_TYPE_NORMAL: pCmd->Result = RETURN_SUCCESS; break; case MV_COMPLETION_TYPE_ABORT: pCmd->Result = RETURN_BUS_RESET; break; case MV_COMPLETION_TYPE_ERROR: MV_ERROR("IAL: COMPLETION ERROR, adapter %d, channel %d, flags=%x\n", pMvSataAdapter->adapterId, channelNum, responseFlags); if (responseFlags & 4) { MV_ERROR("ATA regs: error %x, sector count %x, LBA low %x, LBA mid %x," " LBA high %x, device %x, status %x\n", registerStruct->errorRegister, registerStruct->sectorCountRegister, registerStruct->lbaLowRegister, registerStruct->lbaMidRegister, registerStruct->lbaHighRegister, registerStruct->deviceRegister, registerStruct->statusRegister); } /*We can't do handleEdmaError directly here, because CommandCompletionCB is called by * mv's ISR, if we retry the command, than the internel data structure may be destroyed*/ pCmd->uScratch.sata_param.responseFlags = responseFlags; pCmd->uScratch.sata_param.bIdeStatus = registerStruct->statusRegister; pCmd->uScratch.sata_param.errorRegister = registerStruct->errorRegister; pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)handleEdmaError,pCmd); return TRUE; default: MV_ERROR(" Unknown completion type (%d)\n", comp_type); return MV_FALSE; } if (pCmd->uCmd.Ide.Command == IDE_COMMAND_VERIFY && pCmd->uScratch.sata_param.cmd_priv > 1) { pCmd->uScratch.sata_param.cmd_priv --; return TRUE; } pCmd->pVDevice->u.disk.QueueLength--; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return TRUE; } void fDeviceSendCommand(_VBUS_ARG PCommand pCmd) { MV_SATA_EDMA_PRD_ENTRY *pPRDTable = 0; MV_SATA_ADAPTER *pMvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; PVDevice pVDevice = pCmd->pVDevice; PDevice pDevice = &pVDevice->u.disk; LBA_T Lba = pCmd->uCmd.Ide.Lba; USHORT nSector = pCmd->uCmd.Ide.nSectors; MV_QUEUE_COMMAND_RESULT result; MV_QUEUE_COMMAND_INFO commandInfo; MV_UDMA_COMMAND_PARAMS *pUdmaParams = &commandInfo.commandParams.udmaCommand; MV_NONE_UDMA_COMMAND_PARAMS *pNoUdmaParams = &commandInfo.commandParams.NoneUdmaCommand; MV_BOOLEAN is48bit; MV_U8 channel; int i=0; DECLARE_BUFFER(FPSCAT_GATH, tmpSg); if (!pDevice->df_on_line) { MV_ERROR("Device is offline"); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } pDevice->HeadPosition = pCmd->uCmd.Ide.Lba + pCmd->uCmd.Ide.nSectors; pMvSataChannel = pDevice->mv; pMvSataAdapter = pMvSataChannel->mvSataAdapter; channel = pMvSataChannel->channelNumber; /* old RAID0 has hidden lba. Remember to clear dDeHiddenLba when delete array! 
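     * (The addition just below shifts every member-disk access past the
     * hidden region, presumably sectors reserved at the start of members
     * of old-style RAID0 arrays.)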
*/ Lba += pDevice->dDeHiddenLba; /* check LBA */ if (Lba+nSector-1 > pDevice->dDeRealCapacity) { pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* * always use 48bit LBA if drive supports it. * Some Seagate drives report error if you use a 28-bit command * to access sector 0xfffffff. */ is48bit = pMvSataChannel->lba48Address; switch (pCmd->uCmd.Ide.Command) { case IDE_COMMAND_READ: case IDE_COMMAND_WRITE: if (pDevice->bDeModeSetting<8) goto pio; commandInfo.type = MV_QUEUED_COMMAND_TYPE_UDMA; pUdmaParams->isEXT = is48bit; pUdmaParams->numOfSectors = nSector; pUdmaParams->lowLBAAddress = Lba; pUdmaParams->highLBAAddress = 0; pUdmaParams->prdHighAddr = 0; pUdmaParams->callBack = CommandCompletionCB; pUdmaParams->commandId = (MV_VOID_PTR )pCmd; if(pCmd->uCmd.Ide.Command == IDE_COMMAND_READ) pUdmaParams->readWrite = MV_UDMA_TYPE_READ; else pUdmaParams->readWrite = MV_UDMA_TYPE_WRITE; if (pCmd->pSgTable && pCmd->cf_physical_sg) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 0)) { pio: mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); if (pCmd->pSgTable && pCmd->cf_physical_sg==0) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 1)) { pCmd->Result = RETURN_NEED_LOGICAL_SG; goto finish_cmd; } } do { ULONG size = tmpSg->wSgSize? tmpSg->wSgSize : 0x10000; ULONG_PTR addr = tmpSg->dSgAddress; if (size & 0x1ff) { pCmd->Result = RETURN_INVALID_REQUEST; goto finish_cmd; } if (mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, (pCmd->cf_data_out)?MV_NON_UDMA_PROTOCOL_PIO_DATA_OUT:MV_NON_UDMA_PROTOCOL_PIO_DATA_IN, is48bit, (MV_U16_PTR)addr, size >> 1, /* count */ 0, /* features N/A */ (MV_U16)(size>>9), /*sector count*/ (MV_U16)( (is48bit? (MV_U16)((Lba >> 16) & 0xFF00) : 0 ) | (UCHAR)(Lba & 0xFF) ), /*lbalow*/ (MV_U16)((Lba >> 8) & 0xFF), /* lbaMid */ (MV_U16)((Lba >> 16) & 0xFF),/* lbaHigh */ (MV_U8)(0x40 | (is48bit ? 0 : (UCHAR)(Lba >> 24) & 0xFF )),/* device */ (MV_U8)(is48bit ? (pCmd->cf_data_in?IDE_COMMAND_READ_EXT:IDE_COMMAND_WRITE_EXT):pCmd->uCmd.Ide.Command) )==MV_FALSE) { pCmd->Result = RETURN_IDE_ERROR; goto finish_cmd; } Lba += size>>9; if(Lba & 0xF0000000) is48bit = MV_TRUE; } while ((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pCmd->Result = RETURN_SUCCESS; finish_cmd: mvSataEnableChannelDma(pMvSataAdapter,channel); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } } pPRDTable = (MV_SATA_EDMA_PRD_ENTRY *) AllocatePRDTable(pMvSataAdapter->IALData); KdPrint(("pPRDTable:%p\n",pPRDTable)); if (!pPRDTable) { pCmd->Result = RETURN_DEVICE_BUSY; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); HPT_ASSERT(0); return; } do{ pPRDTable[i].highBaseAddr = (sizeof(tmpSg->dSgAddress)>4 ? 
(MV_U32)(tmpSg->dSgAddress>>32) : 0); pPRDTable[i].flags = (MV_U16)tmpSg->wSgFlag; pPRDTable[i].byteCount = (MV_U16)tmpSg->wSgSize; pPRDTable[i].lowBaseAddr = (MV_U32)tmpSg->dSgAddress; pPRDTable[i].reserved = 0; i++; }while((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pUdmaParams->prdLowAddr = (ULONG)fOsPhysicalAddress(pPRDTable); if ((pUdmaParams->numOfSectors == 256) && (pMvSataChannel->lba48Address == MV_FALSE)) { pUdmaParams->numOfSectors = 0; } pCmd->uScratch.sata_param.prdAddr = (PVOID)pPRDTable; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK) { queue_failed: switch (result) { case MV_QUEUE_COMMAND_RESULT_BAD_LBA_ADDRESS: MV_ERROR("IAL Error: Edma Queue command failed. Bad LBA " "LBA[31:0](0x%08x)\n", pUdmaParams->lowLBAAddress); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_QUEUED_MODE_DISABLED: MV_ERROR("IAL Error: Edma Queue command failed. EDMA" " disabled adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); mvSataEnableChannelDma(pMvSataAdapter,channel); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_FULL: MV_ERROR("IAL Error: Edma Queue command failed. Queue is" " Full adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); pCmd->Result = RETURN_DEVICE_BUSY; break; case MV_QUEUE_COMMAND_RESULT_BAD_PARAMS: MV_ERROR("IAL Error: Edma Queue command failed. (Bad " "Params), pMvSataAdapter: %p, pSataChannel: %p.\n", pMvSataAdapter, pMvSataAdapter->sataChannel[channel]); pCmd->Result = RETURN_IDE_ERROR; break; default: MV_ERROR("IAL Error: Bad result value (%d) from queue" " command\n", result); pCmd->Result = RETURN_IDE_ERROR; } if(pPRDTable) FreePRDTable(pMvSataAdapter->IALData,pPRDTable); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); } pDevice->QueueLength++; return; case IDE_COMMAND_VERIFY: commandInfo.type = MV_QUEUED_COMMAND_TYPE_NONE_UDMA; pNoUdmaParams->bufPtr = NULL; pNoUdmaParams->callBack = CommandCompletionCB; pNoUdmaParams->commandId = (MV_VOID_PTR)pCmd; pNoUdmaParams->count = 0; pNoUdmaParams->features = 0; pNoUdmaParams->protocolType = MV_NON_UDMA_PROTOCOL_NON_DATA; pCmd->uScratch.sata_param.cmd_priv = 1; if (pMvSataChannel->lba48Address == MV_TRUE){ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS_EXT; pNoUdmaParams->isEXT = MV_TRUE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(((Lba & 0xff000000) >> 16)| (Lba & 0xff)); pNoUdmaParams->sectorCount = nSector; pNoUdmaParams->device = 0x40; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } return; } else{ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS; pNoUdmaParams->isEXT = MV_FALSE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(Lba & 0xff); pNoUdmaParams->sectorCount = 0xff & nSector; pNoUdmaParams->device = (MV_U8)(0x40 | ((Lba & 0xf000000) >> 24)); pNoUdmaParams->callBack = CommandCompletionCB; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); /*FIXME: how about the commands already queued? 
but marvel also forgets to consider this*/ if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } } break; default: pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); break; } } /********************************************************** * * Probe the hostadapter. * **********************************************************/ static int hpt_probe(device_t dev) { if ((pci_get_vendor(dev) == MV_SATA_VENDOR_ID) && (pci_get_device(dev) == MV_SATA_DEVICE_ID_5081 #ifdef FOR_DEMO || pci_get_device(dev) == MV_SATA_DEVICE_ID_5080 #endif )) { KdPrintI((CONTROLLER_NAME " found\n")); device_set_desc(dev, CONTROLLER_NAME); return (BUS_PROBE_DEFAULT); } else return(ENXIO); } /*********************************************************** * * Auto configuration: attach and init a host adapter. * ***********************************************************/ static int hpt_attach(device_t dev) { IAL_ADAPTER_T * pAdapter = device_get_softc(dev); int rid; union ccb *ccb; struct cam_devq *devq; struct cam_sim *hpt_vsim; device_printf(dev, "%s Version %s \n", DRIVER_NAME, DRIVER_VERSION); pAdapter->hpt_dev = dev; rid = init_adapter(pAdapter); if (rid) return rid; rid = 0; - if ((pAdapter->hpt_irq = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((pAdapter->hpt_irq = bus_alloc_resource_any(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { hpt_printk(("can't allocate interrupt\n")); return(ENXIO); } if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_intr, pAdapter, &pAdapter->hpt_intr)) { hpt_printk(("can't set up interrupt\n")); free(pAdapter, M_DEVBUF); return(ENXIO); } if((ccb = (union ccb *)malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK)) != (union ccb*)NULL) { bzero(ccb, sizeof(*ccb)); ccb->ccb_h.pinfo.priority = 1; ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX; } else { return ENOMEM; } /* * Create the device queue for our SIM(s). */ if((devq = cam_simq_alloc(8/*MAX_QUEUE_COMM*/)) == NULL) { KdPrint(("ENXIO\n")); return ENOMEM; } /* * Construct our SIM entry */ hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME), pAdapter, device_get_unit(pAdapter->hpt_dev), &pAdapter->lock, 1, 8, devq); if (hpt_vsim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&pAdapter->lock); if (xpt_bus_register(hpt_vsim, dev, 0) != CAM_SUCCESS) { cam_sim_free(hpt_vsim, /*free devq*/ TRUE); mtx_unlock(&pAdapter->lock); hpt_vsim = NULL; return ENXIO; } if(xpt_create_path(&pAdapter->path, /*periph */ NULL, cam_sim_path(hpt_vsim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(hpt_vsim)); cam_sim_free(hpt_vsim, /*free_devq*/TRUE); mtx_unlock(&pAdapter->lock); hpt_vsim = NULL; return ENXIO; } mtx_unlock(&pAdapter->lock); xpt_setup_ccb(&(ccb->ccb_h), pAdapter->path, /*priority*/5); ccb->ccb_h.func_code = XPT_SASYNC_CB; ccb->csa.event_enable = AC_LOST_DEVICE; ccb->csa.callback = hpt_async; ccb->csa.callback_arg = hpt_vsim; xpt_action((union ccb *)ccb); free(ccb, M_DEVBUF); if (device_get_unit(dev) == 0) { /* Start the work thread. XXX */ launch_worker_thread(); } return 0; } static int hpt_detach(device_t dev) { return (EBUSY); } /*************************************************************** * The poll function is used to simulate the interrupt when * the interrupt subsystem is not functioning. 
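 * Because it can be called from contexts that already hold the SIM
 * lock, it goes through hpt_intr_locked() directly rather than
 * through hpt_intr().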
 ***************************************************************/
static void
hpt_poll(struct cam_sim *sim)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = cam_sim_softc(sim);
    hpt_intr_locked(pAdapter);
}

/****************************************************************
 *  Name:        hpt_intr
 *  Description: Interrupt handler.
 ****************************************************************/
static void
hpt_intr(void *arg)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = arg;
    mtx_lock(&pAdapter->lock);
    hpt_intr_locked(pAdapter);
    mtx_unlock(&pAdapter->lock);
}

static void
hpt_intr_locked(IAL_ADAPTER_T *pAdapter)
{
    mtx_assert(&pAdapter->lock, MA_OWNED);
    /* KdPrintI(("----- Entering Isr() -----\n")); */
    if (mvSataInterruptServiceRoutine(&pAdapter->mvSataAdapter) == MV_TRUE) {
        _VBUS_INST(&pAdapter->VBus)
        CheckPendingCall(_VBUS_P0);
    }

    /* KdPrintI(("----- Leaving Isr() -----\n")); */
}

/**********************************************************
 *          Asynchronous Events
 *********************************************************/
#if (!defined(UNREFERENCED_PARAMETER))
#define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

static void
hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
{
    /* debug XXXX */
    panic("Here");
    UNREFERENCED_PARAMETER(callback_arg);
    UNREFERENCED_PARAMETER(code);
    UNREFERENCED_PARAMETER(path);
    UNREFERENCED_PARAMETER(arg);
}

static void
FlushAdapter(IAL_ADAPTER_T *pAdapter)
{
    int i;

    hpt_printk(("flush all devices\n"));

    /* flush all devices */
    for (i=0; i<MAX_VDEVICE_PER_VBUS; i++) {
        PVDevice pVDev = pAdapter->VBus.pVDevice[i];
        if(pVDev) fFlushVDev(pVDev);
    }
}

static int
hpt_shutdown(device_t dev)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = device_get_softc(dev);

    EVENTHANDLER_DEREGISTER(shutdown_final, pAdapter->eh);
    mtx_lock(&pAdapter->lock);
    FlushAdapter(pAdapter);
    mtx_unlock(&pAdapter->lock);
    /* give the flush some time to happen,
     * otherwise "shutdown -p now" can leave the file system corrupted */
    DELAY(1000 * 1000 * 5);
    return 0;
}

void
Check_Idle_Call(IAL_ADAPTER_T *pAdapter)
{
    _VBUS_INST(&pAdapter->VBus)

    if (mWaitingForIdle(_VBUS_P0)) {
        CheckIdleCall(_VBUS_P0);
#ifdef SUPPORT_ARRAY
        {
            int i;
            PVDevice pArray;
            for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){
                if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
                    continue;
                else if (pArray->u.array.rf_auto_rebuild) {
                    KdPrint(("auto rebuild.\n"));
                    pArray->u.array.rf_auto_rebuild = 0;
                    hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE);
                }
            }
        }
#endif
    }

    /* launch the awaiting commands blocked by mWaitingForIdle */
    while(pAdapter->pending_Q != NULL) {
        _VBUS_INST(&pAdapter->VBus)
        union ccb *ccb = (union ccb *)pAdapter->pending_Q->ccb_h.ccb_ccb_ptr;
        hpt_free_ccb(&pAdapter->pending_Q, ccb);
        CallAfterReturn(_VBUS_P (DPC_PROC)OsSendCommand, ccb);
    }
}

static void
ccb_done(union ccb *ccb)
{
    PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter;
    IAL_ADAPTER_T * pAdapter = pmap->pAdapter;

    KdPrintI(("ccb_done: ccb %p status %x\n", ccb, ccb->ccb_h.status));

    dmamap_put(pmap);
    xpt_done(ccb);

    pAdapter->outstandingCommands--;

    if (pAdapter->outstandingCommands == 0) {
        if(DPC_Request_Nums == 0)
            Check_Idle_Call(pAdapter);
        wakeup(pAdapter);
    }
}

/****************************************************************
 *  Name:        hpt_action
 *  Description: Process a queued command from the CAM layer.
 *  Parameters:  sim - Pointer to SIM object
 *               ccb - Pointer to SCSI command structure.
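 *  Notes:       Runs with the per-adapter mutex held (asserted on
 *               entry); completed CCBs are returned to CAM via
 *               xpt_done().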
****************************************************************/ void hpt_action(struct cam_sim *sim, union ccb *ccb) { IAL_ADAPTER_T * pAdapter = (IAL_ADAPTER_T *) cam_sim_softc(sim); PBUS_DMAMAP pmap; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("hpt_action\n")); KdPrint(("hpt_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ { /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.target_id >= MAX_VDEVICE_PER_VBUS || pAdapter->VBus.pVDevice[ccb->ccb_h.target_id]==0) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); pmap = dmamap_get(pAdapter); HPT_ASSERT(pmap); ccb->ccb_adapter = pmap; memset((void *)pmap->psg, 0, sizeof(pmap->psg)); if (mWaitingForIdle(_VBUS_P0)) hpt_queue_ccb(&pAdapter->pending_Q, ccb); else OsSendCommand(_VBUS_P ccb); /* KdPrint(("leave scsiio\n")); */ break; } case XPT_RESET_BUS: KdPrint(("reset bus\n")); fResetVBus(_VBUS_P0); xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; /* Not necessary to reset bus */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = MAX_VDEVICE_PER_VBUS; cpi->max_lun = 0; cpi->initiator_id = MAX_VDEVICE_PER_VBUS; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: KdPrint(("invalid cmd\n")); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } /* KdPrint(("leave hpt_action..............\n")); */ } /* shall be called at lock_driver() */ static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb) { if(*ccb_Q == NULL) ccb->ccb_h.ccb_ccb_ptr = ccb; else { ccb->ccb_h.ccb_ccb_ptr = (*ccb_Q)->ccb_h.ccb_ccb_ptr; (*ccb_Q)->ccb_h.ccb_ccb_ptr = (char *)ccb; } *ccb_Q = ccb; } /* shall be called at lock_driver() */ static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) { union ccb *TempCCB; TempCCB = *ccb_Q; if(ccb->ccb_h.ccb_ccb_ptr == ccb) /*it means SCpnt is the last one in CURRCMDs*/ *ccb_Q = NULL; else { while(TempCCB->ccb_h.ccb_ccb_ptr != (char *)ccb) TempCCB = (union ccb *)TempCCB->ccb_h.ccb_ccb_ptr; TempCCB->ccb_h.ccb_ccb_ptr = ccb->ccb_h.ccb_ccb_ptr; if(*ccb_Q == ccb) *ccb_Q = TempCCB; } } #ifdef SUPPORT_ARRAY /*************************************************************************** * Function: hpt_worker_thread * Description: Do background rebuilding. Execute in kernel thread context. * Returns: None ***************************************************************************/ static void hpt_worker_thread(void) { for(;;) { mtx_lock(&DpcQueue_Lock); while (DpcQueue_First!=DpcQueue_Last) { ST_HPT_DPC p; p = DpcQueue[DpcQueue_First]; DpcQueue_First++; DpcQueue_First %= MAX_DPC; DPC_Request_Nums++; mtx_unlock(&DpcQueue_Lock); p.dpc(p.pAdapter, p.arg, p.flags); mtx_lock(&p.pAdapter->lock); mtx_lock(&DpcQueue_Lock); DPC_Request_Nums--; /* since we may have prevented Check_Idle_Call, do it here */ if (DPC_Request_Nums==0) { if (p.pAdapter->outstandingCommands == 0) { _VBUS_INST(&p.pAdapter->VBus); Check_Idle_Call(p.pAdapter); CheckPendingCall(_VBUS_P0); } } mtx_unlock(&p.pAdapter->lock); mtx_unlock(&DpcQueue_Lock); /*Schedule out*/ if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) { /* abort rebuilding process. 
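     * A SIGSTOP posted to this kernel process is treated as an abort
     * request: walk every adapter and set rf_abort_rebuild on each
     * array that is rebuilding, verifying or initializing.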
     */
                IAL_ADAPTER_T *pAdapter;
                PVDevice pArray;
                PVBus _vbus_p;
                int i;

                sx_slock(&hptmv_list_lock);
                pAdapter = gIal_Adapter;

                while(pAdapter != 0){
                    mtx_lock(&pAdapter->lock);
                    _vbus_p = &pAdapter->VBus;

                    for (i=0;i<MAX_ARRAY_PER_VBUS;i++) {
                        if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
                            continue;
                        else if (pArray->u.array.rf_rebuilding ||
                                 pArray->u.array.rf_verifying ||
                                 pArray->u.array.rf_initializing) {
                            pArray->u.array.rf_abort_rebuild = 1;
                        }
                    }
                    mtx_unlock(&pAdapter->lock);
                    pAdapter = pAdapter->next;
                }
                sx_sunlock(&hptmv_list_lock);
            }
            mtx_lock(&DpcQueue_Lock);
        }
        mtx_unlock(&DpcQueue_Lock);

/*Remove this debug option*/
/*
#ifdef DEBUG
        if (SIGISMEMBER(curproc->p_siglist, SIGSTOP))
            pause("hptrdy", 2*hz);
#endif
*/
        kproc_suspend_check(curproc);
        pause("-", 2*hz);  /* wait for something to do */
    }
}

static struct proc *hptdaemonproc;
static struct kproc_desc hpt_kp = {
    "hpt_wt",
    hpt_worker_thread,
    &hptdaemonproc
};

/*Start this thread in hpt_attach, so the kernel will not start it when no controller is present.*/
static void
launch_worker_thread(void)
{
    IAL_ADAPTER_T *pAdapTemp;

    kproc_start(&hpt_kp);

    sx_slock(&hptmv_list_lock);
    for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) {
        _VBUS_INST(&pAdapTemp->VBus)
        int i;
        PVDevice pVDev;

        for(i = 0; i < MAX_ARRAY_PER_VBUS; i++)
            if ((pVDev=ArrayTables(i))->u.array.dArStamp==0)
                continue;
            else{
                if (pVDev->u.array.rf_need_rebuild &&
                    !pVDev->u.array.rf_rebuilding)
                    hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapTemp, pVDev,
                        (UCHAR)((pVDev->u.array.CriticalMembers || pVDev->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY));
            }
    }
    sx_sunlock(&hptmv_list_lock);

    /*
     * hpt_worker_thread needs to be suspended after the shutdown sync,
     * i.e. once the filesystem sync has finished.
     */
    EVENTHANDLER_REGISTER(shutdown_post_sync, kproc_shutdown, hptdaemonproc, SHUTDOWN_PRI_LAST);
}
/*
 *SYSINIT(hptwt, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, launch_worker_thread, NULL);
*/

#endif

/********************************************************************************/

int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical)
{
    union ccb *ccb = (union ccb *)pCmd->pOrgCommand;

    if (logical) {
        pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr;
        pSg->wSgSize = ccb->csio.dxfer_len;
        pSg->wSgFlag = SG_FLAG_EOT;
        return TRUE;
    }
    /* since we have provided physical sg, nobody will ask us to build physical sg */
    HPT_ASSERT(0);
    return FALSE;
}

/*******************************************************************************/
ULONG HPTLIBAPI
GetStamp(void)
{
    /*
     * the system variable, ticks, can't be used since it hasn't yet been active
     * when our driver starts (ticks==0, it's an invalid stamp value)
     */
    ULONG stamp;
    do { stamp = random(); } while (stamp==0);
    return stamp;
}

static void
SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev)
{
    int i;
    IDENTIFY_DATA2 *pIdentify = (IDENTIFY_DATA2*)pVDev->u.disk.mv->identifyDevice;

    inquiryData->DeviceType = T_DIRECT; /*DIRECT_ACCESS_DEVICE*/
    inquiryData->AdditionalLength = (UCHAR)(sizeof(INQUIRYDATA) - 5);
#ifndef SERIAL_CMDS
    inquiryData->CommandQueue = 1;
#endif

    switch(pVDev->VDeviceType) {
    case VD_SINGLE_DISK:
    case VD_ATAPI:
    case VD_REMOVABLE:
        /* Set the removable bit, if applicable. */
        if ((pVDev->u.disk.df_removable_drive) || (pIdentify->GeneralConfiguration & 0x80))
            inquiryData->RemovableMedia = 1;

        /* Fill in vendor identification fields. */
        for (i = 0; i < 20; i += 2) {
            inquiryData->VendorId[i] = ((PUCHAR)pIdentify->ModelNumber)[i + 1];
            inquiryData->VendorId[i+1] = ((PUCHAR)pIdentify->ModelNumber)[i];
        }

        /* Initialize unused portion of product id.
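         * (The byte-swapped ATA model number is 20 characters: it fills
         * the 8-byte VendorId and the first 12 bytes of the 16-byte
         * ProductId, so only bytes 12..15 of ProductId need the space
         * padding applied below.)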
*/ for (i = 0; i < 4; i++) inquiryData->ProductId[12+i] = ' '; /* firmware revision */ for (i = 0; i < 4; i += 2) { inquiryData->ProductRevisionLevel[i] = ((PUCHAR)pIdentify->FirmwareRevision)[i+1]; inquiryData->ProductRevisionLevel[i+1] = ((PUCHAR)pIdentify->FirmwareRevision)[i]; } break; default: memcpy(&inquiryData->VendorId, "RR18xx ", 8); #ifdef SUPPORT_ARRAY switch(pVDev->VDeviceType){ case VD_RAID_0: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 1/0 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 0 Array ", 16); break; case VD_RAID_1: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 0/1 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 1 Array ", 16); break; case VD_RAID_5: memcpy(&inquiryData->ProductId, "RAID 5 Array ", 16); break; case VD_JBOD: memcpy(&inquiryData->ProductId, "JBOD Array ", 16); break; } #endif memcpy(&inquiryData->ProductRevisionLevel, "3.00", 4); break; } } static void hpt_timeout(void *arg) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)((union ccb *)arg)->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); fResetVBus(_VBUS_P0); } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCommand pCmd = (PCommand)arg; union ccb *ccb = pCmd->pOrgCommand; struct ccb_hdr *ccb_h = &ccb->ccb_h; PBUS_DMAMAP pmap = (PBUS_DMAMAP) ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; FPSCAT_GATH psg = pCmd->pSgTable; int idx; _VBUS_INST(pVDev->pVBus) HPT_ASSERT(pCmd->cf_physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; psg->wSgSize = segs[idx].ds_len; psg->wSgFlag = (idx == nsegs-1)? 
SG_FLAG_EOT: 0; /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ } /* psg[-1].wSgFlag = SG_FLAG_EOT; */ if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&pmap->timeout, 20 * hz, hpt_timeout, ccb); pVDev->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); } static void HPTLIBAPI OsSendCommand(_VBUS_ARG union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; struct ccb_hdr *ccb_h = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; KdPrintI(("OsSendCommand: ccb %p cdb %x-%x-%x\n", ccb, *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[0], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[4], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[8] )); pAdapter->outstandingCommands++; if (pVDev == NULL || pVDev->vf_online == 0) { ccb->ccb_h.status = CAM_REQ_INVALID; ccb_done(ccb); goto Command_Complished; } switch(ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: /* FALLTHROUGH */ ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: ZeroMemory(ccb->csio.data_ptr, ccb->csio.dxfer_len); SetInquiryData((PINQUIRYDATA)ccb->csio.data_ptr, pVDev); ccb_h->status = CAM_REQ_CMP; break; case READ_CAPACITY: { UCHAR *rbuf=csio->data_ptr; unsigned int cap; if (pVDev->VDeviceCapacity > 0xfffffffful) { cap = 0xfffffffful; } else { cap = pVDev->VDeviceCapacity - 1; } rbuf[0] = (UCHAR)(cap>>24); rbuf[1] = (UCHAR)(cap>>16); rbuf[2] = (UCHAR)(cap>>8); rbuf[3] = (UCHAR)cap; /* Claim 512 byte blocks (big-endian). 
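     * (READ CAPACITY data: bytes 0..3 are the last LBA, bytes 4..7 the
     * block length, both big-endian; 0x00 0x00 0x02 0x00 == 512.)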
*/ rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb_h->status = CAM_REQ_CMP; break; } case 0x9e: /*SERVICE_ACTION_IN*/ { UCHAR *rbuf = csio->data_ptr; LBA_T cap = pVDev->VDeviceCapacity - 1; rbuf[0] = (UCHAR)(cap>>56); rbuf[1] = (UCHAR)(cap>>48); rbuf[2] = (UCHAR)(cap>>40); rbuf[3] = (UCHAR)(cap>>32); rbuf[4] = (UCHAR)(cap>>24); rbuf[5] = (UCHAR)(cap>>16); rbuf[6] = (UCHAR)(cap>>8); rbuf[7] = (UCHAR)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb_h->status = CAM_REQ_CMP; break; } case READ_6: case WRITE_6: case READ_10: case WRITE_10: case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ case 0x13: case 0x2f: { UCHAR Cdb[16]; UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); int error; HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, Cdb, CdbLength); } else { KdPrintE(("ERROR!!!\n")); ccb->ccb_h.status = CAM_REQ_INVALID; break; } } else { bcopy(csio->cdb_io.cdb_bytes, Cdb, CdbLength); } pCmd->pOrgCommand = ccb; pCmd->pVDevice = pVDev; pCmd->pfnCompletion = fOsCommandDone; pCmd->pfnBuildSgl = fOsBuildSgl; pCmd->pSgTable = pmap->psg; switch (Cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((ULONG)Cdb[1] << 16) | ((ULONG)Cdb[2] << 8) | (ULONG)Cdb[3]; pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[4]; break; case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Lba = (HPT_U64)Cdb[2] << 56 | (HPT_U64)Cdb[3] << 48 | (HPT_U64)Cdb[4] << 40 | (HPT_U64)Cdb[5] << 32 | (HPT_U64)Cdb[6] << 24 | (HPT_U64)Cdb[7] << 16 | (HPT_U64)Cdb[8] << 8 | (HPT_U64)Cdb[9]; pCmd->uCmd.Ide.nSectors = (USHORT)Cdb[12] << 8 | (USHORT)Cdb[13]; break; default: pCmd->uCmd.Ide.Lba = (ULONG)Cdb[5] | ((ULONG)Cdb[4] << 8) | ((ULONG)Cdb[3] << 16) | ((ULONG)Cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[8] | ((USHORT)Cdb[7]<<8); break; } switch (Cdb[0]) { case READ_6: case READ_10: case 0x88: /* READ_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_READ; pCmd->cf_data_in = 1; break; case WRITE_6: case WRITE_10: case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_WRITE; pCmd->cf_data_out = 1; break; case 0x13: case 0x2f: pCmd->uCmd.Ide.Command = IDE_COMMAND_VERIFY; break; } /*///////////////////////// */ pCmd->cf_physical_sg = 1; error = bus_dmamap_load_ccb(pAdapter->io_dma_parent, pmap->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d\n", error)); if (error && error!=EINPROGRESS) { hpt_printk(("bus_dmamap_load error %d\n", error)); FreeCommand(_VBUS_P pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; dmamap_put(pmap); pAdapter->outstandingCommands--; if (pAdapter->outstandingCommands == 0) wakeup(pAdapter); xpt_done(ccb); } goto Command_Complished; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ccb_done(ccb); Command_Complished: CheckPendingCall(_VBUS_P0); return; } static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd) { union ccb *ccb = pCmd->pOrgCommand; PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; KdPrint(("fOsCommandDone(pcmd=%p, result=%d)\n", pCmd, pCmd->Result)); callout_stop(&pmap->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: 
ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pAdapter->io_dma_parent, pmap->dma_map); FreeCommand(_VBUS_P pCmd); ccb_done(ccb); } int hpt_queue_dpc(HPT_DPC dpc, IAL_ADAPTER_T * pAdapter, void *arg, UCHAR flags) { int p; mtx_lock(&DpcQueue_Lock); p = (DpcQueue_Last + 1) % MAX_DPC; if (p==DpcQueue_First) { KdPrint(("DPC Queue full!\n")); mtx_unlock(&DpcQueue_Lock); return -1; } DpcQueue[DpcQueue_Last].dpc = dpc; DpcQueue[DpcQueue_Last].pAdapter = pAdapter; DpcQueue[DpcQueue_Last].arg = arg; DpcQueue[DpcQueue_Last].flags = flags; DpcQueue_Last = p; mtx_unlock(&DpcQueue_Lock); return 0; } #ifdef _RAID5N_ /* * Allocate memory above 16M, otherwise we may eat all low memory for ISA devices. * How about the memory for 5081 request/response array and PRD table? */ void *os_alloc_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void *os_alloc_dma_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void os_free_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void os_free_dma_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void DoXor1(ULONG *p0, ULONG *p1, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ = *p1++ ^ *p2++; } void DoXor2(ULONG *p0, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ ^= *p2++; } #endif Index: head/sys/dev/hptnr/hptnr_osm_bsd.c =================================================================== --- head/sys/dev/hptnr/hptnr_osm_bsd.c (revision 295789) +++ head/sys/dev/hptnr/hptnr_osm_bsd.c (revision 295790) @@ -1,1678 +1,1678 @@ /* $Id: osm_bsd.c,v 1.36 2010/05/11 03:12:11 lcn Exp $ */ /*- * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005-2011 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include int msi = 0; int debug_flag = 0; static HIM *hpt_match(device_t dev) { PCI_ID pci_id; HIM *him; int i; for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if (him->get_controller_count) him->get_controller_count(&pci_id,0,0); if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
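 * contigmalloc() is used instead; it returns physically contiguous,
 * page-aligned memory, which is what the freelist code below relies on.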
*/ static __inline void *__get_free_pages(int order) { /* don't use low memory - other devices may get starved */ return contigmalloc(PAGE_SIZE<hba_list; hba; hba = hba->next) hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle); ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0); for (f=vbus_ext->freelist_head; f; f=f->next) { KdPrint(("%s: %d*%d=%d bytes", f->tag, f->count, f->size, f->count*f->size)); for (i=0; icount; i++) { p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK); if (!p) return (ENXIO); *p = f->head; f->head = p; } } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size, j; HPT_ASSERT((f->size & (f->alignment-1))==0); for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; KdPrint(("%s: %d*%d=%d bytes, order %d", f->tag, f->count, f->size, f->count*f->size, order)); HPT_ASSERT(f->alignment<=PAGE_SIZE); for (i=0; icount;) { p = (void **)__get_free_pages(order); if (!p) return -1; for (j = size/f->size; j && icount; i++,j--) { *p = f->head; *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p); f->head = p; p = (void **)((unsigned long)p + f->size); } } } HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE); for (i=0; ivbus, p, (BUS_ADDRESS)vtophys(p)); } return 0; } static void hpt_free_mem(PVBUS_EXT vbus_ext) { struct freelist *f; void *p; int i; BUS_ADDRESS bus; for (f=vbus_ext->freelist_head; f; f=f->next) { #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif while ((p=freelist_get(f))) free(p, M_DEVBUF); } for (i=0; ivbus, &bus); HPT_ASSERT(p); free_pages(p, 0); } for (f=vbus_ext->freelist_dma_head; f; f=f->next) { int order, size; #if DBG if (f->count!=f->reserved_count) { KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count)); } #endif for (order=0, size=PAGE_SIZE; sizesize; order++, size<<=1) ; while ((p=freelist_get_dma(f, &bus))) { if (order) free_pages(p, order); else { /* can't free immediately since other blocks in this page may still be in the list */ if (((HPT_UPTR)p & (PAGE_SIZE-1))==0) dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus); } } } while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus))) free_pages(p, 0); } static int hpt_init_vbus(PVBUS_EXT vbus_ext) { PHBA hba; for (hba = vbus_ext->hba_list; hba; hba = hba->next) if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) { KdPrint(("fail to initialize %p", hba)); return -1; } ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter); return 0; } static void hpt_flush_done(PCOMMAND pCmd) { PVDEV vd = pCmd->target; if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) { vd = vd->u.array.transform->target; HPT_ASSERT(vd); pCmd->target = vd; pCmd->Result = RETURN_PENDING; vdev_queue_cmd(pCmd); return; } *(int *)pCmd->priv = 1; wakeup(pCmd); } /* * flush a vdev (without retry). 
/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
	PCOMMAND pCmd;
	int result = 0, done;
	HPT_UINT count;

	KdPrint(("flusing dev %p", vd));

	hpt_assert_vbus_locked(vbus_ext);
	if (mIsArray(vd->type) && vd->u.array.transform)
		count = MAX(vd->u.array.transform->source->cmds_per_request,
			vd->u.array.transform->target->cmds_per_request);
	else
		count = vd->cmds_per_request;

	pCmd = ldm_alloc_cmds(vd->vbus, count);
	if (!pCmd) {
		return -1;
	}

	pCmd->type = CMD_TYPE_FLUSH;
	pCmd->flags.hard_flush = 1;
	pCmd->target = vd;
	pCmd->done = hpt_flush_done;
	done = 0;
	pCmd->priv = &done;

	ldm_queue_cmd(pCmd);

	if (!done) {
		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
			ldm_reset_vbus(vd->vbus);
		}
	}

	KdPrint(("flush result %d", pCmd->Result));

	if (pCmd->Result!=RETURN_SUCCESS)
		result = -1;

	ldm_free_cmds(pCmd);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PHBA hba;
	int i;

	KdPrint(("hpt_shutdown_vbus"));

	/* stop all ctl tasks and disable the worker taskqueue */
	hpt_stop_tasks(vbus_ext);
	hpt_lock_vbus(vbus_ext);
	vbus_ext->worker.ta_context = 0;

	/* flush devices */
	for (i=0; i<osm_max_targets; i++) {
		PVDEV vd = ldm_find_target(vbus, i);
		if (vd)
			hpt_flush_vdev(vbus_ext, vd);
	}

	hpt_unlock_vbus(vbus_ext);
	ldm_shutdown(vbus);

	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	hpt_free_mem(vbus_ext);

	while ((hba=vbus_ext->hba_list)) {
		vbus_ext->hba_list = hba->next;
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
	}

	callout_drain(&vbus_ext->timer);
	mtx_destroy(&vbus_ext->lock);
	free(vbus_ext, M_DEVBUF);
	KdPrint(("hpt_shutdown_vbus done"));
}

static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
	OSM_TASK *tasks;

	tasks = vbus_ext->tasks;
	vbus_ext->tasks = 0;

	while (tasks) {
		OSM_TASK *t = tasks;
		tasks = t->next;
		t->next = 0;
		t->func(vbus_ext->vbus, t->data);
	}
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
	if(vbus_ext){
		hpt_lock_vbus(vbus_ext);
		__hpt_do_tasks(vbus_ext);
		hpt_unlock_vbus(vbus_ext);
	}
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
	POS_CMDEXT p = vbus_ext->cmdext_list;
	if (p)
		vbus_ext->cmdext_list = p->next;
	return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
	p->next = p->vbus_ext->cmdext_list;
	p->vbus_ext->cmdext_list = p;
}

static void hpt_timeout(void *arg)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

	KdPrint(("pCmd %p timeout", pCmd));

	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

static void os_cmddone(PCOMMAND pCmd)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;
	HPT_U8 *cdb;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));

	callout_stop(&ext->timeout);
	switch(cdb[0]) {
	case 0x85: /*ATA_16*/
	case 0xA1: /*ATA_12*/
	{
		PassthroughCmd *passthru = &pCmd->uCmd.Passthrough;
		HPT_U8 *sense_buffer = (HPT_U8 *)&ccb->csio.sense_data;

		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));

		sense_buffer[0] = 0x72; /* Response Code */
		sense_buffer[7] = 14; /* Additional Sense Length */

		sense_buffer[8] = 0x9; /* ATA Return Descriptor */
		sense_buffer[9] = 0xc; /* Additional Descriptor Length */
		sense_buffer[11] = (HPT_U8)passthru->bFeaturesReg; /* Error */
		sense_buffer[13] = (HPT_U8)passthru->bSectorCountReg; /* Sector Count (7:0) */
		sense_buffer[15] =
(HPT_U8)passthru->bLbaLowReg; /* LBA Low (7:0) */ sense_buffer[17] = (HPT_U8)passthru->bLbaMidReg; /* LBA Mid (7:0) */ sense_buffer[19] = (HPT_U8)passthru->bLbaHighReg; /* LBA High (7:0) */ if ((cdb[0] == 0x85) && (cdb[1] & 0x1)) { sense_buffer[10] = 1; sense_buffer[12] = (HPT_U8)(passthru->bSectorCountReg >> 8); /* Sector Count (15:8) */ sense_buffer[14] = (HPT_U8)(passthru->bLbaLowReg >> 8); /* LBA Low (15:8) */ sense_buffer[16] = (HPT_U8)(passthru->bLbaMidReg >> 8); /* LBA Mid (15:8) */ sense_buffer[18] = (HPT_U8)(passthru->bLbaHighReg >> 8); /* LBA High (15:8) */ } sense_buffer[20] = (HPT_U8)passthru->bDriveHeadReg; /* Device */ sense_buffer[21] = (HPT_U8)passthru->bCommandReg; /* Status */ KdPrint(("sts 0x%x err 0x%x low 0x%x mid 0x%x hig 0x%x dh 0x%x sc 0x%x", passthru->bCommandReg, passthru->bFeaturesReg, passthru->bLbaLowReg, passthru->bLbaMidReg, passthru->bLbaHighReg, passthru->bDriveHeadReg, passthru->bSectorCountReg)); KdPrint(("result:0x%x,bFeaturesReg:0x%04x,bSectorCountReg:0x%04x,LBA:0x%04x%04x%04x ", pCmd->Result,passthru->bFeaturesReg,passthru->bSectorCountReg, passthru->bLbaHighReg,passthru->bLbaMidReg,passthru->bLbaLowReg)); } default: break; } switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) 
{ ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x85: /*ATA_16*/ case 0xA1: /*ATA_12*/ { int error; HPT_U8 prot; PassthroughCmd *passthru; if (mIsArray(vd->type)) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } HPT_ASSERT(vd->type == VD_RAW && vd->u.raw.legacy_disk); prot = (cdb[1] & 0x1e) >> 1; if (prot < 3 || prot > 5) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if (!pCmd) { HPT_ASSERT(0); ccb->ccb_h.status = CAM_BUSY; break; } passthru = &pCmd->uCmd.Passthrough; if (cdb[0] == 0x85/*ATA_16*/) { if (cdb[1] & 0x1) { passthru->bFeaturesReg = ((HPT_U16)cdb[3] << 8) | cdb[4]; passthru->bSectorCountReg = ((HPT_U16)cdb[5] << 8) | cdb[6]; passthru->bLbaLowReg = ((HPT_U16)cdb[7] << 8) | cdb[8]; passthru->bLbaMidReg = ((HPT_U16)cdb[9] << 8) | cdb[10]; passthru->bLbaHighReg = ((HPT_U16)cdb[11] << 8) | cdb[12]; } else { passthru->bFeaturesReg = cdb[4]; passthru->bSectorCountReg = cdb[6]; passthru->bLbaLowReg = cdb[8]; passthru->bLbaMidReg = cdb[10]; passthru->bLbaHighReg = cdb[12]; } passthru->bDriveHeadReg = cdb[13]; passthru->bCommandReg = cdb[14]; } else { /*ATA_12*/ passthru->bFeaturesReg = cdb[3]; passthru->bSectorCountReg = cdb[4]; passthru->bLbaLowReg = cdb[5]; passthru->bLbaMidReg = cdb[6]; passthru->bLbaHighReg = cdb[7]; passthru->bDriveHeadReg = cdb[8]; passthru->bCommandReg = cdb[9]; } if (cdb[1] & 0xe0) { if (!(passthru->bCommandReg == ATA_CMD_READ_MULTI || passthru->bCommandReg == ATA_CMD_READ_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_EXT || passthru->bCommandReg == ATA_CMD_WRITE_MULTI_FUA_EXT) ) { goto error; } } if (passthru->bFeaturesReg == ATA_SET_FEATURES_XFER && passthru->bCommandReg == ATA_CMD_SET_FEATURES) { goto error; } passthru->nSectors = ccb->csio.dxfer_len/ATA_SECTOR_SIZE; switch (prot) { default: /*None data*/ break; case 4: /*PIO data in, T_DIR=1 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8) == 0) { OsPrint(("PIO data in, T_DIR=1 match check")); goto error; } pCmd->flags.data_in = 1; break; case 5: /*PIO data out, T_DIR=0 match check*/ if ((cdb[2] & 3) && (cdb[2] & 0x8)) { OsPrint(("PIO data out, T_DIR=0 match check")); goto error; } pCmd->flags.data_out = 1; break; } pCmd->type = CMD_TYPE_PASSTHROUGH; pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; if(!ccb->csio.dxfer_len) { ldm_queue_cmd(pCmd); return; } pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; error: ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_INVALID; break; } case INQUIRY: { PINQUIRYDATA inquiryData; HIM_DEVICE_CONFIG devconf; HPT_U8 *rbuf; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; if (cdb[1] & 1) { rbuf = (HPT_U8 *)inquiryData; switch(cdb[2]) { case 0: rbuf[0] = 0; rbuf[1] = 0; rbuf[2] = 0; rbuf[3] = 3; rbuf[4] = 0; rbuf[5] = 
0x80; rbuf[6] = 0x83; ccb->ccb_h.status = CAM_REQ_CMP; break; case 0x80: { rbuf[0] = 0; rbuf[1] = 0x80; rbuf[2] = 0; if (vd->type == VD_RAW) { rbuf[3] = 20; vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); memcpy(&rbuf[4], devconf.pIdentifyData->SerialNumber, 20); ldm_ide_fixstring(&rbuf[4], 20); } else { rbuf[3] = 1; rbuf[4] = 0x20; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case 0x83: rbuf[0] = 0; rbuf[1] = 0x83; rbuf[2] = 0; rbuf[3] = 12; rbuf[4] = 1; rbuf[5] = 2; rbuf[6] = 0; rbuf[7] = 8; rbuf[8] = 0; rbuf[9] = 0x19; rbuf[10] = 0x3C; rbuf[11] = 0; rbuf[12] = 0; rbuf[13] = 0; rbuf[14] = 0; rbuf[15] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } else if (cdb[2]) { ccb->ccb_h.status = CAM_REQ_INVALID; break; } inquiryData->DeviceType = 0; /*DIRECT_ACCESS_DEVICE*/ inquiryData->Versions = 5; /*SPC-3*/ inquiryData->ResponseDataFormat = 2; inquiryData->AdditionalLength = 0x5b; inquiryData->CommandQueue = 1; if (ccb->csio.dxfer_len > 63) { rbuf = (HPT_U8 *)inquiryData; rbuf[58] = 0x60; rbuf[59] = 0x3; rbuf[64] = 0x3; rbuf[66] = 0x3; rbuf[67] = 0x20; } if (vd->type == VD_RAW) { vd->u.raw.him->get_device_config(vd->u.raw.phy_dev,&devconf); if ((devconf.pIdentifyData->GeneralConfiguration & 0x80)) inquiryData->RemovableMedia = 1; memcpy(&inquiryData->VendorId, "ATA ", 8); memcpy(&inquiryData->ProductId, devconf.pIdentifyData->ModelNumber, 16); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductId, 16); memcpy(&inquiryData->ProductRevisionLevel, devconf.pIdentifyData->FirmwareRevision, 4); ldm_ide_fixstring((HPT_U8 *)&inquiryData->ProductRevisionLevel, 4); if (inquiryData->ProductRevisionLevel[0] == 0 || inquiryData->ProductRevisionLevel[0] == ' ') memcpy(&inquiryData->ProductRevisionLevel, "n/a ", 4); } else { memcpy(&inquiryData->VendorId, "HPT ", 8); snprintf((char *)&inquiryData->ProductId, 16, "DISK_%d_%d ", os_get_vbus_seq(vbus_ext), vd->target_id); inquiryData->ProductId[15] = ' '; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; HPT_U8 sector_size_shift = 0; HPT_U64 new_cap; HPT_U32 sector_size = 0; if (mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in READ_CAPACITY")); sector_size_shift = 3; break; default: break; } } new_cap = vd->capacity >> sector_size_shift; if (new_cap > 0xfffffffful) cap = 0xffffffff; else cap = new_cap - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2 << sector_size_shift; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case REPORT_LUNS: { HPT_U8 *rbuf = ccb->csio.data_ptr; memset(rbuf, 0, 16); rbuf[3] = 8; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = 0; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; if(mIsArray(vd->type)) sector_size_shift = vd->u.array.sector_size_shift; else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("set 4k setctor size in SERVICE_ACTION_IN")); sector_size_shift = 3; break; default: break; } } cap = (vd->capacity >> sector_size_shift) - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] 
= (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2 << sector_size_shift; rbuf[11] = 0; if(!mIsArray(vd->type)){ rbuf[13] = vd->u.raw.logicalsectors_per_physicalsector; rbuf[14] = (HPT_U8)((vd->u.raw.lowest_aligned >> 8) & 0x3f); rbuf[15] = (HPT_U8)(vd->u.raw.lowest_aligned); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: case 0x8f: /* VERIFY_16 */ { int error; HPT_U8 sector_size_shift = 0; HPT_U32 sector_size = 0; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: case 0x8f: /* VERIFY_16 */ { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } if(mIsArray(vd->type)) { sector_size_shift = vd->u.array.sector_size_shift; } else{ if(vd->type == VD_RAW){ sector_size = vd->u.raw.logical_sector_size; } switch (sector_size) { case 0x1000: KdPrint(("<8>resize sector size from 4k to 512")); sector_size_shift = 3; break; default: break; } } pCmd->uCmd.Ide.Lba <<= sector_size_shift; pCmd->uCmd.Ide.nSectors <<= sector_size_shift; switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: ccb->ccg.heads = 255; ccb->ccg.secs_per_track = 63; ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; 
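		/*
		 * [Editor's note] XPT_PATH_INQ reports the SIM's static
		 * capabilities back to CAM; initiator_id is set to
		 * osm_max_targets below so it can never collide with a
		 * valid target id.
		 */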
cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; 
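		/*
		 * [Editor's note] This and the next statement are the LIFO
		 * push of a freshly allocated command extension onto
		 * cmdext_list; cmdext_get()/cmdext_put() pop and push the
		 * same list once per I/O.
		 */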
vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } hpt_lock_vbus(vbus_ext); vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); unit_number++; if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); hpt_unlock_vbus(vbus_ext); return ; } if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = 
NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), { 0, 0 } }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) { return(ENOMEM); } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: head/sys/dev/hptrr/hptrr_osm_bsd.c 
=================================================================== --- head/sys/dev/hptrr/hptrr_osm_bsd.c (revision 295789) +++ head/sys/dev/hptrr/hptrr_osm_bsd.c (revision 295790) @@ -1,1324 +1,1324 @@ /* * Copyright (c) HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include /* $Id: osm_bsd.c,v 1.27 2007/11/22 07:35:49 gmm Exp $ * * HighPoint RAID Driver for FreeBSD * Copyright (C) 2005 HighPoint Technologies, Inc. All Rights Reserved. */ #include #include static int attach_generic = 0; TUNABLE_INT("hw.hptrr.attach_generic", &attach_generic); static HIM *hpt_match(device_t dev) { PCI_ID pci_id; int i; HIM *him; /* Some of supported chips are used not only by HPT. 
*/ if (pci_get_vendor(dev) != 0x1103 && !attach_generic) return (NULL); for (him = him_list; him; him = him->next) { for (i=0; him->get_supported_device_id(i, &pci_id); i++) { if ((pci_get_vendor(dev) == pci_id.vid) && (pci_get_device(dev) == pci_id.did)){ return (him); } } } return (NULL); } static int hpt_probe(device_t dev) { HIM *him; him = hpt_match(dev); if (him != NULL) { KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev) )); device_set_desc(dev, him->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int hpt_attach(device_t dev) { PHBA hba = (PHBA)device_get_softc(dev); HIM *him; PCI_ID pci_id; HPT_UINT size; PVBUS vbus; PVBUS_EXT vbus_ext; KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev))); him = hpt_match(dev); hba->ext_type = EXT_TYPE_HBA; hba->ldm_adapter.him = him; pci_enable_busmaster(dev); pci_id.vid = pci_get_vendor(dev); pci_id.did = pci_get_device(dev); pci_id.rev = pci_get_revid(dev); size = him->get_adapter_size(&pci_id); hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK); hba->pcidev = dev; hba->pciaddr.tree = 0; hba->pciaddr.bus = pci_get_bus(dev); hba->pciaddr.device = pci_get_slot(dev); hba->pciaddr.function = pci_get_function(dev); if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) { free(hba->ldm_adapter.him_handle, M_DEVBUF); return ENXIO; } os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev)); if (!ldm_register_adapter(&hba->ldm_adapter)) { size = ldm_get_vbus_size(); vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK | M_ZERO); vbus_ext->ext_type = EXT_TYPE_VBUS; ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext); ldm_register_adapter(&hba->ldm_adapter); } ldm_for_each_vbus(vbus, vbus_ext) { if (hba->ldm_adapter.vbus==vbus) { hba->vbus_ext = vbus_ext; hba->next = vbus_ext->hba_list; vbus_ext->hba_list = hba; break; } } return 0; } /* * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory, * but there are some problems currently (alignment, etc). 
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order, M_DEVBUF, M_WAITOK,
		BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}

static __inline void free_pages(void *p, int order)
{
	contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
}

static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
{
	PHBA hba;
	struct freelist *f;
	HPT_UINT i;
	void **p;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);

	ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);

	for (f=vbus_ext->freelist_head; f; f=f->next) {
		KdPrint(("%s: %d*%d=%d bytes",
			f->tag, f->count, f->size, f->count*f->size));
		for (i=0; i<f->count; i++) {
			p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
			if (!p) return (ENXIO);
			*p = f->head;
			f->head = p;
		}
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size, j;

		HPT_ASSERT((f->size & (f->alignment-1))==0);

		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;

		KdPrint(("%s: %d*%d=%d bytes, order %d",
			f->tag, f->count, f->size, f->count*f->size, order));
		HPT_ASSERT(f->alignment<=PAGE_SIZE);

		for (i=0; i<f->count;) {
			p = (void **)__get_free_pages(order);
			if (!p) return -1;
			for (j = size/f->size; j && i<f->count; i++,j--) {
				*p = f->head;
				*(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
				f->head = p;
				p = (void **)((unsigned long)p + f->size);
			}
		}
	}

	HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);

	for (i=0; i<os_max_cache_pages; i++) {
		p = (void **)__get_free_pages(0);
		if (!p) return -1;
		dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
	}

	return 0;
}

static void hpt_free_mem(PVBUS_EXT vbus_ext)
{
	struct freelist *f;
	void *p;
	int i;
	BUS_ADDRESS bus;

	for (f=vbus_ext->freelist_head; f; f=f->next) {
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
		while ((p=freelist_get(f)))
			free(p, M_DEVBUF);
	}

	for (i=0; i<os_max_cache_pages; i++) {
		p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
		HPT_ASSERT(p);
		free_pages(p, 0);
	}

	for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
		int order, size;
#if DBG
		if (f->count!=f->reserved_count) {
			KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
		}
#endif
		for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;

		while ((p=freelist_get_dma(f, &bus))) {
			if (order)
				free_pages(p, order);
			else {
				/* can't free immediately since other blocks in this page may still be in the list */
				if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
					dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
			}
		}
	}

	while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
		free_pages(p, 0);
}

static int hpt_init_vbus(PVBUS_EXT vbus_ext)
{
	PHBA hba;

	for (hba = vbus_ext->hba_list; hba; hba = hba->next)
		if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
			KdPrint(("fail to initialize %p", hba));
			return -1;
		}

	ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
	return 0;
}

static void hpt_flush_done(PCOMMAND pCmd)
{
	PVDEV vd = pCmd->target;

	if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
		vd = vd->u.array.transform->target;
		HPT_ASSERT(vd);
		pCmd->target = vd;
		pCmd->Result = RETURN_PENDING;
		vdev_queue_cmd(pCmd);
		return;
	}

	*(int *)pCmd->priv = 1;
	wakeup(pCmd);
}
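/*
 * [Editor's note - illustrative sketch, not part of the committed source.]
 * hpt_alloc_mem() above threads free blocks through their own first word:
 * "*p = f->head; f->head = p;" is a LIFO push, and freelist_get() pops the
 * head.  The generic pattern, with hypothetical names, looks like this:
 */
#if 0	/* sketch only - never compiled */
static __inline void
freelist_push(void **head, void *block)
{
	*(void **)block = *head;	/* first word of the block links to the old head */
	*head = block;
}

static __inline void *
freelist_pop(void **head)
{
	void *block = *head;

	if (block)
		*head = *(void **)block;	/* unlink by following the embedded link */
	return (block);
}
#endif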
/*
 * flush a vdev (without retry).
 */
static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
{
	PCOMMAND pCmd;
	int result = 0, done;
	HPT_UINT count;

	KdPrint(("flusing dev %p", vd));

	hpt_assert_vbus_locked(vbus_ext);
	if (mIsArray(vd->type) && vd->u.array.transform)
		count = MAX(vd->u.array.transform->source->cmds_per_request,
			vd->u.array.transform->target->cmds_per_request);
	else
		count = vd->cmds_per_request;

	pCmd = ldm_alloc_cmds(vd->vbus, count);
	if (!pCmd) {
		return -1;
	}

	pCmd->type = CMD_TYPE_FLUSH;
	pCmd->flags.hard_flush = 1;
	pCmd->target = vd;
	pCmd->done = hpt_flush_done;
	done = 0;
	pCmd->priv = &done;

	ldm_queue_cmd(pCmd);

	if (!done) {
		while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
			ldm_reset_vbus(vd->vbus);
		}
	}

	KdPrint(("flush result %d", pCmd->Result));

	if (pCmd->Result!=RETURN_SUCCESS)
		result = -1;

	ldm_free_cmds(pCmd);

	return result;
}

static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
{
	PVBUS vbus = (PVBUS)vbus_ext->vbus;
	PHBA hba;
	int i;

	KdPrint(("hpt_shutdown_vbus"));

	/* stop all ctl tasks and disable the worker taskqueue */
	hpt_stop_tasks(vbus_ext);
	hpt_lock_vbus(vbus_ext);
	vbus_ext->worker.ta_context = 0;

	/* flush devices */
	for (i=0; i<osm_max_targets; i++) {
		PVDEV vd = ldm_find_target(vbus, i);
		if (vd)
			hpt_flush_vdev(vbus_ext, vd);
	}

	hpt_unlock_vbus(vbus_ext);
	ldm_shutdown(vbus);

	for (hba=vbus_ext->hba_list; hba; hba=hba->next)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	hpt_free_mem(vbus_ext);

	while ((hba=vbus_ext->hba_list)) {
		vbus_ext->hba_list = hba->next;
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
	}

	callout_drain(&vbus_ext->timer);
	mtx_destroy(&vbus_ext->lock);
	free(vbus_ext, M_DEVBUF);
	KdPrint(("hpt_shutdown_vbus done"));
}

static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
{
	OSM_TASK *tasks;

	tasks = vbus_ext->tasks;
	vbus_ext->tasks = 0;

	while (tasks) {
		OSM_TASK *t = tasks;
		tasks = t->next;
		t->next = 0;
		t->func(vbus_ext->vbus, t->data);
	}
}

static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
{
	if(vbus_ext){
		hpt_lock_vbus(vbus_ext);
		__hpt_do_tasks(vbus_ext);
		hpt_unlock_vbus(vbus_ext);
	}
}

static void hpt_action(struct cam_sim *sim, union ccb *ccb);
static void hpt_poll(struct cam_sim *sim);
static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
static void hpt_pci_intr(void *arg);

static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
{
	POS_CMDEXT p = vbus_ext->cmdext_list;
	if (p)
		vbus_ext->cmdext_list = p->next;
	return p;
}

static __inline void cmdext_put(POS_CMDEXT p)
{
	p->next = p->vbus_ext->cmdext_list;
	p->vbus_ext->cmdext_list = p;
}

static void hpt_timeout(void *arg)
{
	PCOMMAND pCmd = (PCOMMAND)arg;
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;

	KdPrint(("pCmd %p timeout", pCmd));

	ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
}

static void os_cmddone(PCOMMAND pCmd)
{
	POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
	union ccb *ccb = ext->ccb;

	KdPrint(("os_cmddone(%p, %d)", pCmd, pCmd->Result));

	callout_stop(&ext->timeout);
	switch(pCmd->Result) {
	case RETURN_SUCCESS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case RETURN_BAD_DEVICE:
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case RETURN_DEVICE_BUSY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	case RETURN_INVALID_REQUEST:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case RETURN_SELECTION_TIMEOUT:
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		break;
	case RETURN_RETRY:
		ccb->ccb_h.status = CAM_BUSY;
		break;
	default:
		ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		break;
	}

	if (pCmd->flags.data_in) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
	}
	else if (pCmd->flags.data_out) {
		bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
	}
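	/*
	 * [Editor's note] The POSTREAD/POSTWRITE syncs above must precede
	 * the bus_dmamap_unload() below: they give the CPU a coherent view
	 * of the buffer (bounce-buffer copy or cache invalidation) while
	 * the mapping still exists.
	 */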
bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map); cmdext_put(ext); ldm_free_cmds(pCmd); xpt_done(ccb); } static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical) { POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; union ccb *ccb = ext->ccb; if (logical) { os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr); pSg->size = ccb->csio.dxfer_len; pSg->eot = 1; return TRUE; } /* since we have provided physical sg, nobody will ask us to build physical sg */ HPT_ASSERT(0); return FALSE; } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCOMMAND pCmd = (PCOMMAND)arg; POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv; PSG psg = pCmd->psg; int idx; HPT_ASSERT(pCmd->flags.physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<=os_max_sg_descriptors); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->addr.bus = segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; if (pCmd->flags.data_in) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->flags.data_out) { bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd); ldm_queue_cmd(pCmd); } static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb) { PVBUS vbus = (PVBUS)vbus_ext->vbus; PVDEV vd; PCOMMAND pCmd; POS_CMDEXT ext; HPT_U8 *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x", ccb, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8] )); /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= osm_max_targets || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } vd = ldm_find_target(vbus, ccb->ccb_h.target_id); if (!vd) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; xpt_done(ccb); return; } switch (cdb[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: { PINQUIRYDATA inquiryData; memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len); inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr; inquiryData->AdditionalLength = 31; inquiryData->CommandQueue = 1; memcpy(&inquiryData->VendorId, "HPT ", 8); memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16); if (vd->target_id / 10) { inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0'; inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0'; } else inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0'; memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4); ccb->ccb_h.status = CAM_REQ_CMP; } break; case READ_CAPACITY: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U32 cap; if (vd->capacity>0xfffffffful) cap = 0xfffffffful; else cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>24); rbuf[1] = (HPT_U8)(cap>>16); rbuf[2] = (HPT_U8)(cap>>8); rbuf[3] = (HPT_U8)cap; rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case SERVICE_ACTION_IN: { HPT_U8 *rbuf = ccb->csio.data_ptr; HPT_U64 cap = vd->capacity - 1; rbuf[0] = (HPT_U8)(cap>>56); rbuf[1] = (HPT_U8)(cap>>48); rbuf[2] = (HPT_U8)(cap>>40); rbuf[3] = (HPT_U8)(cap>>32); rbuf[4] = (HPT_U8)(cap>>24); rbuf[5] = (HPT_U8)(cap>>16); rbuf[6] = (HPT_U8)(cap>>8); rbuf[7] = (HPT_U8)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb->ccb_h.status = 
CAM_REQ_CMP; break; } case READ_6: case READ_10: case READ_16: case WRITE_6: case WRITE_10: case WRITE_16: case 0x13: case 0x2f: { int error; pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request); if(!pCmd){ KdPrint(("Failed to allocate command!")); ccb->ccb_h.status = CAM_BUSY; break; } switch (cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3]; pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4]; break; case READ_16: case WRITE_16: { HPT_U64 block = ((HPT_U64)cdb[2]<<56) | ((HPT_U64)cdb[3]<<48) | ((HPT_U64)cdb[4]<<40) | ((HPT_U64)cdb[5]<<32) | ((HPT_U64)cdb[6]<<24) | ((HPT_U64)cdb[7]<<16) | ((HPT_U64)cdb[8]<<8) | ((HPT_U64)cdb[9]); pCmd->uCmd.Ide.Lba = block; pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8); break; } default: pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8); break; } switch (cdb[0]) { case READ_6: case READ_10: case READ_16: pCmd->flags.data_in = 1; break; case WRITE_6: case WRITE_10: case WRITE_16: pCmd->flags.data_out = 1; break; } pCmd->priv = ext = cmdext_get(vbus_ext); HPT_ASSERT(ext); ext->ccb = ccb; pCmd->target = vd; pCmd->done = os_cmddone; pCmd->buildsgl = os_buildsgl; pCmd->psg = ext->psg; pCmd->flags.physical_sg = 1; error = bus_dmamap_load_ccb(vbus_ext->io_dmat, ext->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d", error)); if (error && error!=EINPROGRESS) { os_printk("bus_dmamap_load error %d", error); cmdext_put(ext); ldm_free_cmds(pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); } return; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_action(struct cam_sim *sim, union ccb *ccb) { PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim); KdPrint(("hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id)); hpt_assert_vbus_locked(vbus_ext); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: hpt_scsi_io(vbus_ext, ccb); return; case XPT_RESET_BUS: ldm_reset_vbus((PVBUS)vbus_ext->vbus); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = osm_max_targets; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = osm_max_targets; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hpt_pci_intr(void *arg) { PVBUS_EXT vbus_ext = (PVBUS_EXT)arg; hpt_lock_vbus(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); hpt_unlock_vbus(vbus_ext); } static void hpt_poll(struct cam_sim *sim) { PVBUS_EXT vbus_ext = cam_sim_softc(sim); hpt_assert_vbus_locked(vbus_ext); ldm_intr((PVBUS)vbus_ext->vbus); } static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { 
KdPrint(("hpt_async")); } static int hpt_shutdown(device_t dev) { KdPrint(("hpt_shutdown(dev=%p)", dev)); return 0; } static int hpt_detach(device_t dev) { /* we don't allow the driver to be unloaded. */ return EBUSY; } static void hpt_ioctl_done(struct _IOCTL_ARG *arg) { arg->ioctl_cmnd = 0; wakeup(arg); } static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args) { ioctl_args->result = -1; ioctl_args->done = hpt_ioctl_done; ioctl_args->ioctl_cmnd = (void *)1; hpt_lock_vbus(vbus_ext); ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args); while (ioctl_args->ioctl_cmnd) { if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; ldm_reset_vbus((PVBUS)vbus_ext->vbus); __hpt_do_tasks(vbus_ext); } /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */ hpt_unlock_vbus(vbus_ext); } static void hpt_do_ioctl(IOCTL_ARG *ioctl_args) { PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { __hpt_do_ioctl(vbus_ext, ioctl_args); if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS) return; } } #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\ IOCTL_ARG arg;\ arg.dwIoControlCode = code;\ arg.lpInBuffer = inbuf;\ arg.lpOutBuffer = outbuf;\ arg.nInBufferSize = insize;\ arg.nOutBufferSize = outsize;\ arg.lpBytesReturned = 0;\ hpt_do_ioctl(&arg);\ arg.result;\ }) #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff)) static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount) { int i; HPT_U32 count = nMaxCount-1; if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES, &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount)) return -1; nMaxCount = (int)pIds[0]; for (i=0; ilock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0); if (hpt_init_vbus(vbus_ext)) { os_printk("fail to initialize hardware"); break; /* FIXME */ } } /* register CAM interface */ ldm_for_each_vbus(vbus, vbus_ext) { struct cam_devq *devq; struct ccb_setasync ccb; if (bus_dma_tag_create(NULL,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */ os_max_sg_descriptors, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &vbus_ext->lock, /* lockfuncarg */ &vbus_ext->io_dmat /* tag */)) { return ; } for (i=0; ivbus_ext = vbus_ext; ext->next = vbus_ext->cmdext_list; vbus_ext->cmdext_list = ext; if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) { os_printk("Can't create dma map(%d)", i); return ; } callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0); } if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) { os_printk("cam_simq_alloc failed"); return ; } vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name, vbus_ext, 0, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq); if (!vbus_ext->sim) { os_printk("cam_sim_alloc failed"); cam_simq_free(devq); return ; } hpt_lock_vbus(vbus_ext); if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) { os_printk("xpt_bus_register failed"); cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE); hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } if (xpt_create_path(&vbus_ext->path, /*periph */ NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { os_printk("xpt_create_path failed"); xpt_bus_deregister(cam_sim_path(vbus_ext->sim)); cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE); 
hpt_unlock_vbus(vbus_ext); vbus_ext->sim = NULL; return ; } hpt_unlock_vbus(vbus_ext); xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = AC_LOST_DEVICE; ccb.callback = hpt_async; ccb.callback_arg = vbus_ext; xpt_action((union ccb *)&ccb); for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle)) { os_printk("can't set up interrupt"); return ; } hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE); } vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT); if (!vbus_ext->shutdown_eh) os_printk("Shutdown event registration failed"); } ldm_for_each_vbus(vbus, vbus_ext) { TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext); if (vbus_ext->tasks) TASK_ENQUEUE(&vbus_ext->worker); } make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "%s", driver_name); } #if defined(KLD_MODULE) typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ }; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ }; static void override_kernel_driver(void) { driverlink_t dl, dlfirst; driver_t *tmpdriver; devclass_t dc = devclass_find("pci"); if (dc){ dlfirst = TAILQ_FIRST(&dc->drivers); for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) { if(strcmp(dl->driver->name, driver_name) == 0) { tmpdriver=dl->driver; dl->driver=dlfirst->driver; dlfirst->driver=tmpdriver; break; } } } } #else #define override_kernel_driver() #endif static void hpt_init(void *dummy) { if (bootverbose) os_printk("%s %s", driver_name_long, driver_ver); override_kernel_driver(); init_config(); hpt_ich.ich_func = hpt_final_init; hpt_ich.ich_arg = NULL; if (config_intrhook_establish(&hpt_ich) != 0) { printf("%s: cannot establish configuration hook\n", driver_name_long); } } SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL); /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { driver_name, driver_methods, sizeof(HBA) }; static devclass_t hpt_devclass; #ifndef TARGETNAME #error "no TARGETNAME found" #endif /* use this to make TARGETNAME be expanded */ #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2) #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5) __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); __MODULE_VERSION(TARGETNAME, 1); __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1); static int 
hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td) { return 0; } static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data; IOCTL_ARG ioctl_args; HPT_U32 bytesReturned; switch (cmd){ case HPT_DO_IOCONTROL: { if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) { KdPrint(("ioctl=%x in=%p len=%d out=%p len=%d\n", piop->dwIoControlCode, piop->lpInBuffer, piop->nInBufferSize, piop->lpOutBuffer, piop->nOutBufferSize)); memset(&ioctl_args, 0, sizeof(ioctl_args)); ioctl_args.dwIoControlCode = piop->dwIoControlCode; ioctl_args.nInBufferSize = piop->nInBufferSize; ioctl_args.nOutBufferSize = piop->nOutBufferSize; ioctl_args.lpBytesReturned = &bytesReturned; if (ioctl_args.nInBufferSize) { ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpInBuffer) goto invalid; if (copyin((void*)piop->lpInBuffer, ioctl_args.lpInBuffer, piop->nInBufferSize)) goto invalid; } if (ioctl_args.nOutBufferSize) { ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK); if (!ioctl_args.lpOutBuffer) goto invalid; } hpt_do_ioctl(&ioctl_args); if (ioctl_args.result==HPT_IOCTL_RESULT_OK) { if (piop->nOutBufferSize) { if (copyout(ioctl_args.lpOutBuffer, (void*)piop->lpOutBuffer, piop->nOutBufferSize)) goto invalid; } if (piop->lpBytesReturned) { if (copyout(&bytesReturned, (void*)piop->lpBytesReturned, sizeof(HPT_U32))) goto invalid; } if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return 0; } invalid: if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF); if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF); return EFAULT; } return EFAULT; } case HPT_SCAN_BUS: { return hpt_rescan_bus(); } default: KdPrint(("invalid command!")); return EFAULT; } } static int hpt_rescan_bus(void) { union ccb *ccb; PVBUS vbus; PVBUS_EXT vbus_ext; ldm_for_each_vbus(vbus, vbus_ext) { if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); } return(0); } Index: head/sys/dev/isci/isci.c =================================================================== --- head/sys/dev/isci/isci.c (revision 295789) +++ head/sys/dev/isci/isci.c (revision 295790) @@ -1,676 +1,676 @@ /*- * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_ISCI, "isci", "isci driver memory allocations"); struct isci_softc *g_isci; uint32_t g_isci_debug_level = 0; static int isci_probe(device_t); static int isci_attach(device_t); static int isci_detach(device_t); int isci_initialize(struct isci_softc *isci); void isci_allocate_dma_buffer_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error); static devclass_t isci_devclass; static device_method_t isci_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isci_probe), DEVMETHOD(device_attach, isci_attach), DEVMETHOD(device_detach, isci_detach), { 0, 0 } }; static driver_t isci_pci_driver = { "isci", isci_pci_methods, sizeof(struct isci_softc), }; DRIVER_MODULE(isci, pci, isci_pci_driver, isci_devclass, 0, 0); MODULE_DEPEND(isci, cam, 1, 1, 1); static struct _pcsid { u_int32_t type; const char *desc; } pci_ids[] = { { 0x1d608086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d618086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" }, { 0x1d628086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d638086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d648086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d658086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d668086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d678086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d688086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d698086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d6a8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" }, { 0x1d6b8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" }, { 0x1d6c8086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d6d8086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d6e8086, "Intel(R) C600 Series Chipset SAS Controller" }, { 0x1d6f8086, "Intel(R) C600 Series Chipset SAS Controller (SATA mode)" }, { 0x00000000, NULL } }; static int isci_probe (device_t device) { u_int32_t type = pci_get_devid(device); struct _pcsid *ep = pci_ids; while (ep->type && ep->type != type) ++ep; if (ep->desc) { device_set_desc(device, ep->desc); return (BUS_PROBE_DEFAULT); } else return (ENXIO); } static int isci_allocate_pci_memory(struct isci_softc *isci) { int i; for (i = 0; i < ISCI_NUM_PCI_BARS; i++) { struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i]; pci_bar->resource_id = PCIR_BAR(i*2); - pci_bar->resource = bus_alloc_resource(isci->device, - SYS_RES_MEMORY, &pci_bar->resource_id, 0, ~0, 1, + pci_bar->resource = bus_alloc_resource_any(isci->device, + SYS_RES_MEMORY, &pci_bar->resource_id, RF_ACTIVE); if(pci_bar->resource == NULL) isci_log_message(0, "ISCI", "unable to allocate pci resource\n"); else { pci_bar->bus_tag = rman_get_bustag(pci_bar->resource); pci_bar->bus_handle = rman_get_bushandle(pci_bar->resource); } } return (0); } 
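/*
 * [Editor's note] The change in this revision - here and in the two hpt
 * hunks above - is mechanical: a bus_alloc_resource() call passing the
 * default range was replaced by the bus_alloc_resource_any() convenience
 * wrapper.  The wrapper, from <sys/bus.h>, is equivalent:
 */
#if 0	/* for reference - the stock wrapper, not new code */
static __inline struct resource *
bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags)
{
	return (bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags));
}
#endif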
static int isci_attach(device_t device) { int error; struct isci_softc *isci = DEVICE2SOFTC(device); g_isci = isci; isci->device = device; pci_enable_busmaster(device); isci_allocate_pci_memory(isci); error = isci_initialize(isci); if (error) { isci_detach(device); return (error); } isci_interrupt_setup(isci); isci_sysctl_initialize(isci); return (0); } static int isci_detach(device_t device) { struct isci_softc *isci = DEVICE2SOFTC(device); int i, phy; for (i = 0; i < isci->controller_count; i++) { struct ISCI_CONTROLLER *controller = &isci->controllers[i]; SCI_STATUS status; void *unmap_buffer; if (controller->scif_controller_handle != NULL) { scic_controller_disable_interrupts( scif_controller_get_scic_handle(controller->scif_controller_handle)); mtx_lock(&controller->lock); status = scif_controller_stop(controller->scif_controller_handle, 0); mtx_unlock(&controller->lock); while (controller->is_started == TRUE) { /* Now poll for interrupts until the controller stop complete * callback is received. */ mtx_lock(&controller->lock); isci_interrupt_poll_handler(controller); mtx_unlock(&controller->lock); pause("isci", 1); } if(controller->sim != NULL) { mtx_lock(&controller->lock); xpt_free_path(controller->path); xpt_bus_deregister(cam_sim_path(controller->sim)); cam_sim_free(controller->sim, TRUE); mtx_unlock(&controller->lock); } } if (controller->timer_memory != NULL) free(controller->timer_memory, M_ISCI); if (controller->remote_device_memory != NULL) free(controller->remote_device_memory, M_ISCI); for (phy = 0; phy < SCI_MAX_PHYS; phy++) { if (controller->phys[phy].cdev_fault) led_destroy(controller->phys[phy].cdev_fault); if (controller->phys[phy].cdev_locate) led_destroy(controller->phys[phy].cdev_locate); } while (1) { sci_pool_get(controller->unmap_buffer_pool, unmap_buffer); if (unmap_buffer == NULL) break; contigfree(unmap_buffer, PAGE_SIZE, M_ISCI); } } /* The SCIF controllers have been stopped, so we can now * free the SCI library memory. 
*/ if (isci->sci_library_memory != NULL) free(isci->sci_library_memory, M_ISCI); for (i = 0; i < ISCI_NUM_PCI_BARS; i++) { struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i]; if (pci_bar->resource != NULL) bus_release_resource(device, SYS_RES_MEMORY, pci_bar->resource_id, pci_bar->resource); } for (i = 0; i < isci->num_interrupts; i++) { struct ISCI_INTERRUPT_INFO *interrupt_info; interrupt_info = &isci->interrupt_info[i]; if(interrupt_info->tag != NULL) bus_teardown_intr(device, interrupt_info->res, interrupt_info->tag); if(interrupt_info->res != NULL) bus_release_resource(device, SYS_RES_IRQ, rman_get_rid(interrupt_info->res), interrupt_info->res); pci_release_msi(device); } pci_disable_busmaster(device); return (0); } int isci_initialize(struct isci_softc *isci) { int error; uint32_t status = 0; uint32_t library_object_size; uint32_t verbosity_mask; uint32_t scic_log_object_mask; uint32_t scif_log_object_mask; uint8_t *header_buffer; library_object_size = scif_library_get_object_size(SCI_MAX_CONTROLLERS); isci->sci_library_memory = malloc(library_object_size, M_ISCI, M_NOWAIT | M_ZERO ); isci->sci_library_handle = scif_library_construct( isci->sci_library_memory, SCI_MAX_CONTROLLERS); sci_object_set_association( isci->sci_library_handle, (void *)isci); verbosity_mask = (1<<SCI_LOG_VERBOSITY_ERROR) | (1<<SCI_LOG_VERBOSITY_WARNING) | (1<<SCI_LOG_VERBOSITY_INFO) | (1<<SCI_LOG_VERBOSITY_TRACE); scic_log_object_mask = 0xFFFFFFFF; scif_log_object_mask = 0xFFFFFFFF; sci_logger_enable(sci_object_get_logger(isci->sci_library_handle), scif_log_object_mask, verbosity_mask); sci_logger_enable(sci_object_get_logger( scif_library_get_scic_handle(isci->sci_library_handle)), scic_log_object_mask, verbosity_mask); header_buffer = (uint8_t *)&isci->pci_common_header; for (uint8_t i = 0; i < sizeof(isci->pci_common_header); i++) header_buffer[i] = pci_read_config(isci->device, i, 1); scic_library_set_pci_info( scif_library_get_scic_handle(isci->sci_library_handle), &isci->pci_common_header); isci->oem_parameters_found = FALSE; isci_get_oem_parameters(isci); /* trigger interrupt if 32 completions occur before timeout expires */ isci->coalesce_number = 32; /* trigger interrupt if 2 microseconds elapse after a completion occurs, * regardless of whether "coalesce_number" completions have occurred */ isci->coalesce_timeout = 2; isci->controller_count = scic_library_get_pci_device_controller_count( scif_library_get_scic_handle(isci->sci_library_handle)); for (int index = 0; index < isci->controller_count; index++) { struct ISCI_CONTROLLER *controller = &isci->controllers[index]; SCI_CONTROLLER_HANDLE_T scif_controller_handle; controller->index = index; isci_controller_construct(controller, isci); scif_controller_handle = controller->scif_controller_handle; status = isci_controller_initialize(controller); if(status != SCI_SUCCESS) { isci_log_message(0, "ISCI", "isci_controller_initialize FAILED: %x\n", status); return (status); } error = isci_controller_allocate_memory(controller); if (error != 0) return (error); scif_controller_set_interrupt_coalescence( scif_controller_handle, isci->coalesce_number, isci->coalesce_timeout); } /* FreeBSD provides us a hook to ensure we get a chance to start * our controllers and complete initial domain discovery before * it searches for the boot device. Once we're done, we'll * disestablish the hook, signaling the kernel that it can proceed * with the boot process.
*/ isci->config_hook.ich_func = &isci_controller_start; isci->config_hook.ich_arg = &isci->controllers[0]; if (config_intrhook_establish(&isci->config_hook) != 0) isci_log_message(0, "ISCI", "config_intrhook_establish failed!\n"); return (status); } void isci_allocate_dma_buffer_callback(void *arg, bus_dma_segment_t *seg, int nseg, int error) { struct ISCI_MEMORY *memory = (struct ISCI_MEMORY *)arg; memory->error = error; if (nseg != 1 || error != 0) isci_log_message(0, "ISCI", "Failed to allocate physically contiguous memory!\n"); else memory->physical_address = seg->ds_addr; } int isci_allocate_dma_buffer(device_t device, struct ISCI_MEMORY *memory) { uint32_t status; status = bus_dma_tag_create(bus_get_dma_tag(device), 0x40 /* cacheline alignment */, 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, memory->size, 0x1 /* we want physically contiguous */, memory->size, 0, NULL, NULL, &memory->dma_tag); if(status == ENOMEM) { isci_log_message(0, "ISCI", "bus_dma_tag_create failed\n"); return (status); } status = bus_dmamem_alloc(memory->dma_tag, (void **)&memory->virtual_address, BUS_DMA_ZERO, &memory->dma_map); if(status == ENOMEM) { isci_log_message(0, "ISCI", "bus_dmamem_alloc failed\n"); return (status); } status = bus_dmamap_load(memory->dma_tag, memory->dma_map, (void *)memory->virtual_address, memory->size, isci_allocate_dma_buffer_callback, memory, 0); if(status == EINVAL) { isci_log_message(0, "ISCI", "bus_dmamap_load failed\n"); return (status); } return (0); } /** * @brief This callback method asks the user to associate the supplied * lock with an operating environment specific locking construct. * * @param[in] controller This parameter specifies the controller with * which this lock is to be associated. * @param[in] lock This parameter specifies the lock for which the * user should associate an operating environment specific * locking object. * * @see The SCI_LOCK_LEVEL enumeration for more information. * * @return none. */ void scif_cb_lock_associate(SCI_CONTROLLER_HANDLE_T controller, SCI_LOCK_HANDLE_T lock) { } /** * @brief This callback method asks the user to de-associate the supplied * lock with an operating environment specific locking construct. * * @param[in] controller This parameter specifies the controller with * which this lock is to be de-associated. * @param[in] lock This parameter specifies the lock for which the * user should de-associate an operating environment specific * locking object. * * @see The SCI_LOCK_LEVEL enumeration for more information. * * @return none. */ void scif_cb_lock_disassociate(SCI_CONTROLLER_HANDLE_T controller, SCI_LOCK_HANDLE_T lock) { } /** * @brief This callback method asks the user to acquire/get the lock. * This method should pend until the lock has been acquired. * * @param[in] controller This parameter specifies the controller with * which this lock is associated. * @param[in] lock This parameter specifies the lock to be acquired. * * @return none */ void scif_cb_lock_acquire(SCI_CONTROLLER_HANDLE_T controller, SCI_LOCK_HANDLE_T lock) { } /** * @brief This callback method asks the user to release a lock. * * @param[in] controller This parameter specifies the controller with * which this lock is associated. * @param[in] lock This parameter specifies the lock to be released. * * @return none */ void scif_cb_lock_release(SCI_CONTROLLER_HANDLE_T controller, SCI_LOCK_HANDLE_T lock) { } /** * @brief This callback method creates an OS specific deferred task * for internal usage. 
The handler for the deferred task is stored by the OS * driver. * * @param[in] controller This parameter specifies the controller object * with which this callback is associated. * * @return none */ void scif_cb_start_internal_io_task_create(SCI_CONTROLLER_HANDLE_T controller) { } /** * @brief This callback method schedules an OS specific deferred task. * * @param[in] controller This parameter specifies the controller * object with which this callback is associated. * @param[in] start_internal_io_task_routine This parameter specifies the * sci start_internal_io routine. * @param[in] context This parameter specifies a handle to a parameter * that will be passed into the "start_internal_io_task_routine" * when it is invoked. * * @return none */ void scif_cb_start_internal_io_task_schedule(SCI_CONTROLLER_HANDLE_T scif_controller, FUNCPTR start_internal_io_task_routine, void *context) { /** @todo Use a FreeBSD taskqueue to defer this routine to a later time, * rather than calling the routine inline. */ SCI_START_INTERNAL_IO_ROUTINE sci_start_internal_io_routine = (SCI_START_INTERNAL_IO_ROUTINE)start_internal_io_task_routine; sci_start_internal_io_routine(context); } /** * @brief In this method the user must write to PCI memory. This method * is used for access to both memory space and IO space. * * @param[in] controller The controller for which to write a DWORD. * @param[in] address This parameter depicts the address into * which to write. * @param[in] write_value This parameter depicts the value being written * into the PCI memory location. * * @todo These PCI memory access calls likely need to be optimized into macros. */ void scic_cb_pci_write_dword(SCI_CONTROLLER_HANDLE_T scic_controller, void *address, uint32_t write_value) { SCI_CONTROLLER_HANDLE_T scif_controller = (SCI_CONTROLLER_HANDLE_T) sci_object_get_association(scic_controller); struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller); struct isci_softc *isci = isci_controller->isci; uint32_t bar = (uint32_t)(((POINTER_UINT)address & 0xF0000000) >> 28); bus_size_t offset = (bus_size_t)((POINTER_UINT)address & 0x0FFFFFFF); bus_space_write_4(isci->pci_bar[bar].bus_tag, isci->pci_bar[bar].bus_handle, offset, write_value); } /** * @brief In this method the user must read from PCI memory. This method * is used for access to both memory space and IO space. * * @param[in] controller The controller for which to read a DWORD. * @param[in] address This parameter depicts the address from * which to read. * * @return The value being returned from the PCI memory location. * * @todo These PCI memory access calls likely need to be optimized into macros. */ uint32_t scic_cb_pci_read_dword(SCI_CONTROLLER_HANDLE_T scic_controller, void *address) { SCI_CONTROLLER_HANDLE_T scif_controller = (SCI_CONTROLLER_HANDLE_T)sci_object_get_association(scic_controller); struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller); struct isci_softc *isci = isci_controller->isci; uint32_t bar = (uint32_t)(((POINTER_UINT)address & 0xF0000000) >> 28); bus_size_t offset = (bus_size_t)((POINTER_UINT)address & 0x0FFFFFFF); return (bus_space_read_4(isci->pci_bar[bar].bus_tag, isci->pci_bar[bar].bus_handle, offset)); } /** * @brief This method is called when the core requires the OS driver * to stall execution. This method is utilized during initialization * or non-performance paths only.
* * @param[in] microseconds This parameter specifies the number of * microseconds for which to stall. The operating system driver * is allowed to round this value up where necessary. * * @return none. */ void scic_cb_stall_execution(uint32_t microseconds) { DELAY(microseconds); } /** * @brief In this method the user must return the base address register (BAR) * value for the supplied base address register number. * * @param[in] controller The controller for which to retrieve the bar number. * @param[in] bar_number This parameter depicts the BAR index/number to be read. * * @return Return a pointer value indicating the contents of the BAR. * @retval NULL indicates an invalid BAR index/number was specified. * @retval All other values indicate a valid VIRTUAL address from the BAR. */ void * scic_cb_pci_get_bar(SCI_CONTROLLER_HANDLE_T controller, uint16_t bar_number) { return ((void *)(POINTER_UINT)((uint32_t)bar_number << 28)); } /** * @brief This method informs the SCI Core user that a phy/link became * ready, but the phy is not allowed in the port. In some * situations the underlying hardware only allows for certain phy * to port mappings. If these mappings are violated, then this * API is invoked. * * @param[in] controller This parameter represents the controller which * contains the port. * @param[in] port This parameter specifies the SCI port object for which * the callback is being invoked. * @param[in] phy This parameter specifies the phy that came ready, but the * phy can't be a valid member of the port. * * @return none */ void scic_cb_port_invalid_link_up(SCI_CONTROLLER_HANDLE_T controller, SCI_PORT_HANDLE_T port, SCI_PHY_HANDLE_T phy) { } Index: head/sys/dev/ixgb/if_ixgb.c =================================================================== --- head/sys/dev/ixgb/if_ixgb.c (revision 295789) +++ head/sys/dev/ixgb/if_ixgb.c (revision 295790) @@ -1,2536 +1,2536 @@ /******************************************************************************* Copyright (c) 2001-2004, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int ixgb_display_debug_stats = 0; /********************************************************************* * Linked list of board private structures for all NICs found *********************************************************************/ struct adapter *ixgb_adapter_list = NULL; /********************************************************************* * Driver version *********************************************************************/ char ixgb_driver_version[] = "1.0.6"; char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixgb_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixgb_vendor_info_t ixgb_vendor_info_array[] = { /* Intel(R) PRO/10000 Network Connection */ {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *ixgb_strings[] = { "Intel(R) PRO/10GbE Network Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixgb_probe(device_t); static int ixgb_attach(device_t); static int ixgb_detach(device_t); static int ixgb_shutdown(device_t); static void ixgb_intr(void *); static void ixgb_start(struct ifnet *); static void ixgb_start_locked(struct ifnet *); static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); static uint64_t ixgb_get_counter(struct ifnet *, ift_counter); static void ixgb_watchdog(struct adapter *); static void ixgb_init(void *); static void ixgb_init_locked(struct adapter *); static void ixgb_stop(void *); static void ixgb_media_status(struct ifnet *, struct ifmediareq *); static int ixgb_media_change(struct ifnet *); static void ixgb_identify_hardware(struct adapter *); static int ixgb_allocate_pci_resources(struct adapter *); static void ixgb_free_pci_resources(struct adapter *); static void ixgb_local_timer(void *); static int ixgb_hardware_init(struct adapter *); static int ixgb_setup_interface(device_t, struct adapter *); static int ixgb_setup_transmit_structures(struct adapter *); static void ixgb_initialize_transmit_unit(struct adapter *); static int ixgb_setup_receive_structures(struct adapter *); static void ixgb_initialize_receive_unit(struct adapter *); static void ixgb_enable_intr(struct adapter *); static void ixgb_disable_intr(struct adapter *); static void ixgb_free_transmit_structures(struct adapter *); static void ixgb_free_receive_structures(struct adapter *); static void ixgb_update_stats_counters(struct adapter *); static void ixgb_clean_transmit_interrupts(struct adapter *); static int 
ixgb_allocate_receive_structures(struct adapter *); static int ixgb_allocate_transmit_structures(struct adapter *); static int ixgb_process_receive_interrupts(struct adapter *, int); static void ixgb_receive_checksum(struct adapter *, struct ixgb_rx_desc * rx_desc, struct mbuf *); static void ixgb_transmit_checksum_setup(struct adapter *, struct mbuf *, u_int8_t *); static void ixgb_set_promisc(struct adapter *); static void ixgb_disable_promisc(struct adapter *); static void ixgb_set_multi(struct adapter *); static void ixgb_print_hw_stats(struct adapter *); static void ixgb_print_link_status(struct adapter *); static int ixgb_get_buf(int i, struct adapter *, struct mbuf *); static void ixgb_enable_vlans(struct adapter * adapter); static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ixgb_dma_malloc(struct adapter *, bus_size_t, struct ixgb_dma_alloc *, int); static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); #ifdef DEVICE_POLLING static poll_handler_t ixgb_poll; #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixgb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixgb_probe), DEVMETHOD(device_attach, ixgb_attach), DEVMETHOD(device_detach, ixgb_detach), DEVMETHOD(device_shutdown, ixgb_shutdown), DEVMETHOD_END }; static driver_t ixgb_driver = { "ixgb", ixgb_methods, sizeof(struct adapter), }; static devclass_t ixgb_devclass; DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); MODULE_DEPEND(ixgb, pci, 1, 1, 1); MODULE_DEPEND(ixgb, ether, 1, 1, 1); /* some defines for controlling descriptor fetches in h/w */ #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefetch below this */ #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is * pushed this many descriptors from * head */ #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ /********************************************************************* * Device identification routine * * ixgb_probe determines if the driver should be loaded on the * adapter based on the PCI vendor/device id of the adapter. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_probe(device_t dev) { ixgb_vendor_info_t *ent; u_int16_t pci_vendor_id = 0; u_int16_t pci_device_id = 0; u_int16_t pci_subvendor_id = 0; u_int16_t pci_subdevice_id = 0; char adapter_name[60]; INIT_DEBUGOUT("ixgb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != IXGB_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixgb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s, Version - %s", ixgb_strings[ent->index], ixgb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded.
* This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_attach(device_t dev) { struct adapter *adapter; int tsize, rsize; int error = 0; device_printf(dev, "%s\n", ixgb_copyright); INIT_DEBUGOUT("ixgb_attach: begin"); /* Allocate, clear, and link in our adapter structure */ if (!(adapter = device_get_softc(dev))) { device_printf(dev, "adapter structure allocation failed\n"); return (ENOMEM); } bzero(adapter, sizeof(struct adapter)); adapter->dev = dev; adapter->osdep.dev = dev; IXGB_LOCK_INIT(adapter, device_get_nameunit(dev)); if (ixgb_adapter_list != NULL) ixgb_adapter_list->prev = adapter; adapter->next = ixgb_adapter_list; ixgb_adapter_list = adapter; /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, (void *)adapter, 0, ixgb_sysctl_stats, "I", "Statistics"); callout_init_mtx(&adapter->timer, &adapter->mtx, 0); /* Determine hardware revision */ ixgb_identify_hardware(adapter); /* Parameters (to be read from user) */ adapter->num_tx_desc = IXGB_MAX_TXD; adapter->num_rx_desc = IXGB_MAX_RXD; adapter->tx_int_delay = TIDV; adapter->rx_int_delay = RDTR; adapter->rx_buffer_len = IXGB_RXBUFFER_2048; adapter->hw.fc.high_water = FCRTH; adapter->hw.fc.low_water = FCRTL; adapter->hw.fc.pause_time = FCPAUSE; adapter->hw.fc.send_xon = TRUE; adapter->hw.fc.type = FLOW_CONTROL; /* Set the max frame size assuming standard ethernet sized frames */ adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; if (ixgb_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } tsize = IXGB_ROUNDUP(adapter->num_tx_desc * sizeof(struct ixgb_tx_desc), 4096); /* Allocate Transmit Descriptor ring */ if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TxDescriptor memory\n"); error = ENOMEM; goto err_tx_desc; } adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr; rsize = IXGB_ROUNDUP(adapter->num_rx_desc * sizeof(struct ixgb_rx_desc), 4096); /* Allocate Receive Descriptor ring */ if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate rx_desc memory\n"); error = ENOMEM; goto err_rx_desc; } adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr; /* Allocate multicast array memory. 
*/ adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_hw_init; } /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { device_printf(dev, "Unable to initialize the hardware\n"); error = EIO; goto err_hw_init; } /* Setup OS specific network interface */ if (ixgb_setup_interface(dev, adapter) != 0) goto err_hw_init; /* Initialize statistics */ ixgb_clear_hw_cntrs(&adapter->hw); ixgb_update_stats_counters(adapter); INIT_DEBUGOUT("ixgb_attach: end"); return (0); err_hw_init: ixgb_dma_free(adapter, &adapter->rxdma); err_rx_desc: ixgb_dma_free(adapter, &adapter->txdma); err_tx_desc: err_pci: if (adapter->ifp != NULL) if_free(adapter->ifp); ixgb_free_pci_resources(adapter); sysctl_ctx_free(&adapter->sysctl_ctx); free(adapter->mta, M_DEVBUF); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("ixgb_detach: begin"); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IXGB_LOCK(adapter); adapter->in_detach = 1; ixgb_stop(adapter); IXGB_UNLOCK(adapter); #if __FreeBSD_version < 500000 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); #else ether_ifdetach(ifp); #endif callout_drain(&adapter->timer); ixgb_free_pci_resources(adapter); #if __FreeBSD_version >= 500000 if_free(ifp); #endif /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { ixgb_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { ixgb_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } /* Remove from the adapter list */ if (ixgb_adapter_list == adapter) ixgb_adapter_list = adapter->next; if (adapter->next != NULL) adapter->next->prev = adapter->prev; if (adapter->prev != NULL) adapter->prev->next = adapter->next; free(adapter->mta, M_DEVBUF); IXGB_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixgb_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); IXGB_LOCK(adapter); ixgb_stop(adapter); IXGB_UNLOCK(adapter); return (0); } /********************************************************************* * Transmit entry point * * ixgb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
**********************************************************************/ static void ixgb_start_locked(struct ifnet * ifp) { struct mbuf *m_head; struct adapter *adapter = ifp->if_softc; IXGB_LOCK_ASSERT(adapter); if (!adapter->link_active) return; while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (ixgb_encap(adapter, m_head)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ #if __FreeBSD_version < 500000 if (ifp->if_bpf) bpf_mtap(ifp, m_head); #else ETHER_BPF_MTAP(ifp, m_head); #endif /* Set timeout in case hardware has problems transmitting */ adapter->tx_timer = IXGB_TX_TIMEOUT; } return; } static void ixgb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; IXGB_LOCK(adapter); ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * Ioctl entry point * * ixgb_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data) { int mask, error = 0; struct ifreq *ifr = (struct ifreq *) data; struct adapter *adapter = ifp->if_softc; if (adapter->in_detach) goto out; switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) { error = EINVAL; } else { IXGB_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->hw.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); IXGB_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ixgb_init_locked(adapter); } ixgb_disable_promisc(adapter); ixgb_set_promisc(adapter); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_stop(adapter); } } IXGB_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ixgb_set_multi(adapter); ixgb_enable_intr(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(ixgb_poll, ifp); if (error) return(error); IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IXGB_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IXGB_LOCK(adapter); ixgb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IXGB_UNLOCK(adapter); } } #endif /* DEVICE_POLLING */ if (mask & IFCAP_HWCSUM) { if (IFCAP_HWCSUM & ifp->if_capenable) ifp->if_capenable &= ~IFCAP_HWCSUM; else ifp->if_capenable |= IFCAP_HWCSUM; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixgb_init(adapter); } break; 
default: IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command); error = EINVAL; } out: return (error); } /********************************************************************* * Watchdog entry point * * This routine is called whenever hardware quits transmitting. * **********************************************************************/ static void ixgb_watchdog(struct adapter *adapter) { struct ifnet *ifp; ifp = adapter->ifp; /* * If we are in this routine because of pause frames, then don't * reset the hardware. */ if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) { adapter->tx_timer = IXGB_TX_TIMEOUT; return; } if_printf(ifp, "watchdog timeout -- resetting\n"); ixgb_stop(adapter); ixgb_init_locked(adapter); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return; } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void ixgb_init_locked(struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_init: begin"); IXGB_LOCK_ASSERT(adapter); ixgb_stop(adapter); ifp = adapter->ifp; /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr, IXGB_ETH_LENGTH_OF_ADDRESS); /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { if_printf(ifp, "Unable to initialize the hardware\n"); return; } ixgb_enable_vlans(adapter); /* Prepare transmit descriptors and buffers */ if (ixgb_setup_transmit_structures(adapter)) { if_printf(ifp, "Could not setup transmit structures\n"); ixgb_stop(adapter); return; } ixgb_initialize_transmit_unit(adapter); /* Setup Multicast table */ ixgb_set_multi(adapter); /* Prepare receive descriptors and buffers */ if (ixgb_setup_receive_structures(adapter)) { if_printf(ifp, "Could not setup receive structures\n"); ixgb_stop(adapter); return; } ixgb_initialize_receive_unit(adapter); /* Don't lose promiscuous settings */ ixgb_set_promisc(adapter); ifp = adapter->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = IXGB_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; /* Enable jumbo frames */ if (ifp->if_mtu > ETHERMTU) { uint32_t temp_reg; IXGB_WRITE_REG(&adapter->hw, MFS, adapter->hw.max_frame_size << IXGB_MFS_SHIFT); temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0); temp_reg |= IXGB_CTRL0_JFE; IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg); } callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter); ixgb_clear_hw_cntrs(&adapter->hw); #ifdef DEVICE_POLLING /* * Only disable interrupts if we are polling, make sure they are on * otherwise. 
*/ if (ifp->if_capenable & IFCAP_POLLING) ixgb_disable_intr(adapter); else #endif ixgb_enable_intr(adapter); return; } static void ixgb_init(void *arg) { struct adapter *adapter = arg; IXGB_LOCK(adapter); ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); return; } #ifdef DEVICE_POLLING static int ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; u_int32_t reg_icr; int rx_npkts; IXGB_LOCK_ASSERT(adapter); if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); } } rx_npkts = ixgb_process_receive_interrupts(adapter, count); ixgb_clean_transmit_interrupts(adapter); if (ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); return (rx_npkts); } static int ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; int rx_npkts = 0; IXGB_LOCK(adapter); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = ixgb_poll_locked(ifp, cmd, count); IXGB_UNLOCK(adapter); return (rx_npkts); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Interrupt Service routine * **********************************************************************/ static void ixgb_intr(void *arg) { u_int32_t loop_cnt = IXGB_MAX_INTR; u_int32_t reg_icr; struct ifnet *ifp; struct adapter *adapter = arg; boolean_t rxdmt0 = FALSE; IXGB_LOCK(adapter); ifp = adapter->ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { IXGB_UNLOCK(adapter); return; } #endif reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr == 0) { IXGB_UNLOCK(adapter); return; } if (reg_icr & IXGB_INT_RXDMT0) rxdmt0 = TRUE; #ifdef _SV_ if (reg_icr & IXGB_INT_RXDMT0) adapter->sv_stats.icr_rxdmt0++; if (reg_icr & IXGB_INT_RXO) adapter->sv_stats.icr_rxo++; if (reg_icr & IXGB_INT_RXT0) adapter->sv_stats.icr_rxt0++; if (reg_icr & IXGB_INT_TXDW) adapter->sv_stats.icr_TXDW++; #endif /* _SV_ */ /* Link status change */ if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); } while (loop_cnt > 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_process_receive_interrupts(adapter, -1); ixgb_clean_transmit_interrupts(adapter); } loop_cnt--; } if (rxdmt0 && adapter->raidc) { IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0); IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0); } if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("ixgb_media_status: begin"); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->hw.link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt option with ifconfig.
* **********/ static int ixgb_media_change(struct ifnet * ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("ixgb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head) { u_int8_t txd_popts; int i, j, error, nsegs; #if __FreeBSD_version < 500000 struct ifvlan *ifv = NULL; #elif __FreeBSD_version < 700000 struct m_tag *mtag; #endif bus_dma_segment_t segs[IXGB_MAX_SCATTER]; bus_dmamap_t map; struct ixgb_buffer *tx_buffer = NULL; struct ixgb_tx_desc *current_tx_desc = NULL; struct ifnet *ifp = adapter->ifp; /* * Force a cleanup if number of TX descriptors available hits the * threshold */ if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { ixgb_clean_transmit_interrupts(adapter); } if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { adapter->no_tx_desc_avail1++; return (ENOBUFS); } /* * Map the packet for DMA. */ if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { adapter->no_tx_map_avail++; return (ENOMEM); } error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { adapter->no_tx_dma_setup++; if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; " "error %u\n", error); bus_dmamap_destroy(adapter->txtag, map); return (error); } KASSERT(nsegs != 0, ("ixgb_encap: empty packet")); if (nsegs > adapter->num_tx_desc_avail) { adapter->no_tx_desc_avail2++; bus_dmamap_destroy(adapter->txtag, map); return (ENOBUFS); } if (ifp->if_hwassist > 0) { ixgb_transmit_checksum_setup(adapter, m_head, &txd_popts); } else txd_popts = 0; /* Find out if we are in vlan mode */ #if __FreeBSD_version < 500000 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #elif __FreeBSD_version < 700000 mtag = VLAN_OUTPUT_TAG(ifp, m_head); #endif i = adapter->next_avail_tx_desc; for (j = 0; j < nsegs; j++) { tx_buffer = &adapter->tx_buffer_area[i]; current_tx_desc = &adapter->tx_desc_base[i]; current_tx_desc->buff_addr = htole64(segs[j].ds_addr); current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); current_tx_desc->popts = txd_popts; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; } adapter->num_tx_desc_avail -= nsegs; adapter->next_avail_tx_desc = i; #if __FreeBSD_version < 500000 if (ifv != NULL) { /* Set the vlan id */ current_tx_desc->vlan = ifv->ifv_tag; #elif __FreeBSD_version < 700000 if (mtag != NULL) { /* Set the vlan id */ current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); #else if (m_head->m_flags & M_VLANTAG) { current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag; #endif /* Tell hardware to add tag */ current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; } tx_buffer->m_head = m_head; tx_buffer->map = map; bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet needs End Of Packet (EOP) */ current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); /* * Advance the Transmit Descriptor Tail (Tdt); this tells the hardware * that this frame is available to transmit.
*/ IXGB_WRITE_REG(&adapter->hw, TDT, i); return (0); } static void ixgb_set_promisc(struct adapter * adapter) { u_int32_t reg_rctl; struct ifnet *ifp = adapter->ifp; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= IXGB_RCTL_MPE; reg_rctl &= ~IXGB_RCTL_UPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } return; } static void ixgb_disable_promisc(struct adapter * adapter) { u_int32_t reg_rctl; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= (~IXGB_RCTL_UPE); reg_rctl &= (~IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void ixgb_set_multi(struct adapter * adapter) { u_int32_t reg_rctl = 0; u_int8_t *mta; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("ixgb_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS * MAX_NUM_MULTICAST_ADDRESSES); if_maddr_rlock(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); mcnt++; } if_maddr_runlock(ifp); if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl |= IXGB_RCTL_MPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); return; } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. * **********************************************************************/ static void ixgb_local_timer(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK_ASSERT(adapter); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ixgb_update_stats_counters(adapter); if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_print_hw_stats(adapter); } if (adapter->tx_timer != 0 && --adapter->tx_timer == 0) ixgb_watchdog(adapter); callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter); } static void ixgb_print_link_status(struct adapter * adapter) { if (adapter->hw.link_up) { if (!adapter->link_active) { if_printf(adapter->ifp, "Link is up %d Mbps %s \n", 10000, "Full Duplex"); adapter->link_active = 1; } } else { if (adapter->link_active) { if_printf(adapter->ifp, "Link is Down \n"); adapter->link_active = 0; } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. 
* **********/ static void ixgb_stop(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK_ASSERT(adapter); INIT_DEBUGOUT("ixgb_stop: begin\n"); ixgb_disable_intr(adapter); adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); callout_stop(&adapter->timer); ixgb_free_transmit_structures(adapter); ixgb_free_receive_structures(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); adapter->tx_timer = 0; return; } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void ixgb_identify_hardware(struct adapter * adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MacType, etc. based on this PCI info */ switch (adapter->hw.device_id) { case IXGB_DEVICE_ID_82597EX: case IXGB_DEVICE_ID_82597EX_SR: adapter->hw.mac_type = ixgb_82597; break; default: INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id); device_printf(dev, "unsupported device id 0x%x\n", adapter->hw.device_id); } return; } static int ixgb_allocate_pci_resources(struct adapter * adapter) { int rid; device_t dev = adapter->dev; rid = IXGB_MMBA; - adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY, - &rid, 0, ~0, 1, + adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); if (!(adapter->res_memory)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->res_memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory); adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; rid = 0x0; - adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ, - &rid, 0, ~0, 1, - RF_SHAREABLE | RF_ACTIVE); + adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, + &rid, + RF_SHAREABLE | RF_ACTIVE); if (!(adapter->res_interrupt)) { device_printf(dev, "Unable to allocate bus resource: interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, NULL, (void (*) (void *))ixgb_intr, adapter, &adapter->int_handler_tag)) { device_printf(dev, "Error registering interrupt handler!\n"); return (ENXIO); } adapter->hw.back = &adapter->osdep; return (0); } static void ixgb_free_pci_resources(struct adapter * adapter) { device_t dev = adapter->dev; if (adapter->res_interrupt != NULL) { bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag); bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->res_interrupt); } if (adapter->res_memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA, adapter->res_memory); } if (adapter->res_ioport != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, adapter->res_ioport); } return; }
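The ixgb hunks above make the same substitution for the memory BAR and the IRQ, leaving the rid bookkeeping that ixgb_free_pci_resources() depends on untouched: a resource must be released with the same type and rid it was allocated under. A minimal attach/detach sketch of that pairing (hypothetical xx_softc and fields, for illustration only, not ixgb code):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/bus.h>
	#include <sys/rman.h>
	#include <machine/bus.h>
	#include <machine/resource.h>
	#include <dev/pci/pcireg.h>

	/* Hypothetical softc; illustrative only, not part of the ixgb driver. */
	struct xx_softc {
		int		 mem_rid;	/* resource id of the BAR */
		struct resource	*mem_res;	/* the mapped BAR itself */
	};

	static int
	xx_attach(device_t dev)
	{
		struct xx_softc *sc = device_get_softc(dev);

		/* Let the PCI bus pick the window for BAR 0. */
		sc->mem_rid = PCIR_BAR(0);
		sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->mem_rid, RF_ACTIVE);
		if (sc->mem_res == NULL)
			return (ENXIO);
		return (0);
	}

	static int
	xx_detach(device_t dev)
	{
		struct xx_softc *sc = device_get_softc(dev);

		/* Release with the same type and rid used at allocation time. */
		if (sc->mem_res != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
			    sc->mem_res);
		return (0);
	}

ixgb keeps the equivalent state in its adapter structure (res_memory under the fixed rid IXGB_MMBA, res_interrupt under rid 0), which is why only the allocation calls needed to change in this commit.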
/********************************************************************* * * Initialize the hardware to a configuration as specified by the * adapter structure. The controller is reset, the EEPROM is * verified, the MAC address is set, then the shared initialization * routines are called. * **********************************************************************/ static int ixgb_hardware_init(struct adapter * adapter) { /* Issue a global reset */ adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n"); return (EIO); } if (!ixgb_init_hw(&adapter->hw)) { device_printf(adapter->dev, "Hardware Initialization Failed"); return (EIO); } return (0); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int ixgb_setup_interface(device_t dev, struct adapter * adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } #if __FreeBSD_version >= 502000 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); #else ifp->if_unit = device_get_unit(dev); ifp->if_name = "ixgb"; #endif ifp->if_baudrate = 1000000000; ifp->if_init = ixgb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixgb_ioctl; ifp->if_start = ixgb_start; ifp->if_get_counter = ixgb_get_counter; ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1; #if __FreeBSD_version < 500000 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #else ether_ifattach(ifp, adapter->hw.curr_mac_addr); #endif ifp->if_capabilities = IFCAP_HWCSUM; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); #if __FreeBSD_version >= 500000 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; #endif ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change, ixgb_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /******************************************************************** * Manage DMA'able memory. 
*******************************************************************/ static void ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs->ds_addr; return; } static int ixgb_dma_malloc(struct adapter * adapter, bus_size_t size, struct ixgb_dma_alloc * dma, int mapflags) { device_t dev; int r; dev = adapter->dev; r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &dma->dma_tag); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; " "error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; " "error %u\n", r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ixgb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; " "error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. * **********************************************************************/ static int ixgb_allocate_transmit_structures(struct adapter * adapter) { if (!(adapter->tx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n"); return ENOMEM; } bzero(adapter->tx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_tx_desc); return 0; } /********************************************************************* * * Allocate and initialize transmit structures. * **********************************************************************/ static int ixgb_setup_transmit_structures(struct adapter * adapter) { /* * Setup DMA descriptor areas. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ IXGB_MAX_SCATTER, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->txtag)) { device_printf(adapter->dev, "Unable to allocate TX DMA tag\n"); return (ENOMEM); } if (ixgb_allocate_transmit_structures(adapter)) return ENOMEM; bzero((void *)adapter->tx_desc_base, (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); adapter->next_avail_tx_desc = 0; adapter->oldest_used_tx_desc = 0; /* Set number of descriptors available */ adapter->num_tx_desc_avail = adapter->num_tx_desc; /* Set checksum context */ adapter->active_checksum_context = OFFLOAD_NONE; return 0; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void ixgb_initialize_transmit_unit(struct adapter * adapter) { u_int32_t reg_tctl; u_int64_t tdba = adapter->txdma.dma_paddr; /* Setup the Base and Length of the Tx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); IXGB_WRITE_REG(&adapter->hw, TDLEN, adapter->num_tx_desc * sizeof(struct ixgb_tx_desc)); /* Setup the HW Tx Head and Tail descriptor pointers */ IXGB_WRITE_REG(&adapter->hw, TDH, 0); IXGB_WRITE_REG(&adapter->hw, TDT, 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", IXGB_READ_REG(&adapter->hw, TDBAL), IXGB_READ_REG(&adapter->hw, TDLEN)); IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); /* Program the Transmit Control Register */ reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL); reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl); /* Setup Transmit Descriptor Settings for this adapter */ adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS; if (adapter->tx_int_delay > 0) adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE; return; } /********************************************************************* * * Free all transmit related data structures. * **********************************************************************/ static void ixgb_free_transmit_structures(struct adapter * adapter) { struct ixgb_buffer *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_structures: begin"); if (adapter->tx_buffer_area != NULL) { tx_buffer = adapter->tx_buffer_area; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); } tx_buffer->m_head = NULL; } } if (adapter->tx_buffer_area != NULL) { free(adapter->tx_buffer_area, M_DEVBUF); adapter->tx_buffer_area = NULL; } if (adapter->txtag != NULL) { bus_dma_tag_destroy(adapter->txtag); adapter->txtag = NULL; } return; } /********************************************************************* * * The offload context needs to be set when we transfer the first * packet of a particular protocol (TCP/UDP). We change the * context only if the protocol type changes. 
* **********************************************************************/ static void ixgb_transmit_checksum_setup(struct adapter * adapter, struct mbuf * mp, u_int8_t * txd_popts) { struct ixgb_context_desc *TXD; struct ixgb_buffer *tx_buffer; int curr_txd; if (mp->m_pkthdr.csum_flags) { if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) return; else adapter->active_checksum_context = OFFLOAD_TCP_IP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_UDP_IP) return; else adapter->active_checksum_context = OFFLOAD_UDP_IP; } else { *txd_popts = 0; return; } } else { *txd_popts = 0; return; } /* * If we reach this point, the checksum offload context needs to be * reset. */ curr_txd = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[curr_txd]; TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd]; TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip); TXD->tucse = 0; TXD->mss = 0; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct tcphdr, th_sum); } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct udphdr, uh_sum); } TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE; tx_buffer->m_head = NULL; if (++curr_txd == adapter->num_tx_desc) curr_txd = 0; adapter->num_tx_desc_avail--; adapter->next_avail_tx_desc = curr_txd; return; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void ixgb_clean_transmit_interrupts(struct adapter * adapter) { int i, num_avail; struct ixgb_buffer *tx_buffer; struct ixgb_tx_desc *tx_desc; IXGB_LOCK_ASSERT(adapter); if (adapter->num_tx_desc_avail == adapter->num_tx_desc) return; #ifdef _SV_ adapter->clean_tx_interrupts++; #endif num_avail = adapter->num_tx_desc_avail; i = adapter->oldest_used_tx_desc; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) { tx_desc->status = 0; num_avail++; if (tx_buffer->m_head) { bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } if (++i == adapter->num_tx_desc) i = 0; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; } adapter->oldest_used_tx_desc = i; /* * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that * it is OK to send packets. If there are no pending descriptors, * clear the timeout. Otherwise, if some descriptors have been freed, * restart the timeout. 
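*
* For reference, the reclaim loop above is the standard ring-consumer
* idiom: the hardware sets IXGB_TX_DESC_STATUS_DD ("descriptor done")
* after DMA completes, and software walks forward until it hits the
* first descriptor still owned by the hardware. The wrap-around step
* in miniature (sketch, hypothetical names):
*
*	while (desc[i].status & DONE_BIT) {
*		desc[i].status = 0;	(reclaim the slot)
*		if (++i == ring_size)
*			i = 0;		(wrap at the end)
*	}
*	oldest = i;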
*/ if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) { struct ifnet *ifp = adapter->ifp; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (num_avail == adapter->num_tx_desc) adapter->tx_timer = 0; else if (num_avail == adapter->num_tx_desc_avail) adapter->tx_timer = IXGB_TX_TIMEOUT; } adapter->num_tx_desc_avail = num_avail; return; } /********************************************************************* * * Get a buffer from the system mbuf pool. * **********************************************************************/ static int ixgb_get_buf(int i, struct adapter * adapter, struct mbuf * nmp) { register struct mbuf *mp = nmp; struct ixgb_buffer *rx_buffer; struct ifnet *ifp; bus_addr_t paddr; int error; ifp = adapter->ifp; if (mp == NULL) { mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { adapter->mbuf_alloc_failed++; return (ENOBUFS); } mp->m_len = mp->m_pkthdr.len = MCLBYTES; } else { mp->m_len = mp->m_pkthdr.len = MCLBYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } if (ifp->if_mtu <= ETHERMTU) { m_adj(mp, ETHER_ALIGN); } rx_buffer = &adapter->rx_buffer_area[i]; /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, mtod(mp, void *), mp->m_len, ixgb_dmamap_cb, &paddr, 0); if (error) { m_free(mp); return (error); } rx_buffer->m_head = mp; adapter->rx_desc_base[i].buff_addr = htole64(paddr); bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffers * that we'll need is equal to the number of receive descriptors * that we've allocated.
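*
* A note on ixgb_get_buf() above: m_adj(mp, ETHER_ALIGN) trims
* ETHER_ALIGN (two) bytes from the front of the cluster so that the
* 14-byte Ethernet header ends on a 4-byte boundary and the IP header
* behind it lands naturally aligned:
*
*	offset 0:  2 pad bytes
*	offset 2:  14-byte Ethernet header
*	offset 16: IP header, 32-bit aligned
*
* As the MTU test shows, the driver only does this for standard-size
* frames.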
* **********************************************************************/ static int ixgb_allocate_receive_structures(struct adapter * adapter) { int i, error; struct ixgb_buffer *rx_buffer; if (!(adapter->rx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(adapter->dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } bzero(adapter->rx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_rx_desc); error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->rxtag); if (error != 0) { device_printf(adapter->dev, "ixgb_allocate_receive_structures: " "bus_dma_tag_create failed; error %u\n", error); goto fail_0; } rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &rx_buffer->map); if (error != 0) { device_printf(adapter->dev, "ixgb_allocate_receive_structures: " "bus_dmamap_create failed; error %u\n", error); goto fail_1; } } for (i = 0; i < adapter->num_rx_desc; i++) { if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { adapter->rx_buffer_area[i].m_head = NULL; adapter->rx_desc_base[i].buff_addr = 0; return (ENOBUFS); } } return (0); fail_1: bus_dma_tag_destroy(adapter->rxtag); fail_0: adapter->rxtag = NULL; free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; return (error); } /********************************************************************* * * Allocate and initialize receive structures. * **********************************************************************/ static int ixgb_setup_receive_structures(struct adapter * adapter) { bzero((void *)adapter->rx_desc_base, (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc); if (ixgb_allocate_receive_structures(adapter)) return ENOMEM; /* Setup our descriptor pointers */ adapter->next_rx_desc_to_check = 0; adapter->next_rx_desc_to_use = 0; return (0); } /********************************************************************* * * Enable receive unit. 
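*
* Background for the register writes below (the usual head/tail ring
* convention, sketched from how this driver uses it rather than from
* chip documentation): RDH is where the hardware will write the next
* received frame, and RDT is the last descriptor software has made
* available. Descriptors between head and tail belong to the hardware;
* the rest belong to the driver. Initialization hands the hardware
* nearly the whole ring (RDH = 0, RDT = num_rx_desc - 1), and the
* replenish path in ixgb_process_receive_interrupts() chases the tail
* forward as buffers are refilled, never letting it catch the head,
* so "full" and "empty" stay distinguishable.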
* **********************************************************************/ static void ixgb_initialize_receive_unit(struct adapter * adapter) { u_int32_t reg_rctl; u_int32_t reg_rxcsum; u_int32_t reg_rxdctl; struct ifnet *ifp; u_int64_t rdba = adapter->rxdma.dma_paddr; ifp = adapter->ifp; /* * Make sure receives are disabled while setting up the descriptor * ring */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN); /* Set the Receive Delay Timer Register */ IXGB_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); /* Setup the Base and Length of the Rx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * sizeof(struct ixgb_rx_desc)); /* Setup the HW Rx Head and Tail Descriptor Pointers */ IXGB_WRITE_REG(&adapter->hw, RDH, 0); IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT; IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl); adapter->raidc = 1; if (adapter->raidc) { uint32_t raidc; uint8_t poll_threshold; #define IXGB_RAIDC_POLL_DEFAULT 120 poll_threshold = ((adapter->num_rx_desc - 1) >> 3); poll_threshold >>= 1; poll_threshold &= 0x3F; raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE | (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) | (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) | poll_threshold; IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc); } /* Enable Receive Checksum Offload for TCP and UDP ? */ if (ifp->if_capenable & IFCAP_RXCSUM) { reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM); reg_rxcsum |= IXGB_RXCSUM_TUOFL; IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); } /* Setup the Receive Control Register */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC | IXGB_RCTL_CFF | (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); switch (adapter->rx_buffer_len) { default: case IXGB_RXBUFFER_2048: reg_rctl |= IXGB_RCTL_BSIZE_2048; break; case IXGB_RXBUFFER_4096: reg_rctl |= IXGB_RCTL_BSIZE_4096; break; case IXGB_RXBUFFER_8192: reg_rctl |= IXGB_RCTL_BSIZE_8192; break; case IXGB_RXBUFFER_16384: reg_rctl |= IXGB_RCTL_BSIZE_16384; break; } reg_rctl |= IXGB_RCTL_RXEN; /* Enable Receives */ IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * * Free receive related data structures. 
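*
* The RAIDC poll-threshold arithmetic above amounts to
* (num_rx_desc - 1) / 16, clipped to six bits. For example, with 256
* receive descriptors:
*
*	255 >> 3 = 31;  31 >> 1 = 15;  15 & 0x3F = 15
*
* so the threshold field is programmed to about one sixteenth of the
* ring size.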
* **********************************************************************/ static void ixgb_free_receive_structures(struct adapter * adapter) { struct ixgb_buffer *rx_buffer; int i; INIT_DEBUGOUT("free_receive_structures: begin"); if (adapter->rx_buffer_area != NULL) { rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { if (rx_buffer->map != NULL) { bus_dmamap_unload(adapter->rxtag, rx_buffer->map); bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); } if (rx_buffer->m_head != NULL) m_freem(rx_buffer->m_head); rx_buffer->m_head = NULL; } } if (adapter->rx_buffer_area != NULL) { free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; } if (adapter->rxtag != NULL) { bus_dma_tag_destroy(adapter->rxtag); adapter->rxtag = NULL; } return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * *********************************************************************/ static int ixgb_process_receive_interrupts(struct adapter * adapter, int count) { struct ifnet *ifp; struct mbuf *mp; #if __FreeBSD_version < 500000 struct ether_header *eh; #endif int eop = 0; int len; u_int8_t accept_frame = 0; int i; int next_to_use = 0; int eop_desc; int rx_npkts = 0; /* Pointer to the receive descriptor being examined. */ struct ixgb_rx_desc *current_desc; IXGB_LOCK_ASSERT(adapter); ifp = adapter->ifp; i = adapter->next_rx_desc_to_check; next_to_use = adapter->next_rx_desc_to_use; eop_desc = adapter->next_rx_desc_to_check; current_desc = &adapter->rx_desc_base[i]; if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) { #ifdef _SV_ adapter->no_pkts_avail++; #endif return (rx_npkts); } while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) { mp = adapter->rx_buffer_area[i].m_head; bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, BUS_DMASYNC_POSTREAD); accept_frame = 1; if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) { count--; eop = 1; } else { eop = 0; } len = current_desc->length; if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE)) { accept_frame = 0; } if (accept_frame) { /* Assign correct length to the current fragment */ mp->m_len = len; if (adapter->fmp == NULL) { mp->m_pkthdr.len = len; adapter->fmp = mp; /* Store the first mbuf */ adapter->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; adapter->lmp->m_next = mp; adapter->lmp = adapter->lmp->m_next; adapter->fmp->m_pkthdr.len += len; } if (eop) { eop_desc = i; adapter->fmp->m_pkthdr.rcvif = ifp; #if __FreeBSD_version < 500000 eh = mtod(adapter->fmp, struct ether_header *); /* Remove ethernet header from mbuf */ m_adj(adapter->fmp, sizeof(struct ether_header)); ixgb_receive_checksum(adapter, current_desc, adapter->fmp); if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(eh, adapter->fmp, current_desc->special); else ether_input(ifp, eh, adapter->fmp); #else ixgb_receive_checksum(adapter, current_desc, adapter->fmp); #if __FreeBSD_version < 700000 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(ifp, adapter->fmp, current_desc->special); #else if (current_desc->status & IXGB_RX_DESC_STATUS_VP) { adapter->fmp->m_pkthdr.ether_vtag = current_desc->special; adapter->fmp->m_flags |= 
M_VLANTAG; } #endif if (adapter->fmp != NULL) { IXGB_UNLOCK(adapter); (*ifp->if_input) (ifp, adapter->fmp); IXGB_LOCK(adapter); rx_npkts++; } #endif adapter->fmp = NULL; adapter->lmp = NULL; } adapter->rx_buffer_area[i].m_head = NULL; } else { adapter->dropped_pkts++; if (adapter->fmp != NULL) m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; } /* Zero out the receive descriptor's status */ current_desc->status = 0; /* Advance our pointers to the next descriptor */ if (++i == adapter->num_rx_desc) { i = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_check = i; if (--i < 0) i = (adapter->num_rx_desc - 1); /* * 82597EX: Workaround for a redundant write back in the receive descriptor ring * (causes memory corruption). Avoid using and re-submitting the most recently * received RX descriptor back to hardware. * * if (last written-back descriptor == EOP-bit-set descriptor) * then avoid re-submitting the most recently received RX descriptor * back to hardware. * if (last written-back descriptor != EOP-bit-set descriptor) * then avoid re-submitting the most recently received RX descriptors * until the last EOP-bit-set descriptor. */ if (eop_desc != i) { if (++eop_desc == adapter->num_rx_desc) eop_desc = 0; i = eop_desc; } /* Replenish the descriptors with new mbufs until the last EOP-bit-set descriptor */ while (next_to_use != i) { current_desc = &adapter->rx_desc_base[next_to_use]; if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { mp = adapter->rx_buffer_area[next_to_use].m_head; ixgb_get_buf(next_to_use, adapter, mp); } else { if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS) break; } /* Advance our pointers to the next descriptor */ if (++next_to_use == adapter->num_rx_desc) { next_to_use = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_use = next_to_use; if (--next_to_use < 0) next_to_use = (adapter->num_rx_desc - 1); /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */ IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use); return (rx_npkts); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of the checksum so that the * stack doesn't spend time verifying it. * *********************************************************************/ static void ixgb_receive_checksum(struct adapter * adapter, struct ixgb_rx_desc * rx_desc, struct mbuf * mp) { if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) { /* Did it pass? */ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else { mp->m_pkthdr.csum_flags = 0; } } if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) { /* Did it pass?
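*
* (How these flags read on the way up the stack: CSUM_IP_CHECKED says
* the hardware examined the IP header checksum and CSUM_IP_VALID says
* it was good; CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data set to
* 0xffff tells the TCP/UDP input path that the full checksum,
* pseudo-header included, has already been verified, so no software
* checksum pass is needed.)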
*/ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } return; } static void ixgb_enable_vlans(struct adapter * adapter) { uint32_t ctrl; ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); ctrl |= IXGB_CTRL0_VME; IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); return; } static void ixgb_enable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO)); return; } static void ixgb_disable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMC, ~0); return; } void ixgb_write_pci_cfg(struct ixgb_hw * hw, uint32_t reg, uint16_t * value) { pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg, *value, 2); } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void ixgb_update_stats_counters(struct adapter * adapter) { adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS); adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, 
JPRCL); adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); } static uint64_t ixgb_get_counter(struct ifnet *ifp, ift_counter cnt) { struct adapter *adapter; adapter = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: return (adapter->stats.gprcl); case IFCOUNTER_OPACKETS: return ( adapter->stats.gptcl); case IFCOUNTER_IBYTES: return (adapter->stats.gorcl); case IFCOUNTER_OBYTES: return (adapter->stats.gotcl); case IFCOUNTER_IMCASTS: return ( adapter->stats.mprcl); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.crcerrs + adapter->stats.rnbc + adapter->stats.mpc + adapter->stats.rlec); default: return (if_get_counter_default(ifp, cnt)); } } /********************************************************************** * * This routine is called only when ixgb_display_debug_stats is enabled. * This routine provides a way to take a look at important statistics * maintained by the driver and hardware. * **********************************************************************/ static void ixgb_print_hw_stats(struct adapter * adapter) { char buf_speed[100], buf_type[100]; ixgb_bus_speed bus_speed; ixgb_bus_type bus_type; device_t dev; dev = adapter->dev; #ifdef _SV_ device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail); device_printf(dev, "CleanTxInterrupts = %ld\n", adapter->clean_tx_interrupts); device_printf(dev, "ICR RXDMT0 = %lld\n", (long long)adapter->sv_stats.icr_rxdmt0); device_printf(dev, "ICR RXO = %lld\n", (long long)adapter->sv_stats.icr_rxo); device_printf(dev, "ICR RXT0 = %lld\n", (long long)adapter->sv_stats.icr_rxt0); device_printf(dev, "ICR TXDW = %lld\n", (long long)adapter->sv_stats.icr_TXDW); #endif /* _SV_ */ bus_speed = adapter->hw.bus.speed; bus_type = adapter->hw.bus.type; sprintf(buf_speed, bus_speed == ixgb_bus_speed_33 ? "33MHz" : bus_speed == ixgb_bus_speed_66 ? "66MHz" : bus_speed == ixgb_bus_speed_100 ? "100MHz" : bus_speed == ixgb_bus_speed_133 ? "133MHz" : "UNKNOWN"); device_printf(dev, "PCI_Bus_Speed = %s\n", buf_speed); sprintf(buf_type, bus_type == ixgb_bus_type_pci ? "PCI" : bus_type == ixgb_bus_type_pcix ? 
"PCI-X" : "UNKNOWN"); device_printf(dev, "PCI_Bus_Type = %s\n", buf_type); device_printf(dev, "Tx Descriptors not Avail1 = %ld\n", adapter->no_tx_desc_avail1); device_printf(dev, "Tx Descriptors not Avail2 = %ld\n", adapter->no_tx_desc_avail2); device_printf(dev, "Std Mbuf Failed = %ld\n", adapter->mbuf_alloc_failed); device_printf(dev, "Std Cluster Failed = %ld\n", adapter->mbuf_cluster_failed); device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc); device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc); device_printf(dev, "Receive No Buffers = %lld\n", (long long)adapter->stats.rnbc); device_printf(dev, "Receive length errors = %lld\n", (long long)adapter->stats.rlec); device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs); device_printf(dev, "Driver dropped packets = %ld\n", adapter->dropped_pkts); device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc); device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc); device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc); device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc); device_printf(dev, "Good Packets Rcvd = %lld\n", (long long)adapter->stats.gprcl); device_printf(dev, "Good Packets Xmtd = %lld\n", (long long)adapter->stats.gptcl); device_printf(dev, "Jumbo frames recvd = %lld\n", (long long)adapter->stats.jprcl); device_printf(dev, "Jumbo frames Xmtd = %lld\n", (long long)adapter->stats.jptcl); return; } static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS) { int error; int result; struct adapter *adapter; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *) arg1; ixgb_print_hw_stats(adapter); } return error; } Index: head/sys/dev/lmc/if_lmc.c =================================================================== --- head/sys/dev/lmc/if_lmc.c (revision 295789) +++ head/sys/dev/lmc/if_lmc.c (revision 295790) @@ -1,4588 +1,4588 @@ /* * $FreeBSD$ * * Copyright (c) 2002-2004 David Boggs. * All rights reserved. * * BSD License: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * GNU General Public License: * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Description: * * This is an open-source Unix device driver for PCI-bus WAN interface cards. * It sends and receives packets in HDLC frames over synchronous links. * A generic PC plus Unix plus some SBE/LMC cards makes an OPEN router. * This driver works with FreeBSD, NetBSD, OpenBSD, BSD/OS and Linux. * It has been tested on i386 (32-bit little-end), Sparc (64-bit big-end), * and Alpha (64-bit little-end) architectures. * * History and Authors: * * Ron Crane had the neat idea to use a Fast Ethernet chip as a PCI * interface and add an Ethernet-to-HDLC gate array to make a WAN card. * David Boggs designed the Ethernet-to-HDLC gate arrays and PC cards. * We did this at our company, LAN Media Corporation (LMC). * SBE Corp acquired LMC and continues to make the cards. * * Since the cards use Tulip Ethernet chips, we started with Matt Thomas' * ubiquitous "de" driver. Michael Graff stripped out the Ethernet stuff * and added HSSI stuff. Basil Gunn ported it to Solaris (lost) and * Rob Braun ported it to Linux. Andrew Stanley-Jones added support * for three more cards and wrote the first version of lmcconfig. * During 2002-5 David Boggs rewrote it and now feels responsible for it. * * Responsible Individual: * * Send bug reports and improvements to . */ # include /* OS version */ # define IFNET 1 # include "opt_inet.h" /* INET */ # include "opt_inet6.h" /* INET6 */ # include "opt_netgraph.h" /* NETGRAPH */ # ifdef HAVE_KERNEL_OPTION_HEADERS # include "opt_device_polling.h" /* DEVICE_POLLING */ # endif # ifndef INET # define INET 0 # endif # ifndef INET6 # define INET6 0 # endif # ifndef NETGRAPH # define NETGRAPH 0 # endif # define P2P 0 /* not in FreeBSD */ # define NSPPP 1 /* No count devices in FreeBSD 5 */ # include "opt_bpf.h" /* DEV_BPF */ # define NBPFILTER DEV_BPF # define GEN_HDLC 0 /* not in FreeBSD */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if NETGRAPH # include # include # endif # if (INET || INET6) # include # include # endif # if NSPPP # include # endif # if NBPFILTER # include # endif /* and finally... */ # include /* The SROM is a generic 93C46 serial EEPROM (64 words by 16 bits). */ /* Data is set up before the RISING edge of CLK; CLK is parked low. 
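*
* For reference, the 93C46 command framing (from the usual datasheet
* description; worth cross-checking): a start bit of 1, a 2-bit
* opcode, then a 6-bit address. read_srom() below shifts the value 6
* out over 4 bits, i.e. 0-1-1-0: a dummy leading zero, the start bit,
* and opcode 10 (READ). write_srom() plays the same trick with 4
* (opcode 00 plus address 111111 = write-enable), 7 (opcode 11,
* ERASE) and 5 (opcode 01, WRITE).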
*/ static void shift_srom_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_SROM_DIN; /* DIN setup */ else csr &= ~TLP_SROM_DIN; /* DIN setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* Data is sampled on the RISING edge of CLK; CLK is parked low. */ static u_int16_t read_srom(softc_t *sc, u_int8_t addr) { int i; u_int32_t csr; u_int16_t data; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* CS rising edge prepares SROM for a new cycle. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 6, 4); /* issue read cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ for (data=0, i=16; i>=0; i--) /* read ->17<- bits of data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* DOUT sampled */ data = (data<<1) | ((csr & TLP_SROM_DOUT) ? 1:0); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return data; } /* The SROM is formatted by the mfgr and should NOT be written! */ /* But lmcconfig can rewrite it in case it gets overwritten somehow. */ /* IOCTL SYSCALL: can sleep. */ static void write_srom(softc_t *sc, u_int8_t addr, u_int16_t data) { u_int32_t csr; int i; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* Issue write-enable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 4, 4); /* issue write enable cmd */ shift_srom_bits(sc, 63, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue erase command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 7, 4); /* issue erase cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 5, 4); /* issue write cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ shift_srom_bits(sc, data, 16); /* issue data */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write-disable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 4, 4); /* issue write disable cmd */ shift_srom_bits(sc, 0, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* Not all boards have BIOS roms. */ /* The BIOS ROM is an AMD 29F010 1Mbit (128K by 8) EEPROM. */ static u_int8_t read_bios(softc_t *sc, u_int32_t addr) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. */ srom_mii = TLP_BIOS_SEL | TLP_BIOS_RD | TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Read the BIOS rom data. 
*/ srom_mii = READ_CSR(TLP_SROM_MII); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return (u_int8_t)srom_mii & 0xFF; } static void write_bios_phys(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. */ srom_mii = TLP_BIOS_SEL | TLP_BIOS_WR | TLP_MII_MDOE; /* Load the data into the data register. */ srom_mii = (srom_mii & 0xFFFFFF00) | (data & 0xFF); WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* IOCTL SYSCALL: can sleep. */ static void write_bios(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int8_t read_data; /* this sequence enables writing */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0xA0); write_bios_phys(sc, addr, data); /* Wait for the write operation to complete. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (DRIVER_DEBUG) printf("%s: write_bios() failed; rom addr=0x%x\n", NAME_UNIT, addr); return; } } read_data = read_bios(sc, addr); if (read_data == data) break; } } /* IOCTL SYSCALL: can sleep. */ static void erase_bios(softc_t *sc) { unsigned char read_data; /* This sequence enables erasing: */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x80); write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x10); /* Wait for the erase operation to complete. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (DRIVER_DEBUG) printf("%s: erase_bios() failed\n", NAME_UNIT); return; } } read_data = read_bios(sc, 0); if (read_data == 0xFF) break; } } /* MDIO is 3-stated between transactions. */ /* MDIO is set up before the RISING edge of MDC; MDC is parked low. */ static void shift_mii_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_MII_MDOUT; /* MDOUT setup */ else csr &= ~TLP_MII_MDOUT; /* MDOUT setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* The specification for the MII is IEEE Std 802.3 clause 22. */ /* MDIO is sampled on the RISING edge of MDC; MDC is parked low. */ static u_int16_t read_mii(softc_t *sc, u_int8_t regad) { int i; u_int32_t csr; u_int16_t data = 0; WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 2, 2); /* read op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ csr = READ_CSR(TLP_SROM_MII); csr |= TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, csr); shift_mii_bits(sc, 0, 2); /* turn-around */ for (i=15; i>=0; i--) /* data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* MDIN sampled */ data = (data<<1) | ((csr & TLP_MII_MDIN) ?
1:0); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } return data; } static void write_mii(softc_t *sc, u_int8_t regad, u_int16_t data) { WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 1, 2); /* write op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ shift_mii_bits(sc, 2, 2); /* turn-around */ shift_mii_bits(sc, data, 16); /* data */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); if (regad == 16) sc->led_state = data; /* a small optimization */ } static void set_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 |= bits; write_mii(sc, 16, mii16); } static void clr_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~bits; write_mii(sc, 16, mii16); } static void set_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 |= bits; write_mii(sc, 17, mii17); } static void clr_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 &= ~bits; write_mii(sc, 17, mii17); } /* * Watchdog code is more readable if it refreshes LEDs * once a second whether they need it or not. * But MII refs take 150 uSecs each, so remember the last value * written to MII16 and avoid LED writes that do nothing. */ static void led_off(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == led) return; set_mii16_bits(sc, led); } static void led_on(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == 0) return; clr_mii16_bits(sc, led); } static void led_inv(softc_t *sc, u_int16_t led) { u_int16_t mii16 = read_mii(sc, 16); mii16 ^= led; write_mii(sc, 16, mii16); } /* * T1 & T3 framer registers are accessed through MII regs 17 & 18. * Write the address to MII reg 17 then R/W data through MII reg 18. * The hardware interface is an Intel-style 8-bit muxed A/D bus. */ static void write_framer(softc_t *sc, u_int16_t addr, u_int8_t data) { write_mii(sc, 17, addr); write_mii(sc, 18, data); } static u_int8_t read_framer(softc_t *sc, u_int16_t addr) { write_mii(sc, 17, addr); return (u_int8_t)read_mii(sc, 18); } /* Tulip's hardware implementation of General Purpose IO * (GPIO) pins makes life difficult for software. * Bits 7-0 in the Tulip GPIO CSR are used for two purposes * depending on the state of bit 8. * If bit 8 is 0 then bits 7-0 are "data" bits. * If bit 8 is 1 then bits 7-0 are "direction" bits. * If a direction bit is one, the data bit is an output. * The problem is that the direction bits are WRITE-ONLY. * Software must remember the direction bits in a shadow copy. * (sc->gpio_dir) in order to change some but not all of the bits. * All accesses to the Tulip GPIO register use these five procedures. 
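*
* The shadow-copy technique in brief: because the direction bits
* cannot be read back, every change is a read-modify-write of the
* software copy, which is then written out whole:
*
*	sc->gpio_dir |= bits;   (or: sc->gpio_dir &= ~bits;)
*	WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | sc->gpio_dir);
*
* TLP_GPIO_DIR (bit 8) marks the write as a "direction" write;
* data-mode writes leave the remembered directions untouched.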
*/ static void make_gpio_input(softc_t *sc, u_int32_t bits) { sc->gpio_dir &= ~bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static void make_gpio_output(softc_t *sc, u_int32_t bits) { sc->gpio_dir |= bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static u_int32_t read_gpio(softc_t *sc) { return READ_CSR(TLP_GPIO); } static void set_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) | bits) & 0xFF); } static void clr_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) & ~bits) & 0xFF); } /* Reset ALL of the flip-flops in the gate array to zero. */ /* This does NOT change the gate array programming. */ /* Called during initialization so it must not sleep. */ static void reset_xilinx(softc_t *sc) { /* Drive RESET low to force initialization. */ clr_gpio_bits(sc, GPIO_RESET); make_gpio_output(sc, GPIO_RESET); /* Hold RESET low for more than 10 uSec. */ DELAY(50); /* Done with RESET; make it an input. */ make_gpio_input(sc, GPIO_RESET); } /* Load Xilinx gate array program from on-board rom. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static void load_xilinx_from_rom(softc_t *sc) { int i; /* Drive MODE low to load from ROM rather than GPIO. */ clr_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_DP | GPIO_RESET); /* BUSY-WAIT for Xilinx chip to configure itself from ROM bits. */ for (i=0; i<100; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_DP) == 0) SLEEP(10000); /* Done with MODE; make it an input. */ make_gpio_input(sc, GPIO_MODE); } /* Load the Xilinx gate array program from userland bits. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static int load_xilinx_from_file(softc_t *sc, char *addr, u_int32_t len) { char *data; int i, j, error; /* Get some pages to hold the Xilinx bits; biggest file is < 6 KB. */ if (len > 8192) return EFBIG; /* too big */ data = malloc(len, M_DEVBUF, M_WAITOK); if (data == NULL) return ENOMEM; /* Copy the Xilinx bits from userland. */ if ((error = copyin(addr, data, len))) { free(data, M_DEVBUF); return error; } /* Drive MODE high to load from GPIO rather than ROM. */ set_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_RESET | GPIO_DP); /* BUSY-WAIT for Xilinx chip to clear its config memory. */ make_gpio_input(sc, GPIO_INIT); for (i=0; i<10000; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_INIT)==0) SLEEP(10000); /* Configure CLK and DATA as outputs. */ set_gpio_bits(sc, GPIO_CLK); /* park CLK high */ make_gpio_output(sc, GPIO_CLK | GPIO_DATA); /* Write bits to Xilinx; CLK is parked HIGH. */ /* DATA is set up before the RISING edge of CLK. 
*/ for (i=0; i<len; i++) for (j=0; j<8; j++) { if (data[i] & (1<<j)) set_gpio_bits(sc, GPIO_DATA); /* DATA setup */ else clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */ clr_gpio_bits(sc, GPIO_CLK); /* CLK falling edge */ set_gpio_bits(sc, GPIO_CLK); /* CLK rising edge */ } /* BUSY-WAIT for Xilinx chip to configure itself from the file bits. */ for (i=0; i<100; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_DP) == 0) SLEEP(10000); /* Done with MODE; make it an input. */ make_gpio_input(sc, GPIO_MODE); /* Stop driving DATA & CLK; pullups/pulldowns take over. */ make_gpio_input(sc, GPIO_DATA | GPIO_CLK); free(data, M_DEVBUF); return 0; } /* Program the AV9110 clock synthesizer found on SSI and cPCI-HSSI cards. */ /* DATA is set up before the RISING edge of CLK; CLK is parked low. */ static void shift_synth_bits(softc_t *sc, u_int32_t data, u_int32_t len) { int i; for (i=0; i<len; i++) { /* LSB first */ if (data & (1<<i)) set_gpio_bits(sc, GPIO_DATA); /* DATA setup */ else clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */ set_gpio_bits(sc, GPIO_CLK); /* CLK rising edge */ clr_gpio_bits(sc, GPIO_CLK); /* CLK falling edge */ } } static void write_synth(softc_t *sc, struct synth *synth) { /* SSI cards have a programmable prescaler. */ if (sc->status.card_type == TLP_CSID_SSI) { if (synth->prescale == 9) /* divide by 512 */ set_mii17_bits(sc, MII17_SSI_PRESCALE); else /* divide by 32 */ clr_mii17_bits(sc, MII17_SSI_PRESCALE); } clr_gpio_bits(sc, GPIO_DATA | GPIO_CLK); make_gpio_output(sc, GPIO_DATA | GPIO_CLK); /* SYNTH is a low-true chip enable for the AV9110 chip. */ set_gpio_bits(sc, GPIO_SSI_SYNTH); make_gpio_output(sc, GPIO_SSI_SYNTH); clr_gpio_bits(sc, GPIO_SSI_SYNTH); /* Serially shift the command into the AV9110 chip. */ shift_synth_bits(sc, synth->n, 7); shift_synth_bits(sc, synth->m, 7); shift_synth_bits(sc, synth->v, 1); shift_synth_bits(sc, synth->x, 2); shift_synth_bits(sc, synth->r, 2); shift_synth_bits(sc, 0x16, 5); /* enable clk/x output */ /* SYNTH (chip enable) going high ends the command. */ set_gpio_bits(sc, GPIO_SSI_SYNTH); make_gpio_input(sc, GPIO_SSI_SYNTH); /* Stop driving serial-related signals; pullups/pulldowns take over. */ make_gpio_input(sc, GPIO_DATA | GPIO_CLK); /* remember the new synthesizer parameters */ if (&sc->config.synth != synth) sc->config.synth = *synth; } /* Write a command to the DAC controlling the VCXO on some T3 adapters. */ /* The DAC is a TI-TLV5636: 12-bit resolution and a serial interface. */ /* DATA is set up before the FALLING edge of CLK. CLK is parked HIGH. */ static void write_dac(softc_t *sc, u_int16_t data) { int i; /* Prepare to use DATA and CLK. */ set_gpio_bits(sc, GPIO_DATA | GPIO_CLK); make_gpio_output(sc, GPIO_DATA | GPIO_CLK); /* High-to-low transition prepares DAC for new value. */ set_gpio_bits(sc, GPIO_T3_DAC); make_gpio_output(sc, GPIO_T3_DAC); clr_gpio_bits(sc, GPIO_T3_DAC); /* Serially shift command bits into DAC. */ for (i=0; i<16; i++) { /* MSB first */ if ((data & (1<<(15-i))) != 0) set_gpio_bits(sc, GPIO_DATA); /* DATA setup */ else clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */ clr_gpio_bits(sc, GPIO_CLK); /* CLK falling edge */ set_gpio_bits(sc, GPIO_CLK); /* CLK rising edge */ } /* Done with DAC; make it an input; loads new value into DAC. */ set_gpio_bits(sc, GPIO_T3_DAC); make_gpio_input(sc, GPIO_T3_DAC); /* Stop driving serial-related signals; pullups/pulldowns take over. */ make_gpio_input(sc, GPIO_DATA | GPIO_CLK); } /* begin HSSI card code */ /* Must not sleep. */ static void hssi_config(softc_t *sc) { if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = READ_PCI_CFG(sc, TLP_CSID); sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_ST; sc->config.dte_dce = CFG_DTE; sc->config.synth.n = 52; /* 52.000 Mbs */ sc->config.synth.m = 5; sc->config.synth.v = 0; sc->config.synth.x = 0; sc->config.synth.r = 0; sc->config.synth.prescale = 2; } /* set CRC length */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_HSSI_CRC32); else clr_mii16_bits(sc, MII16_HSSI_CRC32); /* Assert pin LA in HSSI conn: ask modem for local loop. */ if (sc->config.loop_back == CFG_LOOP_LL) set_mii16_bits(sc, MII16_HSSI_LA); else clr_mii16_bits(sc, MII16_HSSI_LA); /* Assert pin LB in HSSI conn: ask modem for remote loop. */ if (sc->config.loop_back == CFG_LOOP_RL) set_mii16_bits(sc, MII16_HSSI_LB); else clr_mii16_bits(sc, MII16_HSSI_LB); if (sc->status.card_type == TLP_CSID_HSSI) { /* set TXCLK src */ if (sc->config.tx_clk_src == CFG_CLKMUX_ST) set_gpio_bits(sc, GPIO_HSSI_TXCLK); else clr_gpio_bits(sc, GPIO_HSSI_TXCLK); make_gpio_output(sc, GPIO_HSSI_TXCLK); } else if (sc->status.card_type == TLP_CSID_HSSIc) { /* cPCI HSSI rev C has extra features */ /* Set TXCLK source.
*/ u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_CLKMUX; mii16 |= (sc->config.tx_clk_src&3)<<13; write_mii(sc, 16, mii16); /* cPCI HSSI implements loopback towards the net. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_HSSI_LOOP); else clr_mii16_bits(sc, MII16_HSSI_LOOP); /* Set DTE/DCE mode. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_HSSI_DCE); else clr_gpio_bits(sc, GPIO_HSSI_DCE); make_gpio_output(sc, GPIO_HSSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); } } static void hssi_ident(softc_t *sc) { } /* Called once a second; must not sleep. */ static int hssi_watchdog(softc_t *sc) { u_int16_t mii16 = read_mii(sc, 16) & MII16_HSSI_MODEM; int link_status = STATUS_UP; led_inv(sc, MII16_HSSI_LED_UL); /* Software is alive. */ led_on(sc, MII16_HSSI_LED_LL); /* always on (SSI cable) */ /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_HSSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_HSSI_LED_UR); /* Is the modem ready? */ if ((mii16 & MII16_HSSI_CA) == 0) { led_off(sc, MII16_HSSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_HSSI_LED_LR); /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: TA=%s CA=%s LA=%s LB=%s LC=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_HSSI_TA) ? on : off, (mii16 & MII16_HSSI_CA) ? on : off, (mii16 & MII16_HSSI_LA) ? on : off, (mii16 & MII16_HSSI_LB) ? on : off, (mii16 & MII16_HSSI_LC) ? on : off, (mii16 & MII16_HSSI_TM) ? on : off); } /* SNMP one-second-report */ sc->status.snmp.hssi.sigs = mii16 & MII16_HSSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int hssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_MODEM; mii16 |= (MII16_HSSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, MII16_HSSI_TA); else clr_mii16_bits(sc, MII16_HSSI_TA); } else error = EINVAL; return error; } /* begin DS3 card code */ /* Must not sleep. */ static void t3_config(softc_t *sc) { int i; u_int8_t ctl1; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T3; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.format = CFG_FORMAT_T3CPAR; sc->config.cable_len = 10; /* meters */ sc->config.scrambler = CFG_SCRAM_DL_KEN; sc->config.tx_clk_src = CFG_CLKMUX_INT; /* Center the VCXO -- get within 20 PPM of 44736000. */ write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, 2048); /* range is 0..4095 */ } /* Set cable length. */ if (sc->config.cable_len > 30) clr_mii16_bits(sc, MII16_DS3_ZERO); else set_mii16_bits(sc, MII16_DS3_ZERO); /* Set payload scrambler polynomial. */ if (sc->config.scrambler == CFG_SCRAM_LARS) set_mii16_bits(sc, MII16_DS3_POLY); else clr_mii16_bits(sc, MII16_DS3_POLY); /* Set payload scrambler on/off. */ if (sc->config.scrambler == CFG_SCRAM_OFF) clr_mii16_bits(sc, MII16_DS3_SCRAM); else set_mii16_bits(sc, MII16_DS3_SCRAM); /* Set CRC length. 
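*
* (An aside on the ioctl pattern in hssi_ioctl() above, repeated later
* in ssi_ioctl(): updating just the modem-signal field of MII register
* 16 is a classic masked read-modify-write,
*
*	mii16 = (mii16 & ~FIELD) | (new_bits & FIELD);
*
* which replaces the field while preserving every other bit in the
* register, e.g. the LED state that shares it.)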
*/ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_DS3_CRC32); else clr_mii16_bits(sc, MII16_DS3_CRC32); /* Loopback towards host thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else clr_mii16_bits(sc, MII16_DS3_TRLBK); /* Loopback towards network thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (sc->config.loop_back == CFG_LOOP_DUAL) set_mii16_bits(sc, MII16_DS3_LNLBK); else clr_mii16_bits(sc, MII16_DS3_LNLBK); /* Configure T3 framer chip; write EVERY writeable register. */ ctl1 = CTL1_SER | CTL1_XTX; if (sc->config.loop_back == CFG_LOOP_INWARD) ctl1 |= CTL1_3LOOP; if (sc->config.loop_back == CFG_LOOP_DUAL) ctl1 |= CTL1_3LOOP; if (sc->config.format == CFG_FORMAT_T3M13) ctl1 |= CTL1_M13MODE; write_framer(sc, T3CSR_CTL1, ctl1); write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); write_framer(sc, T3CSR_CTL8, CTL8_FBEC); write_framer(sc, T3CSR_CTL12, CTL12_DLCB1 | CTL12_C21 | CTL12_MCB1); write_framer(sc, T3CSR_DBL_FEAC, 0); write_framer(sc, T3CSR_CTL14, CTL14_RGCEN | CTL14_TGCEN); write_framer(sc, T3CSR_INTEN, 0); write_framer(sc, T3CSR_CTL20, CTL20_CVEN); /* Clear error counters and latched error bits */ /* that may have happened while initializing. */ for (i=0; i<21; i++) read_framer(sc, i); } static void t3_ident(softc_t *sc) { printf(", TXC03401 rev B"); } /* Called once a second; must not sleep. */ static int t3_watchdog(softc_t *sc) { u_int16_t CV; u_int8_t CERR, PERR, MERR, FERR, FEBE; u_int8_t ctl1, stat16, feac; int link_status = STATUS_UP; u_int16_t mii16; /* Read the alarm registers. */ ctl1 = read_framer(sc, T3CSR_CTL1); stat16 = read_framer(sc, T3CSR_STAT16); mii16 = read_mii(sc, 16); /* Always ignore the RTLOC alarm bit. */ stat16 &= ~STAT16_RTLOC; /* Software is alive. */ led_inv(sc, MII16_DS3_LED_GRN); /* Receiving Alarm Indication Signal (AIS). */ if ((stat16 & STAT16_RAIS) != 0) /* receiving ais */ led_on(sc, MII16_DS3_LED_BLU); else if (ctl1 & CTL1_TXAIS) /* sending ais */ led_inv(sc, MII16_DS3_LED_BLU); else led_off(sc, MII16_DS3_LED_BLU); /* Receiving Remote Alarm Indication (RAI). */ if ((stat16 & STAT16_XERR) != 0) /* receiving rai */ led_on(sc, MII16_DS3_LED_YEL); else if ((ctl1 & CTL1_XTX) == 0) /* sending rai */ led_inv(sc, MII16_DS3_LED_YEL); else led_off(sc, MII16_DS3_LED_YEL); /* If certain status bits are set then the link is 'down'. */ /* The bad bits are: rxlos rxoof rxais rxidl xerr. */ if ((stat16 & ~(STAT16_FEAC | STAT16_SEF)) != 0) link_status = STATUS_DOWN; /* Declare local Red Alarm if the link is down. */ if (link_status == STATUS_DOWN) led_on(sc, MII16_DS3_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_DS3_LED_RED); else led_off(sc, MII16_DS3_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && ((stat16 & ~STAT16_FEAC) != sc->last_stat16)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOS=%s ROOF=%s RAIS=%s RIDL=%s SEF=%s XERR=%s\n", NAME_UNIT, (stat16 & STAT16_RLOS) ? on : off, (stat16 & STAT16_ROOF) ? on : off, (stat16 & STAT16_RAIS) ? on : off, (stat16 & STAT16_RIDL) ? on : off, (stat16 & STAT16_SEF) ? on : off, (stat16 & STAT16_XERR) ? on : off); } /* Check and print error counters if non-zero. 
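*
* The line-code-violation counter below is split across two 8-bit
* framer registers and reassembled as
*
*	CV = (CVHI << 8) + CVLO
*
* ('+' and '|' are interchangeable here, since the low byte can never
* carry into the high byte's bit positions).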
*/ CV = read_framer(sc, T3CSR_CVHI)<<8; CV += read_framer(sc, T3CSR_CVLO); PERR = read_framer(sc, T3CSR_PERR); CERR = read_framer(sc, T3CSR_CERR); FERR = read_framer(sc, T3CSR_FERR); MERR = read_framer(sc, T3CSR_MERR); FEBE = read_framer(sc, T3CSR_FEBE); /* CV is invalid during LOS. */ if ((stat16 & STAT16_RLOS)!=0) CV = 0; /* CERR & FEBE are invalid in M13 mode */ if (sc->config.format == CFG_FORMAT_T3M13) CERR = FEBE = 0; /* FEBE is invalid during AIS. */ if ((stat16 & STAT16_RAIS)!=0) FEBE = 0; if (DRIVER_DEBUG && (CV || PERR || CERR || FERR || MERR || FEBE)) printf("%s: CV=%u PERR=%u CERR=%u FERR=%u MERR=%u FEBE=%u\n", NAME_UNIT, CV, PERR, CERR, FERR, MERR, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += CV; sc->status.cntrs.par_errs += PERR; sc->status.cntrs.cpar_errs += CERR; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.mfrm_errs += MERR; sc->status.cntrs.febe_errs += FEBE; /* Check for FEAC messages (FEAC not defined in M13 mode). */ if (FORMAT_T3CPAR && (stat16 & STAT16_FEAC)) do { feac = read_framer(sc, T3CSR_FEAC_STK); if ((feac & FEAC_STK_VALID)==0) break; /* Ignore RxFEACs while a far end loopback has been requested. */ if ((sc->status.snmp.t3.line & TLOOP_FAR_LINE)!=0) continue; switch (feac & FEAC_STK_FEAC) { case T3BOP_LINE_UP: break; case T3BOP_LINE_DOWN: break; case T3BOP_LOOP_DS3: { if (sc->last_FEAC == T3BOP_LINE_DOWN) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' FEAC msg\n", NAME_UNIT); clr_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 0; } if (sc->last_FEAC == T3BOP_LINE_UP) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' FEAC msg\n", NAME_UNIT); set_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 300; } break; } case T3BOP_OOF: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOF' FEAC msg\n", NAME_UNIT); break; } case T3BOP_IDLE: { if (DRIVER_DEBUG) printf("%s: Received a 'far end IDL' FEAC msg\n", NAME_UNIT); break; } case T3BOP_AIS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end AIS' FEAC msg\n", NAME_UNIT); break; } case T3BOP_LOS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOS' FEAC msg\n", NAME_UNIT); break; } default: { if (DRIVER_DEBUG) printf("%s: Received a 'type 0x%02X' FEAC msg\n", NAME_UNIT, feac & FEAC_STK_FEAC); break; } } sc->last_FEAC = feac & FEAC_STK_FEAC; } while ((feac & FEAC_STK_MORE) != 0); stat16 &= ~STAT16_FEAC; /* Send Service-Affecting priority FEAC messages */ if (((sc->last_stat16 ^ stat16) & 0xF0) && (FORMAT_T3CPAR)) { /* Transmit continuous FEACs */ write_framer(sc, T3CSR_CTL14, read_framer(sc, T3CSR_CTL14) & ~CTL14_FEAC10); if ((stat16 & STAT16_RLOS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_LOS); else if ((stat16 & STAT16_ROOF)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_OOF); else if ((stat16 & STAT16_RAIS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_AIS); else if ((stat16 & STAT16_RIDL)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_IDLE); else write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); } /* Start sending RAI, Remote Alarm Indication. */ if (((stat16 & STAT16_ROOF)!=0) && ((stat16 & STAT16_RLOS)==0) && ((sc->last_stat16 & STAT16_ROOF)==0)) write_framer(sc, T3CSR_CTL1, ctl1 &= ~CTL1_XTX); /* Stop sending RAI, Remote Alarm Indication. 
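*
* Alarm signalling in this watchdog is edge-triggered: each decision
* compares this second's stat16 against sc->last_stat16 and acts only
* on transitions, the usual change-detection idiom
*
*	changed = (last ^ now) & INTERESTING_BITS;
*
* (used literally in the FEAC block above), so a condition that
* persists is announced once rather than once per second.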
*/ else if (((stat16 & STAT16_ROOF)==0) && ((sc->last_stat16 & STAT16_ROOF)!=0)) write_framer(sc, T3CSR_CTL1, ctl1 |= CTL1_XTX); /* Start sending AIS, Alarm Indication Signal */ if (((stat16 & STAT16_RLOS)!=0) && ((sc->last_stat16 & STAT16_RLOS)==0)) { set_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL1, ctl1 | CTL1_TXAIS); } /* Stop sending AIS, Alarm Indication Signal */ else if (((stat16 & STAT16_RLOS)==0) && ((sc->last_stat16 & STAT16_RLOS)!=0)) { clr_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL1, ctl1 & ~CTL1_TXAIS); } /* Time out loopback requests. */ if (sc->loop_timer != 0) if (--sc->loop_timer == 0) if ((mii16 & MII16_DS3_LNLBK)!=0) { if (DRIVER_DEBUG) printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT); clr_mii16_bits(sc, MII16_DS3_LNLBK); /* line loopback off */ } /* SNMP error counters */ sc->status.snmp.t3.lcv = CV; sc->status.snmp.t3.pcv = PERR; sc->status.snmp.t3.ccv = CERR; sc->status.snmp.t3.febe = FEBE; /* SNMP Line Status */ sc->status.snmp.t3.line = 0; if ((ctl1 & CTL1_XTX)==0) sc->status.snmp.t3.line |= TLINE_TX_RAI; if (stat16 & STAT16_XERR) sc->status.snmp.t3.line |= TLINE_RX_RAI; if (ctl1 & CTL1_TXAIS) sc->status.snmp.t3.line |= TLINE_TX_AIS; if (stat16 & STAT16_RAIS) sc->status.snmp.t3.line |= TLINE_RX_AIS; if (stat16 & STAT16_ROOF) sc->status.snmp.t3.line |= TLINE_LOF; if (stat16 & STAT16_RLOS) sc->status.snmp.t3.line |= TLINE_LOS; if (stat16 & STAT16_SEF) sc->status.snmp.t3.line |= T3LINE_SEF; /* SNMP Loopback Status */ sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE; if (sc->config.loop_back == CFG_LOOP_TULIP) sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER; if (ctl1 & CTL1_3LOOP) sc->status.snmp.t3.loop |= TLOOP_NEAR_INWARD; if (mii16 & MII16_DS3_TRLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER; if (mii16 & MII16_DS3_LNLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_LINE; /*if (ctl12 & CTL12_RTPLOOP) sc->status.snmp.t3.loop |= TLOOP_NEAR_PAYLOAD; */ /* Remember this state until next time. */ sc->last_stat16 = stat16; /* If an INWARD loopback is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */ link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep. */ static void t3_send_dbl_feac(softc_t *sc, int feac1, int feac2) { u_int8_t tx_feac; int i; /* The FEAC transmitter could be sending a continuous */ /* FEAC msg when told to send a double FEAC message. */ /* So save the current state of the FEAC transmitter. */ tx_feac = read_framer(sc, T3CSR_TX_FEAC); /* Load second FEAC code and stop FEAC transmitter. */ write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE + feac2); /* FEAC transmitter sends 10 more FEACs and then stops. */ SLEEP(20000); /* sending one FEAC takes 1700 uSecs */ /* Load first FEAC code and start FEAC transmitter. */ write_framer(sc, T3CSR_DBL_FEAC, CTL13_DFEXEC + feac1); /* Wait for double FEAC sequence to complete -- about 70 ms. */ for (i=0; i<10; i++) /* max delay 100 ms */ if (read_framer(sc, T3CSR_DBL_FEAC) & CTL13_DFEXEC) SLEEP(10000); /* Flush received FEACs; don't respond to our own loop cmd! */ while (read_framer(sc, T3CSR_FEAC_STK) & FEAC_STK_VALID) DELAY(1); /* XXX HANG */ /* Restore previous state of the FEAC transmitter. */ /* If it was sending a continuous FEAC, it will resume. */ write_framer(sc, T3CSR_TX_FEAC, tx_feac); } /* IOCTL SYSCALL: can sleep. */ static int t3_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; switch (ioctl->cmd) { case IOCTL_SNMP_SEND: /* set opstatus?
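*
* (Protocol note: a far-end DS3 line loopback is requested by sending
* two FEAC codes back-to-back, which is what t3_send_dbl_feac() above
* arranges: T3BOP_LINE_UP announces "line loopback activate" and
* T3BOP_LOOP_DS3 names the DS3 line as the target; the same pairing
* with T3BOP_LINE_DOWN tears the loop down again. The TSEND_LINE and
* TSEND_RESET cases below drive exactly these two sequences.)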
*/ { if (sc->config.format != CFG_FORMAT_T3CPAR) error = EINVAL; else if (ioctl->data == TSEND_LINE) { sc->status.snmp.t3.loop |= TLOOP_FAR_LINE; t3_send_dbl_feac(sc, T3BOP_LINE_UP, T3BOP_LOOP_DS3); } else if (ioctl->data == TSEND_RESET) { t3_send_dbl_feac(sc, T3BOP_LINE_DOWN, T3BOP_LOOP_DS3); sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE; } else error = EINVAL; break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { if (ioctl->data == CFG_LOOP_NONE) { clr_mii16_bits(sc, MII16_DS3_FRAME); clr_mii16_bits(sc, MII16_DS3_TRLBK); clr_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) & ~CTL1_3LOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~(CTL12_RTPLOOP | CTL12_RTPLLEN)); } else if (ioctl->data == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (ioctl->data == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else if (ioctl->data == CFG_LOOP_INWARD) write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); else if (ioctl->data == CFG_LOOP_DUAL) { set_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); } else if (ioctl->data == CFG_LOOP_PAYLOAD) { set_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLLEN); DELAY(25); /* at least two frames (22 uS) */ write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~CTL12_RTPLLEN); } else error = EINVAL; break; } default: error = EINVAL; break; } return error; } /* begin SSI card code */ /* Must not sleep. */ static void ssi_config(softc_t *sc) { if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_SSI; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_ST; sc->config.dte_dce = CFG_DTE; sc->config.synth.n = 51; /* 1.536 MHz */ sc->config.synth.m = 83; sc->config.synth.v = 1; sc->config.synth.x = 1; sc->config.synth.r = 1; sc->config.synth.prescale = 4; } /* Disable the TX clock driver while programming the oscillator. */ clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); /* Set DTE/DCE mode. */ /* If DTE mode then DCD & TXC are received. */ /* If DCE mode then DCD & TXC are driven. */ /* Boards with MII rev=4.0 don't drive DCD. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_SSI_DCE); else clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_SSI_CRC32); else clr_mii16_bits(sc, MII16_SSI_CRC32); /* Loop towards host thru cable drivers and receivers. */ /* Asserts DCD at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_PINS) set_mii16_bits(sc, MII16_SSI_LOOP); else clr_mii16_bits(sc, MII16_SSI_LOOP); /* Assert pin LL in modem conn: ask modem for local loop. */ /* Asserts TM at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_LL) set_mii16_bits(sc, MII16_SSI_LL); else clr_mii16_bits(sc, MII16_SSI_LL); /* Assert pin RL in modem conn: ask modem for remote loop. */ if (sc->config.loop_back == CFG_LOOP_RL) set_mii16_bits(sc, MII16_SSI_RL); else clr_mii16_bits(sc, MII16_SSI_RL); } static void ssi_ident(softc_t *sc) { printf(", LTC1343/44"); } /* Called once a second; must not sleep. 
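Each card type supplies one of these; core_watchdog() invokes it through sc->card->watchdog with the TOP lock held, and the value returned here (STATUS_UP or STATUS_DOWN) becomes sc->status.oper_status, which gates the transmit path. Sleeping would stall the once-a-second tick, hence the rule.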
*/ static int ssi_watchdog(softc_t *sc) { u_int16_t cable; u_int16_t mii16 = read_mii(sc, 16) & MII16_SSI_MODEM; int link_status = STATUS_UP; /* Software is alive. */ led_inv(sc, MII16_SSI_LED_UL); /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_SSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_SSI_LED_UR); /* Check the external cable. */ cable = read_mii(sc, 17); cable = cable & MII17_SSI_CABLE_MASK; cable = cable >> MII17_SSI_CABLE_SHIFT; if (cable == 7) { led_off(sc, MII16_SSI_LED_LL); /* no cable */ link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LL); /* The unit at the other end of the cable is ready if: */ /* DTE mode and DCD pin is asserted */ /* DCE mode and DSR pin is asserted */ if (((sc->config.dte_dce == CFG_DTE) && ((mii16 & MII16_SSI_DCD)==0)) || ((sc->config.dte_dce == CFG_DCE) && ((mii16 & MII16_SSI_DSR)==0))) { led_off(sc, MII16_SSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LR); if (DRIVER_DEBUG && (cable != sc->status.cable_type)) printf("%s: SSI cable type changed to '%s'\n", NAME_UNIT, ssi_cables[cable]); sc->status.cable_type = cable; /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: DTR=%s DSR=%s RTS=%s CTS=%s DCD=%s RI=%s LL=%s RL=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_SSI_DTR) ? on : off, (mii16 & MII16_SSI_DSR) ? on : off, (mii16 & MII16_SSI_RTS) ? on : off, (mii16 & MII16_SSI_CTS) ? on : off, (mii16 & MII16_SSI_DCD) ? on : off, (mii16 & MII16_SSI_RI) ? on : off, (mii16 & MII16_SSI_LL) ? on : off, (mii16 & MII16_SSI_RL) ? on : off, (mii16 & MII16_SSI_TM) ? on : off); } /* SNMP one-second report */ sc->status.snmp.ssi.sigs = mii16 & MII16_SSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int ssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_SSI_MODEM; mii16 |= (MII16_SSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); else clr_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); } else error = EINVAL; return error; } /* begin T1E1 card code */ /* Must not sleep. */ static void t1_config(softc_t *sc) { int i; u_int8_t pulse, lbo, gain; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T1E1; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_INT; sc->config.format = CFG_FORMAT_T1ESF; sc->config.cable_len = 10; sc->config.time_slots = 0x01FFFFFE; sc->config.tx_pulse = CFG_PULSE_AUTO; sc->config.rx_gain = CFG_GAIN_AUTO; sc->config.tx_lbo = CFG_LBO_AUTO; /* Bt8370 occasionally powers up in a loopback mode. */ /* Data sheet says zero LOOP reg and do a s/w reset. */ write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */ write_framer(sc, Bt8370_CR0, 0x80); /* s/w reset */ for (i=0; i<10; i++) /* max delay 10 ms */ if (read_framer(sc, Bt8370_CR0) & 0x80) DELAY(1000); } /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_T1_CRC32); else clr_mii16_bits(sc, MII16_T1_CRC32); /* Invert HDLC payload data in SF/AMI mode. 
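The reasoning: HDLC zero-insertion never lets more than five one-bits in a row reach the line (six in a flag), so the inverted stream never carries more than six consecutive zeros -- far inside the T1 limit of fifteen -- and plain AMI needs no B8ZS substitution in SF mode.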
*/ /* HDLC stuff bits satisfy T1 pulse density. */ if (FORMAT_T1SF) set_mii16_bits(sc, MII16_T1_INVERT); else clr_mii16_bits(sc, MII16_T1_INVERT); /* Set the transmitter output impedance. */ if (FORMAT_E1ANY) set_mii16_bits(sc, MII16_T1_Z); /* 001:CR0 -- Control Register 0 - T1/E1 and frame format */ write_framer(sc, Bt8370_CR0, sc->config.format); /* 002:JAT_CR -- Jitter Attenuator Control Register */ if (sc->config.tx_clk_src == CFG_CLKMUX_RT) /* loop timing */ write_framer(sc, Bt8370_JAT_CR, 0xA3); /* JAT in RX path */ else { /* 64-bit elastic store; free-running JCLK and CLADO */ write_framer(sc, Bt8370_JAT_CR, 0x4B); /* assert jcenter */ write_framer(sc, Bt8370_JAT_CR, 0x43); /* release jcenter */ } /* 00C-013:IERn -- Interrupt Enable Registers */ for (i=Bt8370_IER7; i<=Bt8370_IER0; i++) write_framer(sc, i, 0); /* no interrupts; polled */ /* 014:LOOP -- loopbacks */ if (sc->config.loop_back == CFG_LOOP_PAYLOAD) write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD); else if (sc->config.loop_back == CFG_LOOP_LINE) write_framer(sc, Bt8370_LOOP, LOOP_LINE); else if (sc->config.loop_back == CFG_LOOP_OTHER) write_framer(sc, Bt8370_LOOP, LOOP_ANALOG); else if (sc->config.loop_back == CFG_LOOP_INWARD) write_framer(sc, Bt8370_LOOP, LOOP_FRAMER); else if (sc->config.loop_back == CFG_LOOP_DUAL) write_framer(sc, Bt8370_LOOP, LOOP_DUAL); else write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */ /* 015:DL3_TS -- Data Link 3 */ write_framer(sc, Bt8370_DL3_TS, 0x00); /* disabled */ /* 018:PIO -- Programmable I/O */ write_framer(sc, Bt8370_PIO, 0xFF); /* all pins are outputs */ /* 019:POE -- Programmable Output Enable */ write_framer(sc, Bt8370_POE, 0x00); /* all outputs are enabled */ /* 01A:CMUX -- Clock Input Mux */ if (sc->config.tx_clk_src == CFG_CLKMUX_EXT) write_framer(sc, Bt8370_CMUX, 0x0C); /* external timing */ else write_framer(sc, Bt8370_CMUX, 0x0F); /* internal timing */ /* 020:LIU_CR -- Line Interface Unit Config Register */ write_framer(sc, Bt8370_LIU_CR, 0xC1); /* reset LIU, squelch */ /* 022:RLIU_CR -- RX Line Interface Unit Config Reg */ /* Errata sheet says don't use freeze-short, but we do anyway! */ write_framer(sc, Bt8370_RLIU_CR, 0xB1); /* AGC=2048, Long Eye */ /* Select Rx sensitivity based on cable length. 
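CFG_GAIN_AUTO brackets sc->config.cable_len with the thresholds below: for example a configured length of 1500 selects CFG_GAIN_LONG, 50 selects CFG_GAIN_SHORT, and anything over 2000 selects CFG_GAIN_EXTEND, which also switches on the pre-equalizer at 028:PRE_EQ. An explicit rx_gain setting skips the bracketing entirely.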
*/ if ((gain = sc->config.rx_gain) == CFG_GAIN_AUTO) { if (sc->config.cable_len > 2000) gain = CFG_GAIN_EXTEND; else if (sc->config.cable_len > 1000) gain = CFG_GAIN_LONG; else if (sc->config.cable_len > 100) gain = CFG_GAIN_MEDIUM; else gain = CFG_GAIN_SHORT; } /* 024:VGA_MAX -- Variable Gain Amplifier Max gain */ write_framer(sc, Bt8370_VGA_MAX, gain); /* 028:PRE_EQ -- Pre Equalizer */ if (gain == CFG_GAIN_EXTEND) write_framer(sc, Bt8370_PRE_EQ, 0xE6); /* ON; thresh 6 */ else write_framer(sc, Bt8370_PRE_EQ, 0xA6); /* OFF; thresh 6 */ /* 038-03C:GAINn -- RX Equalizer gain thresholds */ write_framer(sc, Bt8370_GAIN0, 0x24); write_framer(sc, Bt8370_GAIN1, 0x28); write_framer(sc, Bt8370_GAIN2, 0x2C); write_framer(sc, Bt8370_GAIN3, 0x30); write_framer(sc, Bt8370_GAIN4, 0x34); /* 040:RCR0 -- Receiver Control Register 0 */ if (FORMAT_T1ESF) write_framer(sc, Bt8370_RCR0, 0x05); /* B8ZS, 2/5 FErrs */ else if (FORMAT_T1SF) write_framer(sc, Bt8370_RCR0, 0x84); /* AMI, 2/5 FErrs */ else if (FORMAT_E1NONE) write_framer(sc, Bt8370_RCR0, 0x41); /* HDB3, rabort */ else if (FORMAT_E1CRC) write_framer(sc, Bt8370_RCR0, 0x09); /* HDB3, 3 FErrs or 915 CErrs */ else /* E1 no CRC */ write_framer(sc, Bt8370_RCR0, 0x19); /* HDB3, 3 FErrs */ /* 041:RPATT -- Receive Test Pattern configuration */ write_framer(sc, Bt8370_RPATT, 0x3E); /* looking for framed QRSS */ /* 042:RLB -- Receive Loop Back code detector config */ write_framer(sc, Bt8370_RLB, 0x09); /* 6 bits down; 5 bits up */ /* 043:LBA -- Loop Back Activate code */ write_framer(sc, Bt8370_LBA, 0x08); /* 10000 10000 10000 ... */ /* 044:LBD -- Loop Back Deactivate code */ write_framer(sc, Bt8370_LBD, 0x24); /* 100100 100100 100100 ... */ /* 045:RALM -- Receive Alarm signal configuration */ write_framer(sc, Bt8370_RALM, 0x0C); /* yel_intg rlof_intg */ /* 046:LATCH -- Alarm/Error/Counter Latch register */ write_framer(sc, Bt8370_LATCH, 0x1F); /* stop_cnt latch_{cnt,err,alm} */ /* Select Pulse Shape based on cable length (T1 only). */ if ((pulse = sc->config.tx_pulse) == CFG_PULSE_AUTO) { if (FORMAT_T1ANY) { if (sc->config.cable_len > 200) pulse = CFG_PULSE_T1CSU; else if (sc->config.cable_len > 160) pulse = CFG_PULSE_T1DSX4; else if (sc->config.cable_len > 120) pulse = CFG_PULSE_T1DSX3; else if (sc->config.cable_len > 80) pulse = CFG_PULSE_T1DSX2; else if (sc->config.cable_len > 40) pulse = CFG_PULSE_T1DSX1; else pulse = CFG_PULSE_T1DSX0; } else pulse = CFG_PULSE_E1TWIST; } /* Select Line Build Out based on cable length (T1CSU only). 
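Worked example, with tx_lbo at CFG_LBO_AUTO: a T1CSU pulse over a cable_len of 800 falls through the 1500 and 1000 tests and picks CFG_LBO_15DB; any other pulse shape forces lbo = 0 (no build-out). The chosen value lands in bits 5:4 of 068:TLIU_CR through the (lbo & 0x30) mask.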
*/ if ((lbo = sc->config.tx_lbo) == CFG_LBO_AUTO) { if (pulse == CFG_PULSE_T1CSU) { if (sc->config.cable_len > 1500) lbo = CFG_LBO_0DB; else if (sc->config.cable_len > 1000) lbo = CFG_LBO_7DB; else if (sc->config.cable_len > 500) lbo = CFG_LBO_15DB; else lbo = CFG_LBO_22DB; } else lbo = 0; } /* 068:TLIU_CR -- Transmit LIU Control Register */ write_framer(sc, Bt8370_TLIU_CR, (0x40 | (lbo & 0x30) | (pulse & 0x0E))); /* 070:TCR0 -- Transmit Framer Configuration */ write_framer(sc, Bt8370_TCR0, sc->config.format>>1); /* 071:TCR1 -- Transmitter Configuration */ if (FORMAT_T1SF) write_framer(sc, Bt8370_TCR1, 0x43); /* tabort, AMI PDV enforced */ else write_framer(sc, Bt8370_TCR1, 0x41); /* tabort, B8ZS or HDB3 */ /* 072:TFRM -- Transmit Frame format MYEL YEL MF FE CRC FBIT */ if (sc->config.format == CFG_FORMAT_T1ESF) write_framer(sc, Bt8370_TFRM, 0x0B); /* - YEL MF - CRC FBIT */ else if (sc->config.format == CFG_FORMAT_T1SF) write_framer(sc, Bt8370_TFRM, 0x19); /* - YEL MF - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FAS) write_framer(sc, Bt8370_TFRM, 0x11); /* - YEL - - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCRC) write_framer(sc, Bt8370_TFRM, 0x1F); /* - YEL MF FE CRC FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCAS) write_framer(sc, Bt8370_TFRM, 0x31); /* MYEL YEL - - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCRCCAS) write_framer(sc, Bt8370_TFRM, 0x3F); /* MYEL YEL MF FE CRC FBIT */ else if (sc->config.format == CFG_FORMAT_E1NONE) write_framer(sc, Bt8370_TFRM, 0x00); /* NO FRAMING BITS AT ALL! */ /* 073:TERROR -- Transmit Error Insert */ write_framer(sc, Bt8370_TERROR, 0x00); /* no errors, please! */ /* 074:TMAN -- Transmit Manual Sa-byte/FEBE configuration */ write_framer(sc, Bt8370_TMAN, 0x00); /* none */ /* 075:TALM -- Transmit Alarm Signal Configuration */ if (FORMAT_E1ANY) write_framer(sc, Bt8370_TALM, 0x38); /* auto_myel auto_yel auto_ais */ else if (FORMAT_T1ANY) write_framer(sc, Bt8370_TALM, 0x18); /* auto_yel auto_ais */ /* 076:TPATT -- Transmit Test Pattern Configuration */ write_framer(sc, Bt8370_TPATT, 0x00); /* disabled */ /* 077:TLB -- Transmit Inband Loopback Code Configuration */ write_framer(sc, Bt8370_TLB, 0x00); /* disabled */ /* 090:CLAD_CR -- Clock Rate Adapter Configuration */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CLAD_CR, 0x06); /* loop filter gain 1/2^6 */ else write_framer(sc, Bt8370_CLAD_CR, 0x08); /* loop filter gain 1/2^8 */ /* 091:CSEL -- CLAD frequency Select */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CSEL, 0x55); /* 1544 kHz */ else write_framer(sc, Bt8370_CSEL, 0x11); /* 2048 kHz */ /* 092:CPHASE -- CLAD Phase detector */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CPHASE, 0x22); /* phase compare @ 386 kHz */ else write_framer(sc, Bt8370_CPHASE, 0x00); /* phase compare @ 2048 kHz */ if (FORMAT_T1ESF) /* BOP & PRM are enabled in T1ESF mode only. 
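The ESF facility data link carries both: BOP codewords for events like the yellow alarm and line/payload loopback commands, and the once-a-second T1.403 performance report messages that t1_watchdog() parses out of RDL1.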
*/ { /* 0A0:BOP -- Bit Oriented Protocol messages */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF); /* 0A4:DL1_TS -- Data Link 1 Time Slot Enable */ write_framer(sc, Bt8370_DL1_TS, 0x40); /* FDL bits in odd frames */ /* 0A6:DL1_CTL -- Data Link 1 Control */ write_framer(sc, Bt8370_DL1_CTL, 0x03); /* FCS mode, TX on, RX on */ /* 0A7:RDL1_FFC -- Rx Data Link 1 Fifo Fill Control */ write_framer(sc, Bt8370_RDL1_FFC, 0x30); /* assert "near full" at 48 */ /* 0AA:PRM -- Performance Report Messages */ write_framer(sc, Bt8370_PRM, 0x80); } /* 0D0:SBI_CR -- System Bus Interface Configuration Register */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_SBI_CR, 0x47); /* 1.544 with 24 TS +Fbits */ else write_framer(sc, Bt8370_SBI_CR, 0x46); /* 2.048 with 32 TS */ /* 0D1:RSB_CR -- Receive System Bus Configuration Register */ /* Change RINDO & RFSYNC on falling edge of RSBCLKI. */ write_framer(sc, Bt8370_RSB_CR, 0x70); /* 0D2,0D3:RSYNC_{TS,BIT} -- Receive frame Sync offset */ write_framer(sc, Bt8370_RSYNC_BIT, 0x00); write_framer(sc, Bt8370_RSYNC_TS, 0x00); /* 0D4:TSB_CR -- Transmit System Bus Configuration Register */ /* Change TINDO & TFSYNC on falling edge of TSBCLKI. */ write_framer(sc, Bt8370_TSB_CR, 0x30); /* 0D5,0D6:TSYNC_{TS,BIT} -- Transmit frame Sync offset */ write_framer(sc, Bt8370_TSYNC_BIT, 0x00); write_framer(sc, Bt8370_TSYNC_TS, 0x00); /* 0D7:RSIG_CR -- Receive SIGnalling Configuration Register */ write_framer(sc, Bt8370_RSIG_CR, 0x00); /* Assign and configure 64Kb TIME SLOTS. */ /* TS24..TS1 must be assigned for T1, TS31..TS0 for E1. */ /* Timeslots with no user data have RINDO and TINDO off. */ for (i=0; i<32; i++) { /* 0E0-0FF:SBCn -- System Bus Per-Channel Control */ if (FORMAT_T1ANY && (i==0 || i>24)) write_framer(sc, Bt8370_SBCn +i, 0x00); /* not assigned in T1 mode */ else if (FORMAT_E1ANY && (i==0) && !FORMAT_E1NONE) write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS0 o/h bits */ else if (FORMAT_E1CAS && (i==16) && !FORMAT_E1NONE) write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS16 o/h bits */ else if ((sc->config.time_slots & (1<<i)) != 0) write_framer(sc, Bt8370_SBCn +i, 0x0D); /* assigned, user data, RINDO & TINDO on */ else write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, no user data */ /* 100-11F:TPCn -- Transmit Per-Channel Control */ if ((sc->config.time_slots & (1<<i)) != 0) write_framer(sc, Bt8370_TPCn +i, 0x00); /* user data */ else write_framer(sc, Bt8370_TPCn +i, 0x20); /* send idle code */ } } static void t1_ident(softc_t *sc) { printf(", Bt837%x rev %x", read_framer(sc, Bt8370_DID)>>4, read_framer(sc, Bt8370_DID)&0x0F); } /* Called once a second; must not sleep. */ static int t1_watchdog(softc_t *sc) { u_int16_t LCV = 0, FERR = 0, CRC = 0, FEBE = 0; u_int8_t alm1, alm3, loop, isr0; int link_status = STATUS_UP; int i; /* Read the alarm registers */ alm1 = read_framer(sc, Bt8370_ALM1); alm3 = read_framer(sc, Bt8370_ALM3); loop = read_framer(sc, Bt8370_LOOP); isr0 = read_framer(sc, Bt8370_ISR0); /* Always ignore the SIGFRZ alarm bit. */ alm1 &= ~ALM1_SIGFRZ; if (FORMAT_T1ANY) /* ignore RYEL in T1 modes */ alm1 &= ~ALM1_RYEL; else if (FORMAT_E1NONE) /* ignore all alarms except LOS */ alm1 &= ALM1_RLOS; /* Software is alive. */ led_inv(sc, MII16_T1_LED_GRN); /* Receiving Alarm Indication Signal (AIS). */ if ((alm1 & ALM1_RAIS)!=0) /* receiving ais */ led_on(sc, MII16_T1_LED_BLU); else if ((alm1 & ALM1_RLOS)!=0) /* sending ais */ led_inv(sc, MII16_T1_LED_BLU); else led_off(sc, MII16_T1_LED_BLU); /* Receiving Remote Alarm Indication (RAI). */ if ((alm1 & (ALM1_RMYEL | ALM1_RYEL))!=0) /* receiving rai */ led_on(sc, MII16_T1_LED_YEL); else if ((alm1 & ALM1_RLOF)!=0) /* sending rai */ led_inv(sc, MII16_T1_LED_YEL); else led_off(sc, MII16_T1_LED_YEL); /* If any alarm bits are set then the link is 'down'. */ /* The bad bits are: rmyel ryel rais ralos rlos rlof. */ /* Some alarm bits have been masked by this point. */ if (alm1 != 0) link_status = STATUS_DOWN; /* Declare local Red Alarm if the link is down. 
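LED summary for this watchdog: GRN toggles once a second while the software is alive; BLU is solid when receiving AIS and blinks while sending it (RLOS); YEL is solid when receiving RAI and blinks while sending it (RLOF); RED, below, is solid in local Red Alarm and blinks while a loopback request is active.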
*/ if (link_status == STATUS_DOWN) led_on(sc, MII16_T1_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_T1_LED_RED); else led_off(sc, MII16_T1_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && (alm1 != sc->last_alm1)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOF=%s RLOS=%s RALOS=%s RAIS=%s RYEL=%s RMYEL=%s\n", NAME_UNIT, (alm1 & ALM1_RLOF) ? on : off, (alm1 & ALM1_RLOS) ? on : off, (alm1 & ALM1_RALOS) ? on : off, (alm1 & ALM1_RAIS) ? on : off, (alm1 & ALM1_RYEL) ? on : off, (alm1 & ALM1_RMYEL) ? on : off); } /* Check and print error counters if non-zero. */ LCV = read_framer(sc, Bt8370_LCV_LO) + (read_framer(sc, Bt8370_LCV_HI)<<8); if (!FORMAT_E1NONE) FERR = read_framer(sc, Bt8370_FERR_LO) + (read_framer(sc, Bt8370_FERR_HI)<<8); if (FORMAT_E1CRC || FORMAT_T1ESF) CRC = read_framer(sc, Bt8370_CRC_LO) + (read_framer(sc, Bt8370_CRC_HI)<<8); if (FORMAT_E1CRC) FEBE = read_framer(sc, Bt8370_FEBE_LO) + (read_framer(sc, Bt8370_FEBE_HI)<<8); /* Only LCV is valid if Out-Of-Frame */ if (FORMAT_E1NONE) FERR = CRC = FEBE = 0; if ((DRIVER_DEBUG) && (LCV || FERR || CRC || FEBE)) printf("%s: LCV=%u FERR=%u CRC=%u FEBE=%u\n", NAME_UNIT, LCV, FERR, CRC, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += LCV; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.crc_errs += CRC; sc->status.cntrs.febe_errs += FEBE; /* Check for BOP messages in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR1) & 0x80)) { u_int8_t bop_code = read_framer(sc, Bt8370_RBOP) & 0x3F; switch (bop_code) { case T1BOP_OOF: { if ((DRIVER_DEBUG) && ((sc->last_alm1 & ALM1_RMYEL)==0)) printf("%s: Receiving a 'yellow alarm' BOP msg\n", NAME_UNIT); break; } case T1BOP_LINE_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); sc->loop_timer = 305; break; } case T1BOP_LINE_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); sc->loop_timer = 0; break; } case T1BOP_PAY_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD); sc->loop_timer = 305; break; } case T1BOP_PAY_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_PAYLOAD); sc->loop_timer = 0; break; } default: { if (DRIVER_DEBUG) printf("%s: Received a type 0x%02X BOP msg\n", NAME_UNIT, bop_code); break; } } } /* Check for HDLC pkts in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR2) & 0x70)) { /* while (not fifo-empty && not start-of-msg) flush fifo */ while ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0) read_framer(sc, Bt8370_RDL1); /* If (not fifo-empty), then begin processing fifo contents. */ if ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0x08) { u_int8_t msg[64]; u_int8_t stat = read_framer(sc, Bt8370_RDL1); sc->status.cntrs.fdl_pkts++; for (i=0; i<(stat & 0x3F); i++) msg[i] = read_framer(sc, Bt8370_RDL1); /* Is this FDL message a T1.403 performance report? */ if (((stat & 0x3F)==11) && ((msg[0]==0x38) || (msg[0]==0x3A)) && (msg[1]==1) && (msg[2]==3)) /* Copy 4 PRs from FDL pkt to SNMP struct. */ memcpy(sc->status.snmp.t1.prm, msg+3, 8); } } /* Check for inband loop up/down commands. 
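These framed bit patterns are the in-band equivalent of the ESF BOP loopback commands, for SF links that have no facility data link. Three conditions gate each action: the code is being received (ISR6), it just appeared (ALM2 transition), and our own in-band transmitter is idle (TLB bit 0 clear), so the card never loops in response to a code it is sending itself.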
*/ if (FORMAT_T1ANY) { u_int8_t isr6 = read_framer(sc, Bt8370_ISR6); u_int8_t alarm2 = read_framer(sc, Bt8370_ALM2); u_int8_t tlb = read_framer(sc, Bt8370_TLB); /* Inband Code == Loop Up && On Transition && Inband Tx Inactive */ if ((isr6 & 0x40) && (alarm2 & 0x40) && ((tlb & 1)==0)) { /* CSU loop up is 10000 10000 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Up' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); /* Loop up */ sc->loop_timer = 305; } /* Inband Code == Loop Down && On Transition && Inband Tx Inactive */ if ((isr6 & 0x80) && (alarm2 & 0x80) && ((tlb & 1)==0)) { /* CSU loop down is 100 100 100 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Down' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); /* loop down */ sc->loop_timer = 0; } } /* Manually send Yellow Alarm BOP msgs. */ if (FORMAT_T1ESF) { u_int8_t isr7 = read_framer(sc, Bt8370_ISR7); if ((isr7 & 0x02) && (alm1 & 0x02)) /* RLOF on-transition */ { /* Start sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_CONT); write_framer(sc, Bt8370_TBOP, 0x00); /* send BOP; order matters */ } else if ((isr7 & 0x02) && ((alm1 & 0x02)==0)) /* RLOF off-transition */ { /* Stop sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF); } } /* Time out loopback requests. */ if (sc->loop_timer != 0) if (--sc->loop_timer == 0) if (loop != 0) { if (DRIVER_DEBUG) printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, loop & ~(LOOP_PAYLOAD | LOOP_LINE)); } /* RX Test Pattern status */ if ((DRIVER_DEBUG) && (isr0 & 0x10)) printf("%s: RX Test Pattern Sync\n", NAME_UNIT); /* SNMP Error Counters */ sc->status.snmp.t1.lcv = LCV; sc->status.snmp.t1.fe = FERR; sc->status.snmp.t1.crc = CRC; sc->status.snmp.t1.febe = FEBE; /* SNMP Line Status */ sc->status.snmp.t1.line = 0; if (alm1 & ALM1_RMYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_TX_RAI; if (alm1 & ALM1_RAIS) sc->status.snmp.t1.line |= TLINE_RX_AIS; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_TX_AIS; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_LOF; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_LOS; if (alm3 & ALM3_RMAIS) sc->status.snmp.t1.line |= T1LINE_RX_TS16_AIS; if (alm3 & ALM3_SRED) sc->status.snmp.t1.line |= T1LINE_TX_TS16_LOMF; if (alm3 & ALM3_SEF) sc->status.snmp.t1.line |= T1LINE_SEF; if (isr0 & 0x10) sc->status.snmp.t1.line |= T1LINE_RX_TEST; if ((alm1 & ALM1_RMYEL) && (FORMAT_E1CAS)) sc->status.snmp.t1.line |= T1LINE_RX_TS16_LOMF; /* SNMP Loopback Status */ sc->status.snmp.t1.loop &= ~(TLOOP_FAR_LINE | TLOOP_FAR_PAYLOAD); if (sc->config.loop_back == CFG_LOOP_TULIP) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_PAYLOAD) sc->status.snmp.t1.loop |= TLOOP_NEAR_PAYLOAD; if (loop & LOOP_LINE) sc->status.snmp.t1.loop |= TLOOP_NEAR_LINE; if (loop & LOOP_ANALOG) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_FRAMER) sc->status.snmp.t1.loop |= TLOOP_NEAR_INWARD; /* Remember this state until next time. */ sc->last_alm1 = alm1; /* If an INWARD loopback is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */ link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep. 
*/ static void t1_send_bop(softc_t *sc, int bop_code) { u_int8_t bop; int i; /* The BOP transmitter could be sending a continuous */ /* BOP msg when told to send this BOP_25 message. */ /* So save and restore the state of the BOP machine. */ bop = read_framer(sc, Bt8370_BOP); write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_OFF); for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* send 25 repetitions of bop_code */ write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_25); write_framer(sc, Bt8370_TBOP, bop_code); /* order matters */ /* wait for tx to stop */ for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* Restore previous state of the BOP machine. */ write_framer(sc, Bt8370_BOP, bop); } /* IOCTL SYSCALL: can sleep. */ static int t1_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; switch (ioctl->cmd) { case IOCTL_SNMP_SEND: /* set opstatus? */ { switch (ioctl->data) { case TSEND_NORMAL: { write_framer(sc, Bt8370_TPATT, 0x00); /* tx pattern generator off */ write_framer(sc, Bt8370_RPATT, 0x00); /* rx pattern detector off */ write_framer(sc, Bt8370_TLB, 0x00); /* tx inband generator off */ break; } case TSEND_LINE: { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_UP); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x08); /* 10000 10000 ... */ write_framer(sc, Bt8370_TLB, 0x05); /* 5 bits, framed, start */ } sc->status.snmp.t1.loop |= TLOOP_FAR_LINE; break; } case TSEND_PAYLOAD: { t1_send_bop(sc, T1BOP_PAY_UP); sc->status.snmp.t1.loop |= TLOOP_FAR_PAYLOAD; break; } case TSEND_RESET: { if (sc->status.snmp.t1.loop == TLOOP_FAR_LINE) { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_DOWN); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x24); /* 100100 100100 ... */ write_framer(sc, Bt8370_TLB, 0x09); /* 6 bits, framed, start */ } sc->status.snmp.t1.loop &= ~TLOOP_FAR_LINE; } if (sc->status.snmp.t1.loop == TLOOP_FAR_PAYLOAD) { t1_send_bop(sc, T1BOP_PAY_DOWN); sc->status.snmp.t1.loop &= ~TLOOP_FAR_PAYLOAD; } break; } case TSEND_QRS: { write_framer(sc, Bt8370_TPATT, 0x1E); /* framed QRSS */ break; } default: { error = EINVAL; break; } } break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { u_int8_t new_loop = 0; if (ioctl->data == CFG_LOOP_NONE) new_loop = 0; else if (ioctl->data == CFG_LOOP_PAYLOAD) new_loop = LOOP_PAYLOAD; else if (ioctl->data == CFG_LOOP_LINE) new_loop = LOOP_LINE; else if (ioctl->data == CFG_LOOP_OTHER) new_loop = LOOP_ANALOG; else if (ioctl->data == CFG_LOOP_INWARD) new_loop = LOOP_FRAMER; else if (ioctl->data == CFG_LOOP_DUAL) new_loop = LOOP_DUAL; else error = EINVAL; if (error == 0) { write_framer(sc, Bt8370_LOOP, new_loop); sc->config.loop_back = ioctl->data; } break; } default: error = EINVAL; break; } return error; } static struct card hssi_card = { .config = hssi_config, .ident = hssi_ident, .watchdog = hssi_watchdog, .ioctl = hssi_ioctl, }; static struct card t3_card = { .config = t3_config, .ident = t3_ident, .watchdog = t3_watchdog, .ioctl = t3_ioctl, }; static struct card ssi_card = { .config = ssi_config, .ident = ssi_ident, .watchdog = ssi_watchdog, .ioctl = ssi_ioctl, }; static struct card t1_card = { .config = t1_config, .ident = t1_ident, .watchdog = t1_watchdog, .ioctl = t1_ioctl, }; /* RAWIP is raw IP packets (v4 or v6) in HDLC frames with NO HEADERS. */ /* No HDLC Address/Control fields! No line control protocol at all! */ /* rxintr_cleanup calls this to give a newly arrived pkt to higher levels. 
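RAWIP demultiplexes on the IP version nibble alone: the first byte of an IPv4 header is 0x45 for the usual 20-byte header, so m_data[0]>>4 yields 4, and 6 for IPv6; the packet goes to NETISR_IP or NETISR_IPV6 accordingly, and anything else is freed and counted in idiscards.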
*/ static void lmc_raw_input(struct ifnet *ifp, struct mbuf *mbuf) { softc_t *sc = IFP2SC(ifp); M_SETFIB(mbuf, ifp->if_fib); # if INET if (mbuf->m_data[0]>>4 == 4) netisr_dispatch(NETISR_IP, mbuf); else # endif # if INET6 if (mbuf->m_data[0]>>4 == 6) netisr_dispatch(NETISR_IPV6, mbuf); else # endif { m_freem(mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_input: rx pkt discarded: not IPv4 or IPv6\n", NAME_UNIT); } } /* * We are "standing on the head of a pin" in these routines. * Tulip CSRs can be accessed, but nothing else is interrupt-safe! * Do NOT access: MII, GPIO, SROM, BIOSROM, XILINX, SYNTH, or DAC. */ /* Singly-linked tail-queues hold mbufs with active DMA. * For RX, single mbuf clusters; for TX, mbuf chains are queued. * NB: mbufs are linked through their m_nextpkt field. * Callers must hold sc->bottom_lock; not otherwise locked. */ /* Put an mbuf (chain) on the tail of the descriptor ring queue. */ static void /* BSD version */ mbuf_enqueue(struct desc_ring *ring, struct mbuf *m) { m->m_nextpkt = NULL; if (ring->tail == NULL) ring->head = m; else ring->tail->m_nextpkt = m; ring->tail = m; } /* Get an mbuf (chain) from the head of the descriptor ring queue. */ static struct mbuf* /* BSD version */ mbuf_dequeue(struct desc_ring *ring) { struct mbuf *m = ring->head; if (m != NULL) if ((ring->head = m->m_nextpkt) == NULL) ring->tail = NULL; return m; } static void /* *** FreeBSD ONLY *** Callout from bus_dmamap_load() */ fbsd_dmamap_load(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct desc_ring *ring = arg; ring->nsegs = error ? 0 : nsegs; ring->segs[0] = segs[0]; ring->segs[1] = segs[1]; } /* Initialize a DMA descriptor ring. */ static int /* BSD version */ create_ring(softc_t *sc, struct desc_ring *ring, int num_descs) { struct dma_desc *descs; int size_descs = sizeof(struct dma_desc)*num_descs; int i, error = 0; /* The DMA descriptor array must not cross a page boundary. */ if (size_descs > PAGE_SIZE) { printf("%s: DMA descriptor array > PAGE_SIZE (%d)\n", NAME_UNIT, (u_int)PAGE_SIZE); return EINVAL; } /* Create a DMA tag for descriptors and buffers. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 2, PAGE_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &ring->tag))) { printf("%s: bus_dma_tag_create() failed: error %d\n", NAME_UNIT, error); return error; } /* Allocate wired physical memory for DMA descriptor array */ /* and map physical address to kernel virtual address. */ if ((error = bus_dmamem_alloc(ring->tag, (void**)&ring->first, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->map))) { printf("%s: bus_dmamem_alloc() failed; error %d\n", NAME_UNIT, error); return error; } descs = ring->first; /* Map kernel virtual address to PCI address for DMA descriptor array. */ if ((error = bus_dmamap_load(ring->tag, ring->map, descs, size_descs, fbsd_dmamap_load, ring, 0))) { printf("%s: bus_dmamap_load() failed; error %d\n", NAME_UNIT, error); return error; } ring->dma_addr = ring->segs[0].ds_addr; /* Allocate dmamaps for each DMA descriptor. 
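Each descriptor needs its own dmamap because each one maps a different mbuf -- an RX cluster or a piece of a TX chain -- as packets flow through, while ring->map made above covers only the descriptor array itself.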
*/ for (i=0; i<num_descs; i++) if ((error = bus_dmamap_create(ring->tag, 0, &descs[i].map))) { printf("%s: bus_dmamap_create() failed; error %d\n", NAME_UNIT, error); return error; } ring->read = descs; ring->write = descs; ring->first = descs; ring->last = descs + num_descs -1; ring->last->control = TLP_DCTL_END_RING; ring->num_descs = num_descs; ring->size_descs = size_descs; ring->head = NULL; ring->tail = NULL; return 0; } /* Destroy a DMA descriptor ring */ static void /* BSD version */ destroy_ring(softc_t *sc, struct desc_ring *ring) { struct dma_desc *desc; struct mbuf *m; /* Free queued mbufs. */ while ((m = mbuf_dequeue(ring)) != NULL) m_freem(m); /* TX may have one pkt that is not on any queue. */ if (sc->tx_mbuf != NULL) { m_freem(sc->tx_mbuf); sc->tx_mbuf = NULL; } /* Unmap active DMA descriptors. */ while (ring->read != ring->write) { bus_dmamap_unload(ring->tag, ring->read->map); if (ring->read++ == ring->last) ring->read = ring->first; } /* Free the dmamaps of all DMA descriptors. */ for (desc=ring->first; desc!=ring->last+1; desc++) if (desc->map != NULL) bus_dmamap_destroy(ring->tag, desc->map); /* Unmap PCI address for DMA descriptor array. */ if (ring->dma_addr != 0) bus_dmamap_unload(ring->tag, ring->map); /* Free kernel memory for DMA descriptor array. */ if (ring->first != NULL) bus_dmamem_free(ring->tag, ring->first, ring->map); /* Free the DMA tag created for this ring. */ if (ring->tag != NULL) bus_dma_tag_destroy(ring->tag); } /* Clean up after a packet has been received. */ static int /* BSD version */ rxintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *first_desc, *last_desc; struct mbuf *first_mbuf=NULL, *last_mbuf=NULL; struct mbuf *new_mbuf; int pkt_len, desc_len; #if defined(DEVICE_POLLING) /* Input packet flow control (livelock prevention): */ /* Give pkts to higher levels only if quota is > 0. */ if (sc->quota <= 0) return 0; #endif /* This looks complicated, but remember: typically packets up */ /* to 2048 bytes long fit in one mbuf and use one descriptor. */ first_desc = last_desc = ring->read; /* ASSERTION: If there is a descriptor in the ring and the hardware has */ /* finished with it, then that descriptor will have RX_FIRST_DESC set. */ if ((ring->read != ring->write) && /* descriptor ring not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0) && /* hardware done */ ((ring->read->status & TLP_DSTS_RX_FIRST_DESC) == 0)) /* should be set */ panic("%s: rxintr_cleanup: rx-first-descriptor not set.\n", NAME_UNIT); /* First decide if a complete packet has arrived. */ /* Run down DMA descriptors looking for one marked "last". */ /* Bail out if an active descriptor is encountered. */ /* Accumulate most significant bits of packet length. */ pkt_len = 0; for (;;) { if (last_desc == ring->write) return 0; /* no more descs */ if (last_desc->status & TLP_DSTS_OWNER) return 0; /* still active */ if (last_desc->status & TLP_DSTS_RX_LAST_DESC) break; /* end of packet */ pkt_len += last_desc->length1 + last_desc->length2; /* entire desc filled */ if (last_desc++->control & TLP_DCTL_END_RING) last_desc = ring->first; /* ring wrap */ } /* A complete packet has arrived; how long is it? */ /* H/w ref man shows RX pkt length as a 14-bit field. */ /* An experiment found that only the 12 LSBs work. */ if (((last_desc->status>>16)&0xFFF) == 0) pkt_len += 4096; /* carry-bit */ pkt_len = (pkt_len & 0xF000) + ((last_desc->status>>16) & 0x0FFF); /* Subtract the CRC length unless doing so would underflow. 
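Worked example of the length fix-up just above: a 5000-byte frame spanning 2048+2048+904-byte descriptors accumulates pkt_len = 4096 from the two full descriptors; 5000 is 0x1388, so the 12 usable status bits supply 0x388, and (4096 & 0xF000) + 0x388 reassembles 0x1388 = 5000. The carry-bit case covers frames that are an exact multiple of 4096 bytes, where those 12 bits read zero.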
*/ if (pkt_len >= sc->config.crc_len) pkt_len -= sc->config.crc_len; /* Run down DMA descriptors again doing the following: * 1) put pkt info in pkthdr of first mbuf, * 2) link mbufs, * 3) set mbuf lengths. */ first_desc = ring->read; do { /* Read a DMA descriptor from the ring. */ last_desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* Dequeue the corresponding cluster mbuf. */ new_mbuf = mbuf_dequeue(ring); if (new_mbuf == NULL) panic("%s: rxintr_cleanup: expected an mbuf\n", NAME_UNIT); desc_len = last_desc->length1 + last_desc->length2; /* If bouncing, copy bounce buf to mbuf. */ DMA_SYNC(last_desc->map, desc_len, BUS_DMASYNC_POSTREAD); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, last_desc->map); /* 1) Put pkt info in pkthdr of first mbuf. */ if (last_desc == first_desc) { first_mbuf = new_mbuf; first_mbuf->m_pkthdr.len = pkt_len; /* total pkt length */ first_mbuf->m_pkthdr.rcvif = sc->ifp; /* how it got here */ } else /* 2) link mbufs. */ { last_mbuf->m_next = new_mbuf; /* M_PKTHDR should be set in the first mbuf only. */ new_mbuf->m_flags &= ~M_PKTHDR; } last_mbuf = new_mbuf; /* 3) Set mbuf lengths. */ new_mbuf->m_len = (pkt_len >= desc_len) ? desc_len : pkt_len; pkt_len -= new_mbuf->m_len; } while ((last_desc->status & TLP_DSTS_RX_LAST_DESC) == 0); /* Decide whether to accept or to discard this packet. */ /* RxHDLC sets MIIERR for bad CRC, abort and partial byte at pkt end. */ if (((last_desc->status & TLP_DSTS_RX_BAD) == 0) && (sc->status.oper_status == STATUS_UP) && (first_mbuf->m_pkthdr.len > 0)) { /* Optimization: copy a small pkt into a small mbuf. */ if (first_mbuf->m_pkthdr.len <= COPY_BREAK) { MGETHDR(new_mbuf, M_NOWAIT, MT_DATA); if (new_mbuf != NULL) { new_mbuf->m_pkthdr.rcvif = first_mbuf->m_pkthdr.rcvif; new_mbuf->m_pkthdr.len = first_mbuf->m_pkthdr.len; new_mbuf->m_len = first_mbuf->m_len; memcpy(new_mbuf->m_data, first_mbuf->m_data, first_mbuf->m_pkthdr.len); m_freem(first_mbuf); first_mbuf = new_mbuf; } } /* Include CRC and one flag byte in input byte count. */ sc->status.cntrs.ibytes += first_mbuf->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.ipackets++; if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); LMC_BPF_MTAP(first_mbuf); #if defined(DEVICE_POLLING) sc->quota--; #endif /* Give this good packet to the network stacks. */ #if NETGRAPH if (sc->ng_hook != NULL) /* is hook connected? */ { int error; /* ignore error */ NG_SEND_DATA_ONLY(error, sc->ng_hook, first_mbuf); return 1; /* did something */ } #endif /* NETGRAPH */ if (sc->config.line_pkg == PKG_RAWIP) lmc_raw_input(sc->ifp, first_mbuf); else { #if NSPPP sppp_input(sc->ifp, first_mbuf); #elif P2P new_mbuf = first_mbuf; while (new_mbuf != NULL) { sc->p2p->p2p_hdrinput(sc->p2p, new_mbuf->m_data, new_mbuf->m_len); new_mbuf = new_mbuf->m_next; } sc->p2p->p2p_input(sc->p2p, NULL); m_freem(first_mbuf); #else m_freem(first_mbuf); sc->status.cntrs.idiscards++; #endif } } else if (sc->status.oper_status != STATUS_UP) { /* If the link is down, this packet is probably noise. */ m_freem(first_mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: rxintr_cleanup: rx pkt discarded: link down\n", NAME_UNIT); } else /* Log and discard this bad packet. */ { if (DRIVER_DEBUG) printf("%s: RX bad pkt; len=%d %s%s%s%s\n", NAME_UNIT, first_mbuf->m_pkthdr.len, (last_desc->status & TLP_DSTS_RX_MII_ERR) ? " miierr" : "", (last_desc->status & TLP_DSTS_RX_DRIBBLE) ? 
" dribble" : "", (last_desc->status & TLP_DSTS_RX_DESC_ERR) ? " descerr" : "", (last_desc->status & TLP_DSTS_RX_OVERRUN) ? " overrun" : ""); if (last_desc->status & TLP_DSTS_RX_OVERRUN) sc->status.cntrs.fifo_over++; else sc->status.cntrs.ierrors++; m_freem(first_mbuf); } return 1; /* did something */ } /* Setup (prepare) to receive a packet. */ /* Try to keep the RX descriptor ring full of empty buffers. */ static int /* BSD version */ rxintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *desc; struct mbuf *m; int desc_len; int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->write == ring->last) ? ring->first : ring->write+1) == ring->read) return 0; /* ring is full; nothing to do */ /* Allocate a small mbuf and attach an mbuf cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MGETHDR() failed\n", NAME_UNIT); return 0; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MCLGET() failed\n", NAME_UNIT); return 0; } /* Queue the mbuf for later processing by rxintr_cleanup. */ mbuf_enqueue(ring, m); /* Write a DMA descriptor into the ring. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->write; /* Advance the ring write pointer. */ if (ring->write++ == ring->last) ring->write = ring->first; desc_len = (MCLBYTES < MAX_DESC_LEN) ? MCLBYTES : MAX_DESC_LEN; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, m->m_data, desc_len))) printf("%s: bus_dmamap_load(rx) failed; error %d\n", NAME_UNIT, error); /* Invalidate the cache for this mbuf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREREAD); /* Set up the DMA descriptor. */ desc->address1 = ring->segs[0].ds_addr; desc->length1 = desc_len>>1; desc->address2 = desc->address1 + desc->length1; desc->length2 = desc_len>>1; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptor to the hardware. */ desc->status = TLP_DSTS_OWNER; /* Notify the receiver that there is another buffer available. */ WRITE_CSR(TLP_RX_POLL, 1); return 1; /* did something */ } /* Clean up after a packet has been transmitted. */ /* Free the mbuf chain and update the DMA descriptor ring. */ static int /* BSD version */ txintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; while ((ring->read != ring->write) && /* while ring is not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0)) { /* Read a DMA descriptor from the ring. */ desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* This is a no-op on most architectures. */ DMA_SYNC(desc->map, desc->length1 + desc->length2, BUS_DMASYNC_POSTWRITE); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, desc->map); /* If this descriptor is the last segment of a packet, */ /* then dequeue and free the corresponding mbuf chain. */ if ((desc->control & TLP_DCTL_TX_LAST_SEG) != 0) { struct mbuf *m; if ((m = mbuf_dequeue(ring)) == NULL) panic("%s: txintr_cleanup: expected an mbuf\n", NAME_UNIT); /* Include CRC and one flag byte in output byte count. */ sc->status.cntrs.obytes += m->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.opackets++; if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); LMC_BPF_MTAP(m); /* The only bad TX status is fifo underrun. 
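An underrun here only bumps the fifo_under counter; the recovery lives in check_intr_status(), which reacts to the same condition by raising the transmit FIFO threshold one step.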
*/ if ((desc->status & TLP_DSTS_TX_UNDERRUN) != 0) sc->status.cntrs.fifo_under++; m_freem(m); return 1; /* did something */ } } return 0; } /* Build DMA descriptors for a transmit packet mbuf chain. */ static int /* 0=success; 1=error */ /* BSD version */ txintr_setup_mbuf(softc_t *sc, struct mbuf *m) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; unsigned int desc_len; /* build DMA descriptors for a chain of mbufs. */ while (m != NULL) { char *data = m->m_data; int length = m->m_len; /* zero length mbufs happen! */ /* Build DMA descriptors for one mbuf. */ while (length > 0) { int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->temp==ring->last) ? ring->first : ring->temp+1) == ring->read) { /* Not enough DMA descriptors; try later. */ for (; ring->temp!=ring->write; ring->temp = (ring->temp==ring->first)? ring->last : ring->temp-1) bus_dmamap_unload(ring->tag, ring->temp->map); sc->status.cntrs.txdma++; return 1; } /* Provisionally, write a descriptor into the ring. */ /* But don't change the REAL ring write pointer. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->temp; /* Advance the temporary ring write pointer. */ if (ring->temp++ == ring->last) ring->temp = ring->first; /* Clear all control bits except the END_RING bit. */ desc->control &= TLP_DCTL_END_RING; /* Don't pad short packets up to 64 bytes. */ desc->control |= TLP_DCTL_TX_NO_PAD; /* Use Tulip's CRC-32 generator, if appropriate. */ if (sc->config.crc_len != CFG_CRC_32) desc->control |= TLP_DCTL_TX_NO_CRC; /* Set the OWNER bit, except in the first descriptor. */ if (desc != ring->write) desc->status = TLP_DSTS_OWNER; desc_len = (length > MAX_CHUNK_LEN) ? MAX_CHUNK_LEN : length; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, data, desc_len))) printf("%s: bus_dmamap_load(tx) failed; error %d\n", NAME_UNIT, error); /* Flush the cache and if bouncing, copy mbuf to bounce buf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREWRITE); /* Prevent wild fetches if mapping fails (nsegs==0). */ desc->length1 = desc->length2 = 0; desc->address1 = desc->address2 = 0; { bus_dma_segment_t *segs = ring->segs; int nsegs = ring->nsegs; if (nsegs >= 1) { desc->address1 = segs[0].ds_addr; desc->length1 = segs[0].ds_len; } if (nsegs == 2) { desc->address2 = segs[1].ds_addr; desc->length2 = segs[1].ds_len; } } data += desc_len; length -= desc_len; } /* while (length > 0) */ m = m->m_next; } /* while (m != NULL) */ return 0; /* success */ } /* Setup (prepare) to transmit a packet. */ /* Select a packet, build DMA descriptors and give packet to hardware. */ /* If DMA descriptors run out, abandon the attempt and return 0. */ static int /* BSD version */ txintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *first_desc, *last_desc; /* Protect against half-up links: Don't transmit */ /* if the receiver can't hear the far end. */ if (sc->status.oper_status != STATUS_UP) return 0; /* Pick a packet to transmit. 
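Dequeue priority, top first: a leftover sc->tx_mbuf from a descriptor-exhausted attempt (every branch below is gated on tx_mbuf == NULL), then a connected netgraph hook (ng_fastq before ng_sndq), then RAWIP's if_snd, else the line protocol's queue (sppp_dequeue, or p2p_isnd ahead of if_snd).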
*/ #if NETGRAPH if ((sc->ng_hook != NULL) && (sc->tx_mbuf == NULL)) { if (!IFQ_IS_EMPTY(&sc->ng_fastq)) IFQ_DEQUEUE(&sc->ng_fastq, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ng_sndq, sc->tx_mbuf); } else #endif if (sc->tx_mbuf == NULL) { if (sc->config.line_pkg == PKG_RAWIP) IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); else { #if NSPPP sc->tx_mbuf = sppp_dequeue(sc->ifp); #elif P2P if (!IFQ_IS_EMPTY(&sc->p2p->p2p_isnd)) IFQ_DEQUEUE(&sc->p2p->p2p_isnd, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); #endif } } if (sc->tx_mbuf == NULL) return 0; /* no pkt to transmit */ /* Build DMA descriptors for an outgoing mbuf chain. */ ring->temp = ring->write; /* temporary ring write pointer */ if (txintr_setup_mbuf(sc, sc->tx_mbuf) != 0) return 0; /* Enqueue the mbuf; txintr_cleanup will free it. */ mbuf_enqueue(ring, sc->tx_mbuf); /* The transmitter has room for another packet. */ sc->tx_mbuf = NULL; /* Set first & last segment bits. */ /* last_desc is the desc BEFORE the one pointed to by ring->temp. */ first_desc = ring->write; first_desc->control |= TLP_DCTL_TX_FIRST_SEG; last_desc = (ring->temp==ring->first)? ring->last : ring->temp-1; last_desc->control |= TLP_DCTL_TX_LAST_SEG; /* Interrupt at end-of-transmission? Why bother the poor computer! */ /* last_desc->control |= TLP_DCTL_TX_INTERRUPT; */ /* Make sure the OWNER bit is not set in the next descriptor. */ /* The OWNER bit may have been set if a previous call aborted. */ ring->temp->status = 0; /* Commit the DMA descriptors to the software. */ ring->write = ring->temp; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptors to the hardware. */ first_desc->status = TLP_DSTS_OWNER; /* Notify the transmitter that there is another packet to send. */ WRITE_CSR(TLP_TX_POLL, 1); return 1; /* did something */ } static void check_intr_status(softc_t *sc) { u_int32_t status, cfcs, op_mode; u_int32_t missed, overruns; /* Check for four unusual events: * 1) fatal PCI bus errors - some are recoverable * 2) transmitter FIFO underruns - increase fifo threshold * 3) receiver FIFO overruns - clear potential hangup * 4) no receive descs or bufs - count missed packets */ /* 1) A fatal bus error causes a Tulip to stop initiating bus cycles. */ /* Module unload/load or boot are the only fixes for Parity Errors. */ /* Master and Target Aborts can be cleared and life may continue. */ status = READ_CSR(TLP_STATUS); if ((status & TLP_STAT_FATAL_ERROR) != 0) { u_int32_t fatal = (status & TLP_STAT_FATAL_BITS)>>TLP_STAT_FATAL_SHIFT; printf("%s: FATAL PCI BUS ERROR: %s%s%s%s\n", NAME_UNIT, (fatal == 0) ? "PARITY ERROR" : "", (fatal == 1) ? "MASTER ABORT" : "", (fatal == 2) ? "TARGET ABORT" : "", (fatal >= 3) ? "RESERVED (?)" : ""); cfcs = READ_PCI_CFG(sc, TLP_CFCS); /* try to clear it */ cfcs &= ~(TLP_CFCS_MSTR_ABORT | TLP_CFCS_TARG_ABORT); WRITE_PCI_CFG(sc, TLP_CFCS, cfcs); } /* 2) If the transmitter fifo underruns, increase the transmit fifo */ /* threshold: the number of bytes required to be in the fifo */ /* before starting the transmitter (cost: increased tx delay). */ /* The TX_FSM must be stopped to change this parameter. */ if ((status & TLP_STAT_TX_UNDERRUN) != 0) { op_mode = READ_CSR(TLP_OP_MODE); /* enable store-and-forward mode if tx_threshold tops out? 
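The threshold is a two-bit field, so adding 0x4000 bumps it one step through 128, 256, 512 and 1024 bytes (the printf below decodes it), and the guard comparison stops at the top step. Store-and-forward, the logical next escalation after 1024, is never engaged here -- hence the question mark.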
*/ if ((op_mode & TLP_OP_TX_THRESH) < TLP_OP_TX_THRESH) { op_mode += 0x4000; /* increment TX_THRESH field; can't overflow */ WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_TX_RUN); /* Wait for the TX FSM to stop; it might be processing a pkt. */ while (READ_CSR(TLP_STATUS) & TLP_STAT_TX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart tx */ if (DRIVER_DEBUG) printf("%s: tx underrun; tx fifo threshold now %d bytes\n", NAME_UNIT, 128<<((op_mode>>TLP_OP_TR_SHIFT)&3)); } } /* 3) Errata memo from Digital Equipment Corp warns that 21140A */ /* receivers through rev 2.2 can hang if the fifo overruns. */ /* Recommended fix: stop and start the RX FSM after an overrun. */ missed = READ_CSR(TLP_MISSED); if ((overruns = ((missed & TLP_MISS_OVERRUN)>>TLP_OVERRUN_SHIFT)) != 0) { if (DRIVER_DEBUG) printf("%s: rx overrun cntr=%d\n", NAME_UNIT, overruns); sc->status.cntrs.overruns += overruns; if ((READ_PCI_CFG(sc, TLP_CFRV) & 0xFF) <= 0x22) { op_mode = READ_CSR(TLP_OP_MODE); WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_RX_RUN); /* Wait for the RX FSM to stop; it might be processing a pkt. */ while (READ_CSR(TLP_STATUS) & TLP_STAT_RX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart rx */ } } /* 4) When the receiver is enabled and a packet arrives, but no DMA */ /* descriptor is available, the packet is counted as 'missed'. */ /* The receiver should never miss packets; warn if it happens. */ if ((missed = (missed & TLP_MISS_MISSED)) != 0) { if (DRIVER_DEBUG) printf("%s: rx missed %d pkts\n", NAME_UNIT, missed); sc->status.cntrs.missed += missed; } } static void /* This is where the work gets done. */ core_interrupt(void *arg, int check_status) { softc_t *sc = arg; int activity; /* If any CPU is inside this critical section, then */ /* other CPUs should go away without doing anything. */ if (BOTTOM_TRYLOCK == 0) { sc->status.cntrs.lck_intr++; return; } /* Clear pending card interrupts. */ WRITE_CSR(TLP_STATUS, READ_CSR(TLP_STATUS)); /* In Linux, pci_alloc_consistent() means DMA descriptors */ /* don't need explicit syncing. */ { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } do /* This is the main loop for interrupt processing. */ { activity = txintr_cleanup(sc); activity += txintr_setup(sc); activity += rxintr_cleanup(sc); activity += rxintr_setup(sc); } while (activity); { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* As the interrupt is dismissed, check for four unusual events. */ if (check_status) check_intr_status(sc); BOTTOM_UNLOCK; } /* user_interrupt() may be called from a syscall or a softirq */ static void user_interrupt(softc_t *sc, int check_status) { DISABLE_INTR; /* noop on FreeBSD-5 and Linux */ core_interrupt(sc, check_status); ENABLE_INTR; /* noop on FreeBSD-5 and Linux */ } # if defined(DEVICE_POLLING) /* Service the card from the kernel idle loop without interrupts. */ static int fbsd_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { softc_t *sc = IFP2SC(ifp); sc->quota = count; core_interrupt(sc, (cmd==POLL_AND_CHECK_STATUS)); return 0; } # endif /* DEVICE_POLLING */ /* BSD kernels call this procedure when an interrupt happens. 
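The PCI interrupt line may be shared, so the handler first reads TLP_STATUS and answers IRQ_NONE if none of our TXRX bits are set. Under DEVICE_POLLING it instead registers fbsd_poll and masks the card's interrupts, after which all servicing happens from the polling loop with sc->quota throttling input as livelock prevention.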
*/ static intr_return_t bsd_interrupt(void *arg) { softc_t *sc = arg; /* Cut losses early if this is not our interrupt. */ if ((READ_CSR(TLP_STATUS) & TLP_INT_TXRX) == 0) return IRQ_NONE; # if defined(DEVICE_POLLING) if (sc->ifp->if_capenable & IFCAP_POLLING) return IRQ_NONE; if ((sc->ifp->if_capabilities & IFCAP_POLLING) && (ether_poll_register(fbsd_poll, sc->ifp))) { WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); return IRQ_NONE; } else sc->quota = sc->rxring.num_descs; /* input flow control */ # endif /* DEVICE_POLLING */ /* Disable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); core_interrupt(sc, 0); /* Enable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX); return IRQ_HANDLED; } /* Administrative status of the driver (UP or DOWN) has changed. */ /* A card-specific action may be required: T1 and T3 cards: no-op. */ /* HSSI and SSI cards change the state of modem ready signals. */ static void set_status(softc_t *sc, int status) { struct ioctl ioctl; ioctl.cmd = IOCTL_SET_STATUS; ioctl.data = status; sc->card->ioctl(sc, &ioctl); } #if P2P /* Callout from P2P: */ /* Get the state of DCD (Data Carrier Detect). */ static int p2p_getmdm(struct p2pcom *p2p, caddr_t result) { softc_t *sc = IFP2SC(&p2p->p2p_if); /* Non-zero isn't good enough; TIOCM_CAR is 0x40. */ *(int *)result = (sc->status.oper_status==STATUS_UP) ? TIOCM_CAR : 0; return 0; } /* Callout from P2P: */ /* Set the state of DTR (Data Terminal Ready). */ static int p2p_mdmctl(struct p2pcom *p2p, int flag) { softc_t *sc = IFP2SC(&p2p->p2p_if); set_status(sc, flag); return 0; } #endif /* P2P */ #if NSPPP # ifndef PP_FR # define PP_FR 0 # endif /* Callout from SPPP: */ static void sppp_tls(struct sppp *sppp) { if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) sppp->pp_up(sppp); } /* Callout from SPPP: */ static void sppp_tlf(struct sppp *sppp) { if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) sppp->pp_down(sppp); } #endif /* NSPPP */ /* Configure line protocol stuff. * Called by attach_card() during module init. * Called by core_ioctl() when lmcconfig writes sc->config. * Called by detach_card() during module shutdown. */ static void config_proto(softc_t *sc, struct config *config) { /* Use line protocol stack instead of RAWIP mode. */ if ((sc->config.line_pkg == PKG_RAWIP) && (config->line_pkg != PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_attach(sc->ifp); LMC_BPF_ATTACH(DLT_PPP, 4); sc->sppp->pp_tls = sppp_tls; sc->sppp->pp_tlf = sppp_tlf; /* Force reconfiguration of SPPP params. */ sc->config.line_prot = 0; sc->config.keep_alive = config->keep_alive ? 0:1; #elif P2P int error = 0; sc->p2p->p2p_proto = 0; /* force p2p_attach */ if ((error = p2p_attach(sc->p2p))) /* calls bpfattach() */ { printf("%s: p2p_attach() failed; error %d\n", NAME_UNIT, error); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } else { sc->p2p->p2p_mdmctl = p2p_mdmctl; /* set DTR */ sc->p2p->p2p_getmdm = p2p_getmdm; /* get DCD */ } #elif GEN_HDLC int error = 0; sc->net_dev->mtu = HDLC_MAX_MTU; if ((error = hdlc_open(sc->net_dev))) { printf("%s: hdlc_open() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'sethdlc %s ppp'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } #else /* no line protocol stack was configured */ config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ #endif } /* Bypass line protocol stack and return to RAWIP mode. 
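Both halves of this transition logic run under LMCIOCSCFG: core_ioctl() calls config_proto(sc, config) with the proposed configuration and only then commits it with sc->config = *config, so every test here compares the old state (sc->config) against the new (config).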
*/ if ((sc->config.line_pkg != PKG_RAWIP) && (config->line_pkg == PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_flush(sc->ifp); sppp_detach(sc->ifp); setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); #elif P2P int error = 0; if_qflush(&sc->p2p->p2p_isnd); if ((error = p2p_detach(sc->p2p))) { printf("%s: p2p_detach() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'ifconfig %s down -remove'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_P2P; /* not in RAWIP mode; still attached to P2P */ } else { setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); } #elif GEN_HDLC hdlc_proto_detach(sc->hdlc_dev); hdlc_close(sc->net_dev); setup_netdev(sc->net_dev); #endif } #if NSPPP if (config->line_pkg != PKG_RAWIP) { /* Check for change to PPP protocol. */ if ((sc->config.line_prot != PROT_PPP) && (config->line_prot == PROT_PPP)) { LMC_BPF_DETACH; sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; LMC_BPF_ATTACH(DLT_PPP, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } # ifndef DLT_C_HDLC # define DLT_C_HDLC DLT_PPP # endif /* Check for change to C_HDLC protocol. */ if ((sc->config.line_prot != PROT_C_HDLC) && (config->line_prot == PROT_C_HDLC)) { LMC_BPF_DETACH; sc->ifp->if_flags |= IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; LMC_BPF_ATTACH(DLT_C_HDLC, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for change to Frame Relay protocol. */ if ((sc->config.line_prot != PROT_FRM_RLY) && (config->line_prot == PROT_FRM_RLY)) { LMC_BPF_DETACH; sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags |= PP_FR; LMC_BPF_ATTACH(DLT_FRELAY, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for disabling keep-alives. */ if ((sc->config.keep_alive != 0) && (config->keep_alive == 0)) sc->sppp->pp_flags &= ~PP_KEEPALIVE; /* Check for enabling keep-alives. */ if ((sc->config.keep_alive == 0) && (config->keep_alive != 0)) sc->sppp->pp_flags |= PP_KEEPALIVE; } #endif /* NSPPP */ /* Loop back through the TULIP Ethernet chip; (no CRC). */ /* Data sheet says stop DMA before changing OPMODE register. */ /* But that's not as simple as it sounds; works anyway. */ /* Check for enabling loopback thru Tulip chip. */ if ((sc->config.loop_back != CFG_LOOP_TULIP) && (config->loop_back == CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode |= TLP_OP_INT_LOOP; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_0; } /* Check for disabling loopback thru Tulip chip. */ if ((sc->config.loop_back == CFG_LOOP_TULIP) && (config->loop_back != CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode &= ~TLP_OP_LOOP_MODE; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_16; } } /* This is the core ioctl procedure. */ /* It handles IOCTLs from lmcconfig(8). */ /* It must not run when card watchdogs run. */ /* Called from a syscall (user context; no spinlocks). */ /* This procedure can SLEEP. */ static int core_ioctl(softc_t *sc, u_long cmd, caddr_t data) { struct iohdr *iohdr = (struct iohdr *) data; struct ioctl *ioctl = (struct ioctl *) data; struct status *status = (struct status *) data; struct config *config = (struct config *) data; int error = 0; /* All structs start with a string and a cookie. */ if (((struct iohdr *)data)->cookie != NGM_LMC_COOKIE) return EINVAL; while (TOP_TRYLOCK == 0) { sc->status.cntrs.lck_ioctl++; SLEEP(10000); /* yield? 
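TOP_TRYLOCK is the interlock with core_watchdog(): the watchdog never blocks -- on contention it counts lck_watch and skips a tick -- while an ioctl loops here, counting lck_ioctl and sleeping 10 mS per retry until the lock frees.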
*/ } switch (cmd) { case LMCIOCGSTAT: { *status = sc->status; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCGCFG: { *config = sc->config; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCSCFG: { if ((error = CHECK_CAP)) break; config_proto(sc, config); sc->config = *config; sc->card->config(sc); break; } case LMCIOCREAD: { if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } ioctl->data = READ_PCI_CFG(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } ioctl->data = READ_CSR(ioctl->address*TLP_CSR_STRIDE); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } ioctl->data = read_srom(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_BIOS) ioctl->data = read_bios(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_MII) ioctl->data = read_mii(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_FRAME) ioctl->data = read_framer(sc, ioctl->address); else error = EINVAL; break; } case LMCIOCWRITE: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } WRITE_PCI_CFG(sc, ioctl->address, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } WRITE_CSR(ioctl->address*TLP_CSR_STRIDE, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } write_srom(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_BIOS) { if (ioctl->address == 0) erase_bios(sc); write_bios(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_MII) write_mii(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_RW_FRAME) write_framer(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_WO_SYNTH) write_synth(sc, (struct synth *)&ioctl->data); else if (ioctl->cmd == IOCTL_WO_DAC) { write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, ioctl->data & 0xFFF); } else error = EINVAL; break; } case LMCIOCTL: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_XILINX_RESET) { reset_xilinx(sc); sc->card->config(sc); } else if (ioctl->cmd == IOCTL_XILINX_ROM) { load_xilinx_from_rom(sc); /* can sleep */ sc->card->config(sc); } else if (ioctl->cmd == IOCTL_XILINX_FILE) { /* load_xilinx_from_file() can sleep. */ error = load_xilinx_from_file(sc, ioctl->ucode, ioctl->data); if (error != 0) load_xilinx_from_rom(sc); /* try the rom */ sc->card->config(sc); set_status(sc, (error==0)); /* XXX */ } else if (ioctl->cmd == IOCTL_RESET_CNTRS) { memset(&sc->status.cntrs, 0, sizeof(struct event_cntrs)); microtime(&sc->status.cntrs.reset_time); } else error = sc->card->ioctl(sc, ioctl); /* can sleep */ break; } default: error = EINVAL; break; } TOP_UNLOCK; return error; } /* This is the core watchdog procedure. */ /* It calculates link speed, and calls the card-specific watchdog code. */ /* Calls interrupt() in case one got lost; also kick-starts the device. */ /* ioctl syscalls and card watchdog routines must be interlocked. */ /* This procedure must not sleep. */ static void core_watchdog(softc_t *sc) { /* Read and restart the Tulip timer. */ u_int32_t tx_speed = READ_CSR(TLP_TIMER); WRITE_CSR(TLP_TIMER, 0xFFFF); /* Measure MII clock using a timer in the Tulip chip. * This timer counts transmitter bits divided by 4096. * Since this is called once a second the math is easy. * This is only correct when the link is NOT sending pkts. 
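core_ioctl() above trusts nothing from user space: the NGM_LMC_COOKIE check rejects mismatched lmcconfig binaries, and every LMCIOCREAD/LMCIOCWRITE address is range-checked for its register space (PCI config offsets 0..252, sixteen CSRs, 64 SROM words) before any hardware access. A stand-alone model of that validation; the table values mirror the bounds in the switch above, but nothing here is driver code:

#include <errno.h>
#include <stdint.h>

struct reg_space { uint32_t max_addr; };

static const struct reg_space spaces[] = {
	{ 252 },  /* PCI config: 256 bytes, dword-aligned accesses */
	{ 15 },   /* 16 Tulip CSRs, scaled by TLP_CSR_STRIDE */
	{ 63 },   /* 64 SROM words */
};

static int
validate_reg_addr(unsigned int space, uint32_t addr)
{
	if (space >= sizeof(spaces) / sizeof(spaces[0]))
		return (EINVAL);  /* unknown register space */
	if (addr > spaces[space].max_addr)
		return (EFAULT);  /* out of range, as core_ioctl() returns */
	return (0);
}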
* On a fully-loaded link, answer will be HALF actual rate. * Clock rate during pkt is HALF clk rate between pkts. * Measuring clock rate really measures link utilization! */ sc->status.tx_speed = (0xFFFF - (tx_speed & 0xFFFF)) << 12; /* The first status reset time is when the calendar clock is set. */ if (sc->status.cntrs.reset_time.tv_sec < 1000) microtime(&sc->status.cntrs.reset_time); /* Update hardware (operational) status. */ /* Call the card-specific watchdog routines. */ if (TOP_TRYLOCK != 0) { sc->status.oper_status = sc->card->watchdog(sc); /* Increment a counter which tells user-land */ /* observers that SNMP state has been updated. */ sc->status.ticks++; TOP_UNLOCK; } else sc->status.cntrs.lck_watch++; /* In case an interrupt gets lost... */ user_interrupt(sc, 1); } /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { case SIOCAIFADDR: case SIOCSIFFLAGS: case SIOCSIFADDR: ifp->if_flags |= IFF_UP; /* a Unix tradition */ break; case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; break; default: error = EINVAL; break; } return error; } /* Called from a syscall (user context; no spinlocks). */ static int lmc_ifnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { softc_t *sc = IFP2SC(ifp); int error = 0; switch (cmd) { /* Catch the IOCTLs used by lmcconfig. */ case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: error = core_ioctl(sc, cmd, data); break; /* Pass the rest to the line protocol. */ default: if (sc->config.line_pkg == PKG_RAWIP) error = lmc_raw_ioctl(ifp, cmd, data); else # if NSPPP error = sppp_ioctl(ifp, cmd, data); # elif P2P error = p2p_ioctl(ifp, cmd, data); # else error = EINVAL; # endif break; } if (DRIVER_DEBUG && (error!=0)) printf("%s: lmc_ifnet_ioctl; cmd=0x%08lx error=%d\n", NAME_UNIT, cmd, error); return error; } /* Called from a syscall (user context; no spinlocks). */ static void lmc_ifnet_start(struct ifnet *ifp) { softc_t *sc = IFP2SC(ifp); /* Start the transmitter; incoming pkts are NOT processed. */ user_interrupt(sc, 0); } /* sppp and p2p replace this with their own proc. */ /* RAWIP mode is the only time this is used. */ /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { softc_t *sc = IFP2SC(ifp); int error = 0; /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } # if NETGRAPH /* Netgraph has priority over the ifnet kernel interface. */ if (sc->ng_hook != NULL) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: netgraph active\n", NAME_UNIT); return EBUSY; } # endif /* lmc_raw_output() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; IFQ_ENQUEUE(&ifp->if_snd, m, error); ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); if (DRIVER_DEBUG) printf("%s: lmc_raw_output: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* Called from a softirq once a second. 
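The measurement described above deserves a worked rendering: the Tulip's general-purpose timer is reloaded with 0xFFFF once a second and decrements once per 4096 transmitted bits, so the ticks consumed in one second, shifted left by 12, give bits per second. In plain, runnable C:

#include <stdint.h>
#include <stdio.h>

/* Model of core_watchdog()'s computation: the timer counts down
 * from 0xFFFF, one tick per 4096 tx bits, sampled once a second. */
static uint32_t
tx_speed_bps(uint32_t timer_csr)
{
	return ((0xFFFFu - (timer_csr & 0xFFFFu)) << 12);
}

int
main(void)
{
	/* A T1 at ~1.544 Mb/s consumes about 377 ticks per second:
	 * 377 << 12 = 1544192. */
	printf("%u bps\n", tx_speed_bps(0xFFFFu - 377u));
	return (0);
}

As the comment warns, during packets the transmit clock runs at half its between-packet rate, so on a fully loaded link the computed figure is closer to a utilization estimate than a line rate.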
*/ static void lmc_watchdog(void *arg) { struct ifnet *ifp = arg; softc_t *sc = IFP2SC(ifp); u_int8_t old_oper_status = sc->status.oper_status; core_watchdog(sc); /* updates oper_status */ #if NETGRAPH if (sc->ng_hook != NULL) { sc->status.line_pkg = PKG_NG; sc->status.line_prot = 0; } else #endif if (sc->config.line_pkg == PKG_RAWIP) { sc->status.line_pkg = PKG_RAWIP; sc->status.line_prot = PROT_IP_HDLC; } else { # if P2P /* Notice change in link status. */ if ((old_oper_status != sc->status.oper_status) && (sc->p2p->p2p_modem)) (*sc->p2p->p2p_modem)(sc->p2p, sc->status.oper_status==STATUS_UP); /* Notice change in line protocol. */ sc->status.line_pkg = PKG_P2P; switch (sc->ifp->if_type) { case IFT_PPP: sc->status.line_prot = PROT_PPP; break; case IFT_PTPSERIAL: sc->status.line_prot = PROT_C_HDLC; break; case IFT_FRELAY: sc->status.line_prot = PROT_FRM_RLY; break; default: sc->status.line_prot = 0; break; } # elif NSPPP /* Notice change in link status. */ if ((old_oper_status != STATUS_UP) && (sc->status.oper_status == STATUS_UP)) /* link came up */ sppp_tls(sc->sppp); if ((old_oper_status == STATUS_UP) && (sc->status.oper_status != STATUS_UP)) /* link went down */ sppp_tlf(sc->sppp); /* Notice change in line protocol. */ sc->status.line_pkg = PKG_SPPP; if (sc->sppp->pp_flags & PP_FR) sc->status.line_prot = PROT_FRM_RLY; else if (sc->ifp->if_flags & IFF_LINK2) sc->status.line_prot = PROT_C_HDLC; else sc->status.line_prot = PROT_PPP; # else /* Suppress compiler warning. */ if (old_oper_status == STATUS_UP); # endif } ifp->if_baudrate = sc->status.tx_speed; if (sc->status.oper_status == STATUS_UP) ifp->if_link_state = LINK_STATE_UP; else ifp->if_link_state = LINK_STATE_DOWN; /* Call this procedure again after one second. */ callout_reset(&sc->callout, hz, lmc_watchdog, ifp); } static uint64_t lmc_get_counter(struct ifnet *ifp, ift_counter cnt) { softc_t *sc; struct event_cntrs *cntrs; sc = if_getsoftc(ifp); cntrs = &sc->status.cntrs; switch (cnt) { case IFCOUNTER_IPACKETS: return (cntrs->ipackets); case IFCOUNTER_OPACKETS: return (cntrs->opackets); case IFCOUNTER_IBYTES: return (cntrs->ibytes); case IFCOUNTER_OBYTES: return (cntrs->obytes); case IFCOUNTER_IERRORS: return (cntrs->ierrors); case IFCOUNTER_OERRORS: return (cntrs->oerrors); case IFCOUNTER_IQDROPS: return (cntrs->idiscards); default: return (if_get_counter_default(ifp, cnt)); } } static void setup_ifnet(struct ifnet *ifp) { softc_t *sc = ifp->if_softc; /* Initialize the generic network interface. */ ifp->if_flags = IFF_POINTOPOINT; ifp->if_flags |= IFF_RUNNING; ifp->if_ioctl = lmc_ifnet_ioctl; ifp->if_start = lmc_ifnet_start; /* sppp changes this */ ifp->if_output = lmc_raw_output; /* sppp & p2p change this */ ifp->if_input = lmc_raw_input; ifp->if_get_counter = lmc_get_counter; ifp->if_mtu = MAX_DESC_LEN; /* sppp & p2p change this */ ifp->if_type = IFT_PTPSERIAL; /* p2p changes this */ # if defined(DEVICE_POLLING) ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; # endif if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); } static int lmc_ifnet_attach(softc_t *sc) { sc->ifp = if_alloc(NSPPP ? IFT_PPP : IFT_OTHER); if (sc->ifp == NULL) return ENOMEM; # if NSPPP sc->sppp = sc->ifp->if_l2com; # elif P2P sc->ifp = &sc->p2pcom.p2p_if; sc->p2p = &sc->p2pcom; # endif /* Initialize the network interface struct. */ sc->ifp->if_softc = sc; setup_ifnet(sc->ifp); /* ALTQ output queue initialization. 
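The status plumbing in lmc_watchdog() above is deliberately edge-triggered: sppp_tls()/sppp_tlf() (or the P2P modem callback) run only when oper_status changes, so the protocol stack gets one event per transition instead of one per second. The two-comparison idiom, reduced to a stand-alone sketch with placeholder callbacks:

#include <stdio.h>

enum { STATUS_DOWN, STATUS_UP };  /* stand-ins for the driver's values */

static void link_came_up(void)   { puts("tls: link up"); }
static void link_went_down(void) { puts("tlf: link down"); }

static void
notice_link_change(int old_status, int new_status)
{
	if (old_status != STATUS_UP && new_status == STATUS_UP)
		link_came_up();    /* rising edge only */
	if (old_status == STATUS_UP && new_status != STATUS_UP)
		link_went_down();  /* falling edge only */
}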
*/ IFQ_SET_MAXLEN(&sc->ifp->if_snd, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ifp->if_snd); /* Attach to the ifnet kernel interface. */ if_attach(sc->ifp); /* Attach Berkeley Packet Filter. */ LMC_BPF_ATTACH(DLT_RAW, 0); callout_reset(&sc->callout, hz, lmc_watchdog, sc); return 0; } static void lmc_ifnet_detach(softc_t *sc) { # if defined(DEVICE_POLLING) if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); # endif /* Detach Berkeley Packet Filter. */ LMC_BPF_DETACH; /* Detach from the ifnet kernel interface. */ if_detach(sc->ifp); if_free(sc->ifp); } #if NETGRAPH /* These next two macros should be added to netgraph */ # define NG_TYPE_REF(type) atomic_add_int(&(type)->refs, 1) # define NG_TYPE_UNREF(type) \ do { \ if ((type)->refs == 1) \ ng_rmtype(type); \ else \ atomic_subtract_int(&(type)->refs, 1); \ } while (0) /* It is an error to construct new copies of this Netgraph node. */ /* All instances are constructed by ng_attach and are persistent. */ static int ng_constructor(node_p node) { return EINVAL; } /* Incoming Netgraph control message. */ static int ng_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ng_mesg *msg; struct ng_mesg *resp = NULL; softc_t *sc = NG_NODE_PRIVATE(node); int error = 0; NGI_GET_MSG(item, msg); if (msg->header.typecookie == NGM_LMC_COOKIE) { switch (msg->header.cmd) { case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: { /* Call the core ioctl procedure. */ error = core_ioctl(sc, msg->header.cmd, msg->data); if ((msg->header.cmd & IOC_OUT) != 0) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + IOCPARM_LEN(msg->header.cmd), M_NOWAIT); if (resp == NULL) error = ENOMEM; else memcpy(resp->data, msg->data, IOCPARM_LEN(msg->header.cmd)); } break; } default: error = EINVAL; break; } } else if ((msg->header.typecookie == NGM_GENERIC_COOKIE) && (msg->header.cmd == NGM_TEXT_STATUS)) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + NG_TEXTRESPONSE, M_NOWAIT); if (resp == NULL) error = ENOMEM; else { char *s = resp->data; sprintf(s, "Card type = <%s>\n" "This driver considers the link to be %s.\n" "Use lmcconfig to configure this interface.\n", sc->dev_desc, (sc->status.oper_status==STATUS_UP) ? "UP" : "DOWN"); resp->header.arglen = strlen(s) +1; } } else /* Netgraph should be able to read and write these * parameters with text-format control messages: * SSI HSSI T1E1 T3 * crc crc crc crc * loop loop loop loop * clksrc clksrc * dte dte format format * synth synth cablen cablen * cable timeslot scram * gain * pulse * lbo * Someday I'll implement this... */ error = EINVAL; /* Handle synchronous response. */ NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return error; } /* This is a persistent netgraph node. */ static int ng_shutdown(node_p node) { /* unless told to really die, bounce back to life */ if ((node->nd_flags & NG_REALLY_DIE)==0) node->nd_flags &= ~NG_INVALID; /* bounce back to life */ return 0; } /* ng_disconnect is the opposite of this procedure. */ static int ng_newhook(node_p node, hook_p hook, const char *name) { softc_t *sc = NG_NODE_PRIVATE(node); /* Hook name must be 'rawdata'. */ if (strncmp(name, "rawdata", 7) != 0) return EINVAL; /* Is our hook connected? */ if (sc->ng_hook != NULL) return EBUSY; /* Accept the hook. */ sc->ng_hook = hook; return 0; } /* Both ends have accepted their hooks and the links have been made. */ /* This is the last chance to reject the connection request. 
*/ static int ng_connect(hook_p hook) { /* Probably not at splnet, force outward queueing. (huh?) */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return 0; /* always accept */ } /* Receive data in mbufs from another Netgraph node. */ /* Transmit an mbuf-chain on the communication link. */ /* This procedure is very similar to lmc_raw_output(). */ /* Called from a syscall (user context; no spinlocks). */ static int ng_rcvdata(hook_p hook, item_p item) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int error = 0; struct mbuf *m; meta_p meta = NULL; NGI_GET_M(item, m); NGI_GET_META(item, meta); NG_FREE_ITEM(item); /* This macro must not store into meta! */ NG_FREE_META(meta); /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } /* ng_rcvdata() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; if (meta==NULL) IFQ_ENQUEUE(&sc->ng_sndq, m, error); else IFQ_ENQUEUE(&sc->ng_fastq, m, error); ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* ng_newhook is the opposite of this procedure, not */ /* ng_connect, as you might expect from the names. */ static int ng_disconnect(hook_p hook) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); /* Disconnect the hook. */ sc->ng_hook = NULL; return 0; } static struct ng_type ng_type = { .version = NG_ABI_VERSION, .name = NG_LMC_NODE_TYPE, .mod_event = NULL, .constructor = ng_constructor, .rcvmsg = ng_rcvmsg, .close = NULL, .shutdown = ng_shutdown, .newhook = ng_newhook, .findhook = NULL, .connect = ng_connect, .rcvdata = ng_rcvdata, .disconnect = ng_disconnect, }; /* Attach to the Netgraph kernel interface (/sys/netgraph). * It is called once for each physical card during device attach. * This is effectively ng_constructor. */ static int ng_attach(softc_t *sc) { int error; /* If this node type is not known to Netgraph then register it. */ if (ng_type.refs == 0) /* or: if (ng_findtype(&ng_type) == NULL) */ { if ((error = ng_newtype(&ng_type))) { printf("%s: ng_newtype() failed; error %d\n", NAME_UNIT, error); return error; } } else NG_TYPE_REF(&ng_type); /* Call the superclass node constructor. */ if ((error = ng_make_node_common(&ng_type, &sc->ng_node))) { NG_TYPE_UNREF(&ng_type); printf("%s: ng_make_node_common() failed; error %d\n", NAME_UNIT, error); return error; } /* Associate a name with this netgraph node. */ if ((error = ng_name_node(sc->ng_node, NAME_UNIT))) { NG_NODE_UNREF(sc->ng_node); NG_TYPE_UNREF(&ng_type); printf("%s: ng_name_node() failed; error %d\n", NAME_UNIT, error); return error; } /* Initialize the send queue mutexes. */ mtx_init(&sc->ng_sndq.ifq_mtx, NAME_UNIT, "sndq", MTX_DEF); mtx_init(&sc->ng_fastq.ifq_mtx, NAME_UNIT, "fastq", MTX_DEF); /* Put a backpointer to the softc in the netgraph node. */ NG_NODE_SET_PRIVATE(sc->ng_node, sc); /* ALTQ output queue initialization. 
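ng_attach() above is a textbook staged bring-up: register the node type (or take a reference on it), construct the node, then name it, and on each failure release exactly what the earlier stages acquired, in reverse order. The skeleton of that discipline, with placeholder step/undo pairs standing in for ng_newtype()/ng_make_node_common()/ng_name_node() and their matching unrefs:

static int  step_register_type(void) { return (0); }  /* placeholders */
static void undo_register_type(void) { }
static int  step_make_node(void)     { return (0); }
static void undo_make_node(void)     { }
static int  step_name_node(void)     { return (0); }

static int
attach_in_stages(void)
{
	int error;

	if ((error = step_register_type()) != 0)
		return (error);
	if ((error = step_make_node()) != 0) {
		undo_register_type();
		return (error);
	}
	if ((error = step_name_node()) != 0) {
		undo_make_node();
		undo_register_type();
		return (error);
	}
	return (0);  /* all stages held; detach undoes them in reverse */
}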
*/
  IFQ_SET_MAXLEN(&sc->ng_fastq, SNDQ_MAXLEN);
  IFQ_SET_READY(&sc->ng_fastq);
  IFQ_SET_MAXLEN(&sc->ng_sndq, SNDQ_MAXLEN);
  IFQ_SET_READY(&sc->ng_sndq);

  return 0;
  }

static void
ng_detach(softc_t *sc)
  {
  callout_drain(&sc->callout);
  mtx_destroy(&sc->ng_sndq.ifq_mtx);
  mtx_destroy(&sc->ng_fastq.ifq_mtx);
  ng_rmnode_self(sc->ng_node); /* free hook */
  NG_NODE_UNREF(sc->ng_node);  /* free node */
  NG_TYPE_UNREF(&ng_type);
  }

#endif /* NETGRAPH */

/* The next few procedures initialize the card. */
/* Returns 0 on success; error code on failure. */
static int
startup_card(softc_t *sc)
  {
  int num_rx_descs, error = 0;
  u_int32_t tlp_bus_pbl, tlp_bus_cal, tlp_op_tr;
  u_int32_t tlp_cfdd, tlp_cfcs;
  u_int32_t tlp_cflt, tlp_csid, tlp_cfit;

  /* Make sure the COMMAND bits are reasonable. */
  tlp_cfcs = READ_PCI_CFG(sc, TLP_CFCS);
  tlp_cfcs &= ~TLP_CFCS_MWI_ENABLE;
  tlp_cfcs |= TLP_CFCS_BUS_MASTER;
  tlp_cfcs |= TLP_CFCS_MEM_ENABLE;
  tlp_cfcs |= TLP_CFCS_IO_ENABLE;
  tlp_cfcs |= TLP_CFCS_PAR_ERROR;
  tlp_cfcs |= TLP_CFCS_SYS_ERROR;
  WRITE_PCI_CFG(sc, TLP_CFCS, tlp_cfcs);

  /* Set the LATENCY TIMER to the recommended value, */
  /* and make sure the CACHE LINE SIZE is reasonable. */
  tlp_cfit = READ_PCI_CFG(sc, TLP_CFIT);
  tlp_cflt = READ_PCI_CFG(sc, TLP_CFLT);
  tlp_cflt &= ~TLP_CFLT_LATENCY;
  tlp_cflt |= (tlp_cfit & TLP_CFIT_MAX_LAT)>>16;
  /* "prgmbl burst length" and "cache alignment" used below. */
  switch(tlp_cflt & TLP_CFLT_CACHE)
    {
    case 8: /* 8 bytes per cache line */
      { tlp_bus_pbl = 32; tlp_bus_cal = 1; break; }
    case 16:
      { tlp_bus_pbl = 32; tlp_bus_cal = 2; break; }
    case 32:
      { tlp_bus_pbl = 32; tlp_bus_cal = 3; break; }
    default:
      {
      tlp_bus_pbl = 32; tlp_bus_cal = 1;
      tlp_cflt &= ~TLP_CFLT_CACHE;
      tlp_cflt |= 8;
      break;
      }
    }
  WRITE_PCI_CFG(sc, TLP_CFLT, tlp_cflt);

  /* Make sure SNOOZE and SLEEP modes are disabled. */
  tlp_cfdd = READ_PCI_CFG(sc, TLP_CFDD);
  tlp_cfdd &= ~TLP_CFDD_SLEEP;
  tlp_cfdd &= ~TLP_CFDD_SNOOZE;
  WRITE_PCI_CFG(sc, TLP_CFDD, tlp_cfdd);
  DELAY(11*1000); /* Tulip wakes up in 10 ms max */

  /* Software Reset the Tulip chip; stops DMA and Interrupts. */
  /* This does not change the PCI config regs just set above. */
  WRITE_CSR(TLP_BUS_MODE, TLP_BUS_RESET); /* self-clearing */
  DELAY(5); /* Tulip is dead for 50 PCI cycles after reset. */

  /* Reset the Xilinx Field Programmable Gate Array. */
  reset_xilinx(sc); /* side effect: turns on all four LEDs */

  /* Configure card-specific stuff (framers, line interfaces, etc.). */
  sc->card->config(sc);

  /* Initializing cards can glitch clocks and upset fifos. */
  /* Reset the FIFOs between the Tulip and Xilinx chips. */
  set_mii16_bits(sc, MII16_FIFO);
  clr_mii16_bits(sc, MII16_FIFO);

  /* Initialize the PCI busmode register. */
  /* The PCI bus cycle type "Memory Write and Invalidate" does NOT */
  /*  work cleanly in any version of the 21140A, so don't enable it! */
  WRITE_CSR(TLP_BUS_MODE,
   (tlp_bus_cal ? TLP_BUS_READ_LINE : 0) |
   (tlp_bus_cal ? TLP_BUS_READ_MULT : 0) |
   (tlp_bus_pbl<<TLP_BUS_PBL_SHIFT) |
   (tlp_bus_cal<<TLP_BUS_CAL_SHIFT) |
   TLP_BUS_DSL_VAL);

  /* Pick the number of RX descriptors and the TX fifo threshold. */
  tlp_csid = READ_PCI_CFG(sc, TLP_CSID);
  switch(tlp_csid)
    {
    case TLP_CSID_HSSI:  /* 52 Mb/s */
    case TLP_CSID_HSSIc: /* 52 Mb/s */
    case TLP_CSID_T3:    /* 45 Mb/s */
      { num_rx_descs = 48; tlp_op_tr = 2; break; }
    case TLP_CSID_SSI:   /* 10 Mb/s */
      { num_rx_descs = 32; tlp_op_tr = 1; break; }
    default:             /*  2 Mb/s T1E1 and unknown cards */
      { num_rx_descs = 16; tlp_op_tr = 0; break; }
    }

  /* Create the DMA descriptor rings and set the list-head registers. */
  if ((error = create_ring(sc, &sc->txring, NUM_TX_DESCS)))
    return error;
  WRITE_CSR(TLP_TX_LIST, sc->txring.dma_addr);
  if ((error = create_ring(sc, &sc->rxring, num_rx_descs)))
    return error;
  WRITE_CSR(TLP_RX_LIST, sc->rxring.dma_addr);

  /* Initialize the operating mode register. */
  WRITE_CSR(TLP_OP_MODE, TLP_OP_INIT | (tlp_op_tr<<TLP_OP_TR_SHIFT));

  /* Enable card interrupts. */
  WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX);

  return 0;
  }

/* Stop the card: stop DMA and interrupts; free the descriptor rings. */
static void
shutdown_card(softc_t *sc)
  {
  /* Software reset the Tulip chip; stops DMA and Interrupts. */
  WRITE_CSR(TLP_BUS_MODE, TLP_BUS_RESET); /* self-clearing */
  DELAY(5);

  destroy_ring(sc, &sc->txring);
  destroy_ring(sc, &sc->rxring);
  }

/* Start the card and attach a kernel interface and line protocol. */
static int
attach_card(softc_t *sc, const char *intrstr)
  {
  struct config config;
  u_int32_t tlp_cfrv;
  u_int16_t mii3;
  u_int8_t *ieee;
  int i, error = 0;

  /* Start the card.
*/ if ((error = startup_card(sc))) return error; callout_init(&sc->callout, 0); /* Attach a kernel interface. */ #if NETGRAPH if ((error = ng_attach(sc))) return error; sc->flags |= FLAG_NETGRAPH; #endif if ((error = lmc_ifnet_attach(sc))) return error; sc->flags |= FLAG_IFNET; /* Attach a line protocol stack. */ sc->config.line_pkg = PKG_RAWIP; config = sc->config; /* get current config */ config.line_pkg = 0; /* select external stack */ config.line_prot = PROT_C_HDLC; config.keep_alive = 1; config_proto(sc, &config); /* reconfigure */ sc->config = config; /* save new configuration */ /* Print interesting hardware-related things. */ mii3 = read_mii(sc, 3); tlp_cfrv = READ_PCI_CFG(sc, TLP_CFRV); printf("%s: PCI rev %d.%d, MII rev %d.%d", NAME_UNIT, (tlp_cfrv>>4) & 0xF, tlp_cfrv & 0xF, (mii3>>4) & 0xF, mii3 & 0xF); ieee = (u_int8_t *)sc->status.ieee; for (i=0; i<3; i++) sc->status.ieee[i] = read_srom(sc, 10+i); printf(", IEEE addr %02x:%02x:%02x:%02x:%02x:%02x", ieee[0], ieee[1], ieee[2], ieee[3], ieee[4], ieee[5]); sc->card->ident(sc); printf(" %s\n", intrstr); /* Print interesting software-related things. */ printf("%s: Driver rev %d.%d.%d", NAME_UNIT, DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_SUB_VERSION); printf(", Options %s%s%s%s%s%s%s%s%s\n", NETGRAPH ? "NETGRAPH " : "", GEN_HDLC ? "GEN_HDLC " : "", NSPPP ? "SPPP " : "", P2P ? "P2P " : "", ALTQ_PRESENT ? "ALTQ " : "", NBPFILTER ? "BPF " : "", DEV_POLL ? "POLL " : "", IOREF_CSR ? "IO_CSR " : "MEM_CSR ", (BYTE_ORDER == BIG_ENDIAN) ? "BIG_END " : "LITTLE_END "); /* Make the local hardware ready. */ set_status(sc, 1); return 0; } /* Detach from the kernel in all ways. */ static void detach_card(softc_t *sc) { struct config config; /* Make the local hardware NOT ready. */ set_status(sc, 0); /* Detach external line protocol stack. */ if (sc->config.line_pkg != PKG_RAWIP) { config = sc->config; config.line_pkg = PKG_RAWIP; config_proto(sc, &config); sc->config = config; } /* Detach kernel interfaces. */ #if NETGRAPH if (sc->flags & FLAG_NETGRAPH) { IFQ_PURGE(&sc->ng_fastq); IFQ_PURGE(&sc->ng_sndq); ng_detach(sc); sc->flags &= ~FLAG_NETGRAPH; } #endif if (sc->flags & FLAG_IFNET) { IFQ_PURGE(&sc->ifp->if_snd); lmc_ifnet_detach(sc); sc->flags &= ~FLAG_IFNET; } /* Reset the Tulip chip; stops DMA and Interrupts. */ shutdown_card(sc); } /* This is the I/O configuration interface for FreeBSD */ static int fbsd_probe(device_t dev) { u_int32_t cfid = pci_read_config(dev, TLP_CFID, 4); u_int32_t csid = pci_read_config(dev, TLP_CSID, 4); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return ENXIO; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: device_set_desc(dev, HSSI_DESC); break; case TLP_CSID_T3: device_set_desc(dev, T3_DESC); break; case TLP_CSID_SSI: device_set_desc(dev, SSI_DESC); break; case TLP_CSID_T1E1: device_set_desc(dev, T1E1_DESC); break; default: return ENXIO; } return 0; } static int fbsd_detach(device_t dev) { softc_t *sc = device_get_softc(dev); /* Stop the card and detach from the kernel. */ detach_card(sc); /* Release resources. 
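fbsd_probe() above matches in two stages: the PCI vendor/device dword must identify the right chip before the subsystem (CSID) dword is consulted for a card description, and an unknown CSID on a genuine chip still returns ENXIO so some other driver may claim it. The same two-level lookup in stand-alone form; every ID value below is invented for the sketch:

#include <stddef.h>
#include <stdint.h>

struct board { uint32_t csid; const char *desc; };

static const struct board boards[] = {
	{ 0x00010001, "hypothetical HSSI board" },
	{ 0x00020001, "hypothetical T3 board" },
};

static const char *
probe_desc(uint32_t cfid, uint32_t csid)
{
	size_t i;

	if (cfid != 0x00090001)   /* wrong chip: decline outright */
		return (NULL);
	for (i = 0; i < sizeof(boards) / sizeof(boards[0]); i++)
		if (boards[i].csid == csid)
			return (boards[i].desc);
	return (NULL);            /* right chip, unknown board: decline */
}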
*/ if (sc->irq_cookie != NULL) { bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie); sc->irq_cookie = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_res_id, sc->irq_res); sc->irq_res = NULL; } if (sc->csr_res != NULL) { bus_release_resource(dev, sc->csr_res_type, sc->csr_res_id, sc->csr_res); sc->csr_res = NULL; } mtx_destroy(&sc->top_mtx); mtx_destroy(&sc->bottom_mtx); return 0; /* no error */ } static int fbsd_shutdown(device_t dev) { shutdown_card(device_get_softc(dev)); return 0; } static int fbsd_attach(device_t dev) { softc_t *sc = device_get_softc(dev); int error; /* READ/WRITE_PCI_CFG need this. */ sc->dev = dev; /* What kind of card are we driving? */ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->card = &hssi_card; break; case TLP_CSID_T3: sc->card = &t3_card; break; case TLP_CSID_SSI: sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->card = &t1_card; break; default: return ENXIO; } sc->dev_desc = device_get_desc(dev); /* Allocate PCI memory or IO resources to access the Tulip chip CSRs. */ # if IOREF_CSR sc->csr_res_id = TLP_CBIO; sc->csr_res_type = SYS_RES_IOPORT; # else sc->csr_res_id = TLP_CBMA; sc->csr_res_type = SYS_RES_MEMORY; # endif - sc->csr_res = bus_alloc_resource(dev, sc->csr_res_type, &sc->csr_res_id, - 0, ~0, 1, RF_ACTIVE); + sc->csr_res = bus_alloc_resource_any(dev, sc->csr_res_type, &sc->csr_res_id, + RF_ACTIVE); if (sc->csr_res == NULL) { printf("%s: bus_alloc_resource(csr) failed.\n", NAME_UNIT); return ENXIO; } sc->csr_tag = rman_get_bustag(sc->csr_res); sc->csr_handle = rman_get_bushandle(sc->csr_res); /* Allocate PCI interrupt resources for the card. */ sc->irq_res_id = 0; - sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_res_id, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_res_id, + RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { printf("%s: bus_alloc_resource(irq) failed.\n", NAME_UNIT); fbsd_detach(dev); return ENXIO; } if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, bsd_interrupt, sc, &sc->irq_cookie))) { printf("%s: bus_setup_intr() failed; error %d\n", NAME_UNIT, error); fbsd_detach(dev); return error; } /* Initialize the top-half and bottom-half locks. */ mtx_init(&sc->top_mtx, NAME_UNIT, "top half lock", MTX_DEF); mtx_init(&sc->bottom_mtx, NAME_UNIT, "bottom half lock", MTX_DEF); /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, ""))) detach_card(sc); return error; } static device_method_t methods[] = { DEVMETHOD(device_probe, fbsd_probe), DEVMETHOD(device_attach, fbsd_attach), DEVMETHOD(device_detach, fbsd_detach), DEVMETHOD(device_shutdown, fbsd_shutdown), /* This driver does not suspend and resume. */ { 0, 0 } }; static driver_t driver = { .name = DEVICE_NAME, .methods = methods, .size = sizeof(softc_t), }; static devclass_t devclass; DRIVER_MODULE(lmc, pci, driver, devclass, 0, 0); MODULE_VERSION(lmc, 2); MODULE_DEPEND(lmc, pci, 1, 1, 1); # if NETGRAPH MODULE_DEPEND(lmc, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); # endif # if NSPPP MODULE_DEPEND(lmc, sppp, 1, 1, 1); # endif /* This is the I/O configuration interface for NetBSD. */ /* This is the I/O configuration interface for OpenBSD. */ /* This is the I/O configuration interface for BSD/OS. 
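The two hunks above are the substance of this revision for lmc: a (start, end, count) triple of (0, ~0, 1) means "any suitable range", which is precisely what the bus_alloc_resource_any() convenience wrapper supplies. In <sys/bus.h> the wrapper is essentially:

static __inline struct resource *
bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags)
{
	return (bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags));
}

So the conversion is mechanical and behavior-preserving; it only removes the magic numbers, as in the IRQ hunk:

/* before */
sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_res_id,
    0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
/* after */
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_res_id,
    RF_ACTIVE | RF_SHAREABLE);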
*/ Index: head/sys/dev/mrsas/mrsas.c =================================================================== --- head/sys/dev/mrsas/mrsas.c (revision 295789) +++ head/sys/dev/mrsas/mrsas.c (revision 295790) @@ -1,4151 +1,4151 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
* * Send feedback to: Mail to: AVAGO TECHNOLOGIES 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */
#include <sys/cdefs.h> __FBSDID("$FreeBSD$");
#include <dev/mrsas/mrsas.h> #include <dev/mrsas/mrsas_ioctl.h> #include <cam/cam.h> #include <cam/cam_ccb.h> #include <sys/sysctl.h> #include <sys/types.h> #include <sys/kthread.h> #include <sys/taskqueue.h> #include <sys/smp.h>
/* * Function prototypes */ static d_open_t mrsas_open; static d_close_t mrsas_close; static d_read_t mrsas_read; static d_write_t mrsas_write; static d_ioctl_t mrsas_ioctl; static d_poll_t mrsas_poll; static struct mrsas_mgmt_info mrsas_mgmt_info; static struct mrsas_ident *mrsas_find_ident(device_t); static int mrsas_setup_msix(struct mrsas_softc *sc); static int mrsas_allocate_msix(struct mrsas_softc *sc); static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); static void mrsas_flush_cache(struct mrsas_softc *sc); static void mrsas_reset_reply_desc(struct mrsas_softc *sc); static void mrsas_ocr_thread(void *arg); static int mrsas_get_map_info(struct mrsas_softc *sc); static int mrsas_get_ld_map_info(struct mrsas_softc *sc); static int mrsas_sync_map_info(struct mrsas_softc *sc); static int mrsas_get_pd_list(struct mrsas_softc *sc); static int mrsas_get_ld_list(struct mrsas_softc *sc); static int mrsas_setup_irq(struct mrsas_softc *sc); static int mrsas_alloc_mem(struct mrsas_softc *sc); static int mrsas_init_fw(struct mrsas_softc *sc); static int mrsas_setup_raidmap(struct mrsas_softc *sc); static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); static int mrsas_clear_intr(struct mrsas_softc *sc); static int mrsas_get_ctrl_info(struct mrsas_softc *sc); static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort); static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg); u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd); void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); int mrsas_init_adapter(struct mrsas_softc *sc); int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); int mrsas_ioc_init(struct mrsas_softc *sc); int mrsas_bus_scan(struct mrsas_softc *sc); int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_reset_ctrl(struct mrsas_softc *sc); int mrsas_wait_for_outstanding(struct mrsas_softc *sc); int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size); void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_disable_intr(struct mrsas_softc *sc); void mrsas_enable_intr(struct mrsas_softc *sc); void mrsas_free_ioc_cmd(struct mrsas_softc *sc); void mrsas_free_mem(struct mrsas_softc *sc); void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); void mrsas_isr(void *arg); void mrsas_teardown_intr(struct mrsas_softc *sc); void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); void mrsas_kill_hba(struct mrsas_softc *sc); void
mrsas_aen_handler(struct mrsas_softc *sc); void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value); void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi); void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status); void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus); struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_cam_attach(struct mrsas_softc *sc); extern void mrsas_cam_detach(struct mrsas_softc *sc); extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern void mrsas_xpt_freeze(struct mrsas_softc *sc); extern void mrsas_xpt_release(struct mrsas_softc *sc); extern MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); /* * PCI device struct and table * */ typedef struct mrsas_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; const char *desc; } MRSAS_CTLR_ID; MRSAS_CTLR_ID device_table[] = { {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, {0, 0, 0, 0, NULL} }; /* * Character device entry points * */ static struct cdevsw mrsas_cdevsw = { .d_version = D_VERSION, .d_open = mrsas_open, .d_close = mrsas_close, .d_read = mrsas_read, .d_write = mrsas_write, .d_ioctl = mrsas_ioctl, .d_poll = mrsas_poll, .d_name = "mrsas", }; MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); /* * In the cdevsw routines, we find our softc by using the si_drv1 member of * struct cdev. We set this variable to point to our softc in our attach * routine when we create the /dev entry. 
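The si_drv1 linkage described in the comment above is the standard cdev-to-softc bridge: attach stores the softc pointer when it creates the /dev node, and every cdevsw entry point reads it back, exactly as mrsas_open() and friends do below. A sketch under assumed names (my_softc and my_cdevsw are not mrsas symbols):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/bus.h>

struct my_softc { struct cdev *cdev; };
extern struct cdevsw my_cdevsw;

static int
my_create_devnode(device_t dev, struct my_softc *sc)
{
	sc->cdev = make_dev(&my_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, 0640, "mydev%u", device_get_unit(dev));
	if (sc->cdev == NULL)
		return (ENXIO);
	sc->cdev->si_drv1 = sc;  /* handlers recover the softc from here */
	return (0);
}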
*/ int mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } /* * Register Read/Write Functions * */ void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; bus_space_write_4(bus_tag, bus_handle, offset, value); } u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); } /* * Interrupt Disable/Enable/Clear Functions * */ void mrsas_disable_intr(struct mrsas_softc *sc) { u_int32_t mask = 0xFFFFFFFF; u_int32_t status; sc->mask_interrupts = 1; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); /* Dummy read to force pci flush */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } void mrsas_enable_intr(struct mrsas_softc *sc) { u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; u_int32_t status; sc->mask_interrupts = 0; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } static int mrsas_clear_intr(struct mrsas_softc *sc) { u_int32_t status, fw_status, fw_state; /* Read received interrupt */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); /* * If FW state change interrupt is received, write to it again to * clear */ if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) { fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = fw_status & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { device_printf(sc->mrsas_dev, "FW is in FAULT state!\n"); if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); } mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status); mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); return (1); } /* Not our interrupt, so just return */ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return (0); /* We got a reply interrupt */ return (1); } /* * PCI Support Functions * */ static struct mrsas_ident * mrsas_find_ident(device_t dev) { struct mrsas_ident *pci_device; for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { if ((pci_device->vendor == pci_get_vendor(dev)) && (pci_device->device == pci_get_device(dev)) && ((pci_device->subvendor == pci_get_subvendor(dev)) || (pci_device->subvendor == 0xffff)) && ((pci_device->subdevice == pci_get_subdevice(dev)) || (pci_device->subdevice == 0xffff))) return (pci_device); } return (NULL); } static int mrsas_probe(device_t dev) { static u_int8_t first_ctrl = 1; struct mrsas_ident *id; if ((id = mrsas_find_ident(dev)) != NULL) { if (first_ctrl) { printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION); first_ctrl = 0; } device_set_desc(dev, id->desc); /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ return 
(-30); } return (ENXIO); }
/* * mrsas_setup_sysctl: setup sysctl values for mrsas * input: Adapter instance soft state * * Set up sysctl entries for the mrsas driver. */ static void mrsas_setup_sysctl(struct mrsas_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. */ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", device_get_unit(sc->mrsas_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, "Disable the use of OCR"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, strlen(MRSAS_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_count", CTLFLAG_RD, &sc->reset_count, 0, "number of ocr from start of the day"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "fw_outstanding", CTLFLAG_RD, &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, "Driver debug level"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 0, "Driver IO timeout value in milliseconds."); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, &sc->mrsas_fw_fault_check_delay, 0, "FW fault check thread delay in seconds."); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_in_progress", CTLFLAG_RD, &sc->reset_in_progress, 0, "ocr in progress status"); }
/* * mrsas_get_tunables: get tunable parameters. * input: Adapter instance soft state * * Get tunable parameters. These help when debugging the driver at boot time. */ static void mrsas_get_tunables(struct mrsas_softc *sc) { char tmpstr[80]; /* XXX default to some debugging for now */ sc->mrsas_debug = MRSAS_FAULT; sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; sc->mrsas_fw_fault_check_delay = 1; sc->reset_count = 0; sc->reset_in_progress = 0; /* * Grab the global variables. */ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); /* * Grab the global variables. */ TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", device_get_unit(sc->mrsas_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); }
/* * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information. * Used to get sequence number at driver load time. * input: Adapter soft state * * Allocates DMAable memory for the event log info internal command. 
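The tunable/sysctl pairing above is the usual FreeBSD layering: a default set in code, overridden at boot first by a global loader tunable and then by a per-unit one, and finally exported live under the device's sysctl tree. Reduced to a single knob, with assumed my_* names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

struct my_softc { int debug_level; };

static void
my_setup_knobs(device_t dev, struct my_softc *sc)
{
	char path[80];

	sc->debug_level = 0;                              /* default */
	TUNABLE_INT_FETCH("hw.mydev.debug_level", &sc->debug_level);
	snprintf(path, sizeof(path), "dev.mydev.%d.debug_level",
	    device_get_unit(dev));
	TUNABLE_INT_FETCH(path, &sc->debug_level);        /* per-unit wins */

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "debug_level", CTLFLAG_RW, &sc->debug_level, 0,
	    "driver debug level");
}

mrsas_setup_sysctl() additionally falls back to creating its own context under hw.mrsas when the newbus-provided tree is unavailable, which is what its sysctl_tree == NULL branch handles.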
*/ int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) { int el_info_size; /* Allocate get event log info command */ el_info_size = sizeof(struct mrsas_evt_log_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, el_info_size, 1, el_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->el_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, sc->el_info_mem, el_info_size, mrsas_addr_cb, &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); return (ENOMEM); } memset(sc->el_info_mem, 0, el_info_size); return (0); } /* * mrsas_free_evt_info_cmd: Free memory for Event log info command * input: Adapter soft state * * Deallocates memory for the event log info internal command. */ void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) { if (sc->el_info_phys_addr) bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); if (sc->el_info_mem != NULL) bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); if (sc->el_info_tag != NULL) bus_dma_tag_destroy(sc->el_info_tag); } /* * mrsas_get_seq_num: Get latest event sequence number * @sc: Adapter soft state * @eli: Firmware event log sequence number information. * * Firmware maintains a log of all events in a non-volatile area. * Driver get the sequence number using DCMD * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. */ static int mrsas_get_seq_num(struct mrsas_softc *sc, struct mrsas_evt_log_info *eli) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); mrsas_issue_blocked_cmd(sc, cmd); /* * Copy the data back into callers buffer */ memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); mrsas_free_evt_log_info_cmd(sc); mrsas_release_mfi_cmd(cmd); return 0; } /* * mrsas_register_aen: Register for asynchronous event notification * @sc: Adapter soft state * @seq_num: Starting sequence number * @class_locale: Class of the event * * This function subscribes for events beyond the @seq_num * and type @class_locale. * */ static int mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, u_int32_t class_locale_word) { int ret_val; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; union mrsas_evt_class_locale curr_aen; union mrsas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new AEN * request we currently have. If it is, then we don't have to do * anything. 
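mrsas_alloc_evt_log_info_cmd() above is the canonical three-step busdma recipe: bus_dma_tag_create() describes the memory (alignment 1, a single segment, 32-bit addressable), bus_dmamem_alloc() obtains it, and bus_dmamap_load() reports the bus address through a callback, here mrsas_addr_cb(). The same recipe as a generic helper; unwinding of partially completed steps is omitted for brevity (the driver's mrsas_free_evt_log_info_cmd() shows the matching unload/free/destroy order):

#include <sys/param.h>
#include <sys/errno.h>
#include <machine/bus.h>

static void
dma_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error == 0 && nsegs == 1)
		*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
alloc_dma_buffer(bus_dma_tag_t parent, bus_size_t size, bus_dma_tag_t *tag,
    void **vaddr, bus_dmamap_t *map, bus_addr_t *paddr)
{
	if (bus_dma_tag_create(parent, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size,
	    BUS_DMA_ALLOCNOW, NULL, NULL, tag))
		return (ENOMEM);       /* describe */
	if (bus_dmamem_alloc(*tag, vaddr, BUS_DMA_NOWAIT, map))
		return (ENOMEM);       /* allocate */
	if (bus_dmamap_load(*tag, *map, *vaddr, size, dma_addr_cb,
	    paddr, BUS_DMA_NOWAIT))
		return (ENOMEM);       /* translate: callback fills *paddr */
	return (0);
}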
In other words, whichever events the current AEN request * is subscribing to, have already been subscribed to. If the old_cmd * is _not_ inclusive, then we have to abort that command, form a * class_locale that is superset of both old and current and re-issue * to the FW */ curr_aen.word = class_locale_word; if (sc->aen_cmd) { prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. Locale numbers don't have such hierarchy. They * are bitmap values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. */ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; sc->aen_cmd->abort_aen = 1; ret_val = mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (ret_val) { printf("mrsas: Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = mrsas_get_mfi_cmd(sc); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->mbox.w[0] = seq_num; sc->last_seq_num = seq_num; dcmd->mbox.w[1] = curr_aen.word; dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); if (sc->aen_cmd != NULL) { mrsas_release_mfi_cmd(cmd); return 0; } /* * Store reference to the cmd used to register for AEN. 
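The class/locale algebra in the comment above, rendered concretely: classes form a hierarchy in which a numerically smaller value is more inclusive (PROGRESS = -1 covers everything), while locales are an OR-able bitmap. "Already covered" therefore means the old class is at least as inclusive and the old locale mask contains the new one; the driver writes that containment test as !((prev & curr) ^ curr), which is equivalent to (prev & curr) == curr. The superset re-registration takes the minimum class and the union of locales. Stand-alone, with an illustrative field layout rather than the firmware's (the field the driver actually names "class" is spelled cls here):

#include <stdint.h>

struct class_locale {
	int16_t  cls;     /* smaller = more inclusive; -1 = PROGRESS */
	uint16_t locale;  /* bitmap of event sources */
};

/* True when the existing registration already covers the new one. */
static int
aen_covered(struct class_locale prev, struct class_locale curr)
{
	return (prev.cls <= curr.cls &&
	    (prev.locale & curr.locale) == curr.locale);
}

/* Otherwise, register the superset of both requests. */
static struct class_locale
aen_merge(struct class_locale prev, struct class_locale curr)
{
	struct class_locale merged;

	merged.locale = prev.locale | curr.locale;
	merged.cls = (prev.cls < curr.cls) ? prev.cls : curr.cls;
	return (merged);
}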
When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ sc->aen_cmd = cmd; /* * Issue the aen registration frame */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); return (1); } return 0; } /* * mrsas_start_aen: Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int mrsas_start_aen(struct mrsas_softc *sc) { struct mrsas_evt_log_info eli; union mrsas_evt_class_locale class_locale; /* Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (mrsas_get_seq_num(sc, &eli)) return -1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return mrsas_register_aen(sc, eli.newest_seq_num + 1, class_locale.word); } /* * mrsas_setup_msix: Allocate MSI-x vectors * @sc: adapter soft state */ static int mrsas_setup_msix(struct mrsas_softc *sc) { int i; for (i = 0; i < sc->msix_vectors; i++) { sc->irq_context[i].sc = sc; sc->irq_context[i].MSIxIndex = i; sc->irq_id[i] = i + 1; sc->mrsas_irq[i] = bus_alloc_resource_any (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] ,RF_ACTIVE); if (sc->mrsas_irq[i] == NULL) { device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); goto irq_alloc_failed; } if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[i], INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, &sc->irq_context[i], &sc->intr_handle[i])) { device_printf(sc->mrsas_dev, "Cannot set up MSI-x interrupt handler\n"); goto irq_alloc_failed; } } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_allocate_msix: Setup MSI-x vectors * @sc: adapter soft state */ static int mrsas_allocate_msix(struct mrsas_softc *sc) { if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { device_printf(sc->mrsas_dev, "Using MSI-X with %d number" " of vectors\n", sc->msix_vectors); } else { device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); goto irq_alloc_failed; } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_attach: PCI entry point * input: pointer to device struct * * Performs setup of PCI and registers, initializes mutexes and linked lists, * registers interrupts and CAM, and initializes the adapter/controller to * its proper state. */ static int mrsas_attach(device_t dev) { struct mrsas_softc *sc = device_get_softc(dev); uint32_t cmd, bar, error; struct cdev *linux_dev; /* Look up our softc and initialize its fields. */ sc->mrsas_dev = dev; sc->device_id = pci_get_device(dev); mrsas_get_tunables(sc); /* * Set up PCI and registers */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_PORTEN) == 0) { return (ENXIO); } /* Force the busmaster enable bit on. 
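mrsas_allocate_msix() and mrsas_setup_msix() above split MSI-X bring-up into negotiating a vector count with the PCI layer, then allocating and wiring one SYS_RES_IRQ per vector; MSI/MSI-X resource IDs start at 1, hence irq_id[i] = i + 1. Both halves compressed into one sketch (my_* names assumed; real code, like the driver, tears everything down on any failure):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>

#define MY_MAX_VECS 16

struct my_softc {
	int              nvecs;
	int              irq_rid[MY_MAX_VECS];
	struct resource *irq[MY_MAX_VECS];
	void            *ih[MY_MAX_VECS];
};

static void my_isr(void *arg);  /* per-vector handler, defined elsewhere */

static int
my_setup_msix(device_t dev, struct my_softc *sc)
{
	int i, want = pci_msix_count(dev);

	if (want > MY_MAX_VECS)
		want = MY_MAX_VECS;
	if (want == 0 || pci_alloc_msix(dev, &want) != 0)
		return (ENXIO);                 /* caller falls back to INTx */
	sc->nvecs = want;
	for (i = 0; i < sc->nvecs; i++) {
		sc->irq_rid[i] = i + 1;         /* MSI-X rids begin at 1 */
		sc->irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->irq_rid[i], RF_ACTIVE);
		if (sc->irq[i] == NULL)
			return (ENXIO);
		if (bus_setup_intr(dev, sc->irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, my_isr, sc,
		    &sc->ih[i]))
			return (ENXIO);
	}
	return (0);
}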
*/ cmd |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
- if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate PCI registers\n"); goto attach_fail; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res);
/* Initialize mutexes */ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
/* * Initialize a counting semaphore to bound the number of concurrent * IOCTLs */ sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
/* Initialize linked lists */ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); mrsas_atomic_set(&sc->fw_outstanding, 0); sc->io_cmds_highwater = 0;
/* Create a /dev entry for this device. */ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT, GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", device_get_unit(dev)); if (device_get_unit(dev) == 0) make_dev_alias_p(MAKEDEV_CHECKNAME, &linux_dev, sc->mrsas_cdev, "megaraid_sas_ioctl_node"); if (sc->mrsas_cdev) sc->mrsas_cdev->si_drv1 = sc; sc->adprecovery = MRSAS_HBA_OPERATIONAL; sc->UnevenSpanSupport = 0; sc->msix_enable = 0;
/* Initialize Firmware */ if (mrsas_init_fw(sc) != SUCCESS) { goto attach_fail_fw; } /* Register SCSI mid-layer */ if ((mrsas_cam_attach(sc) != SUCCESS)) { goto attach_fail_cam; } /* Register IRQs */ if (mrsas_setup_irq(sc) != SUCCESS) { goto attach_fail_irq; } /* Enable Interrupts */ mrsas_enable_intr(sc); error = mrsas_kproc_create(mrsas_ocr_thread, sc, &sc->ocr_thread, 0, 0, "mrsas_ocr%d", device_get_unit(sc->mrsas_dev)); if (error) { printf("Error %d starting rescan thread\n", error); goto attach_fail_irq; } mrsas_setup_sysctl(sc); /* Initiate AEN (Asynchronous Event Notification) */ if (mrsas_start_aen(sc)) { printf("Error: start aen failed\n"); goto fail_start_aen; }
/* * Add this controller to mrsas_mgmt_info structure so that it can be * exported to management applications */ if (device_get_unit(dev) == 0) memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); mrsas_mgmt_info.count++; mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; mrsas_mgmt_info.max_index++; return (0);
fail_start_aen: attach_fail_irq: mrsas_teardown_intr(sc); attach_fail_cam: mrsas_cam_detach(sc); attach_fail_fw: /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ if (sc->msix_enable == 1) pci_release_msi(sc->mrsas_dev); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); /* Destroy the counting semaphore created for Ioctl */ sema_destroy(&sc->ioctl_count_sema); attach_fail: destroy_dev(sc->mrsas_cdev); if
(sc->reg_res) { bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } return (ENXIO); } /* * mrsas_detach: De-allocates and teardown resources * input: pointer to device struct * * This function is the entry point for device disconnect and detach. * It performs memory de-allocations, shutdown of the controller and various * teardown and destroy resource functions. */ static int mrsas_detach(device_t dev) { struct mrsas_softc *sc; int i = 0; sc = device_get_softc(dev); sc->remove_in_progress = 1; /* Destroy the character device so no other IOCTL will be handled */ destroy_dev(sc->mrsas_cdev); /* * Take the instance off the instance array. Note that we will not * decrement the max_index. We let this array be sparse array */ for (i = 0; i < mrsas_mgmt_info.max_index; i++) { if (mrsas_mgmt_info.sc_ptr[i] == sc) { mrsas_mgmt_info.count--; mrsas_mgmt_info.sc_ptr[i] = NULL; break; } } if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for ocr to be finished\n", i); } pause("mr_shutdown", hz); } i = 0; while (sc->ocr_thread_active) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for " "mrsas_ocr thread to quit ocr %d\n", i, sc->ocr_thread_active); } pause("mr_shutdown", hz); } mrsas_flush_cache(sc); mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); mrsas_disable_intr(sc); mrsas_cam_detach(sc); mrsas_teardown_intr(sc); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); /* Wait for all the semaphores to be released */ while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5)) pause("mr_shutdown", hz); /* Destroy the counting semaphore created for Ioctl */ sema_destroy(&sc->ioctl_count_sema); if (sc->reg_res) { bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); return (0); } /* * mrsas_free_mem: Frees allocated memory * input: Adapter instance soft state * * This function is called from mrsas_detach() to free previously allocated * memory. 
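A detail of the teardown above worth making explicit: the counting semaphore doubles as an in-flight ioctl counter. Attach initializes it to MRSAS_MAX_MFI_CMDS - 5 (five MFI commands stay reserved for the driver's own use), each management command brackets itself with sema_wait()/sema_post(), and detach waits until sema_value() climbs back to the initial count, meaning no ioctl still holds a unit. In sketch form, with a placeholder command issuer:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sema.h>

#define MY_MAX_CMDS  256  /* illustrative pool size */
#define MY_RESERVED  5

struct my_softc { struct sema ioctl_sema; };

static int issue_mgmt_command(struct my_softc *sc);  /* placeholder */

static void
my_init_ioctl_sema(struct my_softc *sc)
{
	sema_init(&sc->ioctl_sema, MY_MAX_CMDS - MY_RESERVED, "my ioctl");
}

static int
my_mgmt_ioctl(struct my_softc *sc)
{
	int error;

	sema_wait(&sc->ioctl_sema);    /* blocks when the pool is exhausted */
	error = issue_mgmt_command(sc);
	sema_post(&sc->ioctl_sema);
	return (error);
}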
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);
		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}

	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}

/*
 * mrsas_teardown_intr: Teardown interrupt
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to tear down and release the
 * bus interrupt resources.
*/ void mrsas_teardown_intr(struct mrsas_softc *sc) { int i; if (!sc->msix_enable) { if (sc->intr_handle[0]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); if (sc->mrsas_irq[0] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[0], sc->mrsas_irq[0]); sc->intr_handle[0] = NULL; } else { for (i = 0; i < sc->msix_vectors; i++) { if (sc->intr_handle[i]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], sc->intr_handle[i]); if (sc->mrsas_irq[i] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[i], sc->mrsas_irq[i]); sc->intr_handle[i] = NULL; } pci_release_msi(sc->mrsas_dev); } } /* * mrsas_suspend: Suspend entry point * input: Device struct pointer * * This function is the entry point for system suspend from the OS. */ static int mrsas_suspend(device_t dev) { struct mrsas_softc *sc; sc = device_get_softc(dev); return (0); } /* * mrsas_resume: Resume entry point * input: Device struct pointer * * This function is the entry point for system resume from the OS. */ static int mrsas_resume(device_t dev) { struct mrsas_softc *sc; sc = device_get_softc(dev); return (0); } /** * mrsas_get_softc_instance: Find softc instance based on cmd type * * This function will return softc instance based on cmd type. * In some case, application fire ioctl on required management instance and * do not provide host_no. Use cdev->si_drv1 to get softc instance for those * case, else get the softc instance from host_no provided by application in * user data. */ static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) { struct mrsas_softc *sc = NULL; struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; if (cmd == MRSAS_IOC_GET_PCI_INFO) { sc = dev->si_drv1; } else { /* * get the Host number & the softc from data sent by the * Application */ sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; if ((user_ioc->host_no >= mrsas_mgmt_info.max_index) || (sc == NULL)) { if (sc == NULL) mrsas_dprint(sc, MRSAS_FAULT, "There is no Controller number %d .\n", user_ioc->host_no); else mrsas_dprint(sc, MRSAS_FAULT, "Invalid Controller number %d .\n", user_ioc->host_no); } } return sc; } /* * mrsas_ioctl: IOCtl commands entry point. * * This function is the entry point for IOCtls from the OS. It calls the * appropriate function for processing depending on the command received. 
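 *
 * Firmware pass-through requests are additionally throttled by the counting
 * semaphore initialized at attach time with MRSAS_MAX_MFI_CMDS - 5 units, so
 * at most that many pass-through IOCTLs can be in flight at once. The
 * pattern, in brief:
 *
 *	sema_wait(&sc->ioctl_count_sema);
 *	ret = mrsas_passthru(sc, (void *)arg, cmd);
 *	sema_post(&sc->ioctl_count_sema);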
*/ static int mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mrsas_softc *sc; int ret = 0, i = 0; MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; sc = mrsas_get_softc_instance(dev, cmd, arg); if (!sc) return ENOENT; if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_INFO, "Driver remove or shutdown called.\n"); return ENOENT; } mtx_lock_spin(&sc->ioctl_lock); if (!sc->reset_in_progress) { mtx_unlock_spin(&sc->ioctl_lock); goto do_ioctl; } mtx_unlock_spin(&sc->ioctl_lock); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for " "OCR to be finished %d\n", i, sc->ocr_thread_active); } pause("mr_ioctl", hz); } do_ioctl: switch (cmd) { case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: #ifdef COMPAT_FREEBSD32 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: #endif /* * Decrement the Ioctl counting Semaphore before getting an * mfi command */ sema_wait(&sc->ioctl_count_sema); ret = mrsas_passthru(sc, (void *)arg, cmd); /* Increment the Ioctl counting semaphore value */ sema_post(&sc->ioctl_count_sema); break; case MRSAS_IOC_SCAN_BUS: ret = mrsas_bus_scan(sc); break; case MRSAS_IOC_GET_PCI_INFO: pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," "pci device no: %d, pci function no: %d," "pci domain ID: %d\n", pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, pciDrvInfo->functionNumber, pciDrvInfo->domainID); ret = 0; break; default: mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); ret = ENOENT; } return (ret); } /* * mrsas_poll: poll entry point for mrsas driver fd * * This function is the entry point for poll from the OS. It waits for some AEN * events to be triggered from the controller and notifies back. */ static int mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) { struct mrsas_softc *sc; int revents = 0; sc = dev->si_drv1; if (poll_events & (POLLIN | POLLRDNORM)) { if (sc->mrsas_aen_triggered) { revents |= poll_events & (POLLIN | POLLRDNORM); } } if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) { mtx_lock(&sc->aen_lock); sc->mrsas_poll_waiting = 1; selrecord(td, &sc->mrsas_select); mtx_unlock(&sc->aen_lock); } } return revents; } /* * mrsas_setup_irq: Set up interrupt * input: Adapter instance soft state * * This function sets up interrupts as a bus resource, with flags indicating * resource permitting contemporaneous sharing and for resource to activate * atomically. 
 */
static int
mrsas_setup_irq(struct mrsas_softc *sc)
{
	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
	else {
		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
		sc->irq_context[0].sc = sc;
		sc->irq_context[0].MSIxIndex = 0;
		sc->irq_id[0] = 0;
		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mrsas_irq[0] == NULL) {
			device_printf(sc->mrsas_dev, "Cannot allocate legacy "
			    "interrupt\n");
			return (FAIL);
		}
		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
		    &sc->irq_context[0], &sc->intr_handle[0])) {
			device_printf(sc->mrsas_dev, "Cannot set up legacy "
			    "interrupt\n");
			return (FAIL);
		}
	}
	return (0);
}

/*
 * mrsas_isr: ISR entry point
 * input: argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	if (sc->mask_interrupts)
		return;

	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}

/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;

	/* If we have a hardware error, no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:
			/*
			 * Fast Path IO.
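			 *
			 * For a fast-path command that was load balanced
			 * across a RAID-1 pair, the submit path picked an arm
			 * and bumped lbinfo->scsi_pending_cmds[]; drop that
			 * count and clear the flag here before completing the
			 * CCB.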
			 */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc,
					    sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0],
					    (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}

/*
 * mrsas_map_mpt_cmd_status: Map FW command status to CAM status
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
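 *
 * In brief, the mapping applied below is:
 *
 *	MFI_STAT_OK			-> CAM_REQ_CMP
 *	MFI_STAT_SCSI_IO_FAILED /
 *	MFI_STAT_SCSI_DONE_WITH_ERROR	-> CAM_SCSI_STATUS_ERROR,
 *					   with sense data copied back
 *	MFI_STAT_LD_OFFLINE /
 *	MFI_STAT_DEVICE_NOT_FOUND	-> CAM_LUN_INVALID or CAM_DEV_NOT_THERE
 *	MFI_STAT_CONFIG_SEQ_MISMATCH	-> CAM_REQUEUE_REQ
 *	any other status		-> CAM_REQ_CMP_ERR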
*/ void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus) { struct mrsas_softc *sc = cmd->sc; u_int8_t *sense_data; switch (status) { case MFI_STAT_OK: cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_SCSI_DONE_WITH_ERROR: cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data; if (sense_data) { /* For now just copy 18 bytes back */ memcpy(sense_data, cmd->sense, 18); cmd->ccb_ptr->csio.sense_len = 18; cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: if (cmd->ccb_ptr->ccb_h.target_lun) cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; else cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; break; case MFI_STAT_CONFIG_SEQ_MISMATCH: cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; break; default: device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; cmd->ccb_ptr->csio.scsi_status = status; } return; } /* * mrsas_alloc_mem: Allocate DMAable memory * input: Adapter instance soft state * * This function creates the parent DMA tag and allocates DMAable memory. DMA * tag describes constraints of DMA mapping. Memory allocated is mapped into * Kernel virtual address. Callback argument is physical memory address. */ static int mrsas_alloc_mem(struct mrsas_softc *sc) { u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, evt_detail_size, count; /* * Allocate parent DMA tag */ if (bus_dma_tag_create(NULL, /* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MRSAS_MAX_IO_SIZE, /* maxsize */ MRSAS_MAX_SGL, /* nsegments */ MRSAS_MAX_IO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mrsas_parent_tag /* tag */ )) { device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); return (ENOMEM); } /* * Allocate for version buffer */ verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, verbuf_size, 1, verbuf_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->verbuf_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); return (ENOMEM); } bzero(sc->verbuf_mem, verbuf_size); if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); return (ENOMEM); } /* * Allocate IO Request Frames */ io_req_size = sc->io_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, io_req_size, 1, io_req_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->io_request_tag)) { device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); return (ENOMEM); } bzero(sc->io_request_mem, io_req_size); if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, sc->io_request_mem, io_req_size, mrsas_addr_cb, 
&sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); return (ENOMEM); } /* * Allocate Chain Frames */ chain_frame_size = sc->chain_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, chain_frame_size, 1, chain_frame_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->chain_frame_tag)) { device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); return (ENOMEM); } bzero(sc->chain_frame_mem, chain_frame_size); if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); return (ENOMEM); } count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; /* * Allocate Reply Descriptor Array */ reply_desc_size = sc->reply_alloc_sz * count; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, reply_desc_size, 1, reply_desc_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->reply_desc_tag)) { device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); return (ENOMEM); } /* * Allocate Sense Buffer Array. 
Keep in lower 4GB */ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; if (bus_dma_tag_create(sc->mrsas_parent_tag, 64, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sense_size, 1, sense_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sense_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, BUS_DMA_NOWAIT, &sc->sense_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); return (ENOMEM); } /* * Allocate for Event detail structure */ evt_detail_size = sizeof(struct mrsas_evt_detail); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, evt_detail_size, 1, evt_detail_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->evt_detail_tag)) { device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); return (ENOMEM); } bzero(sc->evt_detail_mem, evt_detail_size); if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); return (ENOMEM); } /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (280kB). */ if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MRSAS_MAX_IO_SIZE, MRSAS_MAX_SGL, MRSAS_MAX_IO_SIZE, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->io_lock, &sc->data_tag)) { device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); return (ENOMEM); } return (0); } /* * mrsas_addr_cb: Callback function of bus_dmamap_load() * input: callback argument, machine dependent type * that describes DMA segments, number of segments, error code * * This function is for the driver to receive mapping information resultant of * the bus_dmamap_load(). The information is actually not being used, but the * address is saved anyway. */ void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } /* * mrsas_setup_raidmap: Set up RAID map. * input: Adapter instance soft state * * Allocate DMA memory for the RAID maps and perform setup. 
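 *
 * Everything here comes in pairs (index 0 and 1): the maps are
 * double-buffered so that an updated map can be fetched from firmware while
 * the previously validated copy remains in use. Both the DMA-able
 * firmware-format map (raidmap_mem) and the malloc'd driver-format map
 * (ld_drv_map) follow this scheme.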
*/ static int mrsas_setup_raidmap(struct mrsas_softc *sc) { int i; for (i = 0; i < 2; i++) { sc->ld_drv_map[i] = (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); /* Do Error handling */ if (!sc->ld_drv_map[i]) { device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); if (i == 1) free(sc->ld_drv_map[0], M_MRSAS); /* ABORT driver initialization */ goto ABORT; } } for (int i = 0; i < 2; i++) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->max_map_sz, 1, sc->max_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->raidmap_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); return (ENOMEM); } bzero(sc->raidmap_mem[i], sc->max_map_sz); if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], sc->raidmap_mem[i], sc->max_map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); return (ENOMEM); } if (!sc->raidmap_mem[i]) { device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); return (ENOMEM); } } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); return (0); ABORT: return (1); } /* * mrsas_init_fw: Initialize Firmware * input: Adapter soft state * * Calls transition_to_ready() to make sure Firmware is in operational state and * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It * issues internal commands to get the controller info after the IOC_INIT * command response is received by Firmware. Note: code relating to * get_pdlist, get_ld_list and max_sectors are currently not being used, it * is left here as placeholder. 
*/ static int mrsas_init_fw(struct mrsas_softc *sc) { int ret, loop, ocr = 0; u_int32_t max_sectors_1; u_int32_t max_sectors_2; u_int32_t tmp_sectors; u_int32_t scratch_pad_2; int msix_enable = 0; int fw_msix_count = 0; /* Make sure Firmware is ready */ ret = mrsas_transition_to_ready(sc, ocr); if (ret != SUCCESS) { return (ret); } /* MSI-x index 0- reply post host index register */ sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; /* Check if MSI-X is supported while in ready state */ msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; if (msix_enable) { scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); /* Check max MSI-X vectors */ if (sc->device_id == MRSAS_TBOLT) { sc->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = sc->msix_vectors; } else { /* Invader/Fury supports 96 MSI-X vectors */ sc->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; fw_msix_count = sc->msix_vectors; for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { sc->msix_reg_offset[loop] = MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10); } } /* Don't bother allocating more MSI-X vectors than cpus */ sc->msix_vectors = min(sc->msix_vectors, mp_ncpus); /* Allocate MSI-x vectors */ if (mrsas_allocate_msix(sc) == SUCCESS) sc->msix_enable = 1; else sc->msix_enable = 0; device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," "Online CPU %d Current MSIX <%d>\n", fw_msix_count, mp_ncpus, sc->msix_vectors); } if (mrsas_init_adapter(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); return (1); } /* Allocate internal commands for pass-thru */ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); return (1); } sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); if (!sc->ctrl_info) { device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); return (1); } /* * Get the controller info from FW, so that the MAX VD support * availability can be decided. */ if (mrsas_get_ctrl_info(sc)) { device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); return (1); } sc->secure_jbod_support = (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; if (sc->secure_jbod_support) device_printf(sc->mrsas_dev, "FW supports SED \n"); if (mrsas_setup_raidmap(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Set up RAID map failed.\n"); return (1); } /* For pass-thru, get PD/LD list and controller info */ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); mrsas_get_pd_list(sc); memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); mrsas_get_ld_list(sc); /* * Compute the max allowed sectors per IO: The controller info has * two limits on max sectors. Driver should use the minimum of these * two. * * 1 << stripe_sz_ops.min = max sectors per strip * * Note that older firmwares ( < FW ver 30) didn't report information to * calculate max_sectors_1. So the number ended up as zero always. 
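 *
 * Worked example (hypothetical values): with stripe_sz_ops.min = 7 the strip
 * size is 1 << 7 = 128 sectors (64KB); with max_strips_per_io = 42 that gives
 * max_sectors_1 = 128 * 42 = 5376, which is then clamped against
 * max_request_size and the SGE-derived limit computed below.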
*/ tmp_sectors = 0; max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * sc->ctrl_info->max_strips_per_io; max_sectors_2 = sc->ctrl_info->max_request_size; tmp_sectors = min(max_sectors_1, max_sectors_2); sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) sc->max_sectors_per_req = tmp_sectors; sc->disableOnlineCtrlReset = sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; sc->UnevenSpanSupport = sc->ctrl_info->adapterOperations2.supportUnevenSpans; if (sc->UnevenSpanSupport) { device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", sc->UnevenSpanSupport); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 1; else sc->fast_path_io = 0; } return (0); } /* * mrsas_init_adapter: Initializes the adapter/controller * input: Adapter soft state * * Prepares for the issuing of the IOC Init cmd to FW for initializing the * ROC/controller. The FW register is read to determined the number of * commands that is supported. All memory allocations for IO is based on * max_cmd. Appropriate calculations are performed in this function. */ int mrsas_init_adapter(struct mrsas_softc *sc) { uint32_t status; u_int32_t max_cmd; int ret; int i = 0; /* Read FW status register */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); /* Get operational params from status register */ sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; /* Decrement the max supported by 1, to correlate with FW */ sc->max_fw_cmds = sc->max_fw_cmds - 1; max_cmd = sc->max_fw_cmds; /* Determine allocation size of command frames */ sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2; sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd; sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1)); sc->chain_frames_alloc_sz = 1024 * max_cmd; sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION); sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; /* Used for pass thru MFI frame (DCMD) */ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(MPI2_SGE_IO_UNION)) / 16; int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0; ret = mrsas_alloc_mem(sc); if (ret != SUCCESS) return (ret); ret = mrsas_alloc_mpt_cmds(sc); if (ret != SUCCESS) return (ret); ret = mrsas_ioc_init(sc); if (ret != SUCCESS) return (ret); return (0); } /* * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. 
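 *
 * The single allocation is laid out as two pieces: the legacy MFI init frame
 * at offset 0 and the MPI2 IOC INIT request at offset 1024, which is why the
 * tag below is sized 1024 + sizeof(MPI2_IOC_INIT_REQUEST) and
 * mrsas_ioc_init() points queue_info_new_phys_addr_lo at base + 1024.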
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ioc_init_size, 1, ioc_init_size, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory allocated for the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver supports Extended MSI-X */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1; } if (sc->verbuf_mem) { snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", MRSAS_VERSION); init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; init_frame->driver_ver_hi = 0; } init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; init_frame->queue_info_new_phys_addr_lo = phys_addr; init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; req_desc.MFAIo.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); mrsas_disable_intr(sc); mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high); /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (init_frame->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (init_frame->cmd_status == 0xFF) DELAY(1000); else break; } } if (init_frame->cmd_status == 0) mrsas_dprint(sc, MRSAS_OCR, "IOC INIT response received from FW.\n"); else { if (init_frame->cmd_status == 0xFF) device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); else device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); retcode = 1; } mrsas_free_ioc_cmd(sc); return (retcode); } /* * mrsas_alloc_mpt_cmds: Allocates the command packets * input: Adapter instance soft state * * This function allocates the internal commands for IOs. Each command that is * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An * array is allocated with mrsas_mpt_cmd context. The free commands are * maintained in a linked list (cmd pool). SMID value range is from 1 to * max_fw_cmds. */ int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) { int i, j; u_int32_t max_cmd, count; struct mrsas_mpt_cmd *cmd; pMpi2ReplyDescriptorsUnion_t reply_desc; u_int32_t offset, chain_offset, sense_offset; bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; u_int8_t *io_req_base, *chain_frame_base, *sense_base; max_cmd = sc->max_fw_cmds; sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); if (!sc->req_desc) { device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); return (ENOMEM); } memset(sc->req_desc, 0, sc->request_alloc_sz); /* * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. 
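 *
 * SMIDs are 1-based while the array is 0-based: command i is assigned
 * cmd->index = i + 1, and the completion path in mrsas_complete_cmd()
 * recovers the command with sc->mpt_cmd_list[smid - 1]. Each command also
 * gets fixed carve-outs of the shared DMA regions, in brief:
 *
 *	cmd->io_request_phys_addr = io_req_base_phys +
 *	    MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
 *	cmd->chain_frame_phys_addr = chain_frame_base_phys + 1024 * i;
 *	cmd->sense_phys_addr = sense_base_phys + MRSAS_SENSE_LEN * i;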
*/ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list) { device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); return (ENOMEM); } memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd); for (i = 0; i < max_cmd; i++) { sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list[i]) { for (j = 0; j < i; j++) free(sc->mpt_cmd_list[j], M_MRSAS); free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; return (ENOMEM); } } io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; chain_frame_base = (u_int8_t *)sc->chain_frame_mem; chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; sense_base = (u_int8_t *)sc->sense_mem; sense_base_phys = (bus_addr_t)sc->sense_phys_addr; for (i = 0; i < max_cmd; i++) { cmd = sc->mpt_cmd_list[i]; offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; chain_offset = 1024 * i; sense_offset = MRSAS_SENSE_LEN * i; memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); cmd->index = i + 1; cmd->ccb_ptr = NULL; callout_init(&cmd->cm_callout, 0); cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; cmd->sc = sc; cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; cmd->sense = sense_base + sense_offset; cmd->sense_phys_addr = sense_base_phys + sense_offset; if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { return (FAIL); } TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); } /* Initialize reply descriptor array to 0xFFFFFFFF */ reply_desc = sc->reply_desc_mem; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } return (0); } /* * mrsas_fire_cmd: Sends command to FW * input: Adapter softstate * request descriptor address low * request descriptor address high * * This functions fires the command to Firmware by writing to the * inbound_low_queue_port and inbound_high_queue_port. */ void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi) { mtx_lock(&sc->pci_lock); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), req_desc_lo); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), req_desc_hi); mtx_unlock(&sc->pci_lock); } /* * mrsas_transition_to_ready: Move FW to Ready state input: * Adapter instance soft state * * During the initialization, FW passes can potentially be in any one of several * possible states. If the FW in operational, waiting-for-handshake states, * driver must take steps to bring it to ready state. Otherwise, it has to * wait for the ready state. 
*/ int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) { int i; u_int8_t max_wait; u_int32_t val, fw_state; u_int32_t cur_state; u_int32_t abs_state, curr_abs_state; val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = val & MFI_STATE_MASK; max_wait = MRSAS_RESET_WAIT_TIME; if (fw_state != MFI_STATE_READY) device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); while (fw_state != MFI_STATE_READY) { abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); switch (fw_state) { case MFI_STATE_FAULT: device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); if (ocr) { cur_state = MFI_STATE_FAULT; break; } else return -ENODEV; case MFI_STATE_WAIT_HANDSHAKE: /* Set the CLR bit in inbound doorbell */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_HOTPLUG); cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break; case MFI_STATE_OPERATIONAL: /* * Bring it to READY state; assuming max wait 10 * secs */ mrsas_disable_intr(sc); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); for (i = 0; i < max_wait * 1000; i++) { if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1) DELAY(1000); else break; } cur_state = MFI_STATE_OPERATIONAL; break; case MFI_STATE_UNDEFINED: /* * This state should not last for more than 2 * seconds */ cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: cur_state = MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: cur_state = MFI_STATE_FW_INIT; break; case MFI_STATE_FW_INIT_2: cur_state = MFI_STATE_FW_INIT_2; break; case MFI_STATE_DEVICE_SCAN: cur_state = MFI_STATE_DEVICE_SCAN; break; case MFI_STATE_FLUSH_CACHE: cur_state = MFI_STATE_FLUSH_CACHE; break; default: device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); return -ENODEV; } /* * The cur_state should not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK); curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); if (abs_state == curr_abs_state) DELAY(1000); else break; } /* * Return error if fw_state hasn't changed after max_wait */ if (curr_abs_state == abs_state) { device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " "in %d secs\n", fw_state, max_wait); return -ENODEV; } } mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); return 0; } /* * mrsas_get_mfi_cmd: Get a cmd from free command pool * input: Adapter soft state * * This function removes an MFI command from the command list. */ struct mrsas_mfi_cmd * mrsas_get_mfi_cmd(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd = NULL; mtx_lock(&sc->mfi_cmd_pool_lock); if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); } mtx_unlock(&sc->mfi_cmd_pool_lock); return cmd; } /* * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter. * input: Adapter Context. * * This function will check FW status register and flag do_timeout_reset flag. * It will do OCR/Kill adapter if FW is in fault state or IO timed out has * trigger reset. 
 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}

/*
 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
 * input: Adapter Context.
 *
 * This function clears the reply descriptors so that, after OCR, the driver
 * and FW start from a clean slate, with no stale completion history.
 */
void
mrsas_reset_reply_desc(struct mrsas_softc *sc)
{
	int i, count;
	pMpi2ReplyDescriptorsUnion_t reply_desc;

	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	reply_desc = sc->reply_desc_mem;
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
}

/*
 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
 * input: Adapter Context.
 *
 * This function will run from thread context so that it can sleep.
 * 1. Do not handle OCR if FW is in HW critical error.
 * 2. Wait for outstanding commands to complete for 180 seconds.
 * 3. If #2 does not find any outstanding command, the controller is in a
 *    working state, so skip OCR. Otherwise, do OCR/kill adapter based on the
 *    flag disableOnlineCtrlReset.
 * 4. At the start of the OCR, return all SCSI commands that have a ccb_ptr
 *    back to the CAM layer.
 * 5. After the OCR, re-fire management commands and move the controller to
 *    the operational state.
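 *
 * The chip reset itself uses the MPI2 diagnostic handshake: the six WRSEQ
 * key values are written to fusion_seq_offset to turn on the diag write
 * enable (DRWE) bit, then HOST_DIAG_RESET_ADAPTER is set in fusion_host_diag
 * and polled until the hardware clears it.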
*/ int mrsas_reset_ctrl(struct mrsas_softc *sc) { int retval = SUCCESS, i, j, retry = 0; u_int32_t host_diag, abs_state, status_reg, reset_adapter; union ccb *ccb; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { device_printf(sc->mrsas_dev, "mrsas: Hardware critical error, returning FAIL.\n"); return FAIL; } mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; mrsas_disable_intr(sc); DELAY(1000 * 1000); /* First try waiting for commands to complete */ if (mrsas_wait_for_outstanding(sc)) { mrsas_dprint(sc, MRSAS_OCR, "resetting adapter from %s.\n", __func__); /* Now return commands back to the CAM layer */ mtx_unlock(&sc->sim_lock); for (i = 0; i < sc->max_fw_cmds; i++) { mpt_cmd = sc->mpt_cmd_list[i]; if (mpt_cmd->ccb_ptr) { ccb = (union ccb *)(mpt_cmd->ccb_ptr); ccb->ccb_h.status = CAM_SCSI_BUS_RESET; mrsas_cmd_done(sc, mpt_cmd); mrsas_atomic_dec(&sc->fw_outstanding); } } mtx_lock(&sc->sim_lock); status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (sc->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; goto out; } /* Now try to reset the chip */ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_FLUSH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_1ST_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_2ND_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_3RD_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_4TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_5TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_6TH_KEY_VALUE); /* Check that the diag write enable (DRWE) bit is on */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 100) { mrsas_dprint(sc, MRSAS_OCR, "Host diag unlock failed!\n"); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) continue; /* Send chip reset command */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), host_diag | HOST_DIAG_RESET_ADAPTER); DELAY(3000 * 1000); /* Make sure reset adapter bit is cleared */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 1000) { mrsas_dprint(sc, MRSAS_OCR, "Diag reset adapter never cleared!\n"); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) continue; abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { DELAY(100 * 1000); abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { mrsas_dprint(sc, 
MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," " state = 0x%x\n", abs_state); continue; } /* Wait for FW to become ready */ if (mrsas_transition_to_ready(sc, 1)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas: Failed to transition controller to ready.\n"); continue; } mrsas_reset_reply_desc(sc); if (mrsas_ioc_init(sc)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); continue; } /* Re-fire management commands */ for (j = 0; j < sc->max_fw_cmds; j++) { mpt_cmd = sc->mpt_cmd_list[j]; if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; if (mfi_cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) { mrsas_release_mfi_cmd(mfi_cmd); mrsas_release_mpt_cmd(mpt_cmd); } else { req_desc = mrsas_get_request_desc(sc, mfi_cmd->cmd_id.context.smid - 1); mrsas_dprint(sc, MRSAS_OCR, "Re-fire command DCMD opcode 0x%x index %d\n ", mfi_cmd->frame->dcmd.opcode, j); if (!req_desc) device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); else mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); } } } /* Reset load balance info */ memset(sc->load_balance_info, 0, sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); if (mrsas_get_ctrl_info(sc)) { mrsas_kill_hba(sc); retval = FAIL; goto out; } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; /* Adapter reset completed successfully */ device_printf(sc->mrsas_dev, "Reset successful\n"); retval = SUCCESS; goto out; } /* Reset failed, kill the adapter */ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; } else { mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; } out: mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_dprint(sc, MRSAS_OCR, "Reset Exit with %d.\n", retval); return retval; } /* * mrsas_kill_hba: Kill HBA when OCR is not supported * input: Adapter Context. * * This function will kill HBA when OCR is not supported. */ void mrsas_kill_hba(struct mrsas_softc *sc) { sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; pause("mrsas_kill_hba", 1000); mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_STOP_ADP); /* Flush */ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); mrsas_complete_outstanding_ioctls(sc); } /** * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba * input: Controller softc * * Returns void */ void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) { int i; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int32_t count, MSIxIndex; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->max_fw_cmds; i++) { cmd_mpt = sc->mpt_cmd_list[i]; if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_mptmfi_passthru(sc, cmd_mfi, cmd_mpt->io_request->RaidContext.status); } } } } /* * mrsas_wait_for_outstanding: Wait for outstanding commands * input: Adapter Context. * * This function will wait for 180 seconds for outstanding commands to be * completed. 
*/ int mrsas_wait_for_outstanding(struct mrsas_softc *sc) { int i, outstanding, retval = 0; u_int32_t fw_state, count, MSIxIndex; for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_OCR, "Driver remove or shutdown called.\n"); retval = 1; goto out; } /* Check if firmware is in fault state */ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { mrsas_dprint(sc, MRSAS_OCR, "Found FW in FAULT state, will reset adapter.\n"); retval = 1; goto out; } outstanding = mrsas_atomic_read(&sc->fw_outstanding); if (!outstanding) goto out; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " "commands to complete\n", i, outstanding); count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); } DELAY(1000 * 1000); } if (mrsas_atomic_read(&sc->fw_outstanding)) { mrsas_dprint(sc, MRSAS_OCR, " pending commands remain after waiting," " will reset adapter.\n"); retval = 1; } out: return retval; } /* * mrsas_release_mfi_cmd: Return a cmd to free command pool * input: Command packet for return to free cmd pool * * This function returns the MFI command to the command list. */ void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd) { struct mrsas_softc *sc = cmd->sc; mtx_lock(&sc->mfi_cmd_pool_lock); cmd->ccb_ptr = NULL; cmd->cmd_id.frame_count = 0; TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next); mtx_unlock(&sc->mfi_cmd_pool_lock); return; } /* * mrsas_get_controller_info: Returns FW's controller structure * input: Adapter soft state * Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. This * information is mainly used to find out the maximum IO transfer per command * supported by the FW. 
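 *
 * This follows the driver's usual internal-DCMD pattern, sketched below with
 * error handling and the remaining frame fields omitted:
 *
 *	cmd = mrsas_get_mfi_cmd(sc);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
 *	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
 *	mrsas_issue_polled(sc, cmd);
 *	mrsas_release_mfi_cmd(cmd);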
*/ static int mrsas_get_ctrl_info(struct mrsas_softc *sc) { int retcode = 0; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); dcmd->opcode = MR_DCMD_CTRL_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); if (!mrsas_issue_polled(sc, cmd)) memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); else retcode = 1; mrsas_update_ext_vd_details(sc); mrsas_free_ctlr_info_cmd(sc); mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_update_ext_vd_details : Update details w.r.t Extended VD * input: * sc - Controller's softc */ static void mrsas_update_ext_vd_details(struct mrsas_softc *sc) { sc->max256vdSupport = sc->ctrl_info->adapterOperations3.supportMaxExtLDs; /* Below is additional check to address future FW enhancement */ if (sc->ctrl_info->max_lds > 64) sc->max256vdSupport = 1; sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; if (sc->max256vdSupport) { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } else { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count - 1)); sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); if (sc->max256vdSupport) sc->current_map_sz = sc->new_map_sz; else sc->current_map_sz = sc->old_map_sz; } /* * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command * input: Adapter soft state * * Allocates DMAable memory for the controller info internal command. */ int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) { int ctlr_info_size; /* Allocate get controller info command */ ctlr_info_size = sizeof(struct mrsas_ctrl_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ctlr_info_size, 1, ctlr_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ctlr_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); return (ENOMEM); } memset(sc->ctlr_info_mem, 0, ctlr_info_size); return (0); } /* * mrsas_free_ctlr_info_cmd: Free memory for controller info command * input: Adapter soft state * * Deallocates memory of the get controller info cmd. 
*/ void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) { if (sc->ctlr_info_phys_addr) bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); if (sc->ctlr_info_mem != NULL) bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); if (sc->ctlr_info_tag != NULL) bus_dma_tag_destroy(sc->ctlr_info_tag); } /* * mrsas_issue_polled: Issues a polling command * inputs: Adapter soft state * Command packet to be issued * * This function is for posting of internal commands to Firmware. MFI requires * the cmd_status to be set to 0xFF before posting. The maximum wait time of * the poll response timer is 180 seconds. */ int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { struct mrsas_header *frame_hdr = &cmd->frame->hdr; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; int i, retcode = 0; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* Issue the frame using inbound queue port */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } /* * Poll response timer to wait for Firmware response. While this * polling with the DELAY call could hold the CPU, each interval is * only 1 millisecond. */ if (frame_hdr->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (frame_hdr->cmd_status == 0xFF) DELAY(1000); else break; } } if (frame_hdr->cmd_status != 0) { if (frame_hdr->cmd_status == 0xFF) device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait); else device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status); retcode = 1; } return (retcode); } /* * mrsas_issue_dcmd: Issues a MFI Pass thru cmd * input: Adapter soft state mfi cmd pointer * * This function is called by mrsas_issue_blocked_cmd() and * mrsas_issue_polled(), to build the MPT command and then fire the command * to Firmware. */ int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = mrsas_build_mpt_cmd(sc, cmd); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); return (1); } mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return (0); } /* * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd * input: Adapter soft state mfi cmd to build * * This function is called by mrsas_issue_dcmd() to build the MPT-MFI passthru * command and prepares the MPT command to send to Firmware. */ MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; u_int16_t index; if (mrsas_build_mptmfi_passthru(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); return NULL; } index = cmd->cmd_id.context.smid; req_desc = mrsas_get_request_desc(sc, index - 1); if (!req_desc) return NULL; req_desc->addr.Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->SCSIIO.SMID = index; return (req_desc); } /* * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command * input: Adapter soft state mfi cmd pointer * * The MPT command and the io_request are set up as a passthru command. The SGE * chain address is set to frame_phys_addr of the MFI command.
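Conceptually (simplified stand-in types below, not the exact MPI headers), the wrapping reduces to pointing one IEEE chain element at the MFI frame:

    struct ieee_sge_chain64 {            // simplified stand-in for MPI25_IEEE_SGE_CHAIN64
        uint64_t Address;
        uint32_t Length;
        uint8_t  Flags;
    };

    static void
    chain_to_mfi_frame(struct ieee_sge_chain64 *chain, uint64_t frame_pa, uint32_t frame_len)
    {
        chain->Address = frame_pa;       // FW fetches the MFI frame from this bus address
        chain->Length  = frame_len;
        chain->Flags   = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
            MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;   // flag names as used in this file
    }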
*/ u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) { MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; struct mrsas_mpt_cmd *mpt_cmd; struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; mpt_cmd = mrsas_get_mpt_cmd(sc); if (!mpt_cmd) return (1); /* Save the smid. To be used for returning the cmd */ mfi_cmd->cmd_id.context.smid = mpt_cmd->index; mpt_cmd->sync_cmd_idx = mfi_cmd->index; /* * For cmds where the flag is set, store the flag and check on * completion. For cmds with this flag, don't call * mrsas_complete_cmd. */ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; io_req = mpt_cmd->io_request; if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; sgl_ptr_end += sc->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = sc->chain_offset_mfi_pthru; mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME; return (0); } /* * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds * input: Adapter soft state Command to be issued * * This function waits on an event for the command to be returned from the ISR. * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing * internal and ioctl commands. */ int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; unsigned long total_time = 0; int retcode = 0; /* Initialize cmd_status */ cmd->cmd_status = ECONNREFUSED; /* Build MPT-MFI command for issue to FW */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == ECONNREFUSED) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait); retcode = 1; break; } } return (retcode); } /* * mrsas_complete_mptmfi_passthru: Completes a command * input: @sc: Adapter soft state * @cmd: Command to be completed * @status: cmd completion status * * This function is called from mrsas_complete_cmd() after an interrupt is * received from Firmware, and io_request->Function is * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. */ void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status) { struct mrsas_header *hdr = &cmd->frame->hdr; u_int8_t cmd_status = cmd->frame->hdr.cmd_status; /* Reset the retry counter for future re-tries */ cmd->retry_for_fw_reset = 0; if (cmd->ccb_ptr) cmd->ccb_ptr = NULL; switch (hdr->cmd) { case MFI_CMD_INVALID: device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: /* * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been * issued either through an IO path or an IOCTL path. If it * was via IOCTL, we will send it to internal completion. 
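When sync_cmd is not set, the case deliberately falls through into the DCMD handling below. Internal completion means releasing the sleeper parked in mrsas_issue_blocked_cmd(); stripped to its core, the handshake used throughout this file is (a sketch of mrsas_issue_blocked_cmd() above and mrsas_wakeup() below):

    cmd->cmd_status = ECONNREFUSED;                  // waiter: sentinel meaning "no answer yet"
    while (cmd->cmd_status == ECONNREFUSED)
        tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);

    cmd->cmd_status = cmd->frame->io.cmd_status;     // completer, from interrupt context
    wakeup_one((void *)&sc->chan);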
*/ if (cmd->sync_cmd) { cmd->sync_cmd = 0; mrsas_wakeup(sc, cmd); break; } case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: /* Check for LD map update */ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[1] == 1)) { sc->fast_path_io = 0; mtx_lock(&sc->raidmap_lock); if (cmd_status != 0) { if (cmd_status != MFI_STAT_NOT_FOUND) device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); else { mrsas_release_mfi_cmd(cmd); mtx_unlock(&sc->raidmap_lock); break; } } else sc->map_id++; mrsas_release_mfi_cmd(cmd); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 0; else sc->fast_path_io = 1; mrsas_sync_map_info(sc); mtx_unlock(&sc->raidmap_lock); break; } if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { sc->mrsas_aen_triggered = 0; } /* See if we got an event notification */ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) mrsas_complete_aen(sc, cmd); else mrsas_wakeup(sc, cmd); break; case MFI_CMD_ABORT: /* A command issued to abort another cmd has returned */ mrsas_complete_abort(sc, cmd); break; default: device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); break; } } /* * mrsas_wakeup: Completes an internal command * input: Adapter soft state * Command to be completed * * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait * timer is started. This function is called from * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up * from the command wait. */ void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { cmd->cmd_status = cmd->frame->io.cmd_status; if (cmd->cmd_status == ECONNREFUSED) cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); return; } /* * mrsas_shutdown_ctlr: Instructs FW to shut down the controller input: * Adapter soft state Shutdown/Hibernate * * This function issues a DCMD internal command to Firmware to initiate shutdown * of the controller. */ static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); return; } if (sc->aen_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (sc->map_update_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } /* * mrsas_flush_cache: Requests FW to flush all its caches input: * Adapter soft state * * This function issues a DCMD internal command to Firmware to initiate * flushing of all caches.
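In shape it is the same fire-and-wait DCMD pattern used by mrsas_shutdown_ctlr() above; condensed to its skeleton (a sketch, with the opcode and mbox byte as the only per-caller variation):

    static void
    issue_simple_dcmd(struct mrsas_softc *sc, u_int32_t opcode, u_int8_t mbox0)
    {
        struct mrsas_mfi_cmd *cmd = mrsas_get_mfi_cmd(sc);
        struct mrsas_dcmd_frame *dcmd;

        if (cmd == NULL)
            return;
        dcmd = &cmd->frame->dcmd;
        memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->flags = MFI_FRAME_DIR_NONE;
        dcmd->opcode = opcode;
        dcmd->mbox.b[0] = mbox0;
        mrsas_issue_blocked_cmd(sc, cmd);    // sleeps until the ISR completes it
        mrsas_release_mfi_cmd(cmd);          // return the frame to the free pool
    }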
*/ static void mrsas_flush_cache(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); return; } dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } /* * mrsas_get_map_info: Load and validate RAID map input: * Adapter instance soft state * * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load * and validate RAID map. It returns 0 if successful, 1 otherwise. */ static int mrsas_get_map_info(struct mrsas_softc *sc) { uint8_t retcode = 0; sc->fast_path_io = 0; if (!mrsas_get_ld_map_info(sc)) { retcode = MR_ValidateMapInfo(sc); if (retcode == 0) { sc->fast_path_io = 1; return 0; } } return 1; } /* * mrsas_get_ld_map_info: Get FW's ld_map structure input: * Adapter instance soft state * * Issues an internal command (DCMD) to get the FW's LD map * structure. */ static int mrsas_get_ld_map_info(struct mrsas_softc *sc) { int retcode = 0; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; void *map; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n"); return 1; } dcmd = &cmd->frame->dcmd; map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; if (!map) { device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } memset(map, 0, sc->max_map_sz); /* clear the whole map buffer, not sizeof() of the size field */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sc->current_map_sz; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = map_phys_addr; dcmd->sgl.sge32[0].length = sc->current_map_sz; if (!mrsas_issue_polled(sc, cmd)) retcode = 0; else { device_printf(sc->mrsas_dev, "Failed to send get LD map info cmd.\n"); retcode = 1; } mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_sync_map_info: Sync the LD target map with FW input: * Adapter instance soft state * * Issues an internal command (DCMD) to write the driver's per-LD target sync * map (target id and sequence number for each LD) to the FW.
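The driver double-buffers the RAID map: the low bit of map_id selects the buffer last fetched from the FW, and the other buffer is reused for this sync write. In sketch form (fields from this file):

    void *cur  = sc->raidmap_mem[sc->map_id & 1];        // last map fetched from FW
    void *next = sc->raidmap_mem[(sc->map_id - 1) & 1];  // buffer reused for the sync write
    // a successful MR_DCMD_LD_MAP_GET_INFO completion does sc->map_id++, flipping the roles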
*/ static int mrsas_sync_map_info(struct mrsas_softc *sc) { int retcode = 0, i; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t size_sync_info, num_lds; MR_LD_TARGET_SYNC *target_map = NULL; MR_DRV_RAID_MAP_ALL *map; MR_LD_RAID *raid; MR_LD_TARGET_SYNC *ld_sync; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); return 1; } map = sc->ld_drv_map[sc->map_id & 1]; num_lds = map->raidMap.ldCount; dcmd = &cmd->frame->dcmd; size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; memset(target_map, 0, sc->max_map_sz); map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; ld_sync = (MR_LD_TARGET_SYNC *) target_map; for (i = 0; i < num_lds; i++, ld_sync++) { raid = MR_LdRaidGet(i, map); ld_sync->targetId = MR_GetLDTgtId(i, map); ld_sync->seqNum = raid->seqNum; } dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_WRITE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sc->current_map_sz; dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = map_phys_addr; dcmd->sgl.sge32[0].length = sc->current_map_sz; sc->map_update_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Failed to send sync map info command.\n"); return (1); } return (retcode); } /* * mrsas_get_pd_list: Returns FW's PD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD list * structure. This information is mainly used to find out which physical * drives the Firmware exposes to the host.
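The returned buffer is a counted array of MR_PD_ADDRESS entries, and the driver indexes its local table by deviceId; condensed from the code that follows:

    struct MR_PD_ADDRESS *pd = pd_list_mem->addr;
    for (i = 0; i < pd_list_mem->count; i++, pd++) {
        sc->local_pd_list[pd->deviceId].tid        = pd->deviceId;
        sc->local_pd_list[pd->deviceId].driveType  = pd->scsiDevType;
        sc->local_pd_list[pd->deviceId].driveState = MR_PD_STATE_SYSTEM;
    }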
*/ static int mrsas_get_pd_list(struct mrsas_softc *sc) { int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_PD_LIST *pd_list_mem; struct MR_PD_ADDRESS *pd_addr; bus_addr_t pd_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); if (tcmd == NULL) { /* the M_NOWAIT allocation can fail and must be checked */ device_printf(sc->mrsas_dev, "Cannot alloc tmp dcmd for get PD list cmd\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } else { pd_list_mem = tcmd->tmp_dcmd_mem; pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); dcmd->opcode = MR_DCMD_PD_LIST_QUERY; dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (!mrsas_issue_polled(sc, cmd)) retcode = 0; else retcode = 1; /* Get the instance PD list */ pd_count = MRSAS_MAX_PD; pd_addr = pd_list_mem->addr; if (retcode == 0 && pd_list_mem->count < pd_count) { memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType; sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM; pd_addr++; } } /* * Use a mutex/spinlock if the pd_list component size grows beyond * 32 bits. */ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); mrsas_free_tmp_dcmd(tcmd); mrsas_release_mfi_cmd(cmd); free(tcmd, M_MRSAS); return (retcode); } /* * mrsas_get_ld_list: Returns FW's LD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller LD list * structure. This information is mainly used to find out the LDs supported * by the FW.
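sc->ld_ids[] acts as a presence table: 0xff marks an absent target id and a configured LD stores its own target id. In sketch form (condensed from the code below):

    memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);           // mark everything absent
    for (i = 0; i < ld_list_mem->ldCount; i++)
        if (ld_list_mem->ldList[i].state != 0) {
            tid = ld_list_mem->ldList[i].ref.ld_context.targetId;
            sc->ld_ids[tid] = tid;                              // mark present
        }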
*/ static int mrsas_get_ld_list(struct mrsas_softc *sc) { int ld_list_size, retcode = 0, ld_index = 0, ids = 0; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_LD_LIST *ld_list_mem; bus_addr_t ld_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); if (tcmd == NULL) { /* the M_NOWAIT allocation can fail and must be checked */ device_printf(sc->mrsas_dev, "Cannot alloc tmp dcmd for get LD list cmd\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } ld_list_size = sizeof(struct MR_LD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } else { ld_list_mem = tcmd->tmp_dcmd_mem; ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (sc->max256vdSupport) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); dcmd->opcode = MR_DCMD_LD_GET_LIST; dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); dcmd->pad_0 = 0; if (!mrsas_issue_polled(sc, cmd)) retcode = 0; else retcode = 1; #if VD_EXT_DEBUG printf("Number of LDs %d\n", ld_list_mem->ldCount); #endif /* Get the instance LD list */ if ((retcode == 0) && (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) { sc->CurLdCount = ld_list_mem->ldCount; memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { if (ld_list_mem->ldList[ld_index].state != 0) { ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; } } } mrsas_free_tmp_dcmd(tcmd); mrsas_release_mfi_cmd(cmd); free(tcmd, M_MRSAS); return (retcode); } /* * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: * Adapter soft state Temp command Size of allocation * * Allocates DMAable memory for a temporary internal command. The allocated * memory is initialized to all zeros upon successful loading of the dma * mapped memory. */ int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &tcmd->tmp_dcmd_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); return (ENOMEM); } memset(tcmd->tmp_dcmd_mem, 0, size); return (0); } /* * mrsas_free_tmp_dcmd: Free memory for temporary command input: * temporary dcmd pointer * * Deallocates the memory of a temporary command that was used in the * construction of an internal DCMD.
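The pair follows the standard busdma life cycle; as a compact reminder of the required ordering (a generic sketch, not driver-specific):

    bus_dma_tag_create(parent, ..., &tag);                   // 1. describe the memory constraints
    bus_dmamem_alloc(tag, &vaddr, BUS_DMA_NOWAIT, &map);     // 2. obtain DMA-able memory
    bus_dmamap_load(tag, map, vaddr, size, cb, &paddr, 0);   // 3. resolve the bus address
    // teardown runs strictly in reverse:
    bus_dmamap_unload(tag, map);
    bus_dmamem_free(tag, vaddr, map);
    bus_dma_tag_destroy(tag);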
*/ void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) { if (tmp->tmp_dcmd_phys_addr) bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_mem != NULL) bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_tag != NULL) bus_dma_tag_destroy(tmp->tmp_dcmd_tag); } /* * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: * Adapter soft state Previously issued cmd to be aborted * * This function is used to abort previously issued commands, such as AEN and * RAID map sync commands. The abort command is sent as a DCMD internal * command and subsequently the driver will wait for a return status. The * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. */ static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort) { struct mrsas_mfi_cmd *cmd; struct mrsas_abort_frame *abort_fr; u_int8_t retcode = 0; unsigned long total_time = 0; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); return (1); } abort_fr = &cmd->frame->abort; /* Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Failed to send abort command.\n"); cmd->sync_cmd = 0; mrsas_release_mfi_cmd(cmd); /* don't leak the frame on a failed issue */ return (1); } /* Wait for this cmd to complete */ sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); retcode = 1; break; } } cmd->sync_cmd = 0; mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_complete_abort: Completes aborting a command input: * Adapter soft state Cmd that was issued to abort another cmd * * The mrsas_issue_blocked_abort_cmd() function waits for the command status to * change after sending the command. This function is called from * mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
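Note that cmd_status == 0xFF is the "still pending" sentinel: the waiter in mrsas_issue_blocked_abort_cmd() re-enters tsleep() on any wakeup that arrives while the status still reads 0xFF, so a wakeup delivered to the wrong sleeper on the shared channel is harmless; it simply re-checks and sleeps again.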
*/ void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); } return; } /* * mrsas_aen_handler: AEN processing callback function from thread context * input: Adapter soft state * * Asynchronous event handler */ void mrsas_aen_handler(struct mrsas_softc *sc) { union mrsas_evt_class_locale class_locale; int doscan = 0; u_int32_t seq_num; int error; if (sc == NULL) { printf("mrsas: invalid softc instance!\n"); /* device_printf() would dereference the NULL softc */ return; } if (sc->evt_detail_mem) { switch (sc->evt_detail_mem->code) { case MR_EVT_PD_INSERTED: mrsas_get_pd_list(sc); mrsas_bus_scan_sim(sc, sc->sim_1); doscan = 0; break; case MR_EVT_PD_REMOVED: mrsas_get_pd_list(sc); mrsas_bus_scan_sim(sc, sc->sim_1); doscan = 0; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: mrsas_bus_scan_sim(sc, sc->sim_0); doscan = 0; break; case MR_EVT_LD_CREATED: mrsas_get_ld_list(sc); mrsas_bus_scan_sim(sc, sc->sim_0); doscan = 0; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: doscan = 1; break; default: doscan = 0; break; } } else { device_printf(sc->mrsas_dev, "invalid evt_detail\n"); return; } if (doscan) { mrsas_get_pd_list(sc); mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); mrsas_bus_scan_sim(sc, sc->sim_1); mrsas_get_ld_list(sc); mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); mrsas_bus_scan_sim(sc, sc->sim_0); } seq_num = sc->evt_detail_mem->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (sc->aen_cmd != NULL) return; mtx_lock(&sc->aen_lock); error = mrsas_register_aen(sc, seq_num, class_locale.word); mtx_unlock(&sc->aen_lock); if (error) device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); } /* * mrsas_complete_aen: Completes AEN command * input: Adapter soft state * AEN cmd that the FW completed * * This function is called from the ISR and continues event processing in * thread context by enqueuing a task on ev_tq (callback function * "mrsas_aen_handler").
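The deferral itself is the stock taskqueue pattern; assuming the task was bound to a wrapper with the (void *, int) taskqueue signature at attach time (the wrapper name below is an illustrative assumption, not a symbol from this file):

    TASK_INIT(&sc->ev_task, 0, mrsas_ev_task_fn, sc);   // at attach; mrsas_ev_task_fn is a hypothetical name
    taskqueue_enqueue(sc->ev_tq, &sc->ev_task);         // here, from completion context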
*/ void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { /* * Don't signal app if it is just an aborted previously registered * aen */ if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { sc->mrsas_aen_triggered = 1; mtx_lock(&sc->aen_lock); if (sc->mrsas_poll_waiting) { sc->mrsas_poll_waiting = 0; selwakeup(&sc->mrsas_select); } mtx_unlock(&sc->aen_lock); } else cmd->abort_aen = 0; sc->aen_cmd = NULL; mrsas_release_mfi_cmd(cmd); if (!sc->remove_in_progress) taskqueue_enqueue(sc->ev_tq, &sc->ev_task); return; } static device_method_t mrsas_methods[] = { DEVMETHOD(device_probe, mrsas_probe), DEVMETHOD(device_attach, mrsas_attach), DEVMETHOD(device_detach, mrsas_detach), DEVMETHOD(device_suspend, mrsas_suspend), DEVMETHOD(device_resume, mrsas_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), {0, 0} }; static driver_t mrsas_driver = { "mrsas", mrsas_methods, sizeof(struct mrsas_softc) }; static devclass_t mrsas_devclass; DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); MODULE_DEPEND(mrsas, cam, 1, 1, 1); Index: head/sys/dev/mxge/if_mxge.c =================================================================== --- head/sys/dev/mxge/if_mxge.c (revision 295789) +++ head/sys/dev/mxge/if_mxge.c (revision 295790) @@ -1,5032 +1,5032 @@ /****************************************************************************** Copyright (c) 2006-2013, Myricom Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Myricom Inc, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for pci_cfg_restore */ #include /* for pmap_mapdev() */ #include #if defined(__i386) || defined(__amd64) #include #endif #include #include /*#define MXGE_FAKE_IFP*/ #include #ifdef IFNET_BUF_RING #include #endif #include "opt_inet.h" #include "opt_inet6.h" /* tunable params */ static int mxge_nvidia_ecrc_enable = 1; static int mxge_force_firmware = 0; static int mxge_intr_coal_delay = 30; static int mxge_deassert_wait = 1; static int mxge_flow_control = 1; static int mxge_verbose = 0; static int mxge_ticks; static int mxge_max_slices = 1; static int mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; static int mxge_always_promisc = 0; static int mxge_initial_mtu = ETHERMTU_JUMBO; static int mxge_throttle = 0; static char *mxge_fw_unaligned = "mxge_ethp_z8e"; static char *mxge_fw_aligned = "mxge_eth_z8e"; static char *mxge_fw_rss_aligned = "mxge_rss_eth_z8e"; static char *mxge_fw_rss_unaligned = "mxge_rss_ethp_z8e"; static int mxge_probe(device_t dev); static int mxge_attach(device_t dev); static int mxge_detach(device_t dev); static int mxge_shutdown(device_t dev); static void mxge_intr(void *arg); static device_method_t mxge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mxge_probe), DEVMETHOD(device_attach, mxge_attach), DEVMETHOD(device_detach, mxge_detach), DEVMETHOD(device_shutdown, mxge_shutdown), DEVMETHOD_END }; static driver_t mxge_driver = { "mxge", mxge_methods, sizeof(mxge_softc_t), }; static devclass_t mxge_devclass; /* Declare ourselves to be a child of the PCI bus.*/ DRIVER_MODULE(mxge, pci, mxge_driver, mxge_devclass, 0, 0); MODULE_DEPEND(mxge, firmware, 1, 1, 1); MODULE_DEPEND(mxge, zlib, 1, 1, 1); static int mxge_load_firmware(mxge_softc_t *sc, int adopt); static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data); static int mxge_close(mxge_softc_t *sc, int down); static int mxge_open(mxge_softc_t *sc); static void mxge_tick(void *arg); static int mxge_probe(device_t dev) { int rev; if ((pci_get_vendor(dev) == MXGE_PCI_VENDOR_MYRICOM) && ((pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E) || (pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E_9))) { rev = pci_get_revid(dev); switch (rev) { case MXGE_PCI_REV_Z8E: device_set_desc(dev, "Myri10G-PCIE-8A"); break; case MXGE_PCI_REV_Z8ES: device_set_desc(dev, "Myri10G-PCIE-8B"); break; default: device_set_desc(dev, "Myri10G-PCIE-8??"); device_printf(dev, "Unrecognized rev %d NIC\n", rev); break; } return 0; } return ENXIO; } static void mxge_enable_wc(mxge_softc_t *sc) { #if defined(__i386) || defined(__amd64) vm_offset_t len; int err; sc->wc = 1; len = rman_get_size(sc->mem_res); err = pmap_change_attr((vm_offset_t) sc->sram, len, PAT_WRITE_COMBINING); if (err != 0) { device_printf(sc->dev, "pmap_change_attr failed, %d\n", err); sc->wc = 0; } #endif } /* callback to get our DMA address */ static void mxge_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error == 0) { *(bus_addr_t *) arg = segs->ds_addr; } } static int mxge_dma_alloc(mxge_softc_t *sc, mxge_dma_t *dma, size_t bytes, 
bus_size_t alignment) { int err; device_t dev = sc->dev; bus_size_t boundary, maxsegsize; if (bytes > 4096 && alignment == 4096) { boundary = 0; maxsegsize = bytes; } else { boundary = 4096; maxsegsize = 4096; } /* allocate DMAable memory tags */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ alignment, /* alignment */ boundary, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ bytes, /* maxsize */ 1, /* num segs */ maxsegsize, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lock */ &dma->dmat); /* tag */ if (err != 0) { device_printf(dev, "couldn't alloc tag (err = %d)\n", err); return err; } /* allocate DMAable memory & map */ err = bus_dmamem_alloc(dma->dmat, &dma->addr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO), &dma->map); if (err != 0) { device_printf(dev, "couldn't alloc mem (err = %d)\n", err); goto abort_with_dmat; } /* load the memory */ err = bus_dmamap_load(dma->dmat, dma->map, dma->addr, bytes, mxge_dmamap_callback, (void *)&dma->bus_addr, 0); if (err != 0) { device_printf(dev, "couldn't load map (err = %d)\n", err); goto abort_with_mem; } return 0; abort_with_mem: bus_dmamem_free(dma->dmat, dma->addr, dma->map); abort_with_dmat: (void)bus_dma_tag_destroy(dma->dmat); return err; } static void mxge_dma_free(mxge_dma_t *dma) { bus_dmamap_unload(dma->dmat, dma->map); bus_dmamem_free(dma->dmat, dma->addr, dma->map); (void)bus_dma_tag_destroy(dma->dmat); } /* * The eeprom strings on the lanaiX have the format * SN=x\0 * MAC=x:x:x:x:x:x\0 * PC=text\0 */ static int mxge_parse_strings(mxge_softc_t *sc) { char *ptr; int i, found_mac, found_sn2; char *endptr; ptr = sc->eeprom_strings; found_mac = 0; found_sn2 = 0; while (*ptr != '\0') { if (strncmp(ptr, "MAC=", 4) == 0) { ptr += 4; for (i = 0;;) { sc->mac_addr[i] = strtoul(ptr, &endptr, 16); if (endptr - ptr != 2) goto abort; ptr = endptr; if (++i == 6) break; if (*ptr++ != ':') goto abort; } found_mac = 1; } else if (strncmp(ptr, "PC=", 3) == 0) { ptr += 3; strlcpy(sc->product_code_string, ptr, sizeof(sc->product_code_string)); } else if (!found_sn2 && (strncmp(ptr, "SN=", 3) == 0)) { ptr += 3; strlcpy(sc->serial_number_string, ptr, sizeof(sc->serial_number_string)); } else if (strncmp(ptr, "SN2=", 4) == 0) { /* SN2 takes precedence over SN */ ptr += 4; found_sn2 = 1; strlcpy(sc->serial_number_string, ptr, sizeof(sc->serial_number_string)); } while (*ptr++ != '\0') {} } if (found_mac) return 0; abort: device_printf(sc->dev, "failed to parse eeprom_strings\n"); return ENXIO; } #if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__ static void mxge_enable_nvidia_ecrc(mxge_softc_t *sc) { uint32_t val; unsigned long base, off; char *va, *cfgptr; device_t pdev, mcp55; uint16_t vendor_id, device_id, word; uintptr_t bus, slot, func, ivend, idev; uint32_t *ptr32; if (!mxge_nvidia_ecrc_enable) return; pdev = device_get_parent(device_get_parent(sc->dev)); if (pdev == NULL) { device_printf(sc->dev, "could not find parent?\n"); return; } vendor_id = pci_read_config(pdev, PCIR_VENDOR, 2); device_id = pci_read_config(pdev, PCIR_DEVICE, 2); if (vendor_id != 0x10de) return; base = 0; if (device_id == 0x005d) { /* ck804, base address is magic */ base = 0xe0000000UL; } else if (device_id >= 0x0374 && device_id <= 0x378) { /* mcp55, base address stored in chipset */ mcp55 = pci_find_bsf(0, 0, 0); if (mcp55 && 0x10de == pci_read_config(mcp55, PCIR_VENDOR, 2) && 0x0369 == pci_read_config(mcp55, PCIR_DEVICE, 2)) { word = pci_read_config(mcp55, 0x90, 2); 
base = ((unsigned long)word & 0x7ffeU) << 25; } } if (!base) return; /* XXXX Test below is commented because it is believed that doing config read/write beyond 0xff will access the config space for the next larger function. Uncomment this and remove the hacky pmap_mapdev() way of accessing config space when FreeBSD grows support for extended pcie config space access */ #if 0 /* See if we can, by some miracle, access the extended config space */ val = pci_read_config(pdev, 0x178, 4); if (val != 0xffffffff) { val |= 0x40; pci_write_config(pdev, 0x178, val, 4); return; } #endif /* Rather than using normal pci config space writes, we must * map the Nvidia config space ourselves. This is because on * opteron/nvidia class machine the 0xe000000 mapping is * handled by the nvidia chipset, that means the internal PCI * device (the on-chip northbridge), or the amd-8131 bridge * and things behind them are not visible by this method. */ BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_BUS, &bus); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_SLOT, &slot); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_FUNCTION, &func); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_VENDOR, &ivend); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_DEVICE, &idev); off = base + 0x00100000UL * (unsigned long)bus + 0x00001000UL * (unsigned long)(func + 8 * slot); /* map it into the kernel */ va = pmap_mapdev(trunc_page((vm_paddr_t)off), PAGE_SIZE); if (va == NULL) { device_printf(sc->dev, "pmap_kenter_temporary didn't\n"); return; } /* get a pointer to the config space mapped into the kernel */ cfgptr = va + (off & PAGE_MASK); /* make sure that we can really access it */ vendor_id = *(uint16_t *)(cfgptr + PCIR_VENDOR); device_id = *(uint16_t *)(cfgptr + PCIR_DEVICE); if (! (vendor_id == ivend && device_id == idev)) { device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n", vendor_id, device_id); pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); return; } ptr32 = (uint32_t*)(cfgptr + 0x178); val = *ptr32; if (val == 0xffffffff) { device_printf(sc->dev, "extended mapping failed\n"); pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); return; } *ptr32 = val | 0x40; pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); if (mxge_verbose) device_printf(sc->dev, "Enabled ECRC on upstream Nvidia bridge " "at %d:%d:%d\n", (int)bus, (int)slot, (int)func); return; } #else static void mxge_enable_nvidia_ecrc(mxge_softc_t *sc) { device_printf(sc->dev, "Nforce 4 chipset on non-x86/amd64!?!?!\n"); return; } #endif static int mxge_dma_test(mxge_softc_t *sc, int test_type) { mxge_cmd_t cmd; bus_addr_t dmatest_bus = sc->dmabench_dma.bus_addr; int status; uint32_t len; char *test = " "; /* Run a small DMA test. * The magic multipliers to the length tell the firmware * to do DMA read, write, or read+write tests. The * results are returned in cmd.data0. The upper 16 * bits of the return is the number of transfers completed. * The lower 16 bits is the time in 0.5us ticks that the * transfers took to complete. 
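Decoding the packed result (this matches the arithmetic used below): the upper 16 bits of cmd.data0 hold the number of transfers, the lower 16 bits the elapsed time in 0.5 us ticks, so the bandwidth falls out as

    transfers = cmd.data0 >> 16;
    ticks     = cmd.data0 & 0xffff;               // each tick is 0.5 us
    MBs       = (transfers * len * 2) / ticks;    // bytes/us == MB/s; the *2 cancels the half-us tick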
*/ len = sc->tx_boundary; cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x10000; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "read"; goto abort; } sc->read_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x1; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "write"; goto abort; } sc->write_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x10001; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "read/write"; goto abort; } sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) / (cmd.data0 & 0xffff); abort: if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) device_printf(sc->dev, "DMA %s benchmark failed: %d\n", test, status); return status; } /* * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput * when the PCI-E Completion packets are aligned on an 8-byte * boundary. Some PCI-E chip sets always align Completion packets; on * the ones that do not, the alignment can be enforced by enabling * ECRC generation (if supported). * * When PCI-E Completion packets are not aligned, it is actually more * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. * * If the driver can neither enable ECRC nor verify that it has * already been enabled, then it must use a firmware image which works * around unaligned completion packets (ethp_z8e.dat), and it should * also ensure that it never gives the device a Read-DMA which is * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is * enabled, then the driver should use the aligned (eth_z8e.dat) * firmware image, and set tx_boundary to 4KB. */ static int mxge_firmware_probe(mxge_softc_t *sc) { device_t dev = sc->dev; int reg, status; uint16_t pectl; sc->tx_boundary = 4096; /* * Verify the max read request size was set to 4KB * before trying the test with 4KB. */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { pectl = pci_read_config(dev, reg + 0x8, 2); if ((pectl & (5 << 12)) != (5 << 12)) { device_printf(dev, "Max Read Req. size != 4k (0x%x)\n", pectl); sc->tx_boundary = 2048; } } /* * load the optimized firmware (which assumes aligned PCIe * completions) in order to see if it works on this host. */ sc->fw_name = mxge_fw_aligned; status = mxge_load_firmware(sc, 1); if (status != 0) { return status; } /* * Enable ECRC if possible */ mxge_enable_nvidia_ecrc(sc); /* * Run a DMA test which watches for unaligned completions and * aborts on the first one seen. Not required on Z8ES or newer. */ if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES) return 0; status = mxge_dma_test(sc, MXGEFW_CMD_UNALIGNED_TEST); if (status == 0) return 0; /* keep the aligned firmware */ if (status != E2BIG) device_printf(dev, "DMA test failed: %d\n", status); if (status == ENOSYS) device_printf(dev, "Falling back to ethp! " "Please install up to date fw\n"); return status; } static int mxge_select_firmware(mxge_softc_t *sc) { int aligned = 0; int force_firmware = mxge_force_firmware; if (sc->throttle) force_firmware = sc->throttle; if (force_firmware != 0) { if (force_firmware == 1) aligned = 1; else aligned = 0; if (mxge_verbose) device_printf(sc->dev, "Assuming %s completions (forced)\n", aligned ?
"aligned" : "unaligned"); goto abort; } /* if the PCIe link width is 4 or less, we can use the aligned firmware and skip any checks */ if (sc->link_width != 0 && sc->link_width <= 4) { device_printf(sc->dev, "PCIe x%d Link, expect reduced performance\n", sc->link_width); aligned = 1; goto abort; } if (0 == mxge_firmware_probe(sc)) return 0; abort: if (aligned) { sc->fw_name = mxge_fw_aligned; sc->tx_boundary = 4096; } else { sc->fw_name = mxge_fw_unaligned; sc->tx_boundary = 2048; } return (mxge_load_firmware(sc, 0)); } static int mxge_validate_firmware(mxge_softc_t *sc, const mcp_gen_header_t *hdr) { if (be32toh(hdr->mcp_type) != MCP_TYPE_ETH) { device_printf(sc->dev, "Bad firmware type: 0x%x\n", be32toh(hdr->mcp_type)); return EIO; } /* save firmware version for sysctl */ strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version)); if (mxge_verbose) device_printf(sc->dev, "firmware id: %s\n", hdr->version); sscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major, &sc->fw_ver_minor, &sc->fw_ver_tiny); if (!(sc->fw_ver_major == MXGEFW_VERSION_MAJOR && sc->fw_ver_minor == MXGEFW_VERSION_MINOR)) { device_printf(sc->dev, "Found firmware version %s\n", sc->fw_version); device_printf(sc->dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR, MXGEFW_VERSION_MINOR); return EINVAL; } return 0; } static void * z_alloc(void *nil, u_int items, u_int size) { void *ptr; ptr = malloc(items * size, M_TEMP, M_NOWAIT); return ptr; } static void z_free(void *nil, void *ptr) { free(ptr, M_TEMP); } static int mxge_load_firmware_helper(mxge_softc_t *sc, uint32_t *limit) { z_stream zs; char *inflate_buffer; const struct firmware *fw; const mcp_gen_header_t *hdr; unsigned hdr_offset; int status; unsigned int i; char dummy; size_t fw_len; fw = firmware_get(sc->fw_name); if (fw == NULL) { device_printf(sc->dev, "Could not find firmware image %s\n", sc->fw_name); return ENOENT; } /* setup zlib and decompress f/w */ bzero(&zs, sizeof (zs)); zs.zalloc = z_alloc; zs.zfree = z_free; status = inflateInit(&zs); if (status != Z_OK) { status = EIO; goto abort_with_fw; } /* the uncompressed size is stored as the firmware version, which would otherwise go unused */ fw_len = (size_t) fw->version; inflate_buffer = malloc(fw_len, M_TEMP, M_NOWAIT); if (inflate_buffer == NULL) goto abort_with_zs; zs.avail_in = fw->datasize; zs.next_in = __DECONST(char *, fw->data); zs.avail_out = fw_len; zs.next_out = inflate_buffer; status = inflate(&zs, Z_FINISH); if (status != Z_STREAM_END) { device_printf(sc->dev, "zlib %d\n", status); status = EIO; goto abort_with_buffer; } /* check id */ hdr_offset = htobe32(*(const uint32_t *) (inflate_buffer + MCP_HEADER_PTR_OFFSET)); if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw_len) { device_printf(sc->dev, "Bad firmware file"); status = EIO; goto abort_with_buffer; } hdr = (const void*)(inflate_buffer + hdr_offset); status = mxge_validate_firmware(sc, hdr); if (status != 0) goto abort_with_buffer; /* Copy the inflated firmware to NIC SRAM. 
*/ for (i = 0; i < fw_len; i += 256) { mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i, inflate_buffer + i, min(256U, (unsigned)(fw_len - i))); wmb(); dummy = *sc->sram; wmb(); } *limit = fw_len; status = 0; abort_with_buffer: free(inflate_buffer, M_TEMP); abort_with_zs: inflateEnd(&zs); abort_with_fw: firmware_put(fw, FIRMWARE_UNLOAD); return status; } /* * Enable or disable periodic RDMAs from the host to make certain * chipsets resend dropped PCIe messages */ static void mxge_dummy_rdma(mxge_softc_t *sc, int enable) { char buf_bytes[72]; volatile uint32_t *confirm; volatile char *submit; uint32_t *buf, dma_low, dma_high; int i; buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); /* clear confirmation addr */ confirm = (volatile uint32_t *)sc->cmd; *confirm = 0; wmb(); /* send an rdma command to the PCIe engine, and wait for the response in the confirmation address. The firmware should write a -1 there to indicate it is alive and well */ dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf[0] = htobe32(dma_high); /* confirm addr MSW */ buf[1] = htobe32(dma_low); /* confirm addr LSW */ buf[2] = htobe32(0xffffffff); /* confirm data */ dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr); buf[3] = htobe32(dma_high); /* dummy addr MSW */ buf[4] = htobe32(dma_low); /* dummy addr LSW */ buf[5] = htobe32(enable); /* enable? */ submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA); mxge_pio_copy(submit, buf, 64); wmb(); DELAY(1000); wmb(); i = 0; while (*confirm != 0xffffffff && i < 20) { DELAY(1000); i++; } if (*confirm != 0xffffffff) { device_printf(sc->dev, "dummy rdma %s failed (%p = 0x%x)\n", (enable ? "enable" : "disable"), confirm, *confirm); } return; } static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data) { mcp_cmd_t *buf; char buf_bytes[sizeof(*buf) + 8]; volatile mcp_cmd_response_t *response = sc->cmd; volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD; uint32_t dma_low, dma_high; int err, sleep_total = 0; /* ensure buf is aligned to 8 bytes */ buf = (mcp_cmd_t *)((unsigned long)(buf_bytes + 7) & ~7UL); buf->data0 = htobe32(data->data0); buf->data1 = htobe32(data->data1); buf->data2 = htobe32(data->data2); buf->cmd = htobe32(cmd); dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf->response_addr.low = htobe32(dma_low); buf->response_addr.high = htobe32(dma_high); mtx_lock(&sc->cmd_mtx); response->result = 0xffffffff; wmb(); mxge_pio_copy((volatile void *)cmd_addr, buf, sizeof (*buf)); /* wait up to 20ms */ err = EAGAIN; for (sleep_total = 0; sleep_total < 20; sleep_total++) { bus_dmamap_sync(sc->cmd_dma.dmat, sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); wmb(); switch (be32toh(response->result)) { case 0: data->data0 = be32toh(response->data); err = 0; break; case 0xffffffff: DELAY(1000); break; case MXGEFW_CMD_UNKNOWN: err = ENOSYS; break; case MXGEFW_CMD_ERROR_UNALIGNED: err = E2BIG; break; case MXGEFW_CMD_ERROR_BUSY: err = EBUSY; break; case MXGEFW_CMD_ERROR_I2C_ABSENT: err = ENXIO; break; default: device_printf(sc->dev, "mxge: command %d " "failed, result = %d\n", cmd, be32toh(response->result)); err = ENXIO; break; } if (err != EAGAIN) break; } if (err == EAGAIN) device_printf(sc->dev, "mxge: command %d timed out, " "result = %d\n", cmd, be32toh(response->result)); mtx_unlock(&sc->cmd_mtx); return err; } static int mxge_adopt_running_firmware(mxge_softc_t *sc) { struct
mcp_gen_header *hdr; const size_t bytes = sizeof (struct mcp_gen_header); size_t hdr_offset; int status; /* find running firmware header */ hdr_offset = htobe32(*(volatile uint32_t *) (sc->sram + MCP_HEADER_PTR_OFFSET)); if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) { device_printf(sc->dev, "Running firmware has bad header offset (%d)\n", (int)hdr_offset); return EIO; } /* copy header of running firmware from SRAM to host memory to * validate firmware */ hdr = malloc(bytes, M_DEVBUF, M_NOWAIT); if (hdr == NULL) { device_printf(sc->dev, "could not malloc firmware hdr\n"); return ENOMEM; } bus_space_read_region_1(rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), hdr_offset, (char *)hdr, bytes); status = mxge_validate_firmware(sc, hdr); free(hdr, M_DEVBUF); /* * check to see if adopted firmware has bug where adopting * it will cause broadcasts to be filtered unless the NIC * is kept in ALLMULTI mode */ if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) { sc->adopted_rx_filter_bug = 1; device_printf(sc->dev, "Adopting fw %d.%d.%d: " "working around rx filter bug\n", sc->fw_ver_major, sc->fw_ver_minor, sc->fw_ver_tiny); } return status; } static int mxge_load_firmware(mxge_softc_t *sc, int adopt) { volatile uint32_t *confirm; volatile char *submit; char buf_bytes[72]; uint32_t *buf, size, dma_low, dma_high; int status, i; buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); size = sc->sram_size; status = mxge_load_firmware_helper(sc, &size); if (status) { if (!adopt) return status; /* Try to use the currently running firmware, if it is new enough */ status = mxge_adopt_running_firmware(sc); if (status) { device_printf(sc->dev, "failed to adopt running firmware\n"); return status; } device_printf(sc->dev, "Successfully adopted running firmware\n"); if (sc->tx_boundary == 4096) { device_printf(sc->dev, "Using firmware currently running on NIC" ". For optimal\n"); device_printf(sc->dev, "performance consider loading optimized " "firmware\n"); } sc->fw_name = mxge_fw_unaligned; sc->tx_boundary = 2048; return 0; } /* clear confirmation addr */ confirm = (volatile uint32_t *)sc->cmd; *confirm = 0; wmb(); /* send a reload command to the bootstrap MCP, and wait for the response in the confirmation address. The firmware should write a -1 there to indicate it is alive and well */ dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf[0] = htobe32(dma_high); /* confirm addr MSW */ buf[1] = htobe32(dma_low); /* confirm addr LSW */ buf[2] = htobe32(0xffffffff); /* confirm data */ /* FIX: All newest firmware should un-protect the bottom of the sram before handoff. However, the very first interfaces do not. 
Therefore the handoff copy must skip the first 8 bytes */ /* where the code starts*/ buf[3] = htobe32(MXGE_FW_OFFSET + 8); buf[4] = htobe32(size - 8); /* length of code */ buf[5] = htobe32(8); /* where to copy to */ buf[6] = htobe32(0); /* where to jump to */ submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF); mxge_pio_copy(submit, buf, 64); wmb(); DELAY(1000); wmb(); i = 0; while (*confirm != 0xffffffff && i < 20) { DELAY(1000*10); i++; bus_dmamap_sync(sc->cmd_dma.dmat, sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); } if (*confirm != 0xffffffff) { device_printf(sc->dev, "handoff failed (%p = 0x%x)\n", confirm, *confirm); return ENXIO; } return 0; } static int mxge_update_mac_address(mxge_softc_t *sc) { mxge_cmd_t cmd; uint8_t *addr = sc->mac_addr; int status; cmd.data0 = ((addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); cmd.data1 = ((addr[4] << 8) | (addr[5])); status = mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd); return status; } static int mxge_change_pause(mxge_softc_t *sc, int pause) { mxge_cmd_t cmd; int status; if (pause) status = mxge_send_cmd(sc, MXGEFW_ENABLE_FLOW_CONTROL, &cmd); else status = mxge_send_cmd(sc, MXGEFW_DISABLE_FLOW_CONTROL, &cmd); if (status) { device_printf(sc->dev, "Failed to set flow control mode\n"); return ENXIO; } sc->pause = pause; return 0; } static void mxge_change_promisc(mxge_softc_t *sc, int promisc) { mxge_cmd_t cmd; int status; if (mxge_always_promisc) promisc = 1; if (promisc) status = mxge_send_cmd(sc, MXGEFW_ENABLE_PROMISC, &cmd); else status = mxge_send_cmd(sc, MXGEFW_DISABLE_PROMISC, &cmd); if (status) { device_printf(sc->dev, "Failed to set promisc mode\n"); } } static void mxge_set_multicast_list(mxge_softc_t *sc) { mxge_cmd_t cmd; struct ifmultiaddr *ifma; struct ifnet *ifp = sc->ifp; int err; /* This firmware is known to not support multicast */ if (!sc->fw_multicast_support) return; /* Disable multicast filtering while we play with the lists*/ err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_ENABLE_ALLMULTI," " error status: %d\n", err); return; } if (sc->adopted_rx_filter_bug) return; if (ifp->if_flags & IFF_ALLMULTI) /* request to disable multicast filtering, so quit here */ return; /* Flush all the filters */ err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS" ", error status: %d\n", err); return; } /* Walk the multicast list, and add each address */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &cmd.data0, 4); bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr) + 4, &cmd.data1, 2); cmd.data0 = htonl(cmd.data0); cmd.data1 = htonl(cmd.data1); err = mxge_send_cmd(sc, MXGEFW_JOIN_MULTICAST_GROUP, &cmd); if (err != 0) { device_printf(sc->dev, "Failed " "MXGEFW_JOIN_MULTICAST_GROUP, error status:" " %d\n", err); /* abort, leaving multicast filtering off */ if_maddr_runlock(ifp); return; } } if_maddr_runlock(ifp); /* Enable multicast filtering */ err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_DISABLE_ALLMULTI" ", error status: %d\n", err); } } static int mxge_max_mtu(mxge_softc_t *sc) { mxge_cmd_t cmd; int status; if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU) return MXGEFW_MAX_MTU - MXGEFW_PAD; /* try to set nbufs to see if we can use virtually contiguous jumbos */ cmd.data0 =
0; status = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, &cmd); if (status == 0) return MXGEFW_MAX_MTU - MXGEFW_PAD; /* otherwise, we're limited to MJUMPAGESIZE */ return MJUMPAGESIZE - MXGEFW_PAD; } static int mxge_reset(mxge_softc_t *sc, int interrupts_setup) { struct mxge_slice_state *ss; mxge_rx_done_t *rx_done; volatile uint32_t *irq_claim; mxge_cmd_t cmd; int slice, status; /* try to send a reset command to the card to see if it is alive */ memset(&cmd, 0, sizeof (cmd)); status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); if (status != 0) { device_printf(sc->dev, "failed reset\n"); return ENXIO; } mxge_dummy_rdma(sc, 1); /* set the intrq size */ cmd.data0 = sc->rx_ring_size; status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); /* * Even though we already know how many slices are supported * via mxge_slice_probe(), MXGEFW_CMD_GET_MAX_RSS_QUEUES * has magic side effects, and must be called after a reset. * It must be called prior to calling any RSS related cmds, * including assigning an interrupt queue for anything but * slice 0. It must also be called *after* * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by * the firmware to compute offsets. */ if (sc->num_slices > 1) { /* ask the maximum number of slices it supports */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed to get number of slices\n"); return status; } /* * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior * to setting up the interrupt queue DMA */ cmd.data0 = sc->num_slices; cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; #ifdef IFNET_BUF_RING cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; #endif status = mxge_send_cmd(sc, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed to set number of slices\n"); return status; } } if (interrupts_setup) { /* Now exchange information about interrupts */ for (slice = 0; slice < sc->num_slices; slice++) { rx_done = &sc->ss[slice].rx_done; memset(rx_done->entry, 0, sc->rx_ring_size); cmd.data0 = MXGE_LOWPART_TO_U32(rx_done->dma.bus_addr); cmd.data1 = MXGE_HIGHPART_TO_U32(rx_done->dma.bus_addr); cmd.data2 = slice; status |= mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_DMA, &cmd); } } status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd); sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0); status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd); irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0); status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd); sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0); if (status != 0) { device_printf(sc->dev, "failed set interrupt parameters\n"); return status; } *sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay); /* run a DMA benchmark */ (void) mxge_dma_test(sc, MXGEFW_DMA_TEST); for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->irq_claim = irq_claim + (2 * slice); /* reset mcp/driver shared state back to 0 */ ss->rx_done.idx = 0; ss->rx_done.cnt = 0; ss->tx.req = 0; ss->tx.done = 0; ss->tx.pkt_done = 0; ss->tx.queue_active = 0; ss->tx.activate = 0; ss->tx.deactivate = 0; ss->tx.wake = 0; ss->tx.defrag = 0; ss->tx.stall = 0; ss->rx_big.cnt = 0; ss->rx_small.cnt = 0; ss->lc.lro_bad_csum = 0; ss->lc.lro_queued = 0; ss->lc.lro_flushed = 0; if (ss->fw_stats != NULL) { bzero(ss->fw_stats, sizeof *ss->fw_stats); } } sc->rdma_tags_available = 15; status = mxge_update_mac_address(sc); mxge_change_promisc(sc, 
sc->ifp->if_flags & IFF_PROMISC); mxge_change_pause(sc, sc->pause); mxge_set_multicast_list(sc); if (sc->throttle) { cmd.data0 = sc->throttle; if (mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd)) { device_printf(sc->dev, "can't enable throttle\n"); } } return status; } static int mxge_change_throttle(SYSCTL_HANDLER_ARGS) { mxge_cmd_t cmd; mxge_softc_t *sc; int err; unsigned int throttle; sc = arg1; throttle = sc->throttle; err = sysctl_handle_int(oidp, &throttle, arg2, req); if (err != 0) { return err; } if (throttle == sc->throttle) return 0; if (throttle < MXGE_MIN_THROTTLE || throttle > MXGE_MAX_THROTTLE) return EINVAL; mtx_lock(&sc->driver_mtx); cmd.data0 = throttle; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd); if (err == 0) sc->throttle = throttle; mtx_unlock(&sc->driver_mtx); return err; } static int mxge_change_intr_coal(SYSCTL_HANDLER_ARGS) { mxge_softc_t *sc; unsigned int intr_coal_delay; int err; sc = arg1; intr_coal_delay = sc->intr_coal_delay; err = sysctl_handle_int(oidp, &intr_coal_delay, arg2, req); if (err != 0) { return err; } if (intr_coal_delay == sc->intr_coal_delay) return 0; if (intr_coal_delay == 0 || intr_coal_delay > 1000*1000) return EINVAL; mtx_lock(&sc->driver_mtx); *sc->intr_coal_delay_ptr = htobe32(intr_coal_delay); sc->intr_coal_delay = intr_coal_delay; mtx_unlock(&sc->driver_mtx); return err; } static int mxge_change_flow_control(SYSCTL_HANDLER_ARGS) { mxge_softc_t *sc; unsigned int enabled; int err; sc = arg1; enabled = sc->pause; err = sysctl_handle_int(oidp, &enabled, arg2, req); if (err != 0) { return err; } if (enabled == sc->pause) return 0; mtx_lock(&sc->driver_mtx); err = mxge_change_pause(sc, enabled); mtx_unlock(&sc->driver_mtx); return err; } static int mxge_handle_be32(SYSCTL_HANDLER_ARGS) { int err; if (arg1 == NULL) return EFAULT; arg2 = be32toh(*(int *)arg1); arg1 = NULL; err = sysctl_handle_int(oidp, arg1, arg2, req); return err; } static void mxge_rem_sysctls(mxge_softc_t *sc) { struct mxge_slice_state *ss; int slice; if (sc->slice_sysctl_tree == NULL) return; for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; if (ss == NULL || ss->sysctl_tree == NULL) continue; sysctl_ctx_free(&ss->sysctl_ctx); ss->sysctl_tree = NULL; } sysctl_ctx_free(&sc->slice_sysctl_ctx); sc->slice_sysctl_tree = NULL; } static void mxge_add_sysctls(mxge_softc_t *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; mcp_irq_data_t *fw; struct mxge_slice_state *ss; int slice; char slice_num[8]; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); fw = sc->ss[0].fw_stats; /* random information */ SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "serial_number", CTLFLAG_RD, sc->serial_number_string, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "product_code", CTLFLAG_RD, sc->product_code_string, 0, "product_code"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcie_link_width", CTLFLAG_RD, &sc->link_width, 0, "PCIe link width"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_boundary", CTLFLAG_RD, &sc->tx_boundary, 0, "tx_boundary"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_combine", CTLFLAG_RD, &sc->wc, 0, "write combining PIO?"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_dma_MBs", CTLFLAG_RD, &sc->read_dma, 0, "DMA Read speed in MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_dma_MBs", CTLFLAG_RD, &sc->write_dma, 0, "DMA Write speed in 
MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_write_dma_MBs", CTLFLAG_RD, &sc->read_write_dma, 0, "DMA concurrent Read/Write speed in MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "watchdog_resets", CTLFLAG_RD, &sc->watchdog_resets, 0, "Number of times NIC was reset"); /* performance related tunables */ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_delay", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_intr_coal, "I", "interrupt coalescing delay in usecs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "throttle", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_throttle, "I", "transmit throttling"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "flow_control_enabled", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_flow_control, "I", "interrupt coalescing delay in usecs"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "deassert_wait", CTLFLAG_RW, &mxge_deassert_wait, 0, "Wait for IRQ line to go low in ihandler"); /* stats block from firmware is in network byte order. Need to swap it */ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_up", CTLTYPE_INT|CTLFLAG_RD, &fw->link_up, 0, mxge_handle_be32, "I", "link up"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_tags_available", CTLTYPE_INT|CTLFLAG_RD, &fw->rdma_tags_available, 0, mxge_handle_be32, "I", "rdma_tags_available"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_crc32", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_crc32, 0, mxge_handle_be32, "I", "dropped_bad_crc32"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_phy", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_phy, 0, mxge_handle_be32, "I", "dropped_bad_phy"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_error_or_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_error_or_filtered, 0, mxge_handle_be32, "I", "dropped_link_error_or_filtered"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_overflow", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_overflow, 0, mxge_handle_be32, "I", "dropped_link_overflow"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_multicast_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_multicast_filtered, 0, mxge_handle_be32, "I", "dropped_multicast_filtered"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_big_buffer", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_big_buffer, 0, mxge_handle_be32, "I", "dropped_no_big_buffer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_small_buffer", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_small_buffer, 0, mxge_handle_be32, "I", "dropped_no_small_buffer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_overrun", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_overrun, 0, mxge_handle_be32, "I", "dropped_overrun"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_pause", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_pause, 0, mxge_handle_be32, "I", "dropped_pause"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_runt", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_runt, 0, mxge_handle_be32, "I", "dropped_runt"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_unicast_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_unicast_filtered, 0, mxge_handle_be32, "I", "dropped_unicast_filtered"); /* verbose printing? 
*/ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "verbose", CTLFLAG_RW, &mxge_verbose, 0, "verbose printing"); /* add counters exported for debugging from all slices */ sysctl_ctx_init(&sc->slice_sysctl_ctx); sc->slice_sysctl_tree = SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, children, OID_AUTO, "slice", CTLFLAG_RD, 0, ""); for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; sysctl_ctx_init(&ss->sysctl_ctx); ctx = &ss->sysctl_ctx; children = SYSCTL_CHILDREN(sc->slice_sysctl_tree); sprintf(slice_num, "%d", slice); ss->sysctl_tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, slice_num, CTLFLAG_RD, 0, ""); children = SYSCTL_CHILDREN(ss->sysctl_tree); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_small_cnt", CTLFLAG_RD, &ss->rx_small.cnt, 0, "rx_small_cnt"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_big_cnt", CTLFLAG_RD, &ss->rx_big.cnt, 0, "rx_big_cnt"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, &ss->lc.lro_flushed, 0, "number of lro merge queues flushed"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_bad_csum", CTLFLAG_RD, &ss->lc.lro_bad_csum, 0, "number of bad csums preventing LRO"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, &ss->lc.lro_queued, 0, "number of frames appended to lro merge " "queues"); #ifndef IFNET_BUF_RING /* only transmit from slice 0 for now */ if (slice > 0) continue; #endif SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_req", CTLFLAG_RD, &ss->tx.req, 0, "tx_req"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_done", CTLFLAG_RD, &ss->tx.done, 0, "tx_done"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pkt_done", CTLFLAG_RD, &ss->tx.pkt_done, 0, "tx_pkt_done"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_stall", CTLFLAG_RD, &ss->tx.stall, 0, "tx_stall"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wake", CTLFLAG_RD, &ss->tx.wake, 0, "tx_wake"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_defrag", CTLFLAG_RD, &ss->tx.defrag, 0, "tx_defrag"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_queue_active", CTLFLAG_RD, &ss->tx.queue_active, 0, "tx_queue_active"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_activate", CTLFLAG_RD, &ss->tx.activate, 0, "tx_activate"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_deactivate", CTLFLAG_RD, &ss->tx.deactivate, 0, "tx_deactivate"); } } /* copy an array of mcp_kreq_ether_send_t's to the mcp. Copy backwards one at a time and handle ring wraps */ static inline void mxge_submit_req_backwards(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src, int cnt) { int idx, starting_slot; starting_slot = tx->req; while (cnt > 1) { cnt--; idx = (starting_slot + cnt) & tx->mask; mxge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); wmb(); } } /* * copy an array of mcp_kreq_ether_send_t's to the mcp. Copy * at most 32 bytes at a time, so as to avoid involving the software * pio handler in the nic. 
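* Each mcp_kreq_ether_send_t is 16 bytes, so the copy loop below
* moves two requests per mxge_pio_copy() call and issues a write
* barrier after every 32 bytes written.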
We re-write the first segment's flags * to mark them valid only after writing the entire chain */ static inline void mxge_submit_req(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src, int cnt) { int idx, i; uint32_t *src_ints; volatile uint32_t *dst_ints; mcp_kreq_ether_send_t *srcp; volatile mcp_kreq_ether_send_t *dstp, *dst; uint8_t last_flags; idx = tx->req & tx->mask; last_flags = src->flags; src->flags = 0; wmb(); dst = dstp = &tx->lanai[idx]; srcp = src; if ((idx + cnt) < tx->mask) { for (i = 0; i < (cnt - 1); i += 2) { mxge_pio_copy(dstp, srcp, 2 * sizeof(*src)); wmb(); /* force write every 32 bytes */ srcp += 2; dstp += 2; } } else { /* submit all but the first request, and ensure that it is submitted below */ mxge_submit_req_backwards(tx, src, cnt); i = 0; } if (i < cnt) { /* submit the first request */ mxge_pio_copy(dstp, srcp, sizeof(*src)); wmb(); /* barrier before setting valid flag */ } /* re-write the last 32-bits with the valid flags */ src->flags = last_flags; src_ints = (uint32_t *)src; src_ints+=3; dst_ints = (volatile uint32_t *)dst; dst_ints+=3; *dst_ints = *src_ints; tx->req += cnt; wmb(); } static int mxge_parse_tx(struct mxge_slice_state *ss, struct mbuf *m, struct mxge_pkt_info *pi) { struct ether_vlan_header *eh; uint16_t etype; int tso = m->m_pkthdr.csum_flags & (CSUM_TSO); #if IFCAP_TSO6 && defined(INET6) int nxt; #endif eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); pi->ip_off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); pi->ip_off = ETHER_HDR_LEN; } switch (etype) { case ETHERTYPE_IP: /* * ensure ip header is in first mbuf, copy it to a * scratch buffer if not */ pi->ip = (struct ip *)(m->m_data + pi->ip_off); pi->ip6 = NULL; if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip))) { m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip), ss->scratch); pi->ip = (struct ip *)(ss->scratch + pi->ip_off); } pi->ip_hlen = pi->ip->ip_hl << 2; if (!tso) return 0; if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr))) { m_copydata(m, 0, pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr), ss->scratch); pi->ip = (struct ip *)(ss->scratch + pi->ip_off); } pi->tcp = (struct tcphdr *)((char *)pi->ip + pi->ip_hlen); break; #if IFCAP_TSO6 && defined(INET6) case ETHERTYPE_IPV6: pi->ip6 = (struct ip6_hdr *)(m->m_data + pi->ip_off); if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip6))) { m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip6), ss->scratch); pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); } nxt = 0; pi->ip_hlen = ip6_lasthdr(m, pi->ip_off, IPPROTO_IPV6, &nxt); pi->ip_hlen -= pi->ip_off; if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) return EINVAL; if (!tso) return 0; if (pi->ip_off + pi->ip_hlen > ss->sc->max_tso6_hlen) return EINVAL; if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr))) { m_copydata(m, 0, pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr), ss->scratch); pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); } pi->tcp = (struct tcphdr *)((char *)pi->ip6 + pi->ip_hlen); break; #endif default: return EINVAL; } return 0; } #if IFCAP_TSO4 static void mxge_encap_tso(struct mxge_slice_state *ss, struct mbuf *m, int busdma_seg_cnt, struct mxge_pkt_info *pi) { mxge_tx_ring_t *tx; mcp_kreq_ether_send_t *req; bus_dma_segment_t *seg; uint32_t low, high_swapped; int len, seglen, cum_len, cum_len_next; int next_is_first, chop, cnt, rdma_count, small; uint16_t pseudo_hdr_offset, cksum_offset, 
mss, sum; uint8_t flags, flags_next; static int once; mss = m->m_pkthdr.tso_segsz; /* negative cum_len signifies to the * send loop that we are still in the * header portion of the TSO packet. */ cksum_offset = pi->ip_off + pi->ip_hlen; cum_len = -(cksum_offset + (pi->tcp->th_off << 2)); /* TSO implies checksum offload on this hardware */ if (__predict_false((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) == 0)) { /* * If packet has full TCP csum, replace it with pseudo hdr * sum that the NIC expects, otherwise the NIC will emit * packets with bad TCP checksums. */ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); if (pi->ip6) { #if (CSUM_TCP_IPV6 != 0) && defined(INET6) m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; sum = in6_cksum_pseudo(pi->ip6, m->m_pkthdr.len - cksum_offset, IPPROTO_TCP, 0); #endif } else { #ifdef INET m->m_pkthdr.csum_flags |= CSUM_TCP; sum = in_pseudo(pi->ip->ip_src.s_addr, pi->ip->ip_dst.s_addr, htons(IPPROTO_TCP + (m->m_pkthdr.len - cksum_offset))); #endif } m_copyback(m, offsetof(struct tcphdr, th_sum) + cksum_offset, sizeof(sum), (caddr_t)&sum); } flags = MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST; /* for TSO, pseudo_hdr_offset holds mss. * The firmware figures out where to put * the checksum by parsing the header. */ pseudo_hdr_offset = htobe16(mss); if (pi->ip6) { /* * for IPv6 TSO, the "checksum offset" is re-purposed * to store the TCP header len */ cksum_offset = (pi->tcp->th_off << 2); } tx = &ss->tx; req = tx->req_list; seg = tx->seg_list; cnt = 0; rdma_count = 0; /* "rdma_count" is the number of RDMAs belonging to the * current packet BEFORE the current send request. For * non-TSO packets, this is equal to "count". * For TSO packets, rdma_count needs to be reset * to 0 after a segment cut. * * The rdma_count field of the send request is * the number of RDMAs of the packet starting at * that request. For TSO send requests with one or more cuts * in the middle, this is the number of RDMAs starting * after the last cut in the request. All previous * segments before the last cut implicitly have 1 RDMA. * * Since the number of RDMAs is not known beforehand, * it must be filled-in retroactively - after each * segmentation cut or at the end of the entire packet. 
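*
* Walking through the branchless updates below: -(chop | next_is_first)
* is either 0 or ~0, so OR-ing it in resets rdma_count to -1 at every
* segmentation cut, and "rdma_count += chop & !next_is_first" turns
* that -1 into 0 when the cut lands inside the current descriptor.
* The unconditional rdma_count++ per descriptor then counts the RDMAs
* of the frame in progress, and (req-rdma_count)->rdma_count is the
* back-patch of the descriptor that started it.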
*/ while (busdma_seg_cnt) { /* Break the busdma segment up into pieces*/ low = MXGE_LOWPART_TO_U32(seg->ds_addr); high_swapped = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); len = seg->ds_len; while (len) { flags_next = flags & ~MXGEFW_FLAGS_FIRST; seglen = len; cum_len_next = cum_len + seglen; (req-rdma_count)->rdma_count = rdma_count + 1; if (__predict_true(cum_len >= 0)) { /* payload */ chop = (cum_len_next > mss); cum_len_next = cum_len_next % mss; next_is_first = (cum_len_next == 0); flags |= chop * MXGEFW_FLAGS_TSO_CHOP; flags_next |= next_is_first * MXGEFW_FLAGS_FIRST; rdma_count |= -(chop | next_is_first); rdma_count += chop & !next_is_first; } else if (cum_len_next >= 0) { /* header ends */ rdma_count = -1; cum_len_next = 0; seglen = -cum_len; small = (mss <= MXGEFW_SEND_SMALL_SIZE); flags_next = MXGEFW_FLAGS_TSO_PLD | MXGEFW_FLAGS_FIRST | (small * MXGEFW_FLAGS_SMALL); } req->addr_high = high_swapped; req->addr_low = htobe32(low); req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; req->rdma_count = 1; req->length = htobe16(seglen); req->cksum_offset = cksum_offset; req->flags = flags | ((cum_len & 1) * MXGEFW_FLAGS_ALIGN_ODD); low += seglen; len -= seglen; cum_len = cum_len_next; flags = flags_next; req++; cnt++; rdma_count++; if (cksum_offset != 0 && !pi->ip6) { if (__predict_false(cksum_offset > seglen)) cksum_offset -= seglen; else cksum_offset = 0; } if (__predict_false(cnt > tx->max_desc)) goto drop; } busdma_seg_cnt--; seg++; } (req-rdma_count)->rdma_count = rdma_count; do { req--; req->flags |= MXGEFW_FLAGS_TSO_LAST; } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST))); tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; mxge_submit_req(tx, tx->req_list, cnt); #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { /* tell the NIC to start polling this slice */ *tx->send_go = 1; tx->queue_active = 1; tx->activate++; wmb(); } #endif return; drop: bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map); m_freem(m); ss->oerrors++; if (!once) { printf("tx->max_desc exceeded via TSO!\n"); printf("mss = %d, %ld, %d!\n", mss, (long)seg - (long)tx->seg_list, tx->max_desc); once = 1; } return; } #endif /* IFCAP_TSO4 */ #ifdef MXGE_NEW_VLAN_API /* * We reproduce the software vlan tag insertion from * net/if_vlan.c:vlan_start() here so that we can advertise "hardware" * vlan tag insertion. We need to advertise this in order to have the * vlan interface respect our csum offload flags. */ static struct mbuf * mxge_vlan_tag_insert(struct mbuf *m) { struct ether_vlan_header *evl; M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT); if (__predict_false(m == NULL)) return NULL; if (m->m_len < sizeof(*evl)) { m = m_pullup(m, sizeof(*evl)); if (__predict_false(m == NULL)) return NULL; } /* * Transform the Ethernet header into an Ethernet header * with 802.1Q encapsulation. 
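*
* Layout before: [dst(6) | src(6) | type(2) | payload]
* Layout after:  [dst(6) | src(6) | 0x8100(2) | tag(2) | type(2) | payload]
* The M_PREPEND above made room for the 4 extra bytes
* (ETHER_VLAN_ENCAP_LEN); the bcopy below slides the two MAC
* addresses forward over them, leaving the original type field in
* place as the encapsulated type.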
*/ evl = mtod(m, struct ether_vlan_header *); bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN, (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); evl->evl_encap_proto = htons(ETHERTYPE_VLAN); evl->evl_tag = htons(m->m_pkthdr.ether_vtag); m->m_flags &= ~M_VLANTAG; return m; } #endif /* MXGE_NEW_VLAN_API */ static void mxge_encap(struct mxge_slice_state *ss, struct mbuf *m) { struct mxge_pkt_info pi = {0,0,0,0}; mxge_softc_t *sc; mcp_kreq_ether_send_t *req; bus_dma_segment_t *seg; struct mbuf *m_tmp; struct ifnet *ifp; mxge_tx_ring_t *tx; int cnt, cum_len, err, i, idx, odd_flag; uint16_t pseudo_hdr_offset; uint8_t flags, cksum_offset; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; #ifdef MXGE_NEW_VLAN_API if (m->m_flags & M_VLANTAG) { m = mxge_vlan_tag_insert(m); if (__predict_false(m == NULL)) goto drop_without_m; } #endif if (m->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { if (mxge_parse_tx(ss, m, &pi)) goto drop; } /* (try to) map the frame for DMA */ idx = tx->req & tx->mask; err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map, m, tx->seg_list, &cnt, BUS_DMA_NOWAIT); if (__predict_false(err == EFBIG)) { /* Too many segments in the chain. Try to defrag */ m_tmp = m_defrag(m, M_NOWAIT); if (m_tmp == NULL) { goto drop; } ss->tx.defrag++; m = m_tmp; err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map, m, tx->seg_list, &cnt, BUS_DMA_NOWAIT); } if (__predict_false(err != 0)) { device_printf(sc->dev, "bus_dmamap_load_mbuf_sg returned %d" " packet len = %d\n", err, m->m_pkthdr.len); goto drop; } bus_dmamap_sync(tx->dmat, tx->info[idx].map, BUS_DMASYNC_PREWRITE); tx->info[idx].m = m; #if IFCAP_TSO4 /* TSO is different enough, we handle it in another routine */ if (m->m_pkthdr.csum_flags & (CSUM_TSO)) { mxge_encap_tso(ss, m, cnt, &pi); return; } #endif req = tx->req_list; cksum_offset = 0; pseudo_hdr_offset = 0; flags = MXGEFW_FLAGS_NO_TSO; /* checksum offloading? 
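*
* For CSUM_DELAY_DATA the stack has already stored the pseudo-header
* sum in the packet's checksum field; the firmware is told where the
* data to be checksummed starts (cksum_offset) and the offset of the
* checksum field itself (cksum_offset + csum_data, passed as
* pseudo_hdr_offset below).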
*/ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { /* ensure ip header is in first mbuf, copy it to a scratch buffer if not */ cksum_offset = pi.ip_off + pi.ip_hlen; pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data; pseudo_hdr_offset = htobe16(pseudo_hdr_offset); req->cksum_offset = cksum_offset; flags |= MXGEFW_FLAGS_CKSUM; odd_flag = MXGEFW_FLAGS_ALIGN_ODD; } else { odd_flag = 0; } if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE) flags |= MXGEFW_FLAGS_SMALL; /* convert segments into a request list */ cum_len = 0; seg = tx->seg_list; req->flags = MXGEFW_FLAGS_FIRST; for (i = 0; i < cnt; i++) { req->addr_low = htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); req->length = htobe16(seg->ds_len); req->cksum_offset = cksum_offset; if (cksum_offset > seg->ds_len) cksum_offset -= seg->ds_len; else cksum_offset = 0; req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; /* complete solid 16-byte block */ req->rdma_count = 1; req->flags |= flags | ((cum_len & 1) * odd_flag); cum_len += seg->ds_len; seg++; req++; req->flags = 0; } req--; /* pad runts to 60 bytes */ if (cum_len < 60) { req++; req->addr_low = htobe32(MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr)); req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr)); req->length = htobe16(60 - cum_len); req->cksum_offset = 0; req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; /* complete solid 16-byte block */ req->rdma_count = 1; req->flags |= flags | ((cum_len & 1) * odd_flag); cnt++; } tx->req_list[0].rdma_count = cnt; #if 0 /* print what the firmware will see */ for (i = 0; i < cnt; i++) { printf("%d: addr: 0x%x 0x%x len:%d pso%d," "cso:%d, flags:0x%x, rdma:%d\n", i, (int)ntohl(tx->req_list[i].addr_high), (int)ntohl(tx->req_list[i].addr_low), (int)ntohs(tx->req_list[i].length), (int)ntohs(tx->req_list[i].pseudo_hdr_offset), tx->req_list[i].cksum_offset, tx->req_list[i].flags, tx->req_list[i].rdma_count); } printf("--------------\n"); #endif tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; mxge_submit_req(tx, tx->req_list, cnt); #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { /* tell the NIC to start polling this slice */ *tx->send_go = 1; tx->queue_active = 1; tx->activate++; wmb(); } #endif return; drop: m_freem(m); drop_without_m: ss->oerrors++; return; } #ifdef IFNET_BUF_RING static void mxge_qflush(struct ifnet *ifp) { mxge_softc_t *sc = ifp->if_softc; mxge_tx_ring_t *tx; struct mbuf *m; int slice; for (slice = 0; slice < sc->num_slices; slice++) { tx = &sc->ss[slice].tx; mtx_lock(&tx->mtx); while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) m_freem(m); mtx_unlock(&tx->mtx); } if_qflush(ifp); } static inline void mxge_start_locked(struct mxge_slice_state *ss) { mxge_softc_t *sc; struct mbuf *m; struct ifnet *ifp; mxge_tx_ring_t *tx; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) { m = drbr_dequeue(ifp, tx->br); if (m == NULL) { return; } /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } /* ran out of transmit slots */ if (((ss->if_drv_flags & IFF_DRV_OACTIVE) == 0) && (!drbr_empty(ifp, tx->br))) { ss->if_drv_flags |= IFF_DRV_OACTIVE; tx->stall++; } } static int mxge_transmit_locked(struct mxge_slice_state *ss, struct mbuf *m) { mxge_softc_t *sc; struct ifnet *ifp; mxge_tx_ring_t *tx; int err; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; if ((ss->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 
IFF_DRV_RUNNING) { err = drbr_enqueue(ifp, tx->br, m); return (err); } if (!drbr_needs_enqueue(ifp, tx->br) && ((tx->mask - (tx->req - tx->done)) > tx->max_desc)) { /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } else if ((err = drbr_enqueue(ifp, tx->br, m)) != 0) { return (err); } if (!drbr_empty(ifp, tx->br)) mxge_start_locked(ss); return (0); } static int mxge_transmit(struct ifnet *ifp, struct mbuf *m) { mxge_softc_t *sc = ifp->if_softc; struct mxge_slice_state *ss; mxge_tx_ring_t *tx; int err = 0; int slice; slice = m->m_pkthdr.flowid; slice &= (sc->num_slices - 1); /* num_slices always power of 2 */ ss = &sc->ss[slice]; tx = &ss->tx; if (mtx_trylock(&tx->mtx)) { err = mxge_transmit_locked(ss, m); mtx_unlock(&tx->mtx); } else { err = drbr_enqueue(ifp, tx->br, m); } return (err); } #else static inline void mxge_start_locked(struct mxge_slice_state *ss) { mxge_softc_t *sc; struct mbuf *m; struct ifnet *ifp; mxge_tx_ring_t *tx; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { return; } /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } /* ran out of transmit slots */ if ((sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; tx->stall++; } } #endif static void mxge_start(struct ifnet *ifp) { mxge_softc_t *sc = ifp->if_softc; struct mxge_slice_state *ss; /* only use the first slice for now */ ss = &sc->ss[0]; mtx_lock(&ss->tx.mtx); mxge_start_locked(ss); mtx_unlock(&ss->tx.mtx); } /* * copy an array of mcp_kreq_ether_recv_t's to the mcp. Copy * at most 32 bytes at a time, so as to avoid involving the software * pio handler in the nic. We re-write the first segment's low * DMA address to mark it valid only after we write the entire chunk * in a burst */ static inline void mxge_submit_8rx(volatile mcp_kreq_ether_recv_t *dst, mcp_kreq_ether_recv_t *src) { uint32_t low; low = src->addr_low; src->addr_low = 0xffffffff; mxge_pio_copy(dst, src, 4 * sizeof (*src)); wmb(); mxge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src)); wmb(); src->addr_low = low; dst->addr_low = low; wmb(); } static int mxge_get_buf_small(struct mxge_slice_state *ss, bus_dmamap_t map, int idx) { bus_dma_segment_t seg; struct mbuf *m; mxge_rx_ring_t *rx = &ss->rx_small; int cnt, err; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { rx->alloc_fail++; err = ENOBUFS; goto done; } m->m_len = MHLEN; err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, &seg, &cnt, BUS_DMA_NOWAIT); if (err != 0) { m_free(m); goto done; } rx->info[idx].m = m; rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr)); rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr)); done: if ((idx & 7) == 7) mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); return err; } static int mxge_get_buf_big(struct mxge_slice_state *ss, bus_dmamap_t map, int idx) { bus_dma_segment_t seg[3]; struct mbuf *m; mxge_rx_ring_t *rx = &ss->rx_big; int cnt, err, i; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size); if (m == NULL) { rx->alloc_fail++; err = ENOBUFS; goto done; } m->m_len = rx->mlen; err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, seg, &cnt, BUS_DMA_NOWAIT); if (err != 0) { m_free(m); goto done; } rx->info[idx].m = m; rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); #if MXGE_VIRT_JUMBOS for (i = 1; i < cnt; i++) { rx->shadow[idx 
+ i].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg[i].ds_addr)); rx->shadow[idx + i].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg[i].ds_addr)); } #endif done: for (i = 0; i < rx->nbufs; i++) { if ((idx & 7) == 7) { mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); } idx++; } return err; } #ifdef INET6 static uint16_t mxge_csum_generic(uint16_t *raw, int len) { uint32_t csum; csum = 0; while (len > 0) { csum += *raw; raw++; len -= 2; } csum = (csum >> 16) + (csum & 0xffff); csum = (csum >> 16) + (csum & 0xffff); return (uint16_t)csum; } static inline uint16_t mxge_rx_csum6(void *p, struct mbuf *m, uint32_t csum) { uint32_t partial; int nxt, cksum_offset; struct ip6_hdr *ip6 = p; uint16_t c; nxt = ip6->ip6_nxt; cksum_offset = sizeof (*ip6) + ETHER_HDR_LEN; if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) { cksum_offset = ip6_lasthdr(m, ETHER_HDR_LEN, IPPROTO_IPV6, &nxt); if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) return (1); } /* * IPv6 headers do not contain a checksum, and hence * do not checksum to zero, so they don't "fall out" * of the partial checksum calculation like IPv4 * headers do. We need to fix the partial checksum by * subtracting the checksum of the IPv6 header. */ partial = mxge_csum_generic((uint16_t *)ip6, cksum_offset - ETHER_HDR_LEN); csum += ~partial; csum += (csum < ~partial); csum = (csum >> 16) + (csum & 0xFFFF); csum = (csum >> 16) + (csum & 0xFFFF); c = in6_cksum_pseudo(ip6, m->m_pkthdr.len - cksum_offset, nxt, csum); c ^= 0xffff; return (c); } #endif /* INET6 */ /* * Myri10GE hardware checksums are not valid if the sender * padded the frame with non-zero padding. This is because * the firmware just does a simple 16-bit 1s complement * checksum across the entire frame, excluding the first 14 * bytes. It is best to simply check the checksum and * tell the stack about it only if the checksum is good */ static inline uint16_t mxge_rx_csum(struct mbuf *m, int csum) { struct ether_header *eh; #ifdef INET struct ip *ip; #endif #if defined(INET) || defined(INET6) int cap = m->m_pkthdr.rcvif->if_capenable; #endif uint16_t c, etype; eh = mtod(m, struct ether_header *); etype = ntohs(eh->ether_type); switch (etype) { #ifdef INET case ETHERTYPE_IP: if ((cap & IFCAP_RXCSUM) == 0) return (1); ip = (struct ip *)(eh + 1); if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP) return (1); c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl(ntohs(csum) + ntohs(ip->ip_len) - (ip->ip_hl << 2) + ip->ip_p)); c ^= 0xffff; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: if ((cap & IFCAP_RXCSUM_IPV6) == 0) return (1); c = mxge_rx_csum6((eh + 1), m, csum); break; #endif default: c = 1; } return (c); } static void mxge_vlan_tag_remove(struct mbuf *m, uint32_t *csum) { struct ether_vlan_header *evl; struct ether_header *eh; uint32_t partial; evl = mtod(m, struct ether_vlan_header *); eh = mtod(m, struct ether_header *); /* * fix checksum by subtracting ETHER_VLAN_ENCAP_LEN bytes * after what the firmware thought was the end of the ethernet * header. 
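*
* The firmware summed every byte after the first ETHER_HDR_LEN (14)
* bytes, so for a tagged frame its sum includes the 4 bytes (VLAN tag
* value plus encapsulated type) that fall outside the checksummed
* region of the untagged frame. Subtracting them in one's complement
* arithmetic means adding the complement with end-around carry and
* folding back to 16 bits, which is what the csum += ~partial
* sequence below does.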
*/ /* put checksum into host byte order */ *csum = ntohs(*csum); partial = ntohl(*(uint32_t *)(mtod(m, char *) + ETHER_HDR_LEN)); (*csum) += ~partial; (*csum) += ((*csum) < ~partial); (*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF); (*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF); /* restore checksum to network byte order; later consumers expect this */ *csum = htons(*csum); /* save the tag */ #ifdef MXGE_NEW_VLAN_API m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag); #else { struct m_tag *mtag; mtag = m_tag_alloc(MTAG_VLAN, MTAG_VLAN_TAG, sizeof(u_int), M_NOWAIT); if (mtag == NULL) return; VLAN_TAG_VALUE(mtag) = ntohs(evl->evl_tag); m_tag_prepend(m, mtag); } #endif m->m_flags |= M_VLANTAG; /* * Remove the 802.1q header by copying the Ethernet * addresses over it and adjusting the beginning of * the data in the mbuf. The encapsulated Ethernet * type field is already in place. */ bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static inline void mxge_rx_done_big(struct mxge_slice_state *ss, uint32_t len, uint32_t csum, int lro) { mxge_softc_t *sc; struct ifnet *ifp; struct mbuf *m; struct ether_header *eh; mxge_rx_ring_t *rx; bus_dmamap_t old_map; int idx; sc = ss->sc; ifp = sc->ifp; rx = &ss->rx_big; idx = rx->cnt & rx->mask; rx->cnt += rx->nbufs; /* save a pointer to the received mbuf */ m = rx->info[idx].m; /* try to replace the received mbuf */ if (mxge_get_buf_big(ss, rx->extra_map, idx)) { /* drop the frame -- the old mbuf is re-cycled */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* unmap the received buffer */ old_map = rx->info[idx].map; bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx->dmat, old_map); /* swap the bus_dmamap_t's */ rx->info[idx].map = rx->extra_map; rx->extra_map = old_map; /* mcp implicitly skips 1st 2 bytes so that packet is properly * aligned */ m->m_data += MXGEFW_PAD; m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len; ss->ipackets++; eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { mxge_vlan_tag_remove(m, &csum); } /* if the checksum is valid, mark it in the mbuf header */ if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) && (0 == mxge_rx_csum(m, csum))) { /* Tell the stack that the checksum is good */ m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | CSUM_DATA_VALID; #if defined(INET) || defined (INET6) if (lro && (0 == tcp_lro_rx(&ss->lc, m, 0))) return; #endif } /* flowid only valid if RSS hashing is enabled */ if (sc->num_slices > 1) { m->m_pkthdr.flowid = (ss - sc->ss); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); } /* pass the frame up the stack */ (*ifp->if_input)(ifp, m); } static inline void mxge_rx_done_small(struct mxge_slice_state *ss, uint32_t len, uint32_t csum, int lro) { mxge_softc_t *sc; struct ifnet *ifp; struct ether_header *eh; struct mbuf *m; mxge_rx_ring_t *rx; bus_dmamap_t old_map; int idx; sc = ss->sc; ifp = sc->ifp; rx = &ss->rx_small; idx = rx->cnt & rx->mask; rx->cnt++; /* save a pointer to the received mbuf */ m = rx->info[idx].m; /* try to replace the received mbuf */ if (mxge_get_buf_small(ss, rx->extra_map, idx)) { /* drop the frame -- the old mbuf is re-cycled */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* unmap the received buffer */ old_map = rx->info[idx].map; bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx->dmat, old_map); /* swap the bus_dmamap_t's */ rx->info[idx].map = rx->extra_map; rx->extra_map = 
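	    /*
	     * extra_map is a spare map: the replacement mbuf was just
	     * loaded into it by mxge_get_buf_small(), so the map we
	     * unloaded above becomes the new spare.
	     */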
old_map; /* mcp implicitly skips 1st 2 bytes so that packet is properly * aligned */ m->m_data += MXGEFW_PAD; m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len; ss->ipackets++; eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { mxge_vlan_tag_remove(m, &csum); } /* if the checksum is valid, mark it in the mbuf header */ if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) && (0 == mxge_rx_csum(m, csum))) { /* Tell the stack that the checksum is good */ m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | CSUM_DATA_VALID; #if defined(INET) || defined (INET6) if (lro && (0 == tcp_lro_rx(&ss->lc, m, csum))) return; #endif } /* flowid only valid if RSS hashing is enabled */ if (sc->num_slices > 1) { m->m_pkthdr.flowid = (ss - sc->ss); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); } /* pass the frame up the stack */ (*ifp->if_input)(ifp, m); } static inline void mxge_clean_rx_done(struct mxge_slice_state *ss) { mxge_rx_done_t *rx_done = &ss->rx_done; int limit = 0; uint16_t length; uint16_t checksum; int lro; lro = ss->sc->ifp->if_capenable & IFCAP_LRO; while (rx_done->entry[rx_done->idx].length != 0) { length = ntohs(rx_done->entry[rx_done->idx].length); rx_done->entry[rx_done->idx].length = 0; checksum = rx_done->entry[rx_done->idx].checksum; if (length <= (MHLEN - MXGEFW_PAD)) mxge_rx_done_small(ss, length, checksum, lro); else mxge_rx_done_big(ss, length, checksum, lro); rx_done->cnt++; rx_done->idx = rx_done->cnt & rx_done->mask; /* limit potential for livelock */ if (__predict_false(++limit > rx_done->mask / 2)) break; } #if defined(INET) || defined (INET6) while (!SLIST_EMPTY(&ss->lc.lro_active)) { struct lro_entry *lro = SLIST_FIRST(&ss->lc.lro_active); SLIST_REMOVE_HEAD(&ss->lc.lro_active, next); tcp_lro_flush(&ss->lc, lro); } #endif } static inline void mxge_tx_done(struct mxge_slice_state *ss, uint32_t mcp_idx) { struct ifnet *ifp; mxge_tx_ring_t *tx; struct mbuf *m; bus_dmamap_t map; int idx; int *flags; tx = &ss->tx; ifp = ss->sc->ifp; while (tx->pkt_done != mcp_idx) { idx = tx->done & tx->mask; tx->done++; m = tx->info[idx].m; /* mbuf and DMA map only attached to the first segment per-mbuf */ if (m != NULL) { ss->obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) ss->omcasts++; ss->opackets++; tx->info[idx].m = NULL; map = tx->info[idx].map; bus_dmamap_unload(tx->dmat, map); m_freem(m); } if (tx->info[idx].flag) { tx->info[idx].flag = 0; tx->pkt_done++; } } /* If we have space, clear IFF_OACTIVE to tell the stack that its OK to send packets */ #ifdef IFNET_BUF_RING flags = &ss->if_drv_flags; #else flags = &ifp->if_drv_flags; #endif mtx_lock(&ss->tx.mtx); if ((*flags) & IFF_DRV_OACTIVE && tx->req - tx->done < (tx->mask + 1)/4) { *(flags) &= ~IFF_DRV_OACTIVE; ss->tx.wake++; mxge_start_locked(ss); } #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && (tx->req == tx->done)) { /* let the NIC stop polling this queue, since there * are no more transmits pending */ if (tx->req == tx->done) { *tx->send_stop = 1; tx->queue_active = 0; tx->deactivate++; wmb(); } } #endif mtx_unlock(&ss->tx.mtx); } static struct mxge_media_type mxge_xfp_media_types[] = { {IFM_10G_CX4, 0x7f, "10GBASE-CX4 (module)"}, {IFM_10G_SR, (1 << 7), "10GBASE-SR"}, {IFM_10G_LR, (1 << 6), "10GBASE-LR"}, {0, (1 << 5), "10GBASE-ER"}, {IFM_10G_LRM, (1 << 4), "10GBASE-LRM"}, {0, (1 << 3), "10GBASE-SW"}, {0, (1 << 2), "10GBASE-LW"}, {0, (1 << 1), "10GBASE-EW"}, {0, (1 << 0), "Reserved"} }; static struct mxge_media_type mxge_sfp_media_types[] = { {IFM_10G_TWINAX, 
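	/*
	 * Entry 0 is matched by an exact compare of the whole
	 * compliance byte in mxge_media_probe(); the remaining
	 * entries are tested bit by bit.
	 */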
0, "10GBASE-Twinax"}, {0, (1 << 7), "Reserved"}, {IFM_10G_LRM, (1 << 6), "10GBASE-LRM"}, {IFM_10G_LR, (1 << 5), "10GBASE-LR"}, {IFM_10G_SR, (1 << 4), "10GBASE-SR"}, {IFM_10G_TWINAX,(1 << 0), "10GBASE-Twinax"} }; static void mxge_media_set(mxge_softc_t *sc, int media_type) { ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | media_type, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | media_type); sc->current_media = media_type; sc->media.ifm_media = sc->media.ifm_cur->ifm_media; } static void mxge_media_init(mxge_softc_t *sc) { char *ptr; int i; ifmedia_removeall(&sc->media); mxge_media_set(sc, IFM_AUTO); /* * parse the product code to deterimine the interface type * (CX4, XFP, Quad Ribbon Fiber) by looking at the character * after the 3rd dash in the driver's cached copy of the * EEPROM's product code string. */ ptr = sc->product_code_string; if (ptr == NULL) { device_printf(sc->dev, "Missing product code\n"); return; } for (i = 0; i < 3; i++, ptr++) { ptr = strchr(ptr, '-'); if (ptr == NULL) { device_printf(sc->dev, "only %d dashes in PC?!?\n", i); return; } } if (*ptr == 'C' || *(ptr +1) == 'C') { /* -C is CX4 */ sc->connector = MXGE_CX4; mxge_media_set(sc, IFM_10G_CX4); } else if (*ptr == 'Q') { /* -Q is Quad Ribbon Fiber */ sc->connector = MXGE_QRF; device_printf(sc->dev, "Quad Ribbon Fiber Media\n"); /* FreeBSD has no media type for Quad ribbon fiber */ } else if (*ptr == 'R') { /* -R is XFP */ sc->connector = MXGE_XFP; } else if (*ptr == 'S' || *(ptr +1) == 'S') { /* -S or -2S is SFP+ */ sc->connector = MXGE_SFP; } else { device_printf(sc->dev, "Unknown media type: %c\n", *ptr); } } /* * Determine the media type for a NIC. Some XFPs will identify * themselves only when their link is up, so this is initiated via a * link up interrupt. However, this can potentially take up to * several milliseconds, so it is run via the watchdog routine, rather * than in the interrupt handler itself. */ static void mxge_media_probe(mxge_softc_t *sc) { mxge_cmd_t cmd; char *cage_type; struct mxge_media_type *mxge_media_types = NULL; int i, err, ms, mxge_media_type_entries; uint32_t byte; sc->need_media_probe = 0; if (sc->connector == MXGE_XFP) { /* -R is XFP */ mxge_media_types = mxge_xfp_media_types; mxge_media_type_entries = sizeof (mxge_xfp_media_types) / sizeof (mxge_xfp_media_types[0]); byte = MXGE_XFP_COMPLIANCE_BYTE; cage_type = "XFP"; } else if (sc->connector == MXGE_SFP) { /* -S or -2S is SFP+ */ mxge_media_types = mxge_sfp_media_types; mxge_media_type_entries = sizeof (mxge_sfp_media_types) / sizeof (mxge_sfp_media_types[0]); cage_type = "SFP+"; byte = 3; } else { /* nothing to do; media type cannot change */ return; } /* * At this point we know the NIC has an XFP cage, so now we * try to determine what is in the cage by using the * firmware's XFP I2C commands to read the XFP 10GbE compilance * register. 
We read just one byte, which may take over * a millisecond */ cmd.data0 = 0; /* just fetch 1 byte, not all 256 */ cmd.data1 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd); if (err == MXGEFW_CMD_ERROR_I2C_FAILURE) { device_printf(sc->dev, "failed to read XFP\n"); } if (err == MXGEFW_CMD_ERROR_I2C_ABSENT) { device_printf(sc->dev, "Type R/S with no XFP!?!?\n"); } if (err != MXGEFW_CMD_OK) { return; } /* now we wait for the data to be cached */ cmd.data0 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); for (ms = 0; (err == EBUSY) && (ms < 50); ms++) { DELAY(1000); cmd.data0 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); } if (err != MXGEFW_CMD_OK) { device_printf(sc->dev, "failed to read %s (%d, %dms)\n", cage_type, err, ms); return; } if (cmd.data0 == mxge_media_types[0].bitmask) { if (mxge_verbose) device_printf(sc->dev, "%s:%s\n", cage_type, mxge_media_types[0].name); if (sc->current_media != mxge_media_types[0].flag) { mxge_media_init(sc); mxge_media_set(sc, mxge_media_types[0].flag); } return; } for (i = 1; i < mxge_media_type_entries; i++) { if (cmd.data0 & mxge_media_types[i].bitmask) { if (mxge_verbose) device_printf(sc->dev, "%s:%s\n", cage_type, mxge_media_types[i].name); if (sc->current_media != mxge_media_types[i].flag) { mxge_media_init(sc); mxge_media_set(sc, mxge_media_types[i].flag); } return; } } if (mxge_verbose) device_printf(sc->dev, "%s media 0x%x unknown\n", cage_type, cmd.data0); return; } static void mxge_intr(void *arg) { struct mxge_slice_state *ss = arg; mxge_softc_t *sc = ss->sc; mcp_irq_data_t *stats = ss->fw_stats; mxge_tx_ring_t *tx = &ss->tx; mxge_rx_done_t *rx_done = &ss->rx_done; uint32_t send_done_count; uint8_t valid; #ifndef IFNET_BUF_RING /* an interrupt on a non-zero slice is implicitly valid since MSI-X irqs are not shared */ if (ss != sc->ss) { mxge_clean_rx_done(ss); *ss->irq_claim = be32toh(3); return; } #endif /* make sure the DMA has finished */ if (!stats->valid) { return; } valid = stats->valid; if (sc->legacy_irq) { /* lower legacy IRQ */ *sc->irq_deassert = 0; if (!mxge_deassert_wait) /* don't wait for conf. that irq is low */ stats->valid = 0; } else { stats->valid = 0; } /* loop while waiting for legacy irq deassertion */ do { /* check for transmit completes and receives */ send_done_count = be32toh(stats->send_done_count); while ((send_done_count != tx->pkt_done) || (rx_done->entry[rx_done->idx].length != 0)) { if (send_done_count != tx->pkt_done) mxge_tx_done(ss, (int)send_done_count); mxge_clean_rx_done(ss); send_done_count = be32toh(stats->send_done_count); } if (sc->legacy_irq && mxge_deassert_wait) wmb(); } while (*((volatile uint8_t *) &stats->valid)); /* fw link & error stats meaningful only on the first slice */ if (__predict_false((ss == sc->ss) && stats->stats_updated)) { if (sc->link_state != stats->link_up) { sc->link_state = stats->link_up; if (sc->link_state) { if_link_state_change(sc->ifp, LINK_STATE_UP); if (mxge_verbose) device_printf(sc->dev, "link up\n"); } else { if_link_state_change(sc->ifp, LINK_STATE_DOWN); if (mxge_verbose) device_printf(sc->dev, "link down\n"); } sc->need_media_probe = 1; } if (sc->rdma_tags_available != be32toh(stats->rdma_tags_available)) { sc->rdma_tags_available = be32toh(stats->rdma_tags_available); device_printf(sc->dev, "RDMA timed out! 
%d tags " "left\n", sc->rdma_tags_available); } if (stats->link_down) { sc->down_cnt += stats->link_down; sc->link_state = 0; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } } /* check to see if we have rx token to pass back */ if (valid & 0x1) *ss->irq_claim = be32toh(3); *(ss->irq_claim + 1) = be32toh(3); } static void mxge_init(void *arg) { mxge_softc_t *sc = arg; struct ifnet *ifp = sc->ifp; mtx_lock(&sc->driver_mtx); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) (void) mxge_open(sc); mtx_unlock(&sc->driver_mtx); } static void mxge_free_slice_mbufs(struct mxge_slice_state *ss) { int i; #if defined(INET) || defined(INET6) tcp_lro_free(&ss->lc); #endif for (i = 0; i <= ss->rx_big.mask; i++) { if (ss->rx_big.info[i].m == NULL) continue; bus_dmamap_unload(ss->rx_big.dmat, ss->rx_big.info[i].map); m_freem(ss->rx_big.info[i].m); ss->rx_big.info[i].m = NULL; } for (i = 0; i <= ss->rx_small.mask; i++) { if (ss->rx_small.info[i].m == NULL) continue; bus_dmamap_unload(ss->rx_small.dmat, ss->rx_small.info[i].map); m_freem(ss->rx_small.info[i].m); ss->rx_small.info[i].m = NULL; } /* transmit ring used only on the first slice */ if (ss->tx.info == NULL) return; for (i = 0; i <= ss->tx.mask; i++) { ss->tx.info[i].flag = 0; if (ss->tx.info[i].m == NULL) continue; bus_dmamap_unload(ss->tx.dmat, ss->tx.info[i].map); m_freem(ss->tx.info[i].m); ss->tx.info[i].m = NULL; } } static void mxge_free_mbufs(mxge_softc_t *sc) { int slice; for (slice = 0; slice < sc->num_slices; slice++) mxge_free_slice_mbufs(&sc->ss[slice]); } static void mxge_free_slice_rings(struct mxge_slice_state *ss) { int i; if (ss->rx_done.entry != NULL) mxge_dma_free(&ss->rx_done.dma); ss->rx_done.entry = NULL; if (ss->tx.req_bytes != NULL) free(ss->tx.req_bytes, M_DEVBUF); ss->tx.req_bytes = NULL; if (ss->tx.seg_list != NULL) free(ss->tx.seg_list, M_DEVBUF); ss->tx.seg_list = NULL; if (ss->rx_small.shadow != NULL) free(ss->rx_small.shadow, M_DEVBUF); ss->rx_small.shadow = NULL; if (ss->rx_big.shadow != NULL) free(ss->rx_big.shadow, M_DEVBUF); ss->rx_big.shadow = NULL; if (ss->tx.info != NULL) { if (ss->tx.dmat != NULL) { for (i = 0; i <= ss->tx.mask; i++) { bus_dmamap_destroy(ss->tx.dmat, ss->tx.info[i].map); } bus_dma_tag_destroy(ss->tx.dmat); } free(ss->tx.info, M_DEVBUF); } ss->tx.info = NULL; if (ss->rx_small.info != NULL) { if (ss->rx_small.dmat != NULL) { for (i = 0; i <= ss->rx_small.mask; i++) { bus_dmamap_destroy(ss->rx_small.dmat, ss->rx_small.info[i].map); } bus_dmamap_destroy(ss->rx_small.dmat, ss->rx_small.extra_map); bus_dma_tag_destroy(ss->rx_small.dmat); } free(ss->rx_small.info, M_DEVBUF); } ss->rx_small.info = NULL; if (ss->rx_big.info != NULL) { if (ss->rx_big.dmat != NULL) { for (i = 0; i <= ss->rx_big.mask; i++) { bus_dmamap_destroy(ss->rx_big.dmat, ss->rx_big.info[i].map); } bus_dmamap_destroy(ss->rx_big.dmat, ss->rx_big.extra_map); bus_dma_tag_destroy(ss->rx_big.dmat); } free(ss->rx_big.info, M_DEVBUF); } ss->rx_big.info = NULL; } static void mxge_free_rings(mxge_softc_t *sc) { int slice; for (slice = 0; slice < sc->num_slices; slice++) mxge_free_slice_rings(&sc->ss[slice]); } static int mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries, int tx_ring_entries) { mxge_softc_t *sc = ss->sc; size_t bytes; int err, i; /* allocate per-slice receive resources */ ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; ss->rx_done.mask = (2 * rx_ring_entries) - 1; /* allocate the rx shadow rings */ bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow); ss->rx_small.shadow = malloc(bytes, 
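	/*
	 * The shadow rings are host-resident copies of the receive
	 * descriptor rings; mxge_submit_8rx() PIO-copies them to the
	 * NIC eight entries at a time.
	 */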
M_DEVBUF, M_ZERO|M_WAITOK); bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow); ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the rx host info rings */ bytes = rx_ring_entries * sizeof (*ss->rx_small.info); ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); bytes = rx_ring_entries * sizeof (*ss->rx_big.info); ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the rx busdma resources */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ 4096, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ MHLEN, /* maxsize */ 1, /* num segs */ MHLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->rx_small.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating rx_small dmat\n", err); return err; } err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ #if MXGE_VIRT_JUMBOS 4096, /* boundary */ #else 0, /* boundary */ #endif BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 3*4096, /* maxsize */ #if MXGE_VIRT_JUMBOS 3, /* num segs */ 4096, /* maxsegsize*/ #else 1, /* num segs */ MJUM9BYTES, /* maxsegsize*/ #endif BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->rx_big.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating rx_big dmat\n", err); return err; } for (i = 0; i <= ss->rx_small.mask; i++) { err = bus_dmamap_create(ss->rx_small.dmat, 0, &ss->rx_small.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d rx_small dmamap\n", err); return err; } } err = bus_dmamap_create(ss->rx_small.dmat, 0, &ss->rx_small.extra_map); if (err != 0) { device_printf(sc->dev, "Err %d extra rx_small dmamap\n", err); return err; } for (i = 0; i <= ss->rx_big.mask; i++) { err = bus_dmamap_create(ss->rx_big.dmat, 0, &ss->rx_big.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d rx_big dmamap\n", err); return err; } } err = bus_dmamap_create(ss->rx_big.dmat, 0, &ss->rx_big.extra_map); if (err != 0) { device_printf(sc->dev, "Err %d extra rx_big dmamap\n", err); return err; } /* now allocate TX resources */ #ifndef IFNET_BUF_RING /* only use a single TX ring for now */ if (ss != ss->sc->ss) return 0; #endif ss->tx.mask = tx_ring_entries - 1; ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4); /* allocate the tx request copy block */ bytes = 8 + sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4); ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK); /* ensure req_list entries are aligned to 8 bytes */ ss->tx.req_list = (mcp_kreq_ether_send_t *) ((unsigned long)(ss->tx.req_bytes + 7) & ~7UL); /* allocate the tx busdma segment list */ bytes = sizeof (*ss->tx.seg_list) * ss->tx.max_desc; ss->tx.seg_list = (bus_dma_segment_t *) malloc(bytes, M_DEVBUF, M_WAITOK); /* allocate the tx host info ring */ bytes = tx_ring_entries * sizeof (*ss->tx.info); ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the tx busdma resources */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ sc->tx_boundary, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 65536 + 256, /* maxsize */ ss->tx.max_desc - 2, /* num segs */ sc->tx_boundary, /* maxsegsz */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->tx.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating tx dmat\n", err); return err; } /* now use these tags to setup dmamaps for each slot in the 
ring */ for (i = 0; i <= ss->tx.mask; i++) { err = bus_dmamap_create(ss->tx.dmat, 0, &ss->tx.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d tx dmamap\n", err); return err; } } return 0; } static int mxge_alloc_rings(mxge_softc_t *sc) { mxge_cmd_t cmd; int tx_ring_size; int tx_ring_entries, rx_ring_entries; int err, slice; /* get ring sizes */ err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd); tx_ring_size = cmd.data0; if (err != 0) { device_printf(sc->dev, "Cannot determine tx ring sizes\n"); goto abort; } tx_ring_entries = tx_ring_size / sizeof (mcp_kreq_ether_send_t); rx_ring_entries = sc->rx_ring_size / sizeof (mcp_dma_addr_t); IFQ_SET_MAXLEN(&sc->ifp->if_snd, tx_ring_entries - 1); sc->ifp->if_snd.ifq_drv_maxlen = sc->ifp->if_snd.ifq_maxlen; IFQ_SET_READY(&sc->ifp->if_snd); for (slice = 0; slice < sc->num_slices; slice++) { err = mxge_alloc_slice_rings(&sc->ss[slice], rx_ring_entries, tx_ring_entries); if (err != 0) goto abort; } return 0; abort: mxge_free_rings(sc); return err; } static void mxge_choose_params(int mtu, int *big_buf_size, int *cl_size, int *nbufs) { int bufsize = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD; if (bufsize < MCLBYTES) { /* easy, everything fits in a single buffer */ *big_buf_size = MCLBYTES; *cl_size = MCLBYTES; *nbufs = 1; return; } if (bufsize < MJUMPAGESIZE) { /* still easy, everything still fits in a single buffer */ *big_buf_size = MJUMPAGESIZE; *cl_size = MJUMPAGESIZE; *nbufs = 1; return; } #if MXGE_VIRT_JUMBOS /* now we need to use virtually contiguous buffers */ *cl_size = MJUM9BYTES; *big_buf_size = 4096; *nbufs = mtu / 4096 + 1; /* needs to be a power of two, so round up */ if (*nbufs == 3) *nbufs = 4; #else *cl_size = MJUM9BYTES; *big_buf_size = MJUM9BYTES; *nbufs = 1; #endif } static int mxge_slice_open(struct mxge_slice_state *ss, int nbufs, int cl_size) { mxge_softc_t *sc; mxge_cmd_t cmd; bus_dmamap_t map; int err, i, slice; sc = ss->sc; slice = ss - sc->ss; #if defined(INET) || defined(INET6) (void)tcp_lro_init(&ss->lc); #endif ss->lc.ifp = sc->ifp; /* get the lanai pointers to the send and receive rings */ err = 0; #ifndef IFNET_BUF_RING /* We currently only send from the first slice */ if (slice == 0) { #endif cmd.data0 = slice; err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd); ss->tx.lanai = (volatile mcp_kreq_ether_send_t *)(sc->sram + cmd.data0); ss->tx.send_go = (volatile uint32_t *) (sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice); ss->tx.send_stop = (volatile uint32_t *) (sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); #ifndef IFNET_BUF_RING } #endif cmd.data0 = slice; err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd); ss->rx_small.lanai = (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); cmd.data0 = slice; err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd); ss->rx_big.lanai = (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); if (err != 0) { device_printf(sc->dev, "failed to get ring sizes or locations\n"); return EIO; } /* stock receive rings */ for (i = 0; i <= ss->rx_small.mask; i++) { map = ss->rx_small.info[i].map; err = mxge_get_buf_small(ss, map, i); if (err) { device_printf(sc->dev, "alloced %d/%d smalls\n", i, ss->rx_small.mask + 1); return ENOMEM; } } for (i = 0; i <= ss->rx_big.mask; i++) { ss->rx_big.shadow[i].addr_low = 0xffffffff; ss->rx_big.shadow[i].addr_high = 0xffffffff; } ss->rx_big.nbufs = nbufs; ss->rx_big.cl_size = cl_size; ss->rx_big.mlen = ss->sc->ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD; for (i = 0; i <= 
ss->rx_big.mask; i += ss->rx_big.nbufs) { map = ss->rx_big.info[i].map; err = mxge_get_buf_big(ss, map, i); if (err) { device_printf(sc->dev, "alloced %d/%d bigs\n", i, ss->rx_big.mask + 1); return ENOMEM; } } return 0; } static int mxge_open(mxge_softc_t *sc) { mxge_cmd_t cmd; int err, big_bytes, nbufs, slice, cl_size, i; bus_addr_t bus; volatile uint8_t *itable; struct mxge_slice_state *ss; /* Copy the MAC address in case it was overridden */ bcopy(IF_LLADDR(sc->ifp), sc->mac_addr, ETHER_ADDR_LEN); err = mxge_reset(sc, 1); if (err != 0) { device_printf(sc->dev, "failed to reset\n"); return EIO; } if (sc->num_slices > 1) { /* setup the indirection table */ cmd.data0 = sc->num_slices; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_TABLE_SIZE, &cmd); err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_TABLE_OFFSET, &cmd); if (err != 0) { device_printf(sc->dev, "failed to setup rss tables\n"); return err; } /* just enable an identity mapping */ itable = sc->sram + cmd.data0; for (i = 0; i < sc->num_slices; i++) itable[i] = (uint8_t)i; cmd.data0 = 1; cmd.data1 = mxge_rss_hash_type; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd); if (err != 0) { device_printf(sc->dev, "failed to enable slices\n"); return err; } } mxge_choose_params(sc->ifp->if_mtu, &big_bytes, &cl_size, &nbufs); cmd.data0 = nbufs; err = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, &cmd); /* error is only meaningful if we're trying to set MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS > 1 */ if (err && nbufs > 1) { device_printf(sc->dev, "Failed to set always-use-n to %d\n", nbufs); return EIO; } /* Give the firmware the mtu and the big and small buffer sizes. The firmware wants the big buf size to be a power of two. Luckily, FreeBSD's clusters are powers of two */ cmd.data0 = sc->ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd); cmd.data0 = MHLEN - MXGEFW_PAD; err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd); cmd.data0 = big_bytes; err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd); if (err != 0) { device_printf(sc->dev, "failed to setup params\n"); goto abort; } /* Now give it the pointer to the stats block */ for (slice = 0; #ifdef IFNET_BUF_RING slice < sc->num_slices; #else slice < 1; #endif slice++) { ss = &sc->ss[slice]; cmd.data0 = MXGE_LOWPART_TO_U32(ss->fw_stats_dma.bus_addr); cmd.data1 = MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.bus_addr); cmd.data2 = sizeof(struct mcp_irq_data); cmd.data2 |= (slice << 16); err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd); } if (err != 0) { bus = sc->ss->fw_stats_dma.bus_addr; bus += offsetof(struct mcp_irq_data, send_done_count); cmd.data0 = MXGE_LOWPART_TO_U32(bus); cmd.data1 = MXGE_HIGHPART_TO_U32(bus); err = mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, &cmd); /* Firmware cannot support multicast without STATS_DMA_V2 */ sc->fw_multicast_support = 0; } else { sc->fw_multicast_support = 1; } if (err != 0) { device_printf(sc->dev, "failed to setup params\n"); goto abort; } for (slice = 0; slice < sc->num_slices; slice++) { err = mxge_slice_open(&sc->ss[slice], nbufs, cl_size); if (err != 0) { device_printf(sc->dev, "couldn't open slice %d\n", slice); goto abort; } } /* Finally, start the firmware running */ err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd); if (err) { device_printf(sc->dev, "Couldn't bring up link\n"); goto abort; } #ifdef IFNET_BUF_RING for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->if_drv_flags |= IFF_DRV_RUNNING; ss->if_drv_flags &= 
~IFF_DRV_OACTIVE; } #endif sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return 0; abort: mxge_free_mbufs(sc); return err; } static int mxge_close(mxge_softc_t *sc, int down) { mxge_cmd_t cmd; int err, old_down_cnt; #ifdef IFNET_BUF_RING struct mxge_slice_state *ss; int slice; #endif #ifdef IFNET_BUF_RING for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->if_drv_flags &= ~IFF_DRV_RUNNING; } #endif sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; if (!down) { old_down_cnt = sc->down_cnt; wmb(); err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd); if (err) { device_printf(sc->dev, "Couldn't bring down link\n"); } if (old_down_cnt == sc->down_cnt) { /* wait for down irq */ DELAY(10 * sc->intr_coal_delay); } wmb(); if (old_down_cnt == sc->down_cnt) { device_printf(sc->dev, "never got down irq\n"); } } mxge_free_mbufs(sc); return 0; } static void mxge_setup_cfg_space(mxge_softc_t *sc) { device_t dev = sc->dev; int reg; uint16_t lnk, pectl; /* find the PCIe link width and set max read request to 4KB */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { lnk = pci_read_config(dev, reg + 0x12, 2); sc->link_width = (lnk >> 4) & 0x3f; if (sc->pectl == 0) { pectl = pci_read_config(dev, reg + 0x8, 2); pectl = (pectl & ~0x7000) | (5 << 12); pci_write_config(dev, reg + 0x8, pectl, 2); sc->pectl = pectl; } else { /* restore saved pectl after watchdog reset */ pci_write_config(dev, reg + 0x8, sc->pectl, 2); } } /* Enable DMA and Memory space access */ pci_enable_busmaster(dev); } static uint32_t mxge_read_reboot(mxge_softc_t *sc) { device_t dev = sc->dev; uint32_t vs; /* find the vendor specific offset */ if (pci_find_cap(dev, PCIY_VENDOR, &vs) != 0) { device_printf(sc->dev, "could not find vendor specific offset\n"); return (uint32_t)-1; } /* enable read32 mode */ pci_write_config(dev, vs + 0x10, 0x3, 1); /* tell NIC which register to read */ pci_write_config(dev, vs + 0x18, 0xfffffff0, 4); return (pci_read_config(dev, vs + 0x14, 4)); } static void mxge_watchdog_reset(mxge_softc_t *sc) { struct pci_devinfo *dinfo; struct mxge_slice_state *ss; int err, running, s, num_tx_slices = 1; uint32_t reboot; uint16_t cmd; err = ENXIO; device_printf(sc->dev, "Watchdog reset!\n"); /* * check to see if the NIC rebooted. If it did, then all of * PCI config space has been reset, and things like the * busmaster bit will be zero. If this is the case, then we * must restore PCI config space before the NIC can be used * again */ cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (cmd == 0xffff) { /* * maybe the watchdog caught the NIC rebooting; wait * up to 100ms for it to finish.
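* (A config-space read of all-ones is the classic sign that the device is
* not responding: the host bridge completes unclaimed reads as 0xffff, and
* a live PCIR_COMMAND register can never legitimately read back as 0xffff
* because its upper bits are reserved-zero.)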
If it does not come * back, then give up */ DELAY(1000*100); cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (cmd == 0xffff) { device_printf(sc->dev, "NIC disappeared!\n"); } } if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { /* print the reboot status */ reboot = mxge_read_reboot(sc); device_printf(sc->dev, "NIC rebooted, status = 0x%x\n", reboot); running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING; if (running) { /* * quiesce NIC so that TX routines will not try to * xmit after restoration of BAR */ /* Mark the link as down */ if (sc->link_state) { sc->link_state = 0; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } #ifdef IFNET_BUF_RING num_tx_slices = sc->num_slices; #endif /* grab all TX locks to ensure no tx */ for (s = 0; s < num_tx_slices; s++) { ss = &sc->ss[s]; mtx_lock(&ss->tx.mtx); } mxge_close(sc, 1); } /* restore PCI configuration space */ dinfo = device_get_ivars(sc->dev); pci_cfg_restore(sc->dev, dinfo); /* and redo any changes we made to our config space */ mxge_setup_cfg_space(sc); /* reload f/w */ err = mxge_load_firmware(sc, 0); if (err) { device_printf(sc->dev, "Unable to re-load f/w\n"); } if (running) { if (!err) err = mxge_open(sc); /* release all TX locks */ for (s = 0; s < num_tx_slices; s++) { ss = &sc->ss[s]; #ifdef IFNET_BUF_RING mxge_start_locked(ss); #endif mtx_unlock(&ss->tx.mtx); } } sc->watchdog_resets++; } else { device_printf(sc->dev, "NIC did not reboot, not resetting\n"); err = 0; } if (err) { device_printf(sc->dev, "watchdog reset failed\n"); } else { if (sc->dying == 2) sc->dying = 0; callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); } } static void mxge_watchdog_task(void *arg, int pending) { mxge_softc_t *sc = arg; mtx_lock(&sc->driver_mtx); mxge_watchdog_reset(sc); mtx_unlock(&sc->driver_mtx); } static void mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice) { tx = &sc->ss[slice].tx; device_printf(sc->dev, "slice %d stuck?
ring state:\n", slice); device_printf(sc->dev, "tx.req=%d tx.done=%d, tx.queue_active=%d\n", tx->req, tx->done, tx->queue_active); device_printf(sc->dev, "tx.activate=%d tx.deactivate=%d\n", tx->activate, tx->deactivate); device_printf(sc->dev, "pkt_done=%d fw=%d\n", tx->pkt_done, be32toh(sc->ss->fw_stats->send_done_count)); } static int mxge_watchdog(mxge_softc_t *sc) { mxge_tx_ring_t *tx; uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause); int i, err = 0; /* see if we have outstanding transmits, which have been pending for more than mxge_ticks */ for (i = 0; #ifdef IFNET_BUF_RING (i < sc->num_slices) && (err == 0); #else (i < 1) && (err == 0); #endif i++) { tx = &sc->ss[i].tx; if (tx->req != tx->done && tx->watchdog_req != tx->watchdog_done && tx->done == tx->watchdog_done) { /* check for pause blocking before resetting */ if (tx->watchdog_rx_pause == rx_pause) { mxge_warn_stuck(sc, tx, i); taskqueue_enqueue(sc->tq, &sc->watchdog_task); return (ENXIO); } else device_printf(sc->dev, "Flow control blocking " "xmits, check link partner\n"); } tx->watchdog_req = tx->req; tx->watchdog_done = tx->done; tx->watchdog_rx_pause = rx_pause; } if (sc->need_media_probe) mxge_media_probe(sc); return (err); } static uint64_t mxge_get_counter(struct ifnet *ifp, ift_counter cnt) { struct mxge_softc *sc; uint64_t rv; sc = if_getsoftc(ifp); rv = 0; switch (cnt) { case IFCOUNTER_IPACKETS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].ipackets; return (rv); case IFCOUNTER_OPACKETS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].opackets; return (rv); case IFCOUNTER_OERRORS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].oerrors; return (rv); #ifdef IFNET_BUF_RING case IFCOUNTER_OBYTES: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].obytes; return (rv); case IFCOUNTER_OMCASTS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].omcasts; return (rv); case IFCOUNTER_OQDROPS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].tx.br->br_drops; return (rv); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void mxge_tick(void *arg) { mxge_softc_t *sc = arg; u_long pkts = 0; int err = 0; int running, ticks; uint16_t cmd; ticks = mxge_ticks; running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING; if (running) { if (!sc->watchdog_countdown) { err = mxge_watchdog(sc); sc->watchdog_countdown = 4; } sc->watchdog_countdown--; } if (pkts == 0) { /* ensure NIC did not suffer h/w fault while idle */ cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { sc->dying = 2; taskqueue_enqueue(sc->tq, &sc->watchdog_task); err = ENXIO; } /* look less often if NIC is idle */ ticks *= 4; } if (err == 0) callout_reset(&sc->co_hdl, ticks, mxge_tick, sc); } static int mxge_media_change(struct ifnet *ifp) { return EINVAL; } static int mxge_change_mtu(mxge_softc_t *sc, int mtu) { struct ifnet *ifp = sc->ifp; int real_mtu, old_mtu; int err = 0; real_mtu = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; if ((real_mtu > sc->max_mtu) || real_mtu < 60) return EINVAL; mtx_lock(&sc->driver_mtx); old_mtu = ifp->if_mtu; ifp->if_mtu = mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mxge_close(sc, 0); err = mxge_open(sc); if (err != 0) { ifp->if_mtu = old_mtu; mxge_close(sc, 0); (void) mxge_open(sc); } } mtx_unlock(&sc->driver_mtx); return err; } static void mxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { mxge_softc_t *sc = ifp->if_softc; if (sc == NULL) return; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER | 
IFM_FDX; ifmr->ifm_status |= sc->link_state ? IFM_ACTIVE : 0; ifmr->ifm_active |= sc->current_media; } static int mxge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { mxge_softc_t *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int err, mask; err = 0; switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: err = mxge_change_mtu(sc, ifr->ifr_mtu); break; case SIOCSIFFLAGS: mtx_lock(&sc->driver_mtx); if (sc->dying) { mtx_unlock(&sc->driver_mtx); return EINVAL; } if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { err = mxge_open(sc); } else { /* take care of promisc and allmulti flag changes */ mxge_change_promisc(sc, ifp->if_flags & IFF_PROMISC); mxge_set_multicast_list(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mxge_close(sc, 0); } } mtx_unlock(&sc->driver_mtx); break; case SIOCADDMULTI: case SIOCDELMULTI: mtx_lock(&sc->driver_mtx); mxge_set_multicast_list(sc); mtx_unlock(&sc->driver_mtx); break; case SIOCSIFCAP: mtx_lock(&sc->driver_mtx); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP); } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); } } else if (mask & IFCAP_RXCSUM) { if (IFCAP_RXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_RXCSUM; } else { ifp->if_capenable |= IFCAP_RXCSUM; } } if (mask & IFCAP_TSO4) { if (IFCAP_TSO4 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO4; } else if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } else { printf("mxge requires tx checksum offload" " be enabled to use TSO\n"); err = EINVAL; } } #if IFCAP_TSO6 if (mask & IFCAP_TXCSUM_IPV6) { if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); ifp->if_hwassist &= ~(CSUM_TCP_IPV6 | CSUM_UDP_IPV6); } else { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); } } else if (mask & IFCAP_RXCSUM_IPV6) { if (IFCAP_RXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_RXCSUM_IPV6; } else { ifp->if_capenable |= IFCAP_RXCSUM_IPV6; } } if (mask & IFCAP_TSO6) { if (IFCAP_TSO6 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO6; } else if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO6; ifp->if_hwassist |= CSUM_TSO; } else { printf("mxge requires tx checksum offload" " be enabled to use TSO\n"); err = EINVAL; } } #endif /* IFCAP_TSO6 */ if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (!(ifp->if_capabilities & IFCAP_VLAN_HWTSO) || !(ifp->if_capenable & IFCAP_VLAN_HWTAGGING)) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; mtx_unlock(&sc->driver_mtx); VLAN_CAPABILITIES(ifp); break; case SIOCGIFMEDIA: mtx_lock(&sc->driver_mtx); mxge_media_probe(sc); mtx_unlock(&sc->driver_mtx); err = ifmedia_ioctl(ifp, (struct ifreq *)data, &sc->media, command); break; default: err = ENOTTY; } return err; } static void mxge_fetch_tunables(mxge_softc_t *sc) { TUNABLE_INT_FETCH("hw.mxge.max_slices", &mxge_max_slices); TUNABLE_INT_FETCH("hw.mxge.flow_control_enabled", &mxge_flow_control); TUNABLE_INT_FETCH("hw.mxge.intr_coal_delay", &mxge_intr_coal_delay); TUNABLE_INT_FETCH("hw.mxge.nvidia_ecrc_enable",
&mxge_nvidia_ecrc_enable); TUNABLE_INT_FETCH("hw.mxge.force_firmware", &mxge_force_firmware); TUNABLE_INT_FETCH("hw.mxge.deassert_wait", &mxge_deassert_wait); TUNABLE_INT_FETCH("hw.mxge.verbose", &mxge_verbose); TUNABLE_INT_FETCH("hw.mxge.ticks", &mxge_ticks); TUNABLE_INT_FETCH("hw.mxge.always_promisc", &mxge_always_promisc); TUNABLE_INT_FETCH("hw.mxge.rss_hash_type", &mxge_rss_hash_type); TUNABLE_INT_FETCH("hw.mxge.rss_hashtype", &mxge_rss_hash_type); TUNABLE_INT_FETCH("hw.mxge.initial_mtu", &mxge_initial_mtu); TUNABLE_INT_FETCH("hw.mxge.throttle", &mxge_throttle); if (bootverbose) mxge_verbose = 1; if (mxge_intr_coal_delay < 0 || mxge_intr_coal_delay > 10*1000) mxge_intr_coal_delay = 30; if (mxge_ticks == 0) mxge_ticks = hz / 2; sc->pause = mxge_flow_control; if (mxge_rss_hash_type < MXGEFW_RSS_HASH_TYPE_IPV4 || mxge_rss_hash_type > MXGEFW_RSS_HASH_TYPE_MAX) { mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; } if (mxge_initial_mtu > ETHERMTU_JUMBO || mxge_initial_mtu < ETHER_MIN_LEN) mxge_initial_mtu = ETHERMTU_JUMBO; if (mxge_throttle && mxge_throttle > MXGE_MAX_THROTTLE) mxge_throttle = MXGE_MAX_THROTTLE; if (mxge_throttle && mxge_throttle < MXGE_MIN_THROTTLE) mxge_throttle = MXGE_MIN_THROTTLE; sc->throttle = mxge_throttle; } static void mxge_free_slices(mxge_softc_t *sc) { struct mxge_slice_state *ss; int i; if (sc->ss == NULL) return; for (i = 0; i < sc->num_slices; i++) { ss = &sc->ss[i]; if (ss->fw_stats != NULL) { mxge_dma_free(&ss->fw_stats_dma); ss->fw_stats = NULL; #ifdef IFNET_BUF_RING if (ss->tx.br != NULL) { drbr_free(ss->tx.br, M_DEVBUF); ss->tx.br = NULL; } #endif mtx_destroy(&ss->tx.mtx); } if (ss->rx_done.entry != NULL) { mxge_dma_free(&ss->rx_done.dma); ss->rx_done.entry = NULL; } } free(sc->ss, M_DEVBUF); sc->ss = NULL; } static int mxge_alloc_slices(mxge_softc_t *sc) { mxge_cmd_t cmd; struct mxge_slice_state *ss; size_t bytes; int err, i, max_intr_slots; err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); if (err != 0) { device_printf(sc->dev, "Cannot determine rx ring size\n"); return err; } sc->rx_ring_size = cmd.data0; max_intr_slots = 2 * (sc->rx_ring_size / sizeof (mcp_dma_addr_t)); bytes = sizeof (*sc->ss) * sc->num_slices; sc->ss = malloc(bytes, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->ss == NULL) return (ENOMEM); for (i = 0; i < sc->num_slices; i++) { ss = &sc->ss[i]; ss->sc = sc; /* allocate per-slice rx interrupt queues */ bytes = max_intr_slots * sizeof (*ss->rx_done.entry); err = mxge_dma_alloc(sc, &ss->rx_done.dma, bytes, 4096); if (err != 0) goto abort; ss->rx_done.entry = ss->rx_done.dma.addr; bzero(ss->rx_done.entry, bytes); /* * allocate the per-slice firmware stats; stats * (including tx) are used only on the first * slice for now */ #ifndef IFNET_BUF_RING if (i > 0) continue; #endif bytes = sizeof (*ss->fw_stats); err = mxge_dma_alloc(sc, &ss->fw_stats_dma, sizeof (*ss->fw_stats), 64); if (err != 0) goto abort; ss->fw_stats = (mcp_irq_data_t *)ss->fw_stats_dma.addr; snprintf(ss->tx.mtx_name, sizeof(ss->tx.mtx_name), "%s:tx(%d)", device_get_nameunit(sc->dev), i); mtx_init(&ss->tx.mtx, ss->tx.mtx_name, NULL, MTX_DEF); #ifdef IFNET_BUF_RING ss->tx.br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &ss->tx.mtx); #endif } return (0); abort: mxge_free_slices(sc); return (ENOMEM); } static void mxge_slice_probe(mxge_softc_t *sc) { mxge_cmd_t cmd; char *old_fw; int msix_cnt, status, max_intr_slots; sc->num_slices = 1; /* * don't enable multiple slices if they have been disabled via the * hw.mxge.max_slices tunable, or if this is not an SMP system */ if (mxge_max_slices == 0 ||
mxge_max_slices == 1 || mp_ncpus < 2) return; /* see how many MSI-X interrupts are available */ msix_cnt = pci_msix_count(sc->dev); if (msix_cnt < 2) return; /* now load the slice-aware firmware to see what it supports */ old_fw = sc->fw_name; if (old_fw == mxge_fw_aligned) sc->fw_name = mxge_fw_rss_aligned; else sc->fw_name = mxge_fw_rss_unaligned; status = mxge_load_firmware(sc, 0); if (status != 0) { device_printf(sc->dev, "Falling back to a single slice\n"); return; } /* try to send a reset command to the card to see if it is alive */ memset(&cmd, 0, sizeof (cmd)); status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); if (status != 0) { device_printf(sc->dev, "failed reset\n"); goto abort_with_fw; } /* get rx ring size */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); if (status != 0) { device_printf(sc->dev, "Cannot determine rx ring size\n"); goto abort_with_fw; } max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t)); /* tell it the size of the interrupt queues */ cmd.data0 = max_intr_slots * sizeof (struct mcp_slot); status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); if (status != 0) { device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); goto abort_with_fw; } /* ask for the maximum number of slices it supports */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed MXGEFW_CMD_GET_MAX_RSS_QUEUES\n"); goto abort_with_fw; } sc->num_slices = cmd.data0; if (sc->num_slices > msix_cnt) sc->num_slices = msix_cnt; if (mxge_max_slices == -1) { /* cap to number of CPUs in system */ if (sc->num_slices > mp_ncpus) sc->num_slices = mp_ncpus; } else { if (sc->num_slices > mxge_max_slices) sc->num_slices = mxge_max_slices; } /* make sure it is a power of two */ while (sc->num_slices & (sc->num_slices - 1)) sc->num_slices--; if (mxge_verbose) device_printf(sc->dev, "using %d slices\n", sc->num_slices); return; abort_with_fw: sc->fw_name = old_fw; (void) mxge_load_firmware(sc, 0); } static int mxge_add_msix_irqs(mxge_softc_t *sc) { size_t bytes; int count, err, i, rid; rid = PCIR_BAR(2); sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_table_res == NULL) { device_printf(sc->dev, "couldn't alloc MSIX table res\n"); return ENXIO; } count = sc->num_slices; err = pci_alloc_msix(sc->dev, &count); if (err != 0) { device_printf(sc->dev, "pci_alloc_msix: failed, wanted %d, " "err = %d\n", sc->num_slices, err); goto abort_with_msix_table; } if (count < sc->num_slices) { device_printf(sc->dev, "pci_alloc_msix: need %d, got %d\n", sc->num_slices, count); device_printf(sc->dev, "Try setting hw.mxge.max_slices to %d\n", count); err = ENOSPC; goto abort_with_msix; } bytes = sizeof (*sc->msix_irq_res) * sc->num_slices; sc->msix_irq_res = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); if (sc->msix_irq_res == NULL) { err = ENOMEM; goto abort_with_msix; } for (i = 0; i < sc->num_slices; i++) { rid = i + 1; sc->msix_irq_res[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->msix_irq_res[i] == NULL) { device_printf(sc->dev, "couldn't allocate IRQ res" " for message %d\n", i); err = ENXIO; goto abort_with_res; } } bytes = sizeof (*sc->msix_ih) * sc->num_slices; sc->msix_ih = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); for (i = 0; i < sc->num_slices; i++) { err = bus_setup_intr(sc->dev, sc->msix_irq_res[i], INTR_TYPE_NET | INTR_MPSAFE, #if __FreeBSD_version > 700030 NULL, #endif mxge_intr, &sc->ss[i], &sc->msix_ih[i]); if (err != 0) { device_printf(sc->dev,
"couldn't setup intr for " "message %d\n", i); goto abort_with_intr; } bus_describe_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i], "s%d", i); } if (mxge_verbose) { device_printf(sc->dev, "using %d msix IRQs:", sc->num_slices); for (i = 0; i < sc->num_slices; i++) printf(" %ld", rman_get_start(sc->msix_irq_res[i])); printf("\n"); } return (0); abort_with_intr: for (i = 0; i < sc->num_slices; i++) { if (sc->msix_ih[i] != NULL) { bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i]); sc->msix_ih[i] = NULL; } } free(sc->msix_ih, M_DEVBUF); abort_with_res: for (i = 0; i < sc->num_slices; i++) { rid = i + 1; if (sc->msix_irq_res[i] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, rid, sc->msix_irq_res[i]); sc->msix_irq_res[i] = NULL; } free(sc->msix_irq_res, M_DEVBUF); abort_with_msix: pci_release_msi(sc->dev); abort_with_msix_table: bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->msix_table_res); return err; } static int mxge_add_single_irq(mxge_softc_t *sc) { int count, err, rid; count = pci_msi_count(sc->dev); if (count == 1 && pci_alloc_msi(sc->dev, &count) == 0) { rid = 1; } else { rid = 0; sc->legacy_irq = 1; } - sc->irq_res = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &rid, 0, ~0, - 1, RF_SHAREABLE | RF_ACTIVE); + sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(sc->dev, "could not alloc interrupt\n"); return ENXIO; } if (mxge_verbose) device_printf(sc->dev, "using %s irq %ld\n", sc->legacy_irq ? "INTx" : "MSI", rman_get_start(sc->irq_res)); err = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, #if __FreeBSD_version > 700030 NULL, #endif mxge_intr, &sc->ss[0], &sc->ih); if (err != 0) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->legacy_irq ? 0 : 1, sc->irq_res); if (!sc->legacy_irq) pci_release_msi(sc->dev); } return err; } static void mxge_rem_msix_irqs(mxge_softc_t *sc) { int i, rid; for (i = 0; i < sc->num_slices; i++) { if (sc->msix_ih[i] != NULL) { bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i]); sc->msix_ih[i] = NULL; } } free(sc->msix_ih, M_DEVBUF); for (i = 0; i < sc->num_slices; i++) { rid = i + 1; if (sc->msix_irq_res[i] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, rid, sc->msix_irq_res[i]); sc->msix_irq_res[i] = NULL; } free(sc->msix_irq_res, M_DEVBUF); bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->msix_table_res); pci_release_msi(sc->dev); return; } static void mxge_rem_single_irq(mxge_softc_t *sc) { bus_teardown_intr(sc->dev, sc->irq_res, sc->ih); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->legacy_irq ? 
0 : 1, sc->irq_res); if (!sc->legacy_irq) pci_release_msi(sc->dev); } static void mxge_rem_irq(mxge_softc_t *sc) { if (sc->num_slices > 1) mxge_rem_msix_irqs(sc); else mxge_rem_single_irq(sc); } static int mxge_add_irq(mxge_softc_t *sc) { int err; if (sc->num_slices > 1) err = mxge_add_msix_irqs(sc); else err = mxge_add_single_irq(sc); if (0 && err == 0 && sc->num_slices > 1) { mxge_rem_msix_irqs(sc); err = mxge_add_msix_irqs(sc); } return err; } static int mxge_attach(device_t dev) { mxge_cmd_t cmd; mxge_softc_t *sc = device_get_softc(dev); struct ifnet *ifp; int err, rid; sc->dev = dev; mxge_fetch_tunables(sc); TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc); sc->tq = taskqueue_create("mxge_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->tq); if (sc->tq == NULL) { err = ENOMEM; goto abort_with_nothing; } err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 65536 + 256, /* maxsize */ MXGE_MAX_SEND_DESC, /* num segs */ 65536, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock */ &sc->parent_dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating parent dmat\n", err); goto abort_with_tq; } ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); err = ENOSPC; goto abort_with_parent_dmat; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd", device_get_nameunit(dev)); mtx_init(&sc->cmd_mtx, sc->cmd_mtx_name, NULL, MTX_DEF); snprintf(sc->driver_mtx_name, sizeof(sc->driver_mtx_name), "%s:drv", device_get_nameunit(dev)); mtx_init(&sc->driver_mtx, sc->driver_mtx_name, MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->co_hdl, &sc->driver_mtx, 0); mxge_setup_cfg_space(sc); /* Map the board into the kernel */ rid = PCIR_BARS; - sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, - ~0, 1, RF_ACTIVE); + sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not map memory\n"); err = ENXIO; goto abort_with_lock; } sc->sram = rman_get_virtual(sc->mem_res); sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100; if (sc->sram_size > rman_get_size(sc->mem_res)) { device_printf(dev, "impossible memory region size %ld\n", rman_get_size(sc->mem_res)); err = ENXIO; goto abort_with_mem_res; } /* make NULL terminated copy of the EEPROM strings section of lanai SRAM */ bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE); bus_space_read_region_1(rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), sc->sram_size - MXGE_EEPROM_STRINGS_SIZE, sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE - 2); err = mxge_parse_strings(sc); if (err != 0) goto abort_with_mem_res; /* Enable write combining for efficient use of PCIe bus */ mxge_enable_wc(sc); /* Allocate the out of band dma memory */ err = mxge_dma_alloc(sc, &sc->cmd_dma, sizeof (mxge_cmd_t), 64); if (err != 0) goto abort_with_mem_res; sc->cmd = (mcp_cmd_response_t *) sc->cmd_dma.addr; err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64); if (err != 0) goto abort_with_cmd_dma; err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096); if (err != 0) goto abort_with_zeropad_dma; /* select & load the firmware */ err = mxge_select_firmware(sc); if (err != 0) goto abort_with_dmabench; sc->intr_coal_delay = mxge_intr_coal_delay; mxge_slice_probe(sc); err = mxge_alloc_slices(sc); if (err != 0) goto 
abort_with_dmabench; err = mxge_reset(sc, 0); if (err != 0) goto abort_with_slices; err = mxge_alloc_rings(sc); if (err != 0) { device_printf(sc->dev, "failed to allocate rings\n"); goto abort_with_slices; } err = mxge_add_irq(sc); if (err != 0) { device_printf(sc->dev, "failed to add irq\n"); goto abort_with_rings; } ifp->if_baudrate = IF_Gbps(10); ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4 | IFCAP_VLAN_MTU | IFCAP_LINKSTATE | IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6; #if defined(INET) || defined(INET6) ifp->if_capabilities |= IFCAP_LRO; #endif #ifdef MXGE_NEW_VLAN_API ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; /* Only FW 1.4.32 and newer can do TSO over vlans */ if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && sc->fw_ver_tiny >= 32) ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif sc->max_mtu = mxge_max_mtu(sc); if (sc->max_mtu >= 9000) ifp->if_capabilities |= IFCAP_JUMBO_MTU; else device_printf(dev, "MTU limited to %d. Install " "latest firmware for 9000 byte jumbo support\n", sc->max_mtu - ETHER_HDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; /* check to see if f/w supports TSO for IPv6 */ if (!mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, &cmd)) { if (CSUM_TCP_IPV6) ifp->if_capabilities |= IFCAP_TSO6; sc->max_tso6_hlen = min(cmd.data0, sizeof (sc->ss[0].scratch)); } ifp->if_capenable = ifp->if_capabilities; if (sc->lro_cnt == 0) ifp->if_capenable &= ~IFCAP_LRO; ifp->if_init = mxge_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = mxge_ioctl; ifp->if_start = mxge_start; ifp->if_get_counter = mxge_get_counter; /* Initialise the ifmedia structure */ ifmedia_init(&sc->media, 0, mxge_media_change, mxge_media_status); mxge_media_init(sc); mxge_media_probe(sc); sc->dying = 0; ether_ifattach(ifp, sc->mac_addr); /* ether_ifattach sets mtu to ETHERMTU */ if (mxge_initial_mtu != ETHERMTU) mxge_change_mtu(sc, mxge_initial_mtu); mxge_add_sysctls(sc); #ifdef IFNET_BUF_RING ifp->if_transmit = mxge_transmit; ifp->if_qflush = mxge_qflush; #endif taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); return 0; abort_with_rings: mxge_free_rings(sc); abort_with_slices: mxge_free_slices(sc); abort_with_dmabench: mxge_dma_free(&sc->dmabench_dma); abort_with_zeropad_dma: mxge_dma_free(&sc->zeropad_dma); abort_with_cmd_dma: mxge_dma_free(&sc->cmd_dma); abort_with_mem_res: bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); abort_with_lock: pci_disable_busmaster(dev); mtx_destroy(&sc->cmd_mtx); mtx_destroy(&sc->driver_mtx); if_free(ifp); abort_with_parent_dmat: bus_dma_tag_destroy(sc->parent_dmat); abort_with_tq: if (sc->tq != NULL) { taskqueue_drain(sc->tq, &sc->watchdog_task); taskqueue_free(sc->tq); sc->tq = NULL; } abort_with_nothing: return err; } static int mxge_detach(device_t dev) { mxge_softc_t *sc = device_get_softc(dev); if (mxge_vlans_active(sc)) { device_printf(sc->dev, "Detach vlans before removing module\n"); return EBUSY; } mtx_lock(&sc->driver_mtx); sc->dying = 1; if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) mxge_close(sc, 0); mtx_unlock(&sc->driver_mtx); ether_ifdetach(sc->ifp); if (sc->tq != NULL) { taskqueue_drain(sc->tq, &sc->watchdog_task); taskqueue_free(sc->tq); sc->tq = NULL; } callout_drain(&sc->co_hdl); ifmedia_removeall(&sc->media); mxge_dummy_rdma(sc, 0); mxge_rem_sysctls(sc); mxge_rem_irq(sc); mxge_free_rings(sc); 
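/*
 * Teardown from here mirrors mxge_attach() in reverse: free the slices,
 * then the dmabench/zeropad/cmd DMA areas, release the BAR mapping and
 * busmastering, destroy the mutexes, free the ifnet and finally destroy
 * the parent DMA tag.
 */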
mxge_free_slices(sc); mxge_dma_free(&sc->dmabench_dma); mxge_dma_free(&sc->zeropad_dma); mxge_dma_free(&sc->cmd_dma); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); pci_disable_busmaster(dev); mtx_destroy(&sc->cmd_mtx); mtx_destroy(&sc->driver_mtx); if_free(sc->ifp); bus_dma_tag_destroy(sc->parent_dmat); return 0; } static int mxge_shutdown(device_t dev) { return 0; } /* This file uses Myri10GE driver indentation. Local Variables: c-file-style:"linux" tab-width:8 End: */ Index: head/sys/dev/nvme/nvme_ctrlr.c =================================================================== --- head/sys/dev/nvme/nvme_ctrlr.c (revision 295789) +++ head/sys/dev/nvme/nvme_ctrlr.c (revision 295790) @@ -1,1227 +1,1227 @@ /*- * Copyright (C) 2012-2016 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "nvme_private.h" static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, struct nvme_async_event_request *aer); static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr); static int nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) { ctrlr->resource_id = PCIR_BAR(0); - ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY, - &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE); + ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->resource_id, RF_ACTIVE); if(ctrlr->resource == NULL) { nvme_printf(ctrlr, "unable to allocate pci resource\n"); return (ENOMEM); } ctrlr->bus_tag = rman_get_bustag(ctrlr->resource); ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource); ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle; /* * The NVMe spec allows for the MSI-X table to be placed behind * BAR 4/5, separate from the control/doorbell registers. Always * try to map this bar, because it must be mapped prior to calling * pci_alloc_msix(). If the table isn't behind BAR 4/5, * bus_alloc_resource() will just return NULL which is OK. 
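* Anything that later touches the mapping must therefore tolerate a NULL
* pointer; the release path in nvme_ctrlr_destruct() does exactly that:
*     if (ctrlr->bar4_resource != NULL)
*         bus_release_resource(dev, SYS_RES_MEMORY,
*             ctrlr->bar4_resource_id, ctrlr->bar4_resource);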
*/ ctrlr->bar4_resource_id = PCIR_BAR(4); - ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY, - &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE); + ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->bar4_resource_id, RF_ACTIVE); return (0); } static void nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr) { struct nvme_qpair *qpair; uint32_t num_entries; qpair = &ctrlr->adminq; num_entries = NVME_ADMIN_ENTRIES; TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries); /* * If admin_entries was overridden to an invalid value, revert it * back to our default value. */ if (num_entries < NVME_MIN_ADMIN_ENTRIES || num_entries > NVME_MAX_ADMIN_ENTRIES) { nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d " "specified\n", num_entries); num_entries = NVME_ADMIN_ENTRIES; } /* * The admin queue's max xfer size is treated differently than the * max I/O xfer size. 16KB is sufficient here - maybe even less? */ nvme_qpair_construct(qpair, 0, /* qpair ID */ 0, /* vector */ num_entries, NVME_ADMIN_TRACKERS, ctrlr); } static int nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr) { struct nvme_qpair *qpair; union cap_lo_register cap_lo; int i, num_entries, num_trackers; num_entries = NVME_IO_ENTRIES; TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries); /* * NVMe spec sets a hard limit of 64K max entries, but * devices may specify a smaller limit, so we need to check * the MQES field in the capabilities register. */ cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo); num_entries = min(num_entries, cap_lo.bits.mqes+1); num_trackers = NVME_IO_TRACKERS; TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers); num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS); num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS); /* * No need to have more trackers than entries in the submit queue. * Note also that for a queue size of N, we can only have (N-1) * commands outstanding, hence the "-1" here. */ num_trackers = min(num_trackers, (num_entries-1)); /* * This was calculated previously when setting up interrupts, but * a controller could theoretically support fewer I/O queues than * MSI-X vectors. So calculate again here just to be safe. */ ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues); ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair), M_NVME, M_ZERO | M_WAITOK); for (i = 0; i < ctrlr->num_io_queues; i++) { qpair = &ctrlr->ioq[i]; /* * Admin queue has ID=0. IO queues start at ID=1 - * hence the 'i+1' here. * * For I/O queues, use the controller-wide max_xfer_size * calculated in nvme_attach(). */ nvme_qpair_construct(qpair, i+1, /* qpair ID */ ctrlr->msix_enabled ? i+1 : 0, /* vector */ num_entries, num_trackers, ctrlr); /* * Do not bother binding interrupts if we only have one I/O * interrupt thread for this controller. 
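* With more than one queue, each vector is pinned to the first CPU of the
* group it serves: queue i handles CPUs i * num_cpus_per_ioq through
* (i + 1) * num_cpus_per_ioq - 1 (see nvme_ctrlr_submit_io_request()), so
* with, say, 8 CPUs and 4 queues, queue 2 is bound to CPU 4. The CPU and
* queue counts here are illustrative only.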
*/ if (ctrlr->num_io_queues > 1) bus_bind_intr(ctrlr->dev, qpair->res, i * ctrlr->num_cpus_per_ioq); } return (0); } static void nvme_ctrlr_fail(struct nvme_controller *ctrlr) { int i; ctrlr->is_failed = TRUE; nvme_qpair_fail(&ctrlr->adminq); for (i = 0; i < ctrlr->num_io_queues; i++) nvme_qpair_fail(&ctrlr->ioq[i]); nvme_notify_fail_consumers(ctrlr); } void nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr, struct nvme_request *req) { mtx_lock(&ctrlr->lock); STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq); mtx_unlock(&ctrlr->lock); taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task); } static void nvme_ctrlr_fail_req_task(void *arg, int pending) { struct nvme_controller *ctrlr = arg; struct nvme_request *req; mtx_lock(&ctrlr->lock); while (!STAILQ_EMPTY(&ctrlr->fail_req)) { req = STAILQ_FIRST(&ctrlr->fail_req); STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq); nvme_qpair_manual_complete_request(req->qpair, req, NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE); } mtx_unlock(&ctrlr->lock); } static int nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val) { int ms_waited; union cc_register cc; union csts_register csts; cc.raw = nvme_mmio_read_4(ctrlr, cc); csts.raw = nvme_mmio_read_4(ctrlr, csts); if (cc.bits.en != desired_val) { nvme_printf(ctrlr, "%s called with desired_val = %d " "but cc.en = %d\n", __func__, desired_val, cc.bits.en); return (ENXIO); } ms_waited = 0; while (csts.bits.rdy != desired_val) { DELAY(1000); if (ms_waited++ > ctrlr->ready_timeout_in_ms) { nvme_printf(ctrlr, "controller ready did not become %d " "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms); return (ENXIO); } csts.raw = nvme_mmio_read_4(ctrlr, csts); } return (0); } static void nvme_ctrlr_disable(struct nvme_controller *ctrlr) { union cc_register cc; union csts_register csts; cc.raw = nvme_mmio_read_4(ctrlr, cc); csts.raw = nvme_mmio_read_4(ctrlr, csts); if (cc.bits.en == 1 && csts.bits.rdy == 0) nvme_ctrlr_wait_for_ready(ctrlr, 1); cc.bits.en = 0; nvme_mmio_write_4(ctrlr, cc, cc.raw); DELAY(5000); nvme_ctrlr_wait_for_ready(ctrlr, 0); } static int nvme_ctrlr_enable(struct nvme_controller *ctrlr) { union cc_register cc; union csts_register csts; union aqa_register aqa; cc.raw = nvme_mmio_read_4(ctrlr, cc); csts.raw = nvme_mmio_read_4(ctrlr, csts); if (cc.bits.en == 1) { if (csts.bits.rdy == 1) return (0); else return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); } nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr); DELAY(5000); nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr); DELAY(5000); aqa.raw = 0; /* acqs and asqs are 0-based. */ aqa.bits.acqs = ctrlr->adminq.num_entries-1; aqa.bits.asqs = ctrlr->adminq.num_entries-1; nvme_mmio_write_4(ctrlr, aqa, aqa.raw); DELAY(5000); cc.bits.en = 1; cc.bits.css = 0; cc.bits.ams = 0; cc.bits.shn = 0; cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */ cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */ /* This evaluates to 0, which is according to spec. */ cc.bits.mps = (PAGE_SIZE >> 13); nvme_mmio_write_4(ctrlr, cc, cc.raw); DELAY(5000); return (nvme_ctrlr_wait_for_ready(ctrlr, 1)); } int nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr) { int i; nvme_admin_qpair_disable(&ctrlr->adminq); /* * I/O queues are not allocated before the initial HW * reset, so do not try to disable them. Use is_initialized * to determine if this is the initial HW reset. 
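* (is_initialized is only set at the end of nvme_ctrlr_start_config_hook(),
* so every reset issued after the config hook has run also disables the
* I/O queues below.)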
*/ if (ctrlr->is_initialized) { for (i = 0; i < ctrlr->num_io_queues; i++) nvme_io_qpair_disable(&ctrlr->ioq[i]); } DELAY(100*1000); nvme_ctrlr_disable(ctrlr); return (nvme_ctrlr_enable(ctrlr)); } void nvme_ctrlr_reset(struct nvme_controller *ctrlr) { int cmpset; cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1); if (cmpset == 0 || ctrlr->is_failed) /* * Controller is already resetting or has failed. Return * immediately since there is no need to kick off another * reset in these cases. */ return; taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task); } static int nvme_ctrlr_identify(struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; status.done = FALSE; nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata, nvme_completion_poll_cb, &status); while (status.done == FALSE) pause("nvme", 1); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_identify_controller failed!\n"); return (ENXIO); } /* * Use MDTS to ensure our default max_xfer_size doesn't exceed what the * controller supports. */ if (ctrlr->cdata.mdts > 0) ctrlr->max_xfer_size = min(ctrlr->max_xfer_size, ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts))); return (0); } static int nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; int cq_allocated, sq_allocated; status.done = FALSE; nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues, nvme_completion_poll_cb, &status); while (status.done == FALSE) pause("nvme", 1); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_set_num_queues failed!\n"); return (ENXIO); } /* * Data in cdw0 is 0-based. * Lower 16-bits indicate number of submission queues allocated. * Upper 16-bits indicate number of completion queues allocated. */ sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1; cq_allocated = (status.cpl.cdw0 >> 16) + 1; /* * Controller may allocate more queues than we requested, * so use the minimum of the number requested and what was * actually allocated. 
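* For example (values illustrative): cdw0 == 0x00070003 decodes to
* sq_allocated == 4 and cq_allocated == 8, so a request for 6 I/O queues
* is trimmed to min(min(6, 4), 8) == 4.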
*/ ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated); ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated); return (0); } static int nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; struct nvme_qpair *qpair; int i; for (i = 0; i < ctrlr->num_io_queues; i++) { qpair = &ctrlr->ioq[i]; status.done = FALSE; nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector, nvme_completion_poll_cb, &status); while (status.done == FALSE) pause("nvme", 1); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_create_io_cq failed!\n"); return (ENXIO); } status.done = FALSE; nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status); while (status.done == FALSE) pause("nvme", 1); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_create_io_sq failed!\n"); return (ENXIO); } } return (0); } static int nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) { struct nvme_namespace *ns; int i, status; for (i = 0; i < ctrlr->cdata.nn; i++) { ns = &ctrlr->ns[i]; status = nvme_ns_construct(ns, i+1, ctrlr); if (status != 0) return (status); } return (0); } static boolean_t is_log_page_id_valid(uint8_t page_id) { switch (page_id) { case NVME_LOG_ERROR: case NVME_LOG_HEALTH_INFORMATION: case NVME_LOG_FIRMWARE_SLOT: return (TRUE); } return (FALSE); } static uint32_t nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id) { uint32_t log_page_size; switch (page_id) { case NVME_LOG_ERROR: log_page_size = min( sizeof(struct nvme_error_information_entry) * ctrlr->cdata.elpe, NVME_MAX_AER_LOG_SIZE); break; case NVME_LOG_HEALTH_INFORMATION: log_page_size = sizeof(struct nvme_health_information_page); break; case NVME_LOG_FIRMWARE_SLOT: log_page_size = sizeof(struct nvme_firmware_page); break; default: log_page_size = 0; break; } return (log_page_size); } static void nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr, union nvme_critical_warning_state state) { if (state.bits.available_spare == 1) nvme_printf(ctrlr, "available spare space below threshold\n"); if (state.bits.temperature == 1) nvme_printf(ctrlr, "temperature above threshold\n"); if (state.bits.device_reliability == 1) nvme_printf(ctrlr, "device reliability degraded\n"); if (state.bits.read_only == 1) nvme_printf(ctrlr, "media placed in read only mode\n"); if (state.bits.volatile_memory_backup == 1) nvme_printf(ctrlr, "volatile memory backup device failed\n"); if (state.bits.reserved != 0) nvme_printf(ctrlr, "unknown critical warning(s): state = 0x%02x\n", state.raw); } static void nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl) { struct nvme_async_event_request *aer = arg; struct nvme_health_information_page *health_info; /* * If the log page fetch for some reason completed with an error, * don't pass log page data to the consumers. In practice, this case * should never happen. */ if (nvme_completion_is_error(cpl)) nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, aer->log_page_id, NULL, 0); else { if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) { health_info = (struct nvme_health_information_page *) aer->log_page_buffer; nvme_ctrlr_log_critical_warnings(aer->ctrlr, health_info->critical_warning); /* * Critical warnings reported through the * SMART/health log page are persistent, so * clear the associated bits in the async event * config so that we do not receive repeated * notifications for the same event. 
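* For example (values illustrative): with async_event_config.raw at 0x1F
* and a page reporting critical_warning.raw == 0x02 (the temperature bit),
* the new config is 0x1F & ~0x02 == 0x1D, so further temperature warnings
* stop generating async events until the config is re-armed.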
*/ aer->ctrlr->async_event_config.raw &= ~health_info->critical_warning.raw; nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr, aer->ctrlr->async_event_config, NULL, NULL); } /* * Pass the cpl data from the original async event completion, * not the log page fetch. */ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl, aer->log_page_id, aer->log_page_buffer, aer->log_page_size); } /* * Repost another asynchronous event request to replace the one * that just completed. */ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer); } static void nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl) { struct nvme_async_event_request *aer = arg; if (nvme_completion_is_error(cpl)) { /* * Do not retry failed async event requests. This avoids * infinite loops where a new async event request is submitted * to replace the one just failed, only to fail again and * perpetuate the loop. */ return; } /* Associated log page is in bits 23:16 of completion entry dw0. */ aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16; nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n", aer->log_page_id); if (is_log_page_id_valid(aer->log_page_id)) { aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id); memcpy(&aer->cpl, cpl, sizeof(*cpl)); nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id, NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, aer->log_page_size, nvme_ctrlr_async_event_log_page_cb, aer); /* Wait to notify consumers until after log page is fetched. */ } else { nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id, NULL, 0); /* * Repost another asynchronous event request to replace the one * that just completed. */ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer); } } static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, struct nvme_async_event_request *aer) { struct nvme_request *req; aer->ctrlr = ctrlr; req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer); aer->req = req; /* * Disable timeout here, since asynchronous event requests should by * nature never be timed out. */ req->timeout = FALSE; req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST; nvme_ctrlr_submit_admin_request(ctrlr, req); } static void nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; struct nvme_async_event_request *aer; uint32_t i; ctrlr->async_event_config.raw = 0xFF; ctrlr->async_event_config.bits.reserved = 0; status.done = FALSE; nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD, 0, NULL, 0, nvme_completion_poll_cb, &status); while (status.done == FALSE) pause("nvme", 1); if (nvme_completion_is_error(&status.cpl) || (status.cpl.cdw0 & 0xFFFF) == 0xFFFF || (status.cpl.cdw0 & 0xFFFF) == 0x0000) { nvme_printf(ctrlr, "temperature threshold not supported\n"); ctrlr->async_event_config.bits.temperature = 0; } nvme_ctrlr_cmd_set_async_event_config(ctrlr, ctrlr->async_event_config, NULL, NULL); /* aerl is a zero-based value, so we need to add 1 here. 
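* For example, a controller advertising cdata.aerl == 3 supports four
* outstanding requests, so the loop below posts
* min(NVME_MAX_ASYNC_EVENTS, 4) of them.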
*/ ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1)); for (i = 0; i < ctrlr->num_aers; i++) { aer = &ctrlr->aer[i]; nvme_ctrlr_construct_and_submit_aer(ctrlr, aer); } } static void nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr) { ctrlr->int_coal_time = 0; TUNABLE_INT_FETCH("hw.nvme.int_coal_time", &ctrlr->int_coal_time); ctrlr->int_coal_threshold = 0; TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold", &ctrlr->int_coal_threshold); nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL, NULL); } static void nvme_ctrlr_start(void *ctrlr_arg) { struct nvme_controller *ctrlr = ctrlr_arg; uint32_t old_num_io_queues; int i; /* * Only reset adminq here when we are restarting the * controller after a reset. During initialization, * we have already submitted admin commands to get * the number of I/O queues supported, so cannot reset * the adminq again here. */ if (ctrlr->is_resetting) { nvme_qpair_reset(&ctrlr->adminq); } for (i = 0; i < ctrlr->num_io_queues; i++) nvme_qpair_reset(&ctrlr->ioq[i]); nvme_admin_qpair_enable(&ctrlr->adminq); if (nvme_ctrlr_identify(ctrlr) != 0) { nvme_ctrlr_fail(ctrlr); return; } /* * The number of qpairs is determined during controller initialization, * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the * HW limit. We call SET_FEATURES again here so that it gets called * after any reset for controllers that depend on the driver to * explicitly specify how many queues it will use. This value should * never change between resets, so panic if somehow that does happen. */ if (ctrlr->is_resetting) { old_num_io_queues = ctrlr->num_io_queues; if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) { nvme_ctrlr_fail(ctrlr); return; } if (old_num_io_queues != ctrlr->num_io_queues) { panic("num_io_queues changed from %u to %u", old_num_io_queues, ctrlr->num_io_queues); } } if (nvme_ctrlr_create_qpairs(ctrlr) != 0) { nvme_ctrlr_fail(ctrlr); return; } if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) { nvme_ctrlr_fail(ctrlr); return; } nvme_ctrlr_configure_aer(ctrlr); nvme_ctrlr_configure_int_coalescing(ctrlr); for (i = 0; i < ctrlr->num_io_queues; i++) nvme_io_qpair_enable(&ctrlr->ioq[i]); } void nvme_ctrlr_start_config_hook(void *arg) { struct nvme_controller *ctrlr = arg; nvme_qpair_reset(&ctrlr->adminq); nvme_admin_qpair_enable(&ctrlr->adminq); if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 && nvme_ctrlr_construct_io_qpairs(ctrlr) == 0) nvme_ctrlr_start(ctrlr); else nvme_ctrlr_fail(ctrlr); nvme_sysctl_initialize_ctrlr(ctrlr); config_intrhook_disestablish(&ctrlr->config_hook); ctrlr->is_initialized = 1; nvme_notify_new_controller(ctrlr); } static void nvme_ctrlr_reset_task(void *arg, int pending) { struct nvme_controller *ctrlr = arg; int status; nvme_printf(ctrlr, "resetting controller\n"); status = nvme_ctrlr_hw_reset(ctrlr); /* * Use pause instead of DELAY, so that we yield to any nvme interrupt * handlers on this CPU that were blocked on a qpair lock. We want * all nvme interrupts completed before proceeding with restarting the * controller. * * XXX - any way to guarantee the interrupt handlers have quiesced?
*/ pause("nvmereset", hz / 10); if (status == 0) nvme_ctrlr_start(ctrlr); else nvme_ctrlr_fail(ctrlr); atomic_cmpset_32(&ctrlr->is_resetting, 1, 0); } static void nvme_ctrlr_intx_handler(void *arg) { struct nvme_controller *ctrlr = arg; nvme_mmio_write_4(ctrlr, intms, 1); nvme_qpair_process_completions(&ctrlr->adminq); if (ctrlr->ioq[0].cpl) nvme_qpair_process_completions(&ctrlr->ioq[0]); nvme_mmio_write_4(ctrlr, intmc, 1); } static int nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr) { ctrlr->msix_enabled = 0; ctrlr->num_io_queues = 1; ctrlr->num_cpus_per_ioq = mp_ncpus; ctrlr->rid = 0; ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ, &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE); if (ctrlr->res == NULL) { nvme_printf(ctrlr, "unable to allocate shared IRQ\n"); return (ENOMEM); } bus_setup_intr(ctrlr->dev, ctrlr->res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler, ctrlr, &ctrlr->tag); if (ctrlr->tag == NULL) { nvme_printf(ctrlr, "unable to setup intx handler\n"); return (ENOMEM); } return (0); } static void nvme_pt_done(void *arg, const struct nvme_completion *cpl) { struct nvme_pt_command *pt = arg; bzero(&pt->cpl, sizeof(pt->cpl)); pt->cpl.cdw0 = cpl->cdw0; pt->cpl.status = cpl->status; pt->cpl.status.p = 0; mtx_lock(pt->driver_lock); wakeup(pt); mtx_unlock(pt->driver_lock); } int nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr, struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer, int is_admin_cmd) { struct nvme_request *req; struct mtx *mtx; struct buf *buf = NULL; int ret = 0; if (pt->len > 0) { if (pt->len > ctrlr->max_xfer_size) { nvme_printf(ctrlr, "pt->len (%d) " "exceeds max_xfer_size (%d)\n", pt->len, ctrlr->max_xfer_size); return EIO; } if (is_user_buffer) { /* * Ensure the user buffer is wired for the duration of * this passthrough command. */ PHOLD(curproc); buf = getpbuf(NULL); buf->b_data = pt->buf; buf->b_bufsize = pt->len; buf->b_iocmd = pt->is_read ? 
BIO_READ : BIO_WRITE; #ifdef NVME_UNMAPPED_BIO_SUPPORT if (vmapbuf(buf, 1) < 0) { #else if (vmapbuf(buf) < 0) { #endif ret = EFAULT; goto err; } req = nvme_allocate_request_vaddr(buf->b_data, pt->len, nvme_pt_done, pt); } else req = nvme_allocate_request_vaddr(pt->buf, pt->len, nvme_pt_done, pt); } else req = nvme_allocate_request_null(nvme_pt_done, pt); req->cmd.opc = pt->cmd.opc; req->cmd.cdw10 = pt->cmd.cdw10; req->cmd.cdw11 = pt->cmd.cdw11; req->cmd.cdw12 = pt->cmd.cdw12; req->cmd.cdw13 = pt->cmd.cdw13; req->cmd.cdw14 = pt->cmd.cdw14; req->cmd.cdw15 = pt->cmd.cdw15; req->cmd.nsid = nsid; if (is_admin_cmd) mtx = &ctrlr->lock; else mtx = &ctrlr->ns[nsid-1].lock; mtx_lock(mtx); pt->driver_lock = mtx; if (is_admin_cmd) nvme_ctrlr_submit_admin_request(ctrlr, req); else nvme_ctrlr_submit_io_request(ctrlr, req); mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0); mtx_unlock(mtx); pt->driver_lock = NULL; err: if (buf != NULL) { relpbuf(buf, NULL); PRELE(curproc); } return (ret); } static int nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct nvme_controller *ctrlr; struct nvme_pt_command *pt; ctrlr = cdev->si_drv1; switch (cmd) { case NVME_RESET_CONTROLLER: nvme_ctrlr_reset(ctrlr); break; case NVME_PASSTHROUGH_CMD: pt = (struct nvme_pt_command *)arg; return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid, 1 /* is_user_buffer */, 1 /* is_admin_cmd */)); default: return (ENOTTY); } return (0); } static struct cdevsw nvme_ctrlr_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_ioctl = nvme_ctrlr_ioctl }; static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr) { device_t dev; int per_cpu_io_queues; int min_cpus_per_ioq; int num_vectors_requested, num_vectors_allocated; int num_vectors_available; dev = ctrlr->dev; min_cpus_per_ioq = 1; TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq); if (min_cpus_per_ioq < 1) { min_cpus_per_ioq = 1; } else if (min_cpus_per_ioq > mp_ncpus) { min_cpus_per_ioq = mp_ncpus; } per_cpu_io_queues = 1; TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues); if (per_cpu_io_queues == 0) { min_cpus_per_ioq = mp_ncpus; } ctrlr->force_intx = 0; TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx); /* * FreeBSD currently cannot allocate more than about 190 vectors at * boot, meaning that systems with high core count and many devices * requesting per-CPU interrupt vectors will not get their full * allotment. So first, try to allocate as many as we may need to * understand what is available, then immediately release them. * Then figure out how many of those we will actually use, based on * assigning an equal number of cores to each I/O queue. */ /* One vector for per core I/O queue, plus one vector for admin queue. */ num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1); if (pci_alloc_msix(dev, &num_vectors_available) != 0) { num_vectors_available = 0; } pci_release_msi(dev); if (ctrlr->force_intx || num_vectors_available < 2) { nvme_ctrlr_configure_intx(ctrlr); return; } /* * Do not use all vectors for I/O queues - one must be saved for the * admin queue. */ ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq, howmany(mp_ncpus, num_vectors_available - 1)); ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq); num_vectors_requested = ctrlr->num_io_queues + 1; num_vectors_allocated = num_vectors_requested; /* * Now just allocate the number of vectors we need. 
This should * succeed, since we previously called pci_alloc_msix() * successfully returning at least this many vectors, but just to * be safe, if something goes wrong just revert to INTx. */ if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) { nvme_ctrlr_configure_intx(ctrlr); return; } if (num_vectors_allocated < num_vectors_requested) { pci_release_msi(dev); nvme_ctrlr_configure_intx(ctrlr); return; } ctrlr->msix_enabled = 1; } int nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev) { union cap_lo_register cap_lo; union cap_hi_register cap_hi; int status, timeout_period; ctrlr->dev = dev; mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF); status = nvme_ctrlr_allocate_bar(ctrlr); if (status != 0) return (status); /* * Software emulators may set the doorbell stride to something * other than zero, but this driver is not set up to handle that. */ cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi); if (cap_hi.bits.dstrd != 0) return (ENXIO); ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin); /* Get ready timeout value from controller, in units of 500ms. */ cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo); ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500; timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD; TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period); timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD); timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD); ctrlr->timeout_period = timeout_period; nvme_retry_count = NVME_DEFAULT_RETRY_COUNT; TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count); ctrlr->enable_aborts = 0; TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts); nvme_ctrlr_setup_interrupts(ctrlr); ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE; nvme_ctrlr_construct_admin_qpair(ctrlr); ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev)); if (ctrlr->cdev == NULL) return (ENXIO); ctrlr->cdev->si_drv1 = (void *)ctrlr; ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK, taskqueue_thread_enqueue, &ctrlr->taskqueue); taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq"); ctrlr->is_resetting = 0; ctrlr->is_initialized = 0; ctrlr->notification_sent = 0; TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr); TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr); STAILQ_INIT(&ctrlr->fail_req); ctrlr->is_failed = FALSE; return (0); } void nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev) { int i; /* * Notify the controller of a shutdown, even though this is due to * a driver unload, not a system shutdown (this path is not invoked * during shutdown). This ensures the controller receives a * shutdown notification in case the system is shutdown before * reloading the driver. 
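* The handshake itself is in nvme_ctrlr_shutdown() below: CC.SHN is set to
* NVME_SHN_NORMAL and CSTS.SHST is polled until it reads NVME_SHST_COMPLETE,
* giving up after roughly five seconds (5 * hz ticks).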
*/ nvme_ctrlr_shutdown(ctrlr); nvme_ctrlr_disable(ctrlr); taskqueue_free(ctrlr->taskqueue); for (i = 0; i < NVME_MAX_NAMESPACES; i++) nvme_ns_destruct(&ctrlr->ns[i]); if (ctrlr->cdev) destroy_dev(ctrlr->cdev); for (i = 0; i < ctrlr->num_io_queues; i++) { nvme_io_qpair_destroy(&ctrlr->ioq[i]); } free(ctrlr->ioq, M_NVME); nvme_admin_qpair_destroy(&ctrlr->adminq); if (ctrlr->resource != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id, ctrlr->resource); } if (ctrlr->bar4_resource != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->bar4_resource_id, ctrlr->bar4_resource); } if (ctrlr->tag) bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag); if (ctrlr->res) bus_release_resource(ctrlr->dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res), ctrlr->res); if (ctrlr->msix_enabled) pci_release_msi(dev); } void nvme_ctrlr_shutdown(struct nvme_controller *ctrlr) { union cc_register cc; union csts_register csts; int ticks = 0; cc.raw = nvme_mmio_read_4(ctrlr, cc); cc.bits.shn = NVME_SHN_NORMAL; nvme_mmio_write_4(ctrlr, cc, cc.raw); csts.raw = nvme_mmio_read_4(ctrlr, csts); while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) { pause("nvme shn", 1); csts.raw = nvme_mmio_read_4(ctrlr, csts); } if (csts.bits.shst != NVME_SHST_COMPLETE) nvme_printf(ctrlr, "did not complete shutdown within 5 seconds " "of notification\n"); } void nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, struct nvme_request *req) { nvme_qpair_submit_request(&ctrlr->adminq, req); } void nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr, struct nvme_request *req) { struct nvme_qpair *qpair; qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq]; nvme_qpair_submit_request(qpair, req); } device_t nvme_ctrlr_get_device(struct nvme_controller *ctrlr) { return (ctrlr->dev); } const struct nvme_controller_data * nvme_ctrlr_get_data(struct nvme_controller *ctrlr) { return (&ctrlr->cdata); } Index: head/sys/dev/quicc/quicc_core.c =================================================================== --- head/sys/dev/quicc/quicc_core.c (revision 295789) +++ head/sys/dev/quicc/quicc_core.c (revision 295790) @@ -1,401 +1,401 @@ /*- * Copyright 2006 by Juniper Networks. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define quicc_read2(r, o) \ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, o) #define quicc_read4(r, o) \ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, o) #define quicc_write2(r, o, v) \ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, o, v) #define quicc_write4(r, o, v) \ bus_space_write_4((r)->r_bustag, (r)->r_bushandle, o, v) devclass_t quicc_devclass; char quicc_driver_name[] = "quicc"; static MALLOC_DEFINE(M_QUICC, "QUICC", "QUICC driver"); struct quicc_device { struct rman *qd_rman; struct resource_list qd_rlist; device_t qd_dev; int qd_devtype; driver_filter_t *qd_ih; void *qd_ih_arg; }; static int quicc_bfe_intr(void *arg) { struct quicc_device *qd; struct quicc_softc *sc = arg; uint32_t sipnr; sipnr = quicc_read4(sc->sc_rres, QUICC_REG_SIPNR_L); if (sipnr & 0x00f00000) qd = sc->sc_device; else qd = NULL; if (qd == NULL || qd->qd_ih == NULL) { device_printf(sc->sc_dev, "Stray interrupt %08x\n", sipnr); return (FILTER_STRAY); } return ((*qd->qd_ih)(qd->qd_ih_arg)); } int quicc_bfe_attach(device_t dev) { struct quicc_device *qd; struct quicc_softc *sc; struct resource_list_entry *rle; const char *sep; rman_res_t size, start; int error; sc = device_get_softc(dev); /* * Re-allocate. We expect that the softc contains the information * collected by quicc_bfe_probe() intact. */ - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, &sc->sc_rrid, - 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, + RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); start = rman_get_start(sc->sc_rres); size = rman_get_size(sc->sc_rres); sc->sc_rman.rm_start = start; sc->sc_rman.rm_end = start + size - 1; sc->sc_rman.rm_type = RMAN_ARRAY; sc->sc_rman.rm_descr = "QUICC resources"; error = rman_init(&sc->sc_rman); if (!error) error = rman_manage_region(&sc->sc_rman, start, start + size - 1); if (error) { bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (error); } /* * Allocate interrupt resource. 
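The code below first tries to install quicc_bfe_intr as a fast filter handler; if bus_setup_intr() rejects that, it falls back to a regular INTR_MPSAFE ithread handler, and if no IRQ can be set up at all the driver runs in polled mode.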
*/ sc->sc_irid = 0; sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, quicc_bfe_intr, NULL, sc, &sc->sc_icookie); if (error) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) sc->sc_polled = 1; if (bootverbose && (sc->sc_fastintr || sc->sc_polled)) { sep = ""; device_print_prettyname(dev); if (sc->sc_fastintr) { printf("%sfast interrupt", sep); sep = ", "; } if (sc->sc_polled) { printf("%spolled mode", sep); sep = ", "; } printf("\n"); } sc->sc_device = qd = malloc(sizeof(struct quicc_device), M_QUICC, M_WAITOK | M_ZERO); qd->qd_devtype = QUICC_DEVTYPE_SCC; qd->qd_rman = &sc->sc_rman; resource_list_init(&qd->qd_rlist); resource_list_add(&qd->qd_rlist, sc->sc_rtype, 0, start, start + size - 1, size); resource_list_add(&qd->qd_rlist, SYS_RES_IRQ, 0, 0xf00, 0xf00, 1); rle = resource_list_find(&qd->qd_rlist, SYS_RES_IRQ, 0); rle->res = sc->sc_ires; qd->qd_dev = device_add_child(dev, NULL, -1); device_set_ivars(qd->qd_dev, (void *)qd); error = device_probe_and_attach(qd->qd_dev); /* Enable all SCC interrupts. */ quicc_write4(sc->sc_rres, QUICC_REG_SIMR_L, 0x00f00000); /* Clear all pending interrupts. */ quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_H, ~0); quicc_write4(sc->sc_rres, QUICC_REG_SIPNR_L, ~0); return (error); } int quicc_bfe_detach(device_t dev) { struct quicc_softc *sc; sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return (0); } int quicc_bfe_probe(device_t dev, u_int clock) { struct quicc_softc *sc; uint16_t rev; sc = device_get_softc(dev); sc->sc_dev = dev; if (device_get_desc(dev) == NULL) device_set_desc(dev, "Quad integrated communications controller"); sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_MEMORY; - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, &sc->sc_rrid, - 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, + RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_IOPORT; - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, - &sc->sc_rrid, 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, + &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } sc->sc_clock = clock; /* * Check that the microcode revision is 0x00e8, as documented * in the MPC8555E PowerQUICC III Integrated Processor Family * Reference Manual. */ rev = quicc_read2(sc->sc_rres, QUICC_PRAM_REV_NUM); bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres); return ((rev == 0x00e8) ? BUS_PROBE_DEFAULT : ENXIO); } struct resource * quicc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (NULL); /* We only support default allocations. 
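(A "default" allocation is what bus_alloc_resource_any() issues: start 0 and end ~0, meaning the child expresses no address preference; anything more specific is rejected below.)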
*/ if (start != 0UL || end != ~0UL) return (NULL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, *rid); if (rle == NULL) return (NULL); if (rle->res == NULL) { rle->res = rman_reserve_resource(qd->qd_rman, rle->start, rle->start + rle->count - 1, rle->count, flags, child); if (rle->res != NULL) { rman_set_bustag(rle->res, &bs_be_tag); rman_set_bushandle(rle->res, rle->start); } } return (rle->res); } int quicc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, rid); if (rle == NULL) return (EINVAL); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } int quicc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct quicc_device *qd; struct quicc_softc *sc; uint32_t sccr; if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); qd = device_get_ivars(child); switch (index) { case QUICC_IVAR_CLOCK: *result = sc->sc_clock; break; case QUICC_IVAR_BRGCLK: sccr = quicc_read4(sc->sc_rres, QUICC_REG_SCCR) & 3; *result = sc->sc_clock / ((1 << (sccr + 1)) << sccr); break; case QUICC_IVAR_DEVTYPE: *result = qd->qd_devtype; break; default: return (EINVAL); } return (0); } int quicc_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { struct quicc_device *qd; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); rle = resource_list_find(&qd->qd_rlist, type, rid); return ((rle == NULL) ? EINVAL : 0); } int quicc_bus_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct quicc_device *qd; struct quicc_softc *sc; if (device_get_parent(child) != dev) return (EINVAL); /* Interrupt handlers must be FAST or MPSAFE. */ if (filt == NULL && !(flags & INTR_MPSAFE)) return (EINVAL); sc = device_get_softc(dev); if (sc->sc_polled) return (ENXIO); if (sc->sc_fastintr && filt == NULL) { sc->sc_fastintr = 0; bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)quicc_bfe_intr, sc, &sc->sc_icookie); } qd = device_get_ivars(child); qd->qd_ih = (filt != NULL) ? filt : (driver_filter_t *)ihand; qd->qd_ih_arg = arg; *cookiep = ihand; return (0); } int quicc_bus_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct quicc_device *qd; if (device_get_parent(child) != dev) return (EINVAL); qd = device_get_ivars(child); if (qd->qd_ih != cookie) return (EINVAL); qd->qd_ih = NULL; qd->qd_ih_arg = NULL; return (0); } Index: head/sys/dev/sound/pci/envy24.c =================================================================== --- head/sys/dev/sound/pci/envy24.c (revision 295789) +++ head/sys/dev/sound/pci/envy24.c (revision 295790) @@ -1,2699 +1,2699 @@ /*- * Copyright (c) 2001 Katsurajima Naoto * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); static MALLOC_DEFINE(M_ENVY24, "envy24", "envy24 audio"); /* -------------------------------------------------------------------- */ struct sc_info; #define ENVY24_PLAY_CHNUM 10 #define ENVY24_REC_CHNUM 12 #define ENVY24_PLAY_BUFUNIT (4 /* byte/sample */ * 10 /* channel */) #define ENVY24_REC_BUFUNIT (4 /* byte/sample */ * 12 /* channel */) #define ENVY24_SAMPLE_NUM 4096 #define ENVY24_TIMEOUT 1000 #define ENVY24_DEFAULT_FORMAT SND_FORMAT(AFMT_S16_LE, 2, 0) #define ENVY24_NAMELEN 32 #define SDA_GPIO 0x10 #define SCL_GPIO 0x20 struct envy24_sample { volatile u_int32_t buffer; }; typedef struct envy24_sample sample32_t; /* channel registers */ struct sc_chinfo { struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; int dir; unsigned num; /* hw channel number */ /* channel information */ u_int32_t format; u_int32_t speed; u_int32_t blk; /* hw block size(dword) */ /* format conversion structure */ u_int8_t *data; unsigned int size; /* data buffer size(byte) */ int unit; /* sample size(byte) */ unsigned int offset; /* samples number offset */ void (*emldma)(struct sc_chinfo *); /* flags */ int run; }; /* codec interface entrys */ struct codec_entry { void *(*create)(device_t dev, void *devinfo, int dir, int num); void (*destroy)(void *codec); void (*init)(void *codec); void (*reinit)(void *codec); void (*setvolume)(void *codec, int dir, unsigned int left, unsigned int right); void (*setrate)(void *codec, int which, int rate); }; /* system configuration information */ struct cfg_info { char *name; u_int16_t subvendor, subdevice; u_int8_t scfg, acl, i2s, spdif; u_int8_t gpiomask, gpiostate, gpiodir; u_int8_t cdti, cclk, cs, cif, type; u_int8_t free; struct codec_entry *codec; }; /* device private data */ struct sc_info { device_t dev; struct mtx *lock; /* Control/Status registor */ struct resource *cs; int csid; bus_space_tag_t cst; bus_space_handle_t csh; /* DDMA registor */ struct resource *ddma; int ddmaid; bus_space_tag_t ddmat; bus_space_handle_t ddmah; /* Consumer Section DMA Channel Registers */ struct resource *ds; int dsid; bus_space_tag_t dst; bus_space_handle_t dsh; /* MultiTrack registor */ struct resource *mt; int mtid; bus_space_tag_t mtt; bus_space_handle_t mth; /* DMA tag */ bus_dma_tag_t dmat; /* IRQ resource */ struct resource *irq; int irqid; void *ih; /* system configuration data */ struct cfg_info *cfg; /* ADC/DAC number and info */ 
int adcn, dacn; void *adc[4], *dac[4]; /* mixer control data */ u_int32_t src; u_int8_t left[ENVY24_CHAN_NUM]; u_int8_t right[ENVY24_CHAN_NUM]; /* Play/Record DMA fifo */ sample32_t *pbuf; sample32_t *rbuf; u_int32_t psize, rsize; /* DMA buffer size(byte) */ u_int16_t blk[2]; /* transfer check blocksize(dword) */ bus_dmamap_t pmap, rmap; bus_addr_t paddr, raddr; /* current status */ u_int32_t speed; int run[2]; u_int16_t intr[2]; struct pcmchan_caps caps[2]; /* channel info table */ unsigned chnum; struct sc_chinfo chan[11]; }; /* -------------------------------------------------------------------- */ /* * prototypes */ /* DMA emulator */ static void envy24_p8u(struct sc_chinfo *); static void envy24_p16sl(struct sc_chinfo *); static void envy24_p32sl(struct sc_chinfo *); static void envy24_r16sl(struct sc_chinfo *); static void envy24_r32sl(struct sc_chinfo *); /* channel interface */ static void *envy24chan_init(kobj_t, void *, struct snd_dbuf *, struct pcm_channel *, int); static int envy24chan_setformat(kobj_t, void *, u_int32_t); static u_int32_t envy24chan_setspeed(kobj_t, void *, u_int32_t); static u_int32_t envy24chan_setblocksize(kobj_t, void *, u_int32_t); static int envy24chan_trigger(kobj_t, void *, int); static u_int32_t envy24chan_getptr(kobj_t, void *); static struct pcmchan_caps *envy24chan_getcaps(kobj_t, void *); /* mixer interface */ static int envy24mixer_init(struct snd_mixer *); static int envy24mixer_reinit(struct snd_mixer *); static int envy24mixer_uninit(struct snd_mixer *); static int envy24mixer_set(struct snd_mixer *, unsigned, unsigned, unsigned); static u_int32_t envy24mixer_setrecsrc(struct snd_mixer *, u_int32_t); /* M-Audio Delta series AK4524 access interface */ static void *envy24_delta_ak4524_create(device_t, void *, int, int); static void envy24_delta_ak4524_destroy(void *); static void envy24_delta_ak4524_init(void *); static void envy24_delta_ak4524_reinit(void *); static void envy24_delta_ak4524_setvolume(void *, int, unsigned int, unsigned int); /* -------------------------------------------------------------------- */ /* system constant tables */ /* API -> hardware channel map */ static unsigned envy24_chanmap[ENVY24_CHAN_NUM] = { ENVY24_CHAN_PLAY_SPDIF, /* 0 */ ENVY24_CHAN_PLAY_DAC1, /* 1 */ ENVY24_CHAN_PLAY_DAC2, /* 2 */ ENVY24_CHAN_PLAY_DAC3, /* 3 */ ENVY24_CHAN_PLAY_DAC4, /* 4 */ ENVY24_CHAN_REC_MIX, /* 5 */ ENVY24_CHAN_REC_SPDIF, /* 6 */ ENVY24_CHAN_REC_ADC1, /* 7 */ ENVY24_CHAN_REC_ADC2, /* 8 */ ENVY24_CHAN_REC_ADC3, /* 9 */ ENVY24_CHAN_REC_ADC4, /* 10 */ }; /* mixer -> API channel map. see above */ static int envy24_mixmap[] = { -1, /* Master output level. 
It is depend on codec support */ -1, /* Treble level of all output channels */ -1, /* Bass level of all output channels */ -1, /* Volume of synthesier input */ 0, /* Output level for the audio device */ -1, /* Output level for the PC speaker */ 7, /* line in jack */ -1, /* microphone jack */ -1, /* CD audio input */ -1, /* Recording monitor */ 1, /* alternative codec */ -1, /* global recording level */ -1, /* Input gain */ -1, /* Output gain */ 8, /* Input source 1 */ 9, /* Input source 2 */ 10, /* Input source 3 */ 6, /* Digital (input) 1 */ -1, /* Digital (input) 2 */ -1, /* Digital (input) 3 */ -1, /* Phone input */ -1, /* Phone output */ -1, /* Video/TV (audio) in */ -1, /* Radio in */ -1, /* Monitor volume */ }; /* variable rate audio */ static u_int32_t envy24_speed[] = { 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 9600, 8000, 0 }; /* known boards configuration */ static struct codec_entry delta_codec = { envy24_delta_ak4524_create, envy24_delta_ak4524_destroy, envy24_delta_ak4524_init, envy24_delta_ak4524_reinit, envy24_delta_ak4524_setvolume, NULL, /* setrate */ }; static struct cfg_info cfg_table[] = { { "Envy24 audio (M Audio Delta Dio 2496)", 0x1412, 0xd631, 0x10, 0x80, 0xf0, 0x03, 0x02, 0xc0, 0xfd, 0x10, 0x20, 0x40, 0x00, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (Terratec DMX 6fire)", 0x153b, 0x1138, 0x2f, 0x80, 0xf0, 0x03, 0xc0, 0xff, 0x7f, 0x10, 0x20, 0x01, 0x01, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (M Audio Audiophile 2496)", 0x1412, 0xd634, 0x10, 0x80, 0x72, 0x03, 0x04, 0xfe, 0xfb, 0x08, 0x02, 0x20, 0x00, 0x01, 0x00, &delta_codec, }, { "Envy24 audio (M Audio Delta 66)", 0x1412, 0xd632, 0x15, 0x80, 0xf0, 0x03, 0x02, 0xc0, 0xfd, 0x10, 0x20, 0x40, 0x00, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (M Audio Delta 44)", 0x1412, 0xd633, 0x15, 0x80, 0xf0, 0x00, 0x02, 0xc0, 0xfd, 0x10, 0x20, 0x40, 0x00, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (M Audio Delta 1010)", 0x1412, 0xd630, 0x1f, 0x80, 0xf0, 0x03, 0x22, 0xd0, 0xdd, 0x10, 0x20, 0x40, 0x00, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (M Audio Delta 1010LT)", 0x1412, 0xd63b, 0x1f, 0x80, 0x72, 0x03, 0x04, 0x7e, 0xfb, 0x08, 0x02, 0x70, 0x00, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (Terratec EWX 2496)", 0x153b, 0x1130, 0x10, 0x80, 0xf0, 0x03, 0xc0, 0x3f, 0x3f, 0x10, 0x20, 0x01, 0x01, 0x00, 0x00, &delta_codec, }, { "Envy24 audio (Generic)", 0, 0, 0x0f, 0x00, 0x01, 0x03, 0xff, 0x00, 0x00, 0x10, 0x20, 0x40, 0x00, 0x00, 0x00, &delta_codec, /* default codec routines */ } }; static u_int32_t envy24_recfmt[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_S32_LE, 2, 0), 0 }; static struct pcmchan_caps envy24_reccaps = {8000, 96000, envy24_recfmt, 0}; static u_int32_t envy24_playfmt[] = { SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_S32_LE, 2, 0), 0 }; static struct pcmchan_caps envy24_playcaps = {8000, 96000, envy24_playfmt, 0}; struct envy24_emldma { u_int32_t format; void (*emldma)(struct sc_chinfo *); int unit; }; static struct envy24_emldma envy24_pemltab[] = { {SND_FORMAT(AFMT_U8, 2, 0), envy24_p8u, 2}, {SND_FORMAT(AFMT_S16_LE, 2, 0), envy24_p16sl, 4}, {SND_FORMAT(AFMT_S32_LE, 2, 0), envy24_p32sl, 8}, {0, NULL, 0} }; static struct envy24_emldma envy24_remltab[] = { {SND_FORMAT(AFMT_S16_LE, 2, 0), envy24_r16sl, 4}, {SND_FORMAT(AFMT_S32_LE, 2, 0), envy24_r32sl, 8}, {0, NULL, 0} }; /* -------------------------------------------------------------------- */ /* common routines */ static u_int32_t envy24_rdcs(struct sc_info *sc, int regno, int size) { 
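/* Width-dispatched read of a CCS (Control/Status) register: size is in bytes (1, 2 or 4); any other width reads back as 0xffffffff. */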
switch (size) { case 1: return bus_space_read_1(sc->cst, sc->csh, regno); case 2: return bus_space_read_2(sc->cst, sc->csh, regno); case 4: return bus_space_read_4(sc->cst, sc->csh, regno); default: return 0xffffffff; } } static void envy24_wrcs(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->cst, sc->csh, regno, data); break; case 2: bus_space_write_2(sc->cst, sc->csh, regno, data); break; case 4: bus_space_write_4(sc->cst, sc->csh, regno, data); break; } } static u_int32_t envy24_rdmt(struct sc_info *sc, int regno, int size) { switch (size) { case 1: return bus_space_read_1(sc->mtt, sc->mth, regno); case 2: return bus_space_read_2(sc->mtt, sc->mth, regno); case 4: return bus_space_read_4(sc->mtt, sc->mth, regno); default: return 0xffffffff; } } static void envy24_wrmt(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->mtt, sc->mth, regno, data); break; case 2: bus_space_write_2(sc->mtt, sc->mth, regno, data); break; case 4: bus_space_write_4(sc->mtt, sc->mth, regno, data); break; } } static u_int32_t envy24_rdci(struct sc_info *sc, int regno) { envy24_wrcs(sc, ENVY24_CCS_INDEX, regno, 1); return envy24_rdcs(sc, ENVY24_CCS_DATA, 1); } static void envy24_wrci(struct sc_info *sc, int regno, u_int32_t data) { envy24_wrcs(sc, ENVY24_CCS_INDEX, regno, 1); envy24_wrcs(sc, ENVY24_CCS_DATA, data, 1); } /* -------------------------------------------------------------------- */ /* I2C port/E2PROM access routines */ static int envy24_rdi2c(struct sc_info *sc, u_int32_t dev, u_int32_t addr) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24_rdi2c(sc, 0x%02x, 0x%02x)\n", dev, addr); #endif for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdcs(sc, ENVY24_CCS_I2CSTAT, 1); if ((data & ENVY24_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24_TIMEOUT) { return -1; } envy24_wrcs(sc, ENVY24_CCS_I2CADDR, addr, 1); envy24_wrcs(sc, ENVY24_CCS_I2CDEV, (dev & ENVY24_CCS_I2CDEV_ADDR) | ENVY24_CCS_I2CDEV_RD, 1); for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdcs(sc, ENVY24_CCS_I2CSTAT, 1); if ((data & ENVY24_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24_TIMEOUT) { return -1; } data = envy24_rdcs(sc, ENVY24_CCS_I2CDATA, 1); #if(0) device_printf(sc->dev, "envy24_rdi2c(): return 0x%x\n", data); #endif return (int)data; } #if 0 static int envy24_wri2c(struct sc_info *sc, u_int32_t dev, u_int32_t addr, u_int32_t data) { u_int32_t tmp; int i; #if(0) device_printf(sc->dev, "envy24_rdi2c(sc, 0x%02x, 0x%02x)\n", dev, addr); #endif for (i = 0; i < ENVY24_TIMEOUT; i++) { tmp = envy24_rdcs(sc, ENVY24_CCS_I2CSTAT, 1); if ((tmp & ENVY24_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24_TIMEOUT) { return -1; } envy24_wrcs(sc, ENVY24_CCS_I2CADDR, addr, 1); envy24_wrcs(sc, ENVY24_CCS_I2CDATA, data, 1); envy24_wrcs(sc, ENVY24_CCS_I2CDEV, (dev & ENVY24_CCS_I2CDEV_ADDR) | ENVY24_CCS_I2CDEV_WR, 1); for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdcs(sc, ENVY24_CCS_I2CSTAT, 1); if ((data & ENVY24_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24_TIMEOUT) { return -1; } return 0; } #endif static int envy24_rdrom(struct sc_info *sc, u_int32_t addr) { u_int32_t data; #if(0) device_printf(sc->dev, "envy24_rdrom(sc, 0x%02x)\n", addr); #endif data = envy24_rdcs(sc, ENVY24_CCS_I2CSTAT, 1); if ((data & ENVY24_CCS_I2CSTAT_ROM) == 0) { #if(0) device_printf(sc->dev, "envy24_rdrom(): E2PROM not presented\n"); 
#endif return -1; } return envy24_rdi2c(sc, ENVY24_CCS_I2CDEV_ROM, addr); } static struct cfg_info * envy24_rom2cfg(struct sc_info *sc) { struct cfg_info *buff; int size; int i; #if(0) device_printf(sc->dev, "envy24_rom2cfg(sc)\n"); #endif size = envy24_rdrom(sc, ENVY24_E2PROM_SIZE); if (size < ENVY24_E2PROM_GPIODIR + 1) { #if(0) device_printf(sc->dev, "envy24_rom2cfg(): ENVY24_E2PROM_SIZE-->%d\n", size); #endif return NULL; } buff = malloc(sizeof(*buff), M_ENVY24, M_NOWAIT); if (buff == NULL) { #if(0) device_printf(sc->dev, "envy24_rom2cfg(): malloc()\n"); #endif return NULL; } buff->free = 1; buff->subvendor = envy24_rdrom(sc, ENVY24_E2PROM_SUBVENDOR) << 8; buff->subvendor += envy24_rdrom(sc, ENVY24_E2PROM_SUBVENDOR + 1); buff->subdevice = envy24_rdrom(sc, ENVY24_E2PROM_SUBDEVICE) << 8; buff->subdevice += envy24_rdrom(sc, ENVY24_E2PROM_SUBDEVICE + 1); buff->scfg = envy24_rdrom(sc, ENVY24_E2PROM_SCFG); buff->acl = envy24_rdrom(sc, ENVY24_E2PROM_ACL); buff->i2s = envy24_rdrom(sc, ENVY24_E2PROM_I2S); buff->spdif = envy24_rdrom(sc, ENVY24_E2PROM_SPDIF); buff->gpiomask = envy24_rdrom(sc, ENVY24_E2PROM_GPIOMASK); buff->gpiostate = envy24_rdrom(sc, ENVY24_E2PROM_GPIOSTATE); buff->gpiodir = envy24_rdrom(sc, ENVY24_E2PROM_GPIODIR); for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) if (cfg_table[i].subvendor == buff->subvendor && cfg_table[i].subdevice == buff->subdevice) break; buff->name = cfg_table[i].name; buff->codec = cfg_table[i].codec; return buff; } static void envy24_cfgfree(struct cfg_info *cfg) { if (cfg == NULL) return; if (cfg->free) free(cfg, M_ENVY24); return; } /* -------------------------------------------------------------------- */ /* AC'97 codec access routines */ #if 0 static int envy24_coldcd(struct sc_info *sc) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24_coldcd()\n"); #endif envy24_wrmt(sc, ENVY24_MT_AC97CMD, ENVY24_MT_AC97CMD_CLD, 1); DELAY(10); envy24_wrmt(sc, ENVY24_MT_AC97CMD, 0, 1); DELAY(1000); for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdmt(sc, ENVY24_MT_AC97CMD, 1); if (data & ENVY24_MT_AC97CMD_RDY) { return 0; } } return -1; } #endif static int envy24_slavecd(struct sc_info *sc) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24_slavecd()\n"); #endif envy24_wrmt(sc, ENVY24_MT_AC97CMD, ENVY24_MT_AC97CMD_CLD | ENVY24_MT_AC97CMD_WRM, 1); DELAY(10); envy24_wrmt(sc, ENVY24_MT_AC97CMD, 0, 1); DELAY(1000); for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdmt(sc, ENVY24_MT_AC97CMD, 1); if (data & ENVY24_MT_AC97CMD_RDY) { return 0; } } return -1; } #if 0 static int envy24_rdcd(kobj_t obj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24_rdcd(obj, sc, 0x%02x)\n", regno); #endif envy24_wrmt(sc, ENVY24_MT_AC97IDX, (u_int32_t)regno, 1); envy24_wrmt(sc, ENVY24_MT_AC97CMD, ENVY24_MT_AC97CMD_RD, 1); for (i = 0; i < ENVY24_TIMEOUT; i++) { data = envy24_rdmt(sc, ENVY24_MT_AC97CMD, 1); if ((data & ENVY24_MT_AC97CMD_RD) == 0) break; } data = envy24_rdmt(sc, ENVY24_MT_AC97DLO, 2); #if(0) device_printf(sc->dev, "envy24_rdcd(): return 0x%x\n", data); #endif return (int)data; } static int envy24_wrcd(kobj_t obj, void *devinfo, int regno, u_int16_t data) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t cmd; int i; #if(0) device_printf(sc->dev, "envy24_wrcd(obj, sc, 0x%02x, 0x%04x)\n", regno, data); #endif envy24_wrmt(sc, ENVY24_MT_AC97IDX, (u_int32_t)regno, 1); envy24_wrmt(sc, ENVY24_MT_AC97DLO, (u_int32_t)data, 2); 
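/* The data word is latched in AC97DLO first; the write is then kicked off via the WR command bit, which the loop below polls until the controller clears it. */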
envy24_wrmt(sc, ENVY24_MT_AC97CMD, ENVY24_MT_AC97CMD_WR, 1); for (i = 0; i < ENVY24_TIMEOUT; i++) { cmd = envy24_rdmt(sc, ENVY24_MT_AC97CMD, 1); if ((cmd & ENVY24_MT_AC97CMD_WR) == 0) break; } return 0; } static kobj_method_t envy24_ac97_methods[] = { KOBJMETHOD(ac97_read, envy24_rdcd), KOBJMETHOD(ac97_write, envy24_wrcd), KOBJMETHOD_END }; AC97_DECLARE(envy24_ac97); #endif /* -------------------------------------------------------------------- */ /* GPIO access routines */ static u_int32_t envy24_gpiord(struct sc_info *sc) { return envy24_rdci(sc, ENVY24_CCI_GPIODAT); } static void envy24_gpiowr(struct sc_info *sc, u_int32_t data) { #if(0) device_printf(sc->dev, "envy24_gpiowr(sc, 0x%02x)\n", data & 0xff); return; #endif envy24_wrci(sc, ENVY24_CCI_GPIODAT, data); return; } #if 0 static u_int32_t envy24_gpiogetmask(struct sc_info *sc) { return envy24_rdci(sc, ENVY24_CCI_GPIOMASK); } #endif static void envy24_gpiosetmask(struct sc_info *sc, u_int32_t mask) { envy24_wrci(sc, ENVY24_CCI_GPIOMASK, mask); return; } #if 0 static u_int32_t envy24_gpiogetdir(struct sc_info *sc) { return envy24_rdci(sc, ENVY24_CCI_GPIOCTL); } #endif static void envy24_gpiosetdir(struct sc_info *sc, u_int32_t dir) { envy24_wrci(sc, ENVY24_CCI_GPIOCTL, dir); return; } /* -------------------------------------------------------------------- */ /* Envy24 I2C through GPIO bit-banging */ struct envy24_delta_ak4524_codec { struct spicds_info *info; struct sc_info *parent; int dir; int num; int cs, cclk, cdti; }; static void envy24_gpio_i2c_ctl(void *codec, unsigned int scl, unsigned int sda) { u_int32_t data = 0; struct envy24_delta_ak4524_codec *ptr = codec; #if(0) device_printf(ptr->parent->dev, "--> %d, %d\n", scl, sda); #endif data = envy24_gpiord(ptr->parent); data &= ~(SDA_GPIO | SCL_GPIO); if (scl) data += SCL_GPIO; if (sda) data += SDA_GPIO; envy24_gpiowr(ptr->parent, data); return; } static void i2c_wrbit(void *codec, void (*ctrl)(void*, unsigned int, unsigned int), int bit) { struct envy24_delta_ak4524_codec *ptr = codec; unsigned int sda; if (bit) sda = 1; else sda = 0; ctrl(ptr, 0, sda); DELAY(I2C_DELAY); ctrl(ptr, 1, sda); DELAY(I2C_DELAY); ctrl(ptr, 0, sda); DELAY(I2C_DELAY); } static void i2c_start(void *codec, void (*ctrl)(void*, unsigned int, unsigned int)) { struct envy24_delta_ak4524_codec *ptr = codec; ctrl(ptr, 1, 1); DELAY(I2C_DELAY); ctrl(ptr, 1, 0); DELAY(I2C_DELAY); ctrl(ptr, 0, 0); DELAY(I2C_DELAY); } static void i2c_stop(void *codec, void (*ctrl)(void*, unsigned int, unsigned int)) { struct envy24_delta_ak4524_codec *ptr = codec; ctrl(ptr, 0, 0); DELAY(I2C_DELAY); ctrl(ptr, 1, 0); DELAY(I2C_DELAY); ctrl(ptr, 1, 1); DELAY(I2C_DELAY); } static void i2c_ack(void *codec, void (*ctrl)(void*, unsigned int, unsigned int)) { struct envy24_delta_ak4524_codec *ptr = codec; ctrl(ptr, 0, 1); DELAY(I2C_DELAY); ctrl(ptr, 1, 1); DELAY(I2C_DELAY); /* dummy, need routine to change gpio direction */ ctrl(ptr, 0, 1); DELAY(I2C_DELAY); } static void i2c_wr(void *codec, void (*ctrl)(void*, unsigned int, unsigned int), u_int32_t dev, int reg, u_int8_t val) { struct envy24_delta_ak4524_codec *ptr = codec; int mask; i2c_start(ptr, ctrl); for (mask = 0x80; mask != 0; mask >>= 1) i2c_wrbit(ptr, ctrl, dev & mask); i2c_ack(ptr, ctrl); if (reg != 0xff) { for (mask = 0x80; mask != 0; mask >>= 1) i2c_wrbit(ptr, ctrl, reg & mask); i2c_ack(ptr, ctrl); } for (mask = 0x80; mask != 0; mask >>= 1) i2c_wrbit(ptr, ctrl, val & mask); i2c_ack(ptr, ctrl); i2c_stop(ptr, ctrl); } /* 
-------------------------------------------------------------------- */ /* M-Audio Delta series AK4524 access interface routine */ static void envy24_delta_ak4524_ctl(void *codec, unsigned int cs, unsigned int cclk, unsigned int cdti) { u_int32_t data = 0; struct envy24_delta_ak4524_codec *ptr = codec; #if(0) device_printf(ptr->parent->dev, "--> %d, %d, %d\n", cs, cclk, cdti); #endif data = envy24_gpiord(ptr->parent); data &= ~(ptr->cs | ptr->cclk | ptr->cdti); if (cs) data += ptr->cs; if (cclk) data += ptr->cclk; if (cdti) data += ptr->cdti; envy24_gpiowr(ptr->parent, data); return; } static void * envy24_delta_ak4524_create(device_t dev, void *info, int dir, int num) { struct sc_info *sc = info; struct envy24_delta_ak4524_codec *buff = NULL; #if(0) device_printf(sc->dev, "envy24_delta_ak4524_create(dev, sc, %d, %d)\n", dir, num); #endif buff = malloc(sizeof(*buff), M_ENVY24, M_NOWAIT); if (buff == NULL) return NULL; if (dir == PCMDIR_REC && sc->adc[num] != NULL) buff->info = ((struct envy24_delta_ak4524_codec *)sc->adc[num])->info; else if (dir == PCMDIR_PLAY && sc->dac[num] != NULL) buff->info = ((struct envy24_delta_ak4524_codec *)sc->dac[num])->info; else buff->info = spicds_create(dev, buff, num, envy24_delta_ak4524_ctl); if (buff->info == NULL) { free(buff, M_ENVY24); return NULL; } buff->parent = sc; buff->dir = dir; buff->num = num; return (void *)buff; } static void envy24_delta_ak4524_destroy(void *codec) { struct envy24_delta_ak4524_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24_delta_ak4524_destroy()\n"); #endif if (ptr->dir == PCMDIR_PLAY) { if (ptr->parent->dac[ptr->num] != NULL) spicds_destroy(ptr->info); } else { if (ptr->parent->adc[ptr->num] != NULL) spicds_destroy(ptr->info); } free(codec, M_ENVY24); } static void envy24_delta_ak4524_init(void *codec) { #if 0 u_int32_t gpiomask, gpiodir; #endif struct envy24_delta_ak4524_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24_delta_ak4524_init()\n"); #endif /* gpiomask = envy24_gpiogetmask(ptr->parent); gpiomask &= ~(ENVY24_GPIO_AK4524_CDTI | ENVY24_GPIO_AK4524_CCLK | ENVY24_GPIO_AK4524_CS0 | ENVY24_GPIO_AK4524_CS1); envy24_gpiosetmask(ptr->parent, gpiomask); gpiodir = envy24_gpiogetdir(ptr->parent); gpiodir |= ENVY24_GPIO_AK4524_CDTI | ENVY24_GPIO_AK4524_CCLK | ENVY24_GPIO_AK4524_CS0 | ENVY24_GPIO_AK4524_CS1; envy24_gpiosetdir(ptr->parent, gpiodir); */ ptr->cs = ptr->parent->cfg->cs; #if 0 envy24_gpiosetmask(ptr->parent, ENVY24_GPIO_CS8414_STATUS); envy24_gpiosetdir(ptr->parent, ~ENVY24_GPIO_CS8414_STATUS); if (ptr->num == 0) ptr->cs = ENVY24_GPIO_AK4524_CS0; else ptr->cs = ENVY24_GPIO_AK4524_CS1; ptr->cclk = ENVY24_GPIO_AK4524_CCLK; #endif ptr->cclk = ptr->parent->cfg->cclk; ptr->cdti = ptr->parent->cfg->cdti; spicds_settype(ptr->info, ptr->parent->cfg->type); spicds_setcif(ptr->info, ptr->parent->cfg->cif); spicds_setformat(ptr->info, AK452X_FORMAT_I2S | AK452X_FORMAT_256FSN | AK452X_FORMAT_1X); spicds_setdvc(ptr->info, AK452X_DVC_DEMOFF); /* for the time being, init only first codec */ if (ptr->num == 0) spicds_init(ptr->info); /* 6fire rear input init test, set ptr->num to 1 for test */ if (ptr->parent->cfg->subvendor == 0x153b && \ ptr->parent->cfg->subdevice == 0x1138 && ptr->num == 100) { ptr->cs = 0x02; spicds_init(ptr->info); device_printf(ptr->parent->dev, "6fire rear input init\n"); i2c_wr(ptr, envy24_gpio_i2c_ctl, \ PCA9554_I2CDEV, PCA9554_DIR, 0x80); i2c_wr(ptr, envy24_gpio_i2c_ctl, \ PCA9554_I2CDEV, PCA9554_OUT, 0x02); } } 
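/*
 * Sketch, not in the original source: i2c_wr() above treats reg == 0xff
 * as "no register index", so a bare address+data transaction can be
 * composed as below. The wrapper name is hypothetical.
 */
#if 0
static void
envy24_i2c_wr_raw(struct envy24_delta_ak4524_codec *ptr, u_int32_t dev,
    u_int8_t val)
{
	/* start condition, device address byte, payload byte, stop */
	i2c_wr(ptr, envy24_gpio_i2c_ctl, dev, 0xff, val);
}
#endif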
static void envy24_delta_ak4524_reinit(void *codec) { struct envy24_delta_ak4524_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24_delta_ak4524_reinit()\n"); #endif spicds_reinit(ptr->info); } static void envy24_delta_ak4524_setvolume(void *codec, int dir, unsigned int left, unsigned int right) { struct envy24_delta_ak4524_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24_delta_ak4524_set()\n"); #endif spicds_set(ptr->info, dir, left, right); } /* There is no need for AK452[48] codec to set sample rate static void envy24_delta_ak4524_setrate(struct envy24_delta_ak4524_codec *codec, int which, int rate) { } */ /* -------------------------------------------------------------------- */ /* hardware access routeines */ static struct { u_int32_t speed; u_int32_t code; } envy24_speedtab[] = { {48000, ENVY24_MT_RATE_48000}, {24000, ENVY24_MT_RATE_24000}, {12000, ENVY24_MT_RATE_12000}, {9600, ENVY24_MT_RATE_9600}, {32000, ENVY24_MT_RATE_32000}, {16000, ENVY24_MT_RATE_16000}, {8000, ENVY24_MT_RATE_8000}, {96000, ENVY24_MT_RATE_96000}, {64000, ENVY24_MT_RATE_64000}, {44100, ENVY24_MT_RATE_44100}, {22050, ENVY24_MT_RATE_22050}, {11025, ENVY24_MT_RATE_11025}, {88200, ENVY24_MT_RATE_88200}, {0, 0x10} }; static u_int32_t envy24_setspeed(struct sc_info *sc, u_int32_t speed) { u_int32_t code; int i = 0; #if(0) device_printf(sc->dev, "envy24_setspeed(sc, %d)\n", speed); #endif if (speed == 0) { code = ENVY24_MT_RATE_SPDIF; /* external master clock */ envy24_slavecd(sc); } else { for (i = 0; envy24_speedtab[i].speed != 0; i++) { if (envy24_speedtab[i].speed == speed) break; } code = envy24_speedtab[i].code; } #if(0) device_printf(sc->dev, "envy24_setspeed(): speed %d/code 0x%04x\n", envy24_speedtab[i].speed, code); #endif if (code < 0x10) { envy24_wrmt(sc, ENVY24_MT_RATE, code, 1); code = envy24_rdmt(sc, ENVY24_MT_RATE, 1); code &= ENVY24_MT_RATE_MASK; for (i = 0; envy24_speedtab[i].code < 0x10; i++) { if (envy24_speedtab[i].code == code) break; } speed = envy24_speedtab[i].speed; } else speed = 0; #if(0) device_printf(sc->dev, "envy24_setspeed(): return %d\n", speed); #endif return speed; } static void envy24_setvolume(struct sc_info *sc, unsigned ch) { #if(0) device_printf(sc->dev, "envy24_setvolume(sc, %d)\n", ch); #endif if (sc->cfg->subvendor==0x153b && sc->cfg->subdevice==0x1138 ) { envy24_wrmt(sc, ENVY24_MT_VOLIDX, 16, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, 0x7f7f, 2); envy24_wrmt(sc, ENVY24_MT_VOLIDX, 17, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, 0x7f7f, 2); } envy24_wrmt(sc, ENVY24_MT_VOLIDX, ch * 2, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, 0x7f00 | sc->left[ch], 2); envy24_wrmt(sc, ENVY24_MT_VOLIDX, ch * 2 + 1, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, (sc->right[ch] << 8) | 0x7f, 2); } static void envy24_mutevolume(struct sc_info *sc, unsigned ch) { u_int32_t vol; #if(0) device_printf(sc->dev, "envy24_mutevolume(sc, %d)\n", ch); #endif vol = ENVY24_VOL_MUTE << 8 | ENVY24_VOL_MUTE; envy24_wrmt(sc, ENVY24_MT_VOLIDX, ch * 2, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, vol, 2); envy24_wrmt(sc, ENVY24_MT_VOLIDX, ch * 2 + 1, 1); envy24_wrmt(sc, ENVY24_MT_VOLUME, vol, 2); } static u_int32_t envy24_gethwptr(struct sc_info *sc, int dir) { int unit, regno; u_int32_t ptr, rtn; #if(0) device_printf(sc->dev, "envy24_gethwptr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) { rtn = sc->psize / 4; unit = ENVY24_PLAY_BUFUNIT / 4; regno = ENVY24_MT_PCNT; } else { rtn = sc->rsize / 4; unit = ENVY24_REC_BUFUNIT / 4; regno = ENVY24_MT_RCNT; } ptr = 
envy24_rdmt(sc, regno, 2); rtn -= (ptr + 1); rtn /= unit; #if(0) device_printf(sc->dev, "envy24_gethwptr(): return %d\n", rtn); #endif return rtn; } static void envy24_updintr(struct sc_info *sc, int dir) { int regptr, regintr; u_int32_t mask, intr; u_int32_t ptr, size, cnt; u_int16_t blk; #if(0) device_printf(sc->dev, "envy24_updintr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) { blk = sc->blk[0]; size = sc->psize / 4; regptr = ENVY24_MT_PCNT; regintr = ENVY24_MT_PTERM; mask = ~ENVY24_MT_INT_PMASK; } else { blk = sc->blk[1]; size = sc->rsize / 4; regptr = ENVY24_MT_RCNT; regintr = ENVY24_MT_RTERM; mask = ~ENVY24_MT_INT_RMASK; } ptr = size - envy24_rdmt(sc, regptr, 2) - 1; /* cnt = blk - ptr % blk - 1; if (cnt == 0) cnt = blk - 1; */ cnt = blk - 1; #if(0) device_printf(sc->dev, "envy24_updintr():ptr = %d, blk = %d, cnt = %d\n", ptr, blk, cnt); #endif envy24_wrmt(sc, regintr, cnt, 2); intr = envy24_rdmt(sc, ENVY24_MT_INT, 1); #if(0) device_printf(sc->dev, "envy24_updintr():intr = 0x%02x, mask = 0x%02x\n", intr, mask); #endif envy24_wrmt(sc, ENVY24_MT_INT, intr & mask, 1); #if(0) device_printf(sc->dev, "envy24_updintr():INT-->0x%02x\n", envy24_rdmt(sc, ENVY24_MT_INT, 1)); #endif return; } #if 0 static void envy24_maskintr(struct sc_info *sc, int dir) { u_int32_t mask, intr; #if(0) device_printf(sc->dev, "envy24_maskintr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) mask = ENVY24_MT_INT_PMASK; else mask = ENVY24_MT_INT_RMASK; intr = envy24_rdmt(sc, ENVY24_MT_INT, 1); envy24_wrmt(sc, ENVY24_MT_INT, intr | mask, 1); return; } #endif static int envy24_checkintr(struct sc_info *sc, int dir) { u_int32_t mask, stat, intr, rtn; #if(0) device_printf(sc->dev, "envy24_checkintr(sc, %d)\n", dir); #endif intr = envy24_rdmt(sc, ENVY24_MT_INT, 1); if (dir == PCMDIR_PLAY) { if ((rtn = intr & ENVY24_MT_INT_PSTAT) != 0) { mask = ~ENVY24_MT_INT_RSTAT; stat = ENVY24_MT_INT_PSTAT | ENVY24_MT_INT_PMASK; envy24_wrmt(sc, ENVY24_MT_INT, (intr & mask) | stat, 1); } } else { if ((rtn = intr & ENVY24_MT_INT_RSTAT) != 0) { mask = ~ENVY24_MT_INT_PSTAT; stat = ENVY24_MT_INT_RSTAT | ENVY24_MT_INT_RMASK; envy24_wrmt(sc, ENVY24_MT_INT, (intr & mask) | stat, 1); } } return rtn; } static void envy24_start(struct sc_info *sc, int dir) { u_int32_t stat, sw; #if(0) device_printf(sc->dev, "envy24_start(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) sw = ENVY24_MT_PCTL_PSTART; else sw = ENVY24_MT_PCTL_RSTART; stat = envy24_rdmt(sc, ENVY24_MT_PCTL, 1); envy24_wrmt(sc, ENVY24_MT_PCTL, stat | sw, 1); #if(0) DELAY(100); device_printf(sc->dev, "PADDR:0x%08x\n", envy24_rdmt(sc, ENVY24_MT_PADDR, 4)); device_printf(sc->dev, "PCNT:%ld\n", envy24_rdmt(sc, ENVY24_MT_PCNT, 2)); #endif return; } static void envy24_stop(struct sc_info *sc, int dir) { u_int32_t stat, sw; #if(0) device_printf(sc->dev, "envy24_stop(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) sw = ~ENVY24_MT_PCTL_PSTART; else sw = ~ENVY24_MT_PCTL_RSTART; stat = envy24_rdmt(sc, ENVY24_MT_PCTL, 1); envy24_wrmt(sc, ENVY24_MT_PCTL, stat & sw, 1); return; } static int envy24_route(struct sc_info *sc, int dac, int class, int adc, int rev) { u_int32_t reg, mask; u_int32_t left, right; #if(0) device_printf(sc->dev, "envy24_route(sc, %d, %d, %d, %d)\n", dac, class, adc, rev); #endif /* parameter pattern check */ if (dac < 0 || ENVY24_ROUTE_DAC_SPDIF < dac) return -1; if (class == ENVY24_ROUTE_CLASS_MIX && (dac != ENVY24_ROUTE_DAC_1 && dac != ENVY24_ROUTE_DAC_SPDIF)) return -1; if (rev) { left = ENVY24_ROUTE_RIGHT; right = ENVY24_ROUTE_LEFT; } else { left = ENVY24_ROUTE_LEFT; 
right = ENVY24_ROUTE_RIGHT; } if (dac == ENVY24_ROUTE_DAC_SPDIF) { reg = class | class << 2 | ((adc << 1 | left) | left << 3) << 8 | ((adc << 1 | right) | right << 3) << 12; #if(0) device_printf(sc->dev, "envy24_route(): MT_SPDOUT-->0x%04x\n", reg); #endif envy24_wrmt(sc, ENVY24_MT_SPDOUT, reg, 2); } else { mask = ~(0x0303 << dac * 2); reg = envy24_rdmt(sc, ENVY24_MT_PSDOUT, 2); reg = (reg & mask) | ((class | class << 8) << dac * 2); #if(0) device_printf(sc->dev, "envy24_route(): MT_PSDOUT-->0x%04x\n", reg); #endif envy24_wrmt(sc, ENVY24_MT_PSDOUT, reg, 2); mask = ~(0xff << dac * 8); reg = envy24_rdmt(sc, ENVY24_MT_RECORD, 4); reg = (reg & mask) | (((adc << 1 | left) | left << 3) | ((adc << 1 | right) | right << 3) << 4) << dac * 8; #if(0) device_printf(sc->dev, "envy24_route(): MT_RECORD-->0x%08x\n", reg); #endif envy24_wrmt(sc, ENVY24_MT_RECORD, reg, 4); /* 6fire rear input init test */ envy24_wrmt(sc, ENVY24_MT_RECORD, 0x00, 4); } return 0; } /* -------------------------------------------------------------------- */ /* buffer copy routines */ static void envy24_p32sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int32_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getready(ch->buffer) / 8; dmabuf = ch->parent->pbuf; data = (u_int32_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer) / 4; dst = src / 2 + ch->offset; ssize = ch->size / 4; dsize = ch->size / 8; slot = ch->num * 2; for (i = 0; i < length; i++) { dmabuf[dst * ENVY24_PLAY_CHNUM + slot].buffer = data[src]; dmabuf[dst * ENVY24_PLAY_CHNUM + slot + 1].buffer = data[src + 1]; dst++; dst %= dsize; src += 2; src %= ssize; } return; } static void envy24_p16sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int16_t *data; int src, dst, ssize, dsize, slot; int i; #if(0) device_printf(ch->parent->dev, "envy24_p16sl()\n"); #endif length = sndbuf_getready(ch->buffer) / 4; dmabuf = ch->parent->pbuf; data = (u_int16_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer) / 2; dst = src / 2 + ch->offset; ssize = ch->size / 2; dsize = ch->size / 4; slot = ch->num * 2; #if(0) device_printf(ch->parent->dev, "envy24_p16sl():%lu-->%lu(%lu)\n", src, dst, length); #endif for (i = 0; i < length; i++) { dmabuf[dst * ENVY24_PLAY_CHNUM + slot].buffer = (u_int32_t)data[src] << 16; dmabuf[dst * ENVY24_PLAY_CHNUM + slot + 1].buffer = (u_int32_t)data[src + 1] << 16; #if(0) if (i < 16) { printf("%08x", dmabuf[dst * ENVY24_PLAY_CHNUM + slot]); printf("%08x", dmabuf[dst * ENVY24_PLAY_CHNUM + slot + 1]); } #endif dst++; dst %= dsize; src += 2; src %= ssize; } #if(0) printf("\n"); #endif return; } static void envy24_p8u(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int8_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getready(ch->buffer) / 2; dmabuf = ch->parent->pbuf; data = (u_int8_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer); dst = src / 2 + ch->offset; ssize = ch->size; dsize = ch->size / 4; slot = ch->num * 2; for (i = 0; i < length; i++) { dmabuf[dst * ENVY24_PLAY_CHNUM + slot].buffer = ((u_int32_t)data[src] ^ 0x80) << 24; dmabuf[dst * ENVY24_PLAY_CHNUM + slot + 1].buffer = ((u_int32_t)data[src + 1] ^ 0x80) << 24; dst++; dst %= dsize; src += 2; src %= ssize; } return; } static void envy24_r32sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int32_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getfree(ch->buffer) / 8; dmabuf = ch->parent->rbuf; data = (u_int32_t *)ch->data; dst = sndbuf_getfreeptr(ch->buffer) / 4; src = dst / 2 + ch->offset; dsize = 
ch->size / 4; ssize = ch->size / 8; slot = (ch->num - ENVY24_CHAN_REC_ADC1) * 2; for (i = 0; i < length; i++) { data[dst] = dmabuf[src * ENVY24_REC_CHNUM + slot].buffer; data[dst + 1] = dmabuf[src * ENVY24_REC_CHNUM + slot + 1].buffer; dst += 2; dst %= dsize; src++; src %= ssize; } return; } static void envy24_r16sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int16_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getfree(ch->buffer) / 4; dmabuf = ch->parent->rbuf; data = (u_int16_t *)ch->data; dst = sndbuf_getfreeptr(ch->buffer) / 2; src = dst / 2 + ch->offset; dsize = ch->size / 2; ssize = ch->size / 8; slot = (ch->num - ENVY24_CHAN_REC_ADC1) * 2; for (i = 0; i < length; i++) { data[dst] = dmabuf[src * ENVY24_REC_CHNUM + slot].buffer; data[dst + 1] = dmabuf[src * ENVY24_REC_CHNUM + slot + 1].buffer; dst += 2; dst %= dsize; src++; src %= ssize; } return; } /* -------------------------------------------------------------------- */ /* channel interface */ static void * envy24chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = (struct sc_info *)devinfo; struct sc_chinfo *ch; unsigned num; #if(0) device_printf(sc->dev, "envy24chan_init(obj, devinfo, b, c, %d)\n", dir); #endif snd_mtxlock(sc->lock); if ((sc->chnum > ENVY24_CHAN_PLAY_SPDIF && dir != PCMDIR_REC) || (sc->chnum < ENVY24_CHAN_REC_ADC1 && dir != PCMDIR_PLAY)) { snd_mtxunlock(sc->lock); return NULL; } num = sc->chnum; ch = &sc->chan[num]; ch->size = 8 * ENVY24_SAMPLE_NUM; ch->data = malloc(ch->size, M_ENVY24, M_NOWAIT); if (ch->data == NULL) { ch->size = 0; ch = NULL; } else { ch->buffer = b; ch->channel = c; ch->parent = sc; ch->dir = dir; /* set channel map */ ch->num = envy24_chanmap[num]; snd_mtxunlock(sc->lock); sndbuf_setup(ch->buffer, ch->data, ch->size); snd_mtxlock(sc->lock); /* these 2 values are dummy */ ch->unit = 4; ch->blk = 10240; } snd_mtxunlock(sc->lock); return ch; } static int envy24chan_free(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; #if(0) device_printf(sc->dev, "envy24chan_free()\n"); #endif snd_mtxlock(sc->lock); if (ch->data != NULL) { free(ch->data, M_ENVY24); ch->data = NULL; } snd_mtxunlock(sc->lock); return 0; } static int envy24chan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; struct envy24_emldma *emltab; /* unsigned int bcnt, bsize; */ int i; #if(0) device_printf(sc->dev, "envy24chan_setformat(obj, data, 0x%08x)\n", format); #endif snd_mtxlock(sc->lock); /* check and get format related information */ if (ch->dir == PCMDIR_PLAY) emltab = envy24_pemltab; else emltab = envy24_remltab; if (emltab == NULL) { snd_mtxunlock(sc->lock); return -1; } for (i = 0; emltab[i].format != 0; i++) if (emltab[i].format == format) break; if (emltab[i].format == 0) { snd_mtxunlock(sc->lock); return -1; } /* set format information */ ch->format = format; ch->emldma = emltab[i].emldma; if (ch->unit > emltab[i].unit) ch->blk *= ch->unit / emltab[i].unit; else ch->blk /= emltab[i].unit / ch->unit; ch->unit = emltab[i].unit; /* set channel buffer information */ ch->size = ch->unit * ENVY24_SAMPLE_NUM; #if 0 if (ch->dir == PCMDIR_PLAY) bsize = ch->blk * 4 / ENVY24_PLAY_BUFUNIT; else bsize = ch->blk * 4 / ENVY24_REC_BUFUNIT; bsize *= ch->unit; bcnt = ch->size / bsize; sndbuf_resize(ch->buffer, bcnt, bsize); #endif snd_mtxunlock(sc->lock); #if(0) device_printf(sc->dev, "envy24chan_setformat(): return 0x%08x\n", 0); #endif return 
0; } /* IMPLEMENT NOTICE: In this driver, the setspeed function only records the requested speed value; the real hardware speed is programmed when the start trigger fires (see envy24chan_trigger()). So at the time this function is called, any rate the ENVY24 supports can be set. But if, at start trigger time, some other channel is already running at a different speed, the trigger function will fail. */ static u_int32_t envy24chan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data; u_int32_t val, prev; int i; #if(0) device_printf(ch->parent->dev, "envy24chan_setspeed(obj, data, %d)\n", speed); #endif prev = 0x7fffffff; for (i = 0; (val = envy24_speed[i]) != 0; i++) { if (abs(val - speed) < abs(prev - speed)) prev = val; else break; } ch->speed = prev; #if(0) device_printf(ch->parent->dev, "envy24chan_setspeed(): return %d\n", ch->speed); #endif return ch->speed; } static u_int32_t envy24chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; /* struct sc_info *sc = ch->parent; */ u_int32_t size, prev; unsigned int bcnt, bsize; #if(0) device_printf(sc->dev, "envy24chan_setblocksize(obj, data, %d)\n", blocksize); #endif prev = 0x7fffffff; /* snd_mtxlock(sc->lock); */ for (size = ch->size / 2; size > 0; size /= 2) { if (abs(size - blocksize) < abs(prev - blocksize)) prev = size; else break; } ch->blk = prev / ch->unit; if (ch->dir == PCMDIR_PLAY) ch->blk *= ENVY24_PLAY_BUFUNIT / 4; else ch->blk *= ENVY24_REC_BUFUNIT / 4; /* set channel buffer information */ /* ch->size = ch->unit * ENVY24_SAMPLE_NUM; */ if (ch->dir == PCMDIR_PLAY) bsize = ch->blk * 4 / ENVY24_PLAY_BUFUNIT; else bsize = ch->blk * 4 / ENVY24_REC_BUFUNIT; bsize *= ch->unit; bcnt = ch->size / bsize; sndbuf_resize(ch->buffer, bcnt, bsize); /* snd_mtxunlock(sc->lock); */ #if(0) device_printf(sc->dev, "envy24chan_setblocksize(): return %d\n", prev); #endif return prev; } /* semantic note: must start at beginning of buffer */ static int envy24chan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t ptr; int slot; int error = 0; #if 0 int i; device_printf(sc->dev, "envy24chan_trigger(obj, data, %d)\n", go); #endif snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) slot = 0; else slot = 1; switch (go) { case PCMTRIG_START: #if(0) device_printf(sc->dev, "envy24chan_trigger(): start\n"); #endif /* check or set channel speed */ if (sc->run[0] == 0 && sc->run[1] == 0) { sc->speed = envy24_setspeed(sc, ch->speed); sc->caps[0].minspeed = sc->caps[0].maxspeed = sc->speed; sc->caps[1].minspeed = sc->caps[1].maxspeed = sc->speed; } else if (ch->speed != 0 && ch->speed != sc->speed) { error = -1; goto fail; } if (ch->speed == 0) ch->channel->speed = sc->speed; /* start or enable channel */ sc->run[slot]++; if (sc->run[slot] == 1) { /* first channel */ ch->offset = 0; sc->blk[slot] = ch->blk; } else { ptr = envy24_gethwptr(sc, ch->dir); ch->offset = ((ptr / ch->blk + 1) * ch->blk % (ch->size / 4)) * 4 / ch->unit; if (ch->blk < sc->blk[slot]) sc->blk[slot] = ch->blk; } if (ch->dir == PCMDIR_PLAY) { ch->emldma(ch); envy24_setvolume(sc, ch->num); } envy24_updintr(sc, ch->dir); if (sc->run[slot] == 1) envy24_start(sc, ch->dir); ch->run = 1; break; case PCMTRIG_EMLDMAWR: #if(0) device_printf(sc->dev, "envy24chan_trigger(): emldmawr\n"); #endif if (ch->run != 1) { error = -1; goto fail; } ch->emldma(ch); break; case PCMTRIG_EMLDMARD: #if(0) device_printf(sc->dev, "envy24chan_trigger(): emldmard\n"); #endif if (ch->run != 1) { error
= -1; goto fail; } ch->emldma(ch); break; case PCMTRIG_ABORT: if (ch->run) { #if(0) device_printf(sc->dev, "envy24chan_trigger(): abort\n"); #endif ch->run = 0; sc->run[slot]--; if (ch->dir == PCMDIR_PLAY) envy24_mutevolume(sc, ch->num); if (sc->run[slot] == 0) { envy24_stop(sc, ch->dir); sc->intr[slot] = 0; } #if 0 else if (ch->blk == sc->blk[slot]) { sc->blk[slot] = ENVY24_SAMPLE_NUM / 2; for (i = 0; i < ENVY24_CHAN_NUM; i++) { if (sc->chan[i].dir == ch->dir && sc->chan[i].run == 1 && sc->chan[i].blk < sc->blk[slot]) sc->blk[slot] = sc->chan[i].blk; } if (ch->blk != sc->blk[slot]) envy24_updintr(sc, ch->dir); } #endif } break; } fail: snd_mtxunlock(sc->lock); return (error); } static u_int32_t envy24chan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t ptr, rtn; #if(0) device_printf(sc->dev, "envy24chan_getptr()\n"); #endif snd_mtxlock(sc->lock); ptr = envy24_gethwptr(sc, ch->dir); rtn = ptr * ch->unit; snd_mtxunlock(sc->lock); #if(0) device_printf(sc->dev, "envy24chan_getptr(): return %d\n", rtn); #endif return rtn; } static struct pcmchan_caps * envy24chan_getcaps(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; struct pcmchan_caps *rtn; #if(0) device_printf(sc->dev, "envy24chan_getcaps()\n"); #endif snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { if (sc->run[0] == 0) rtn = &envy24_playcaps; else rtn = &sc->caps[0]; } else { if (sc->run[1] == 0) rtn = &envy24_reccaps; else rtn = &sc->caps[1]; } snd_mtxunlock(sc->lock); return rtn; } static kobj_method_t envy24chan_methods[] = { KOBJMETHOD(channel_init, envy24chan_init), KOBJMETHOD(channel_free, envy24chan_free), KOBJMETHOD(channel_setformat, envy24chan_setformat), KOBJMETHOD(channel_setspeed, envy24chan_setspeed), KOBJMETHOD(channel_setblocksize, envy24chan_setblocksize), KOBJMETHOD(channel_trigger, envy24chan_trigger), KOBJMETHOD(channel_getptr, envy24chan_getptr), KOBJMETHOD(channel_getcaps, envy24chan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(envy24chan); /* -------------------------------------------------------------------- */ /* mixer interface */ static int envy24mixer_init(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); #if(0) device_printf(sc->dev, "envy24mixer_init()\n"); #endif if (sc == NULL) return -1; /* set volume control rate */ snd_mtxlock(sc->lock); envy24_wrmt(sc, ENVY24_MT_VOLRATE, 0x30, 1); /* 0x30 is default value */ mix_setdevs(m, ENVY24_MIX_MASK); mix_setrecdevs(m, ENVY24_MIX_REC_MASK); snd_mtxunlock(sc->lock); return 0; } static int envy24mixer_reinit(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); if (sc == NULL) return -1; #if(0) device_printf(sc->dev, "envy24mixer_reinit()\n"); #endif return 0; } static int envy24mixer_uninit(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); if (sc == NULL) return -1; #if(0) device_printf(sc->dev, "envy24mixer_uninit()\n"); #endif return 0; } static int envy24mixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sc_info *sc = mix_getdevinfo(m); int ch = envy24_mixmap[dev]; int hwch; int i; if (sc == NULL) return -1; if (dev == 0 && sc->cfg->codec->setvolume == NULL) return -1; if (dev != 0 && ch == -1) return -1; hwch = envy24_chanmap[ch]; #if(0) device_printf(sc->dev, "envy24mixer_set(m, %d, %d, %d)\n", dev, left, right); #endif snd_mtxlock(sc->lock); if (dev == 0) { for (i = 0; i < sc->dacn; i++) { sc->cfg->codec->setvolume(sc->dac[i], PCMDIR_PLAY, left, right); } } else { /* set volume value for 
hardware */ if ((sc->left[hwch] = 100 - left) > ENVY24_VOL_MIN) sc->left[hwch] = ENVY24_VOL_MUTE; if ((sc->right[hwch] = 100 - right) > ENVY24_VOL_MIN) sc->right[hwch] = ENVY24_VOL_MUTE; /* set volume for record channel and running play channel */ if (hwch > ENVY24_CHAN_PLAY_SPDIF || sc->chan[ch].run) envy24_setvolume(sc, hwch); } snd_mtxunlock(sc->lock); return right << 8 | left; } static u_int32_t envy24mixer_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sc_info *sc = mix_getdevinfo(m); int ch = envy24_mixmap[src]; #if(0) device_printf(sc->dev, "envy24mixer_setrecsrc(m, %d)\n", src); #endif if (ch > ENVY24_CHAN_PLAY_SPDIF) sc->src = ch; return src; } static kobj_method_t envy24mixer_methods[] = { KOBJMETHOD(mixer_init, envy24mixer_init), KOBJMETHOD(mixer_reinit, envy24mixer_reinit), KOBJMETHOD(mixer_uninit, envy24mixer_uninit), KOBJMETHOD(mixer_set, envy24mixer_set), KOBJMETHOD(mixer_setrecsrc, envy24mixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(envy24mixer); /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void envy24_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; struct sc_chinfo *ch; u_int32_t ptr, dsize, feed; int i; #if(0) device_printf(sc->dev, "envy24_intr()\n"); #endif snd_mtxlock(sc->lock); if (envy24_checkintr(sc, PCMDIR_PLAY)) { #if(0) device_printf(sc->dev, "envy24_intr(): play\n"); #endif dsize = sc->psize / 4; ptr = dsize - envy24_rdmt(sc, ENVY24_MT_PCNT, 2) - 1; #if(0) device_printf(sc->dev, "envy24_intr(): ptr = %d-->", ptr); #endif ptr -= ptr % sc->blk[0]; feed = (ptr + dsize - sc->intr[0]) % dsize; #if(0) printf("%d intr = %d feed = %d\n", ptr, sc->intr[0], feed); #endif for (i = ENVY24_CHAN_PLAY_DAC1; i <= ENVY24_CHAN_PLAY_SPDIF; i++) { ch = &sc->chan[i]; #if(0) if (ch->run) device_printf(sc->dev, "envy24_intr(): chan[%d].blk = %d\n", i, ch->blk); #endif if (ch->run && ch->blk <= feed) { snd_mtxunlock(sc->lock); chn_intr(ch->channel); snd_mtxlock(sc->lock); } } sc->intr[0] = ptr; envy24_updintr(sc, PCMDIR_PLAY); } if (envy24_checkintr(sc, PCMDIR_REC)) { #if(0) device_printf(sc->dev, "envy24_intr(): rec\n"); #endif dsize = sc->rsize / 4; ptr = dsize - envy24_rdmt(sc, ENVY24_MT_RCNT, 2) - 1; ptr -= ptr % sc->blk[1]; feed = (ptr + dsize - sc->intr[1]) % dsize; for (i = ENVY24_CHAN_REC_ADC1; i <= ENVY24_CHAN_REC_SPDIF; i++) { ch = &sc->chan[i]; if (ch->run && ch->blk <= feed) { snd_mtxunlock(sc->lock); chn_intr(ch->channel); snd_mtxlock(sc->lock); } } sc->intr[1] = ptr; envy24_updintr(sc, PCMDIR_REC); } snd_mtxunlock(sc->lock); return; } /* * Probe and attach the card */ static int envy24_pci_probe(device_t dev) { u_int16_t sv, sd; int i; #if(0) printf("envy24_pci_probe()\n"); #endif if (pci_get_device(dev) == PCID_ENVY24 && pci_get_vendor(dev) == PCIV_ENVY24) { sv = pci_get_subvendor(dev); sd = pci_get_subdevice(dev); for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) { if (cfg_table[i].subvendor == sv && cfg_table[i].subdevice == sd) { break; } } device_set_desc(dev, cfg_table[i].name); #if(0) printf("envy24_pci_probe(): return 0\n"); #endif return 0; } else { #if(0) printf("envy24_pci_probe(): return ENXIO\n"); #endif return ENXIO; } } static void envy24_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sc_info *sc = (struct sc_info *)arg; sc->paddr = segs->ds_addr; #if(0) device_printf(sc->dev, "envy24_dmapsetmap()\n"); if (bootverbose) { printf("envy24(play): setmap %lx, %lx; ", (unsigned long)segs->ds_addr, (unsigned 
long)segs->ds_len); printf("%p -> %lx\n", sc->pmap, sc->paddr); } #endif } static void envy24_dmarsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sc_info *sc = (struct sc_info *)arg; sc->raddr = segs->ds_addr; #if(0) device_printf(sc->dev, "envy24_dmarsetmap()\n"); if (bootverbose) { printf("envy24(record): setmap %lx, %lx; ", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len); printf("%p -> %lx\n", sc->rmap, sc->raddr); } #endif } static void envy24_dmafree(struct sc_info *sc) { #if(0) device_printf(sc->dev, "envy24_dmafree():"); printf(" sc->raddr(0x%08x)", (u_int32_t)sc->raddr); printf(" sc->paddr(0x%08x)", (u_int32_t)sc->paddr); if (sc->rbuf) printf(" sc->rbuf(0x%08x)", (u_int32_t)sc->rbuf); else printf(" sc->rbuf(null)"); if (sc->pbuf) printf(" sc->pbuf(0x%08x)\n", (u_int32_t)sc->pbuf); else printf(" sc->pbuf(null)\n"); #endif #if(0) if (sc->raddr) bus_dmamap_unload(sc->dmat, sc->rmap); if (sc->paddr) bus_dmamap_unload(sc->dmat, sc->pmap); if (sc->rbuf) bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap); if (sc->pbuf) bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap); #else bus_dmamap_unload(sc->dmat, sc->rmap); bus_dmamap_unload(sc->dmat, sc->pmap); bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap); bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap); #endif sc->raddr = sc->paddr = 0; sc->pbuf = NULL; sc->rbuf = NULL; return; } static int envy24_dmainit(struct sc_info *sc) { #if(0) device_printf(sc->dev, "envy24_dmainit()\n"); #endif /* init values */ sc->psize = ENVY24_PLAY_BUFUNIT * ENVY24_SAMPLE_NUM; sc->rsize = ENVY24_REC_BUFUNIT * ENVY24_SAMPLE_NUM; sc->pbuf = NULL; sc->rbuf = NULL; sc->paddr = sc->raddr = 0; sc->blk[0] = sc->blk[1] = 0; /* allocate DMA buffer */ #if(0) device_printf(sc->dev, "envy24_dmainit(): bus_dmamem_alloc(): sc->pbuf\n"); #endif if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_NOWAIT, &sc->pmap)) goto bad; #if(0) device_printf(sc->dev, "envy24_dmainit(): bus_dmamem_alloc(): sc->rbuf\n"); #endif if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_NOWAIT, &sc->rmap)) goto bad; #if(0) device_printf(sc->dev, "envy24_dmainit(): bus_dmamem_load(): sc->pmap\n"); #endif if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->psize, envy24_dmapsetmap, sc, 0)) goto bad; #if(0) device_printf(sc->dev, "envy24_dmainit(): bus_dmamem_load(): sc->rmap\n"); #endif if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->rsize, envy24_dmarsetmap, sc, 0)) goto bad; bzero(sc->pbuf, sc->psize); bzero(sc->rbuf, sc->rsize); /* set values to register */ #if(0) device_printf(sc->dev, "paddr(0x%08x)\n", sc->paddr); #endif envy24_wrmt(sc, ENVY24_MT_PADDR, sc->paddr, 4); #if(0) device_printf(sc->dev, "PADDR-->(0x%08x)\n", envy24_rdmt(sc, ENVY24_MT_PADDR, 4)); device_printf(sc->dev, "psize(%ld)\n", sc->psize / 4 - 1); #endif envy24_wrmt(sc, ENVY24_MT_PCNT, sc->psize / 4 - 1, 2); #if(0) device_printf(sc->dev, "PCNT-->(%ld)\n", envy24_rdmt(sc, ENVY24_MT_PCNT, 2)); #endif envy24_wrmt(sc, ENVY24_MT_RADDR, sc->raddr, 4); envy24_wrmt(sc, ENVY24_MT_RCNT, sc->rsize / 4 - 1, 2); return 0; bad: envy24_dmafree(sc); return ENOSPC; } static void envy24_putcfg(struct sc_info *sc) { device_printf(sc->dev, "system configuration\n"); printf(" SubVendorID: 0x%04x, SubDeviceID: 0x%04x\n", sc->cfg->subvendor, sc->cfg->subdevice); printf(" XIN2 Clock Source: "); switch (sc->cfg->scfg & PCIM_SCFG_XIN2) { case 0x00: printf("22.5792MHz(44.1kHz*512)\n"); break; case 0x40: printf("16.9344MHz(44.1kHz*384)\n"); break; case 0x80: printf("from external clock synthesizer chip\n"); break; 
default: printf("illegal system setting\n"); } printf(" MPU-401 UART(s) #: "); if (sc->cfg->scfg & PCIM_SCFG_MPU) printf("2\n"); else printf("1\n"); printf(" AC'97 codec: "); if (sc->cfg->scfg & PCIM_SCFG_AC97) printf("not exist\n"); else printf("exist\n"); printf(" ADC #: "); printf("%d\n", sc->adcn); printf(" DAC #: "); printf("%d\n", sc->dacn); printf(" Multi-track converter type: "); if ((sc->cfg->acl & PCIM_ACL_MTC) == 0) { printf("AC'97(SDATA_OUT:"); if (sc->cfg->acl & PCIM_ACL_OMODE) printf("packed"); else printf("split"); printf("|SDATA_IN:"); if (sc->cfg->acl & PCIM_ACL_IMODE) printf("packed"); else printf("split"); printf(")\n"); } else { printf("I2S("); if (sc->cfg->i2s & PCIM_I2S_VOL) printf("with volume, "); if (sc->cfg->i2s & PCIM_I2S_96KHZ) printf("96KHz support, "); switch (sc->cfg->i2s & PCIM_I2S_RES) { case PCIM_I2S_16BIT: printf("16bit resolution, "); break; case PCIM_I2S_18BIT: printf("18bit resolution, "); break; case PCIM_I2S_20BIT: printf("20bit resolution, "); break; case PCIM_I2S_24BIT: printf("24bit resolution, "); break; } printf("ID#0x%x)\n", sc->cfg->i2s & PCIM_I2S_ID); } printf(" S/PDIF(IN/OUT): "); if (sc->cfg->spdif & PCIM_SPDIF_IN) printf("1/"); else printf("0/"); if (sc->cfg->spdif & PCIM_SPDIF_OUT) printf("1 "); else printf("0 "); if (sc->cfg->spdif & (PCIM_SPDIF_IN | PCIM_SPDIF_OUT)) printf("ID# 0x%02x\n", (sc->cfg->spdif & PCIM_SPDIF_ID) >> 2); printf(" GPIO(mask/dir/state): 0x%02x/0x%02x/0x%02x\n", sc->cfg->gpiomask, sc->cfg->gpiodir, sc->cfg->gpiostate); } static int envy24_init(struct sc_info *sc) { u_int32_t data; #if(0) int rtn; #endif int i; u_int32_t sv, sd; #if(0) device_printf(sc->dev, "envy24_init()\n"); #endif /* reset chip */ envy24_wrcs(sc, ENVY24_CCS_CTL, ENVY24_CCS_CTL_RESET | ENVY24_CCS_CTL_NATIVE, 1); DELAY(200); envy24_wrcs(sc, ENVY24_CCS_CTL, ENVY24_CCS_CTL_NATIVE, 1); DELAY(200); /* legacy hardware disable */ data = pci_read_config(sc->dev, PCIR_LAC, 2); data |= PCIM_LAC_DISABLE; pci_write_config(sc->dev, PCIR_LAC, data, 2); /* check system configuration */ sc->cfg = NULL; for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) { /* 1st: search configuration from table */ sv = pci_get_subvendor(sc->dev); sd = pci_get_subdevice(sc->dev); if (sv == cfg_table[i].subvendor && sd == cfg_table[i].subdevice) { #if(0) device_printf(sc->dev, "Set configuration from table\n"); #endif sc->cfg = &cfg_table[i]; break; } } if (sc->cfg == NULL) { /* 2nd: read configuration from table */ sc->cfg = envy24_rom2cfg(sc); } sc->adcn = ((sc->cfg->scfg & PCIM_SCFG_ADC) >> 2) + 1; sc->dacn = (sc->cfg->scfg & PCIM_SCFG_DAC) + 1; if (1 /* bootverbose */) { envy24_putcfg(sc); } /* set system configuration */ pci_write_config(sc->dev, PCIR_SCFG, sc->cfg->scfg, 1); pci_write_config(sc->dev, PCIR_ACL, sc->cfg->acl, 1); pci_write_config(sc->dev, PCIR_I2S, sc->cfg->i2s, 1); pci_write_config(sc->dev, PCIR_SPDIF, sc->cfg->spdif, 1); envy24_gpiosetmask(sc, sc->cfg->gpiomask); envy24_gpiosetdir(sc, sc->cfg->gpiodir); envy24_gpiowr(sc, sc->cfg->gpiostate); for (i = 0; i < sc->adcn; i++) { sc->adc[i] = sc->cfg->codec->create(sc->dev, sc, PCMDIR_REC, i); sc->cfg->codec->init(sc->adc[i]); } for (i = 0; i < sc->dacn; i++) { sc->dac[i] = sc->cfg->codec->create(sc->dev, sc, PCMDIR_PLAY, i); sc->cfg->codec->init(sc->dac[i]); } /* initialize DMA buffer */ #if(0) device_printf(sc->dev, "envy24_init(): initialize DMA buffer\n"); #endif if (envy24_dmainit(sc)) return ENOSPC; /* initialize status */ sc->run[0] = sc->run[1] = 0; sc->intr[0] = sc->intr[1] = 0; 
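/*
 * Illustrative sketch (not driver code, kept disabled): envy24_init() above
 * derives the converter counts from two 2-bit fields of the PCI system
 * configuration byte: bits 1:0 hold the DAC pair count minus one, bits 3:2
 * the ADC pair count minus one.  The literal masks 0x03/0x0c below are
 * assumed from the shifts used above; the driver itself uses the
 * PCIM_SCFG_DAC/PCIM_SCFG_ADC definitions from its header.
 */
#if 0
static void
envy24_scfg_decode_example(u_int8_t scfg)
{
	int adcn = ((scfg & 0x0c) >> 2) + 1;	/* ADC pairs, 1..4 */
	int dacn = (scfg & 0x03) + 1;		/* DAC pairs, 1..4 */

	printf("scfg 0x%02x -> %d ADC pair(s), %d DAC pair(s)\n",
	    scfg, adcn, dacn);
}
#endif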
sc->speed = 0; sc->caps[0].fmtlist = envy24_playfmt; sc->caps[1].fmtlist = envy24_recfmt; /* set channel router */ envy24_route(sc, ENVY24_ROUTE_DAC_1, ENVY24_ROUTE_CLASS_MIX, 0, 0); envy24_route(sc, ENVY24_ROUTE_DAC_SPDIF, ENVY24_ROUTE_CLASS_DMA, 0, 0); /* envy24_route(sc, ENVY24_ROUTE_DAC_SPDIF, ENVY24_ROUTE_CLASS_MIX, 0, 0); */ /* set macro interrupt mask */ data = envy24_rdcs(sc, ENVY24_CCS_IMASK, 1); envy24_wrcs(sc, ENVY24_CCS_IMASK, data & ~ENVY24_CCS_IMASK_PMT, 1); data = envy24_rdcs(sc, ENVY24_CCS_IMASK, 1); #if(0) device_printf(sc->dev, "envy24_init(): CCS_IMASK-->0x%02x\n", data); #endif return 0; } static int envy24_alloc_resource(struct sc_info *sc) { /* allocate I/O port resource */ sc->csid = PCIR_CCS; - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->csid, RF_ACTIVE); sc->ddmaid = PCIR_DDMA; - sc->ddma = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->ddmaid, 0, ~0, 1, RF_ACTIVE); + sc->ddma = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->ddmaid, RF_ACTIVE); sc->dsid = PCIR_DS; - sc->ds = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->dsid, 0, ~0, 1, RF_ACTIVE); + sc->ds = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->dsid, RF_ACTIVE); sc->mtid = PCIR_MT; - sc->mt = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->mtid, 0, ~0, 1, RF_ACTIVE); + sc->mt = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->mtid, RF_ACTIVE); if (!sc->cs || !sc->ddma || !sc->ds || !sc->mt) { device_printf(sc->dev, "unable to map IO port space\n"); return ENXIO; } sc->cst = rman_get_bustag(sc->cs); sc->csh = rman_get_bushandle(sc->cs); sc->ddmat = rman_get_bustag(sc->ddma); sc->ddmah = rman_get_bushandle(sc->ddma); sc->dst = rman_get_bustag(sc->ds); sc->dsh = rman_get_bushandle(sc->ds); sc->mtt = rman_get_bustag(sc->mt); sc->mth = rman_get_bushandle(sc->mt); #if(0) device_printf(sc->dev, "IO port register values\nCCS: 0x%lx\nDDMA: 0x%lx\nDS: 0x%lx\nMT: 0x%lx\n", pci_read_config(sc->dev, PCIR_CCS, 4), pci_read_config(sc->dev, PCIR_DDMA, 4), pci_read_config(sc->dev, PCIR_DS, 4), pci_read_config(sc->dev, PCIR_MT, 4)); #endif /* allocate interrupt resource */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(sc->dev, sc->irq, INTR_MPSAFE, envy24_intr, sc, &sc->ih)) { device_printf(sc->dev, "unable to map interrupt\n"); return ENXIO; } /* allocate DMA resource */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev), /*alignment*/4, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_ENVY24, /*highaddr*/BUS_SPACE_MAXADDR_ENVY24, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_ENVY24, /*nsegments*/1, /*maxsegsz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &sc->dmat) != 0) { device_printf(sc->dev, "unable to create dma tag\n"); return ENXIO; } return 0; } static int envy24_pci_attach(device_t dev) { struct sc_info *sc; char status[SND_STATUSLEN]; int err = 0; int i; #if(0) device_printf(dev, "envy24_pci_attach()\n"); #endif /* get sc_info data area */ if ((sc = malloc(sizeof(*sc), M_ENVY24, M_NOWAIT)) == NULL) { device_printf(dev, "cannot allocate softc\n"); return ENXIO; } bzero(sc, sizeof(*sc)); sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_envy24 softc"); sc->dev = dev; /* initialize PCI interface */ 
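/*
 * The hunk above is the substance of this revision: bus_alloc_resource(9)
 * calls that requested the default range (start 0, end ~0, count 1) are
 * converted to the bus_alloc_resource_any(9) convenience wrapper.  Behavior
 * should be identical; the wrapper is essentially the following inline from
 * sys/sys/bus.h, so the call sites simply become shorter and can no longer
 * get the default-range arguments wrong.
 */
#if 0
static __inline struct resource *
bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags)
{
	return (bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags));
}
#endif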
pci_enable_busmaster(dev); /* allocate resources */ err = envy24_alloc_resource(sc); if (err) { device_printf(dev, "unable to allocate system resources\n"); goto bad; } /* initialize card */ err = envy24_init(sc); if (err) { device_printf(dev, "unable to initialize the card\n"); goto bad; } /* set multi track mixer */ mixer_init(dev, &envy24mixer_class, sc); /* set channel information */ err = pcm_register(dev, sc, 5, 2 + sc->adcn); if (err) goto bad; sc->chnum = 0; for (i = 0; i < 5; i++) { pcm_addchan(dev, PCMDIR_PLAY, &envy24chan_class, sc); sc->chnum++; } for (i = 0; i < 2 + sc->adcn; i++) { pcm_addchan(dev, PCMDIR_REC, &envy24chan_class, sc); sc->chnum++; } /* set status iformation */ snprintf(status, SND_STATUSLEN, "at io 0x%lx:%ld,0x%lx:%ld,0x%lx:%ld,0x%lx:%ld irq %ld", rman_get_start(sc->cs), rman_get_end(sc->cs) - rman_get_start(sc->cs) + 1, rman_get_start(sc->ddma), rman_get_end(sc->ddma) - rman_get_start(sc->ddma) + 1, rman_get_start(sc->ds), rman_get_end(sc->ds) - rman_get_start(sc->ds) + 1, rman_get_start(sc->mt), rman_get_end(sc->mt) - rman_get_start(sc->mt) + 1, rman_get_start(sc->irq)); pcm_setstatus(dev, status); return 0; bad: if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); envy24_dmafree(sc); if (sc->dmat) bus_dma_tag_destroy(sc->dmat); if (sc->cfg->codec->destroy != NULL) { for (i = 0; i < sc->adcn; i++) sc->cfg->codec->destroy(sc->adc[i]); for (i = 0; i < sc->dacn; i++) sc->cfg->codec->destroy(sc->dac[i]); } envy24_cfgfree(sc->cfg); if (sc->cs) bus_release_resource(dev, SYS_RES_IOPORT, sc->csid, sc->cs); if (sc->ddma) bus_release_resource(dev, SYS_RES_IOPORT, sc->ddmaid, sc->ddma); if (sc->ds) bus_release_resource(dev, SYS_RES_IOPORT, sc->dsid, sc->ds); if (sc->mt) bus_release_resource(dev, SYS_RES_IOPORT, sc->mtid, sc->mt); if (sc->lock) snd_mtxfree(sc->lock); free(sc, M_ENVY24); return err; } static int envy24_pci_detach(device_t dev) { struct sc_info *sc; int r; int i; #if(0) device_printf(dev, "envy24_pci_detach()\n"); #endif sc = pcm_getdevinfo(dev); if (sc == NULL) return 0; r = pcm_unregister(dev); if (r) return r; envy24_dmafree(sc); if (sc->cfg->codec->destroy != NULL) { for (i = 0; i < sc->adcn; i++) sc->cfg->codec->destroy(sc->adc[i]); for (i = 0; i < sc->dacn; i++) sc->cfg->codec->destroy(sc->dac[i]); } envy24_cfgfree(sc->cfg); bus_dma_tag_destroy(sc->dmat); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); bus_release_resource(dev, SYS_RES_IOPORT, sc->csid, sc->cs); bus_release_resource(dev, SYS_RES_IOPORT, sc->ddmaid, sc->ddma); bus_release_resource(dev, SYS_RES_IOPORT, sc->dsid, sc->ds); bus_release_resource(dev, SYS_RES_IOPORT, sc->mtid, sc->mt); snd_mtxfree(sc->lock); free(sc, M_ENVY24); return 0; } static device_method_t envy24_methods[] = { /* Device interface */ DEVMETHOD(device_probe, envy24_pci_probe), DEVMETHOD(device_attach, envy24_pci_attach), DEVMETHOD(device_detach, envy24_pci_detach), { 0, 0 } }; static driver_t envy24_driver = { "pcm", envy24_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_envy24, pci, envy24_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_envy24, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_envy24, snd_spicds, 1, 1, 1); MODULE_VERSION(snd_envy24, 1); Index: head/sys/dev/sound/pci/envy24ht.c =================================================================== --- head/sys/dev/sound/pci/envy24ht.c (revision 295789) +++ head/sys/dev/sound/pci/envy24ht.c (revision 295790) @@ 
-1,2597 +1,2597 @@ /*- * Copyright (c) 2006 Konstantin Dimitrov * Copyright (c) 2001 Katsurajima Naoto * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Konstantin Dimitrov's thanks list: * * A huge thanks goes to Spas Filipov for his friendship, support and his * generous gift - an 'Audiotrak Prodigy HD2' audio card! I also want to * thank Keiichi Iwasaki and his parents, because they helped Spas to get * the card from Japan! Having hardware sample of Prodigy HD2 made adding * support for that great card very easy and real fun and pleasure. 
* */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); static MALLOC_DEFINE(M_ENVY24HT, "envy24ht", "envy24ht audio"); /* -------------------------------------------------------------------- */ struct sc_info; #define ENVY24HT_PLAY_CHNUM 8 #define ENVY24HT_REC_CHNUM 2 #define ENVY24HT_PLAY_BUFUNIT (4 /* byte/sample */ * 8 /* channel */) #define ENVY24HT_REC_BUFUNIT (4 /* byte/sample */ * 2 /* channel */) #define ENVY24HT_SAMPLE_NUM 4096 #define ENVY24HT_TIMEOUT 1000 #define ENVY24HT_DEFAULT_FORMAT SND_FORMAT(AFMT_S16_LE, 2, 0) #define ENVY24HT_NAMELEN 32 struct envy24ht_sample { volatile u_int32_t buffer; }; typedef struct envy24ht_sample sample32_t; /* channel registers */ struct sc_chinfo { struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; int dir; unsigned num; /* hw channel number */ /* channel information */ u_int32_t format; u_int32_t speed; u_int32_t blk; /* hw block size(dword) */ /* format conversion structure */ u_int8_t *data; unsigned int size; /* data buffer size(byte) */ int unit; /* sample size(byte) */ unsigned int offset; /* samples number offset */ void (*emldma)(struct sc_chinfo *); /* flags */ int run; }; /* codec interface entrys */ struct codec_entry { void *(*create)(device_t dev, void *devinfo, int dir, int num); void (*destroy)(void *codec); void (*init)(void *codec); void (*reinit)(void *codec); void (*setvolume)(void *codec, int dir, unsigned int left, unsigned int right); void (*setrate)(void *codec, int which, int rate); }; /* system configuration information */ struct cfg_info { char *name; u_int16_t subvendor, subdevice; u_int8_t scfg, acl, i2s, spdif; u_int32_t gpiomask, gpiostate, gpiodir; u_int32_t cdti, cclk, cs; u_int8_t cif, type, free; struct codec_entry *codec; }; /* device private data */ struct sc_info { device_t dev; struct mtx *lock; /* Control/Status registor */ struct resource *cs; int csid; bus_space_tag_t cst; bus_space_handle_t csh; /* MultiTrack registor */ struct resource *mt; int mtid; bus_space_tag_t mtt; bus_space_handle_t mth; /* DMA tag */ bus_dma_tag_t dmat; /* IRQ resource */ struct resource *irq; int irqid; void *ih; /* system configuration data */ struct cfg_info *cfg; /* ADC/DAC number and info */ int adcn, dacn; void *adc[4], *dac[4]; /* mixer control data */ u_int32_t src; u_int8_t left[ENVY24HT_CHAN_NUM]; u_int8_t right[ENVY24HT_CHAN_NUM]; /* Play/Record DMA fifo */ sample32_t *pbuf; sample32_t *rbuf; u_int32_t psize, rsize; /* DMA buffer size(byte) */ u_int16_t blk[2]; /* transfer check blocksize(dword) */ bus_dmamap_t pmap, rmap; bus_addr_t paddr, raddr; /* current status */ u_int32_t speed; int run[2]; u_int16_t intr[2]; struct pcmchan_caps caps[2]; /* channel info table */ unsigned chnum; struct sc_chinfo chan[11]; }; /* -------------------------------------------------------------------- */ /* * prototypes */ /* DMA emulator */ static void envy24ht_p8u(struct sc_chinfo *); static void envy24ht_p16sl(struct sc_chinfo *); static void envy24ht_p32sl(struct sc_chinfo *); static void envy24ht_r16sl(struct sc_chinfo *); static void envy24ht_r32sl(struct sc_chinfo *); /* channel interface */ static void *envy24htchan_init(kobj_t, void *, struct snd_dbuf *, struct pcm_channel *, int); static int envy24htchan_setformat(kobj_t, void *, u_int32_t); static u_int32_t envy24htchan_setspeed(kobj_t, void *, u_int32_t); static u_int32_t envy24htchan_setblocksize(kobj_t, void *, u_int32_t); 
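/*
 * Illustrative sketch (not driver code, kept disabled): the constants above
 * pin down the DMA FIFO geometry.  The hardware interleaves every channel of
 * one sample period as consecutive 32-bit words, so a play "frame" is
 * ENVY24HT_PLAY_BUFUNIT = 4 * 8 = 32 bytes, a record frame is 4 * 2 = 8
 * bytes, and each FIFO holds ENVY24HT_SAMPLE_NUM = 4096 frames (128 KiB
 * play, 32 KiB record).  The stereo pair of hardware channel n therefore
 * occupies slots frame * 8 + 2n and frame * 8 + 2n + 1 of the play buffer,
 * which is the indexing the envy24ht_p*()/envy24ht_r*() copy routines
 * further down rely on.
 */
#if 0
static void
envy24ht_geometry_example(void)
{
	u_int32_t psize = ENVY24HT_PLAY_BUFUNIT * ENVY24HT_SAMPLE_NUM;
	u_int32_t rsize = ENVY24HT_REC_BUFUNIT * ENVY24HT_SAMPLE_NUM;

	printf("play FIFO %u bytes, rec FIFO %u bytes\n", psize, rsize);
	/* left sample of hw channel n in frame f: pbuf[f * 8 + 2 * n] */
}
#endif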
static int envy24htchan_trigger(kobj_t, void *, int); static u_int32_t envy24htchan_getptr(kobj_t, void *); static struct pcmchan_caps *envy24htchan_getcaps(kobj_t, void *); /* mixer interface */ static int envy24htmixer_init(struct snd_mixer *); static int envy24htmixer_reinit(struct snd_mixer *); static int envy24htmixer_uninit(struct snd_mixer *); static int envy24htmixer_set(struct snd_mixer *, unsigned, unsigned, unsigned); static u_int32_t envy24htmixer_setrecsrc(struct snd_mixer *, u_int32_t); /* SPI codec access interface */ static void *envy24ht_spi_create(device_t, void *, int, int); static void envy24ht_spi_destroy(void *); static void envy24ht_spi_init(void *); static void envy24ht_spi_reinit(void *); static void envy24ht_spi_setvolume(void *, int, unsigned int, unsigned int); /* -------------------------------------------------------------------- */ /* system constant tables */ /* API -> hardware channel map */ static unsigned envy24ht_chanmap[ENVY24HT_CHAN_NUM] = { ENVY24HT_CHAN_PLAY_DAC1, /* 1 */ ENVY24HT_CHAN_PLAY_DAC2, /* 2 */ ENVY24HT_CHAN_PLAY_DAC3, /* 3 */ ENVY24HT_CHAN_PLAY_DAC4, /* 4 */ ENVY24HT_CHAN_PLAY_SPDIF, /* 0 */ ENVY24HT_CHAN_REC_MIX, /* 5 */ ENVY24HT_CHAN_REC_SPDIF, /* 6 */ ENVY24HT_CHAN_REC_ADC1, /* 7 */ ENVY24HT_CHAN_REC_ADC2, /* 8 */ ENVY24HT_CHAN_REC_ADC3, /* 9 */ ENVY24HT_CHAN_REC_ADC4, /* 10 */ }; /* mixer -> API channel map. see above */ static int envy24ht_mixmap[] = { -1, /* Master output level. It is depend on codec support */ -1, /* Treble level of all output channels */ -1, /* Bass level of all output channels */ -1, /* Volume of synthesier input */ 0, /* Output level for the audio device */ -1, /* Output level for the PC speaker */ 7, /* line in jack */ -1, /* microphone jack */ -1, /* CD audio input */ -1, /* Recording monitor */ 1, /* alternative codec */ -1, /* global recording level */ -1, /* Input gain */ -1, /* Output gain */ 8, /* Input source 1 */ 9, /* Input source 2 */ 10, /* Input source 3 */ 6, /* Digital (input) 1 */ -1, /* Digital (input) 2 */ -1, /* Digital (input) 3 */ -1, /* Phone input */ -1, /* Phone output */ -1, /* Video/TV (audio) in */ -1, /* Radio in */ -1, /* Monitor volume */ }; /* variable rate audio */ static u_int32_t envy24ht_speed[] = { 192000, 176400, 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 9600, 8000, 0 }; /* known boards configuration */ static struct codec_entry spi_codec = { envy24ht_spi_create, envy24ht_spi_destroy, envy24ht_spi_init, envy24ht_spi_reinit, envy24ht_spi_setvolume, NULL, /* setrate */ }; static struct cfg_info cfg_table[] = { { "Envy24HT audio (Terratec Aureon 7.1 Space)", 0x153b, 0x1145, 0x0b, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (Terratec Aureon 5.1 Sky)", 0x153b, 0x1147, 0x0a, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (Terratec Aureon 7.1 Universe)", 0x153b, 0x1153, 0x0b, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (AudioTrak Prodigy 7.1)", 0x4933, 0x4553, 0x0b, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (Terratec PHASE 28)", 0x153b, 0x1149, 0x0b, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT-S audio (Terratec PHASE 22)", 0x153b, 0x1150, 0x10, 0x80, 0xf0, 0xc3, 0x7ffbc7, 
0x7fffff, 0x438, 0x10, 0x20, 0x400, 0x01, 0x00, 0, &spi_codec, }, { "Envy24HT audio (AudioTrak Prodigy 7.1 LT)", 0x3132, 0x4154, 0x4b, 0x80, 0xfc, 0xc3, 0x7ff8ff, 0x7fffff, 0x700, 0x400, 0x200, 0x100, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (AudioTrak Prodigy 7.1 XT)", 0x3136, 0x4154, 0x4b, 0x80, 0xfc, 0xc3, 0x7ff8ff, 0x7fffff, 0x700, 0x400, 0x200, 0x100, 0x00, 0x02, 0, &spi_codec, }, { "Envy24HT audio (M-Audio Revolution 7.1)", 0x1412, 0x3630, 0x43, 0x80, 0xf8, 0xc1, 0x3fff85, 0x400072, 0x4000fa, 0x08, 0x02, 0x20, 0x00, 0x04, 0, &spi_codec, }, { "Envy24GT audio (M-Audio Revolution 5.1)", 0x1412, 0x3631, 0x42, 0x80, 0xf8, 0xc1, 0x3fff05, 0x4000f0, 0x4000fa, 0x08, 0x02, 0x10, 0x00, 0x03, 0, &spi_codec, }, { "Envy24HT audio (M-Audio Audiophile 192)", 0x1412, 0x3632, 0x68, 0x80, 0xf8, 0xc3, 0x45, 0x4000b5, 0x7fffba, 0x08, 0x02, 0x10, 0x00, 0x03, 0, &spi_codec, }, { "Envy24HT audio (AudioTrak Prodigy HD2)", 0x3137, 0x4154, 0x68, 0x80, 0x78, 0xc3, 0xfff8ff, 0x200700, 0xdfffff, 0x400, 0x200, 0x100, 0x00, 0x05, 0, &spi_codec, }, { "Envy24HT audio (ESI Juli@)", 0x3031, 0x4553, 0x20, 0x80, 0xf8, 0xc3, 0x7fff9f, 0x8016, 0x7fff9f, 0x08, 0x02, 0x10, 0x00, 0x03, 0, &spi_codec, }, { "Envy24HT-S audio (Terrasoniq TS22PCI)", 0x153b, 0x117b, 0x10, 0x80, 0xf0, 0xc3, 0x7ffbc7, 0x7fffff, 0x438, 0x10, 0x20, 0x400, 0x01, 0x00, 0, &spi_codec, }, { "Envy24HT audio (Generic)", 0, 0, 0x0b, 0x80, 0xfc, 0xc3, 0x21efff, 0x7fffff, 0x5e1000, 0x40000, 0x80000, 0x1000, 0x00, 0x02, 0, &spi_codec, /* default codec routines */ } }; static u_int32_t envy24ht_recfmt[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_S32_LE, 2, 0), 0 }; static struct pcmchan_caps envy24ht_reccaps = {8000, 96000, envy24ht_recfmt, 0}; static u_int32_t envy24ht_playfmt[] = { SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_S32_LE, 2, 0), 0 }; static struct pcmchan_caps envy24ht_playcaps = {8000, 192000, envy24ht_playfmt, 0}; struct envy24ht_emldma { u_int32_t format; void (*emldma)(struct sc_chinfo *); int unit; }; static struct envy24ht_emldma envy24ht_pemltab[] = { {SND_FORMAT(AFMT_U8, 2, 0), envy24ht_p8u, 2}, {SND_FORMAT(AFMT_S16_LE, 2, 0), envy24ht_p16sl, 4}, {SND_FORMAT(AFMT_S32_LE, 2, 0), envy24ht_p32sl, 8}, {0, NULL, 0} }; static struct envy24ht_emldma envy24ht_remltab[] = { {SND_FORMAT(AFMT_S16_LE, 2, 0), envy24ht_r16sl, 4}, {SND_FORMAT(AFMT_S32_LE, 2, 0), envy24ht_r32sl, 8}, {0, NULL, 0} }; /* -------------------------------------------------------------------- */ /* common routines */ static u_int32_t envy24ht_rdcs(struct sc_info *sc, int regno, int size) { switch (size) { case 1: return bus_space_read_1(sc->cst, sc->csh, regno); case 2: return bus_space_read_2(sc->cst, sc->csh, regno); case 4: return bus_space_read_4(sc->cst, sc->csh, regno); default: return 0xffffffff; } } static void envy24ht_wrcs(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->cst, sc->csh, regno, data); break; case 2: bus_space_write_2(sc->cst, sc->csh, regno, data); break; case 4: bus_space_write_4(sc->cst, sc->csh, regno, data); break; } } static u_int32_t envy24ht_rdmt(struct sc_info *sc, int regno, int size) { switch (size) { case 1: return bus_space_read_1(sc->mtt, sc->mth, regno); case 2: return bus_space_read_2(sc->mtt, sc->mth, regno); case 4: return bus_space_read_4(sc->mtt, sc->mth, regno); default: return 0xffffffff; } } static void envy24ht_wrmt(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->mtt, sc->mth, 
regno, data); break; case 2: bus_space_write_2(sc->mtt, sc->mth, regno, data); break; case 4: bus_space_write_4(sc->mtt, sc->mth, regno, data); break; } } /* -------------------------------------------------------------------- */ /* I2C port/E2PROM access routines */ static int envy24ht_rdi2c(struct sc_info *sc, u_int32_t dev, u_int32_t addr) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24ht_rdi2c(sc, 0x%02x, 0x%02x)\n", dev, addr); #endif for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CSTAT, 1); if ((data & ENVY24HT_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24HT_TIMEOUT) { return -1; } envy24ht_wrcs(sc, ENVY24HT_CCS_I2CADDR, addr, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_I2CDEV, (dev & ENVY24HT_CCS_I2CDEV_ADDR) | ENVY24HT_CCS_I2CDEV_RD, 1); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CSTAT, 1); if ((data & ENVY24HT_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24HT_TIMEOUT) { return -1; } data = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CDATA, 1); #if(0) device_printf(sc->dev, "envy24ht_rdi2c(): return 0x%x\n", data); #endif return (int)data; } static int envy24ht_wri2c(struct sc_info *sc, u_int32_t dev, u_int32_t addr, u_int32_t data) { u_int32_t tmp; int i; #if(0) device_printf(sc->dev, "envy24ht_rdi2c(sc, 0x%02x, 0x%02x)\n", dev, addr); #endif for (i = 0; i < ENVY24HT_TIMEOUT; i++) { tmp = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CSTAT, 1); if ((tmp & ENVY24HT_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24HT_TIMEOUT) { return -1; } envy24ht_wrcs(sc, ENVY24HT_CCS_I2CADDR, addr, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_I2CDATA, data, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_I2CDEV, (dev & ENVY24HT_CCS_I2CDEV_ADDR) | ENVY24HT_CCS_I2CDEV_WR, 1); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CSTAT, 1); if ((data & ENVY24HT_CCS_I2CSTAT_BSY) == 0) break; DELAY(32); /* 31.25kHz */ } if (i == ENVY24HT_TIMEOUT) { return -1; } return 0; } static int envy24ht_rdrom(struct sc_info *sc, u_int32_t addr) { u_int32_t data; #if(0) device_printf(sc->dev, "envy24ht_rdrom(sc, 0x%02x)\n", addr); #endif data = envy24ht_rdcs(sc, ENVY24HT_CCS_I2CSTAT, 1); if ((data & ENVY24HT_CCS_I2CSTAT_ROM) == 0) { #if(0) device_printf(sc->dev, "envy24ht_rdrom(): E2PROM not presented\n"); #endif return -1; } return envy24ht_rdi2c(sc, ENVY24HT_CCS_I2CDEV_ROM, addr); } static struct cfg_info * envy24ht_rom2cfg(struct sc_info *sc) { struct cfg_info *buff; int size; int i; #if(0) device_printf(sc->dev, "envy24ht_rom2cfg(sc)\n"); #endif size = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SIZE); if ((size < ENVY24HT_E2PROM_GPIOSTATE + 3) || (size == 0x78)) { #if(0) device_printf(sc->dev, "envy24ht_rom2cfg(): ENVY24HT_E2PROM_SIZE-->%d\n", size); #endif buff = malloc(sizeof(*buff), M_ENVY24HT, M_NOWAIT); if (buff == NULL) { #if(0) device_printf(sc->dev, "envy24ht_rom2cfg(): malloc()\n"); #endif return NULL; } buff->free = 1; /* no valid e2prom, using default values */ buff->subvendor = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBVENDOR) << 8; buff->subvendor += envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBVENDOR + 1); buff->subdevice = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBDEVICE) << 8; buff->subdevice += envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBDEVICE + 1); buff->scfg = 0x0b; buff->acl = 0x80; buff->i2s = 0xfc; buff->spdif = 0xc3; buff->gpiomask = 0x21efff; buff->gpiostate = 0x7fffff; buff->gpiodir = 0x5e1000; buff->cdti = 0x40000; buff->cclk = 0x80000; buff->cs = 0x1000; buff->cif = 0x00; 
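/*
 * Illustrative sketch: envy24ht_rom2cfg() reassembles multi-byte
 * configuration words one EEPROM byte at a time, and the byte order differs
 * per field: the subvendor/subdevice IDs above are stored high byte first,
 * while the 24-bit GPIO words read just below are stored low byte first.
 * Two hypothetical helpers (not in the driver; handling of the -1 error
 * return of envy24ht_rdrom() is omitted) restate both compositions:
 */
#if 0
static u_int32_t
rom_be16(struct sc_info *sc, int addr)		/* IDs: high byte first */
{
	return (envy24ht_rdrom(sc, addr) << 8) |
	    envy24ht_rdrom(sc, addr + 1);
}

static u_int32_t
rom_le24(struct sc_info *sc, int addr)		/* GPIO: low byte first */
{
	return envy24ht_rdrom(sc, addr) |
	    (envy24ht_rdrom(sc, addr + 1) << 8) |
	    (envy24ht_rdrom(sc, addr + 2) << 16);
}
#endif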
buff->type = 0x02; for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) if (cfg_table[i].subvendor == buff->subvendor && cfg_table[i].subdevice == buff->subdevice) break; buff->name = cfg_table[i].name; buff->codec = cfg_table[i].codec; return buff; #if 0 return NULL; #endif } buff = malloc(sizeof(*buff), M_ENVY24HT, M_NOWAIT); if (buff == NULL) { #if(0) device_printf(sc->dev, "envy24ht_rom2cfg(): malloc()\n"); #endif return NULL; } buff->free = 1; buff->subvendor = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBVENDOR) << 8; buff->subvendor += envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBVENDOR + 1); buff->subdevice = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBDEVICE) << 8; buff->subdevice += envy24ht_rdrom(sc, ENVY24HT_E2PROM_SUBDEVICE + 1); buff->scfg = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SCFG); buff->acl = envy24ht_rdrom(sc, ENVY24HT_E2PROM_ACL); buff->i2s = envy24ht_rdrom(sc, ENVY24HT_E2PROM_I2S); buff->spdif = envy24ht_rdrom(sc, ENVY24HT_E2PROM_SPDIF); buff->gpiomask = envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOMASK) | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOMASK + 1) << 8 | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOMASK + 2) << 16; buff->gpiostate = envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOSTATE) | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOSTATE + 1) << 8 | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIOSTATE + 2) << 16; buff->gpiodir = envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIODIR) | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIODIR + 1) << 8 | \ envy24ht_rdrom(sc, ENVY24HT_E2PROM_GPIODIR + 2) << 16; for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) if (cfg_table[i].subvendor == buff->subvendor && cfg_table[i].subdevice == buff->subdevice) break; buff->name = cfg_table[i].name; buff->codec = cfg_table[i].codec; return buff; } static void envy24ht_cfgfree(struct cfg_info *cfg) { if (cfg == NULL) return; if (cfg->free) free(cfg, M_ENVY24HT); return; } /* -------------------------------------------------------------------- */ /* AC'97 codec access routines */ #if 0 static int envy24ht_coldcd(struct sc_info *sc) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24ht_coldcd()\n"); #endif envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, ENVY24HT_MT_AC97CMD_CLD, 1); DELAY(10); envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, 0, 1); DELAY(1000); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdmt(sc, ENVY24HT_MT_AC97CMD, 1); if (data & ENVY24HT_MT_AC97CMD_RDY) { return 0; } } return -1; } static int envy24ht_slavecd(struct sc_info *sc) { u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24ht_slavecd()\n"); #endif envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, ENVY24HT_MT_AC97CMD_CLD | ENVY24HT_MT_AC97CMD_WRM, 1); DELAY(10); envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, 0, 1); DELAY(1000); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdmt(sc, ENVY24HT_MT_AC97CMD, 1); if (data & ENVY24HT_MT_AC97CMD_RDY) { return 0; } } return -1; } static int envy24ht_rdcd(kobj_t obj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t data; int i; #if(0) device_printf(sc->dev, "envy24ht_rdcd(obj, sc, 0x%02x)\n", regno); #endif envy24ht_wrmt(sc, ENVY24HT_MT_AC97IDX, (u_int32_t)regno, 1); envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, ENVY24HT_MT_AC97CMD_RD, 1); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { data = envy24ht_rdmt(sc, ENVY24HT_MT_AC97CMD, 1); if ((data & ENVY24HT_MT_AC97CMD_RD) == 0) break; } data = envy24ht_rdmt(sc, ENVY24HT_MT_AC97DLO, 2); #if(0) device_printf(sc->dev, "envy24ht_rdcd(): return 0x%x\n", data); #endif return (int)data; } static int 
envy24ht_wrcd(kobj_t obj, void *devinfo, int regno, u_int16_t data) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t cmd; int i; #if(0) device_printf(sc->dev, "envy24ht_wrcd(obj, sc, 0x%02x, 0x%04x)\n", regno, data); #endif envy24ht_wrmt(sc, ENVY24HT_MT_AC97IDX, (u_int32_t)regno, 1); envy24ht_wrmt(sc, ENVY24HT_MT_AC97DLO, (u_int32_t)data, 2); envy24ht_wrmt(sc, ENVY24HT_MT_AC97CMD, ENVY24HT_MT_AC97CMD_WR, 1); for (i = 0; i < ENVY24HT_TIMEOUT; i++) { cmd = envy24ht_rdmt(sc, ENVY24HT_MT_AC97CMD, 1); if ((cmd & ENVY24HT_MT_AC97CMD_WR) == 0) break; } return 0; } static kobj_method_t envy24ht_ac97_methods[] = { KOBJMETHOD(ac97_read, envy24ht_rdcd), KOBJMETHOD(ac97_write, envy24ht_wrcd), KOBJMETHOD_END }; AC97_DECLARE(envy24ht_ac97); #endif /* -------------------------------------------------------------------- */ /* GPIO access routines */ static u_int32_t envy24ht_gpiord(struct sc_info *sc) { if (sc->cfg->subvendor == 0x153b && sc->cfg->subdevice == 0x1150) return envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_LDATA, 2); else return (envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_HDATA, 1) << 16 | envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_LDATA, 2)); } static void envy24ht_gpiowr(struct sc_info *sc, u_int32_t data) { #if(0) device_printf(sc->dev, "envy24ht_gpiowr(sc, 0x%02x)\n", data & 0x7FFFFF); return; #endif envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_LDATA, data, 2); if (sc->cfg->subdevice != 0x1150) envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_HDATA, data >> 16, 1); return; } #if 0 static u_int32_t envy24ht_gpiogetmask(struct sc_info *sc) { return (envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_HMASK, 1) << 16 | envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_LMASK, 2)); } #endif static void envy24ht_gpiosetmask(struct sc_info *sc, u_int32_t mask) { envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_LMASK, mask, 2); if (sc->cfg->subdevice != 0x1150) envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_HMASK, mask >> 16, 1); return; } #if 0 static u_int32_t envy24ht_gpiogetdir(struct sc_info *sc) { return envy24ht_rdcs(sc, ENVY24HT_CCS_GPIO_CTLDIR, 4); } #endif static void envy24ht_gpiosetdir(struct sc_info *sc, u_int32_t dir) { if (sc->cfg->subvendor == 0x153b && sc->cfg->subdevice == 0x1150) envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_CTLDIR, dir, 2); else envy24ht_wrcs(sc, ENVY24HT_CCS_GPIO_CTLDIR, dir, 4); return; } /* -------------------------------------------------------------------- */ /* SPI codec access interface routine */ struct envy24ht_spi_codec { struct spicds_info *info; struct sc_info *parent; int dir; int num; int cs, cclk, cdti; }; static void envy24ht_spi_ctl(void *codec, unsigned int cs, unsigned int cclk, unsigned int cdti) { u_int32_t data = 0; struct envy24ht_spi_codec *ptr = codec; #if(0) device_printf(ptr->parent->dev, "--> %d, %d, %d\n", cs, cclk, cdti); #endif data = envy24ht_gpiord(ptr->parent); data &= ~(ptr->cs | ptr->cclk | ptr->cdti); if (cs) data += ptr->cs; if (cclk) data += ptr->cclk; if (cdti) data += ptr->cdti; envy24ht_gpiowr(ptr->parent, data); return; } static void * envy24ht_spi_create(device_t dev, void *info, int dir, int num) { struct sc_info *sc = info; struct envy24ht_spi_codec *buff = NULL; #if(0) device_printf(sc->dev, "envy24ht_spi_create(dev, sc, %d, %d)\n", dir, num); #endif buff = malloc(sizeof(*buff), M_ENVY24HT, M_NOWAIT); if (buff == NULL) return NULL; if (dir == PCMDIR_REC && sc->adc[num] != NULL) buff->info = ((struct envy24ht_spi_codec *)sc->adc[num])->info; else if (dir == PCMDIR_PLAY && sc->dac[num] != NULL) buff->info = ((struct envy24ht_spi_codec *)sc->dac[num])->info; else buff->info = spicds_create(dev, buff, num, 
envy24ht_spi_ctl); if (buff->info == NULL) { free(buff, M_ENVY24HT); return NULL; } buff->parent = sc; buff->dir = dir; buff->num = num; return (void *)buff; } static void envy24ht_spi_destroy(void *codec) { struct envy24ht_spi_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24ht_spi_destroy()\n"); #endif if (ptr->dir == PCMDIR_PLAY) { if (ptr->parent->dac[ptr->num] != NULL) spicds_destroy(ptr->info); } else { if (ptr->parent->adc[ptr->num] != NULL) spicds_destroy(ptr->info); } free(codec, M_ENVY24HT); } static void envy24ht_spi_init(void *codec) { struct envy24ht_spi_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24ht_spicds_init()\n"); #endif ptr->cs = ptr->parent->cfg->cs; ptr->cclk = ptr->parent->cfg->cclk; ptr->cdti = ptr->parent->cfg->cdti; spicds_settype(ptr->info, ptr->parent->cfg->type); spicds_setcif(ptr->info, ptr->parent->cfg->cif); if (ptr->parent->cfg->type == SPICDS_TYPE_AK4524 || \ ptr->parent->cfg->type == SPICDS_TYPE_AK4528) { spicds_setformat(ptr->info, AK452X_FORMAT_I2S | AK452X_FORMAT_256FSN | AK452X_FORMAT_1X); spicds_setdvc(ptr->info, AK452X_DVC_DEMOFF); } /* for the time being, init only first codec */ if (ptr->num == 0) spicds_init(ptr->info); } static void envy24ht_spi_reinit(void *codec) { struct envy24ht_spi_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24ht_spi_reinit()\n"); #endif spicds_reinit(ptr->info); } static void envy24ht_spi_setvolume(void *codec, int dir, unsigned int left, unsigned int right) { struct envy24ht_spi_codec *ptr = codec; if (ptr == NULL) return; #if(0) device_printf(ptr->parent->dev, "envy24ht_spi_set()\n"); #endif spicds_set(ptr->info, dir, left, right); } /* -------------------------------------------------------------------- */ /* hardware access routeines */ static struct { u_int32_t speed; u_int32_t code; } envy24ht_speedtab[] = { {48000, ENVY24HT_MT_RATE_48000}, {24000, ENVY24HT_MT_RATE_24000}, {12000, ENVY24HT_MT_RATE_12000}, {9600, ENVY24HT_MT_RATE_9600}, {32000, ENVY24HT_MT_RATE_32000}, {16000, ENVY24HT_MT_RATE_16000}, {8000, ENVY24HT_MT_RATE_8000}, {96000, ENVY24HT_MT_RATE_96000}, {192000, ENVY24HT_MT_RATE_192000}, {64000, ENVY24HT_MT_RATE_64000}, {44100, ENVY24HT_MT_RATE_44100}, {22050, ENVY24HT_MT_RATE_22050}, {11025, ENVY24HT_MT_RATE_11025}, {88200, ENVY24HT_MT_RATE_88200}, {176400, ENVY24HT_MT_RATE_176400}, {0, 0x10} }; static u_int32_t envy24ht_setspeed(struct sc_info *sc, u_int32_t speed) { u_int32_t code, i2sfmt; int i = 0; #if(0) device_printf(sc->dev, "envy24ht_setspeed(sc, %d)\n", speed); if (speed == 0) { code = ENVY24HT_MT_RATE_SPDIF; /* external master clock */ envy24ht_slavecd(sc); } else { #endif for (i = 0; envy24ht_speedtab[i].speed != 0; i++) { if (envy24ht_speedtab[i].speed == speed) break; } code = envy24ht_speedtab[i].code; #if 0 } device_printf(sc->dev, "envy24ht_setspeed(): speed %d/code 0x%04x\n", envy24ht_speedtab[i].speed, code); #endif if (code < 0x10) { envy24ht_wrmt(sc, ENVY24HT_MT_RATE, code, 1); if ((((sc->cfg->scfg & ENVY24HT_CCSM_SCFG_XIN2) == 0x00) && (code == ENVY24HT_MT_RATE_192000)) || \ (code == ENVY24HT_MT_RATE_176400)) { i2sfmt = envy24ht_rdmt(sc, ENVY24HT_MT_I2S, 1); i2sfmt |= ENVY24HT_MT_I2S_MLR128; envy24ht_wrmt(sc, ENVY24HT_MT_I2S, i2sfmt, 1); } else { i2sfmt = envy24ht_rdmt(sc, ENVY24HT_MT_I2S, 1); i2sfmt &= ~ENVY24HT_MT_I2S_MLR128; envy24ht_wrmt(sc, ENVY24HT_MT_I2S, i2sfmt, 1); } code = envy24ht_rdmt(sc, ENVY24HT_MT_RATE, 1); code &= ENVY24HT_MT_RATE_MASK; 
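/*
 * envy24ht_setspeed() walks envy24ht_speedtab[] in both directions: speed to
 * register code before writing ENVY24HT_MT_RATE, and, in the loop that
 * follows, code back to speed after reading the register, so the caller
 * learns the rate the hardware actually latched.  The sentinel entry
 * {0, 0x10} terminates both scans because every real rate code is below
 * 0x10.  The reverse lookup in isolation (non-compiled sketch):
 */
#if 0
static u_int32_t
envy24ht_code_to_speed(u_int32_t code)
{
	int i;

	for (i = 0; envy24ht_speedtab[i].code < 0x10; i++)
		if (envy24ht_speedtab[i].code == code)
			break;
	return envy24ht_speedtab[i].speed;	/* 0 if code is unknown */
}
#endif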
for (i = 0; envy24ht_speedtab[i].code < 0x10; i++) { if (envy24ht_speedtab[i].code == code) break; } speed = envy24ht_speedtab[i].speed; } else speed = 0; #if(0) device_printf(sc->dev, "envy24ht_setspeed(): return %d\n", speed); #endif return speed; } static void envy24ht_setvolume(struct sc_info *sc, unsigned ch) { #if(0) device_printf(sc->dev, "envy24ht_setvolume(sc, %d)\n", ch); envy24ht_wrmt(sc, ENVY24HT_MT_VOLIDX, ch * 2, 1); envy24ht_wrmt(sc, ENVY24HT_MT_VOLUME, 0x7f00 | sc->left[ch], 2); envy24ht_wrmt(sc, ENVY24HT_MT_VOLIDX, ch * 2 + 1, 1); envy24ht_wrmt(sc, ENVY24HT_MT_VOLUME, (sc->right[ch] << 8) | 0x7f, 2); #endif } static void envy24ht_mutevolume(struct sc_info *sc, unsigned ch) { #if 0 u_int32_t vol; device_printf(sc->dev, "envy24ht_mutevolume(sc, %d)\n", ch); vol = ENVY24HT_VOL_MUTE << 8 | ENVY24HT_VOL_MUTE; envy24ht_wrmt(sc, ENVY24HT_MT_VOLIDX, ch * 2, 1); envy24ht_wrmt(sc, ENVY24HT_MT_VOLUME, vol, 2); envy24ht_wrmt(sc, ENVY24HT_MT_VOLIDX, ch * 2 + 1, 1); envy24ht_wrmt(sc, ENVY24HT_MT_VOLUME, vol, 2); #endif } static u_int32_t envy24ht_gethwptr(struct sc_info *sc, int dir) { int unit, regno; u_int32_t ptr, rtn; #if(0) device_printf(sc->dev, "envy24ht_gethwptr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) { rtn = sc->psize / 4; unit = ENVY24HT_PLAY_BUFUNIT / 4; regno = ENVY24HT_MT_PCNT; } else { rtn = sc->rsize / 4; unit = ENVY24HT_REC_BUFUNIT / 4; regno = ENVY24HT_MT_RCNT; } ptr = envy24ht_rdmt(sc, regno, 2); rtn -= (ptr + 1); rtn /= unit; #if(0) device_printf(sc->dev, "envy24ht_gethwptr(): return %d\n", rtn); #endif return rtn; } static void envy24ht_updintr(struct sc_info *sc, int dir) { int regptr, regintr; u_int32_t mask, intr; u_int32_t ptr, size, cnt; u_int16_t blk; #if(0) device_printf(sc->dev, "envy24ht_updintr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) { blk = sc->blk[0]; size = sc->psize / 4; regptr = ENVY24HT_MT_PCNT; regintr = ENVY24HT_MT_PTERM; mask = ~ENVY24HT_MT_INT_PMASK; } else { blk = sc->blk[1]; size = sc->rsize / 4; regptr = ENVY24HT_MT_RCNT; regintr = ENVY24HT_MT_RTERM; mask = ~ENVY24HT_MT_INT_RMASK; } ptr = size - envy24ht_rdmt(sc, regptr, 2) - 1; /* cnt = blk - ptr % blk - 1; if (cnt == 0) cnt = blk - 1; */ cnt = blk - 1; #if(0) device_printf(sc->dev, "envy24ht_updintr():ptr = %d, blk = %d, cnt = %d\n", ptr, blk, cnt); #endif envy24ht_wrmt(sc, regintr, cnt, 2); intr = envy24ht_rdmt(sc, ENVY24HT_MT_INT_MASK, 1); #if(0) device_printf(sc->dev, "envy24ht_updintr():intr = 0x%02x, mask = 0x%02x\n", intr, mask); #endif envy24ht_wrmt(sc, ENVY24HT_MT_INT_MASK, intr & mask, 1); #if(0) device_printf(sc->dev, "envy24ht_updintr():INT-->0x%02x\n", envy24ht_rdmt(sc, ENVY24HT_MT_INT_MASK, 1)); #endif return; } #if 0 static void envy24ht_maskintr(struct sc_info *sc, int dir) { u_int32_t mask, intr; #if(0) device_printf(sc->dev, "envy24ht_maskintr(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) mask = ENVY24HT_MT_INT_PMASK; else mask = ENVY24HT_MT_INT_RMASK; intr = envy24ht_rdmt(sc, ENVY24HT_MT_INT, 1); envy24ht_wrmt(sc, ENVY24HT_MT_INT, intr | mask, 1); return; } #endif static int envy24ht_checkintr(struct sc_info *sc, int dir) { u_int32_t mask, stat, intr, rtn; #if(0) device_printf(sc->dev, "envy24ht_checkintr(sc, %d)\n", dir); #endif intr = envy24ht_rdmt(sc, ENVY24HT_MT_INT_STAT, 1); if (dir == PCMDIR_PLAY) { if ((rtn = intr & ENVY24HT_MT_INT_PSTAT) != 0) { mask = ~ENVY24HT_MT_INT_RSTAT; envy24ht_wrmt(sc, 0x1a, 0x01, 1); envy24ht_wrmt(sc, ENVY24HT_MT_INT_STAT, (intr & mask) | ENVY24HT_MT_INT_PSTAT | 0x08, 1); stat = envy24ht_rdmt(sc, ENVY24HT_MT_INT_MASK, 
1); envy24ht_wrmt(sc, ENVY24HT_MT_INT_MASK, stat | ENVY24HT_MT_INT_PMASK, 1); } } else { if ((rtn = intr & ENVY24HT_MT_INT_RSTAT) != 0) { mask = ~ENVY24HT_MT_INT_PSTAT; #if 0 stat = ENVY24HT_MT_INT_RSTAT | ENVY24HT_MT_INT_RMASK; #endif envy24ht_wrmt(sc, ENVY24HT_MT_INT_STAT, (intr & mask) | ENVY24HT_MT_INT_RSTAT, 1); stat = envy24ht_rdmt(sc, ENVY24HT_MT_INT_MASK, 1); envy24ht_wrmt(sc, ENVY24HT_MT_INT_MASK, stat | ENVY24HT_MT_INT_RMASK, 1); } } return rtn; } static void envy24ht_start(struct sc_info *sc, int dir) { u_int32_t stat, sw; #if(0) device_printf(sc->dev, "envy24ht_start(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) sw = ENVY24HT_MT_PCTL_PSTART; else sw = ENVY24HT_MT_PCTL_RSTART; stat = envy24ht_rdmt(sc, ENVY24HT_MT_PCTL, 1); envy24ht_wrmt(sc, ENVY24HT_MT_PCTL, stat | sw, 1); #if(0) DELAY(100); device_printf(sc->dev, "PADDR:0x%08x\n", envy24ht_rdmt(sc, ENVY24HT_MT_PADDR, 4)); device_printf(sc->dev, "PCNT:%ld\n", envy24ht_rdmt(sc, ENVY24HT_MT_PCNT, 2)); #endif return; } static void envy24ht_stop(struct sc_info *sc, int dir) { u_int32_t stat, sw; #if(0) device_printf(sc->dev, "envy24ht_stop(sc, %d)\n", dir); #endif if (dir == PCMDIR_PLAY) sw = ~ENVY24HT_MT_PCTL_PSTART; else sw = ~ENVY24HT_MT_PCTL_RSTART; stat = envy24ht_rdmt(sc, ENVY24HT_MT_PCTL, 1); envy24ht_wrmt(sc, ENVY24HT_MT_PCTL, stat & sw, 1); return; } #if 0 static int envy24ht_route(struct sc_info *sc, int dac, int class, int adc, int rev) { return 0; } #endif /* -------------------------------------------------------------------- */ /* buffer copy routines */ static void envy24ht_p32sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int32_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getready(ch->buffer) / 8; dmabuf = ch->parent->pbuf; data = (u_int32_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer) / 4; dst = src / 2 + ch->offset; ssize = ch->size / 4; dsize = ch->size / 8; slot = ch->num * 2; for (i = 0; i < length; i++) { dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot].buffer = data[src]; dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot + 1].buffer = data[src + 1]; dst++; dst %= dsize; src += 2; src %= ssize; } return; } static void envy24ht_p16sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int16_t *data; int src, dst, ssize, dsize, slot; int i; #if(0) device_printf(ch->parent->dev, "envy24ht_p16sl()\n"); #endif length = sndbuf_getready(ch->buffer) / 4; dmabuf = ch->parent->pbuf; data = (u_int16_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer) / 2; dst = src / 2 + ch->offset; ssize = ch->size / 2; dsize = ch->size / 4; slot = ch->num * 2; #if(0) device_printf(ch->parent->dev, "envy24ht_p16sl():%lu-->%lu(%lu)\n", src, dst, length); #endif for (i = 0; i < length; i++) { dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot].buffer = (u_int32_t)data[src] << 16; dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot + 1].buffer = (u_int32_t)data[src + 1] << 16; #if(0) if (i < 16) { printf("%08x", dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot]); printf("%08x", dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot + 1]); } #endif dst++; dst %= dsize; src += 2; src %= ssize; } #if(0) printf("\n"); #endif return; } static void envy24ht_p8u(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int8_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getready(ch->buffer) / 2; dmabuf = ch->parent->pbuf; data = (u_int8_t *)ch->data; src = sndbuf_getreadyptr(ch->buffer); dst = src / 2 + ch->offset; ssize = ch->size; dsize = ch->size / 4; slot = ch->num * 2; for (i = 0; i < length; i++) { dmabuf[dst * ENVY24HT_PLAY_CHNUM + 
slot].buffer = ((u_int32_t)data[src] ^ 0x80) << 24; dmabuf[dst * ENVY24HT_PLAY_CHNUM + slot + 1].buffer = ((u_int32_t)data[src + 1] ^ 0x80) << 24; dst++; dst %= dsize; src += 2; src %= ssize; } return; } static void envy24ht_r32sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int32_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getfree(ch->buffer) / 8; dmabuf = ch->parent->rbuf; data = (u_int32_t *)ch->data; dst = sndbuf_getfreeptr(ch->buffer) / 4; src = dst / 2 + ch->offset; dsize = ch->size / 4; ssize = ch->size / 8; slot = (ch->num - ENVY24HT_CHAN_REC_ADC1) * 2; for (i = 0; i < length; i++) { data[dst] = dmabuf[src * ENVY24HT_REC_CHNUM + slot].buffer; data[dst + 1] = dmabuf[src * ENVY24HT_REC_CHNUM + slot + 1].buffer; dst += 2; dst %= dsize; src++; src %= ssize; } return; } static void envy24ht_r16sl(struct sc_chinfo *ch) { int length; sample32_t *dmabuf; u_int16_t *data; int src, dst, ssize, dsize, slot; int i; length = sndbuf_getfree(ch->buffer) / 4; dmabuf = ch->parent->rbuf; data = (u_int16_t *)ch->data; dst = sndbuf_getfreeptr(ch->buffer) / 2; src = dst / 2 + ch->offset; dsize = ch->size / 2; ssize = ch->size / 8; slot = (ch->num - ENVY24HT_CHAN_REC_ADC1) * 2; for (i = 0; i < length; i++) { data[dst] = dmabuf[src * ENVY24HT_REC_CHNUM + slot].buffer; data[dst + 1] = dmabuf[src * ENVY24HT_REC_CHNUM + slot + 1].buffer; dst += 2; dst %= dsize; src++; src %= ssize; } return; } /* -------------------------------------------------------------------- */ /* channel interface */ static void * envy24htchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = (struct sc_info *)devinfo; struct sc_chinfo *ch; unsigned num; #if(0) device_printf(sc->dev, "envy24htchan_init(obj, devinfo, b, c, %d)\n", dir); #endif snd_mtxlock(sc->lock); #if 0 if ((sc->chnum > ENVY24HT_CHAN_PLAY_SPDIF && dir != PCMDIR_REC) || (sc->chnum < ENVY24HT_CHAN_REC_ADC1 && dir != PCMDIR_PLAY)) { snd_mtxunlock(sc->lock); return NULL; } #endif num = sc->chnum; ch = &sc->chan[num]; ch->size = 8 * ENVY24HT_SAMPLE_NUM; ch->data = malloc(ch->size, M_ENVY24HT, M_NOWAIT); if (ch->data == NULL) { ch->size = 0; ch = NULL; } else { ch->buffer = b; ch->channel = c; ch->parent = sc; ch->dir = dir; /* set channel map */ ch->num = envy24ht_chanmap[num]; snd_mtxunlock(sc->lock); sndbuf_setup(ch->buffer, ch->data, ch->size); snd_mtxlock(sc->lock); /* these 2 values are dummy */ ch->unit = 4; ch->blk = 10240; } snd_mtxunlock(sc->lock); return ch; } static int envy24htchan_free(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; #if(0) device_printf(sc->dev, "envy24htchan_free()\n"); #endif snd_mtxlock(sc->lock); if (ch->data != NULL) { free(ch->data, M_ENVY24HT); ch->data = NULL; } snd_mtxunlock(sc->lock); return 0; } static int envy24htchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; struct envy24ht_emldma *emltab; /* unsigned int bcnt, bsize; */ int i; #if(0) device_printf(sc->dev, "envy24htchan_setformat(obj, data, 0x%08x)\n", format); #endif snd_mtxlock(sc->lock); /* check and get format related information */ if (ch->dir == PCMDIR_PLAY) emltab = envy24ht_pemltab; else emltab = envy24ht_remltab; if (emltab == NULL) { snd_mtxunlock(sc->lock); return -1; } for (i = 0; emltab[i].format != 0; i++) if (emltab[i].format == format) break; if (emltab[i].format == 0) { snd_mtxunlock(sc->lock); return -1; } /* set format information */ ch->format = format; 
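/*
 * The envy24ht_p*()/envy24ht_r*() routines above are the "emulated DMA"
 * converters that setformat selects through emltab: the FIFO only holds
 * 32-bit left-justified samples, so 16-bit signed input is shifted up by 16
 * and 8-bit unsigned input has its sign bit flipped (XOR 0x80) before being
 * shifted into the top byte.  The two per-sample conversions in isolation
 * (non-compiled sketch, helper names are not part of the driver):
 */
#if 0
static u_int32_t
pcm_s16_to_slot(u_int16_t s)	/* AFMT_S16_LE -> 32-bit FIFO slot */
{
	return (u_int32_t)s << 16;
}

static u_int32_t
pcm_u8_to_slot(u_int8_t s)	/* AFMT_U8 -> 32-bit FIFO slot */
{
	return ((u_int32_t)s ^ 0x80) << 24;
}
#endif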
ch->emldma = emltab[i].emldma; if (ch->unit > emltab[i].unit) ch->blk *= ch->unit / emltab[i].unit; else ch->blk /= emltab[i].unit / ch->unit; ch->unit = emltab[i].unit; /* set channel buffer information */ ch->size = ch->unit * ENVY24HT_SAMPLE_NUM; #if 0 if (ch->dir == PCMDIR_PLAY) bsize = ch->blk * 4 / ENVY24HT_PLAY_BUFUNIT; else bsize = ch->blk * 4 / ENVY24HT_REC_BUFUNIT; bsize *= ch->unit; bcnt = ch->size / bsize; sndbuf_resize(ch->buffer, bcnt, bsize); #endif snd_mtxunlock(sc->lock); #if(0) device_printf(sc->dev, "envy24htchan_setformat(): return 0x%08x\n", 0); #endif return 0; } /* IMPLEMENTATION NOTICE: in this driver, the setspeed function only records the requested speed value; the real hardware rate is programmed when the channel is started (see envy24htchan_trigger()). So, at the time this function is called, any rate the ENVY24 can use may be accepted. But if, at trigger time, some other channel is already running at a different rate, the trigger function will fail. */ static u_int32_t envy24htchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data; u_int32_t val, prev; int i; #if(0) device_printf(ch->parent->dev, "envy24htchan_setspeed(obj, data, %d)\n", speed); #endif prev = 0x7fffffff; for (i = 0; (val = envy24ht_speed[i]) != 0; i++) { if (abs(val - speed) < abs(prev - speed)) prev = val; else break; } ch->speed = prev; #if(0) device_printf(ch->parent->dev, "envy24htchan_setspeed(): return %d\n", ch->speed); #endif return ch->speed; } static u_int32_t envy24htchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; /* struct sc_info *sc = ch->parent; */ u_int32_t size, prev; unsigned int bcnt, bsize; #if(0) device_printf(sc->dev, "envy24htchan_setblocksize(obj, data, %d)\n", blocksize); #endif prev = 0x7fffffff; /* snd_mtxlock(sc->lock); */ for (size = ch->size / 2; size > 0; size /= 2) { if (abs(size - blocksize) < abs(prev - blocksize)) prev = size; else break; } ch->blk = prev / ch->unit; if (ch->dir == PCMDIR_PLAY) ch->blk *= ENVY24HT_PLAY_BUFUNIT / 4; else ch->blk *= ENVY24HT_REC_BUFUNIT / 4; /* set channel buffer information */ /* ch->size = ch->unit * ENVY24HT_SAMPLE_NUM; */ if (ch->dir == PCMDIR_PLAY) bsize = ch->blk * 4 / ENVY24HT_PLAY_BUFUNIT; else bsize = ch->blk * 4 / ENVY24HT_REC_BUFUNIT; bsize *= ch->unit; bcnt = ch->size / bsize; sndbuf_resize(ch->buffer, bcnt, bsize); /* snd_mtxunlock(sc->lock); */ #if(0) device_printf(sc->dev, "envy24htchan_setblocksize(): return %d\n", prev); #endif return prev; } /* semantic note: must start at beginning of buffer */ static int envy24htchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t ptr; int slot; int error = 0; #if 0 int i; device_printf(sc->dev, "envy24htchan_trigger(obj, data, %d)\n", go); #endif snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) slot = 0; else slot = 1; switch (go) { case PCMTRIG_START: #if(0) device_printf(sc->dev, "envy24htchan_trigger(): start\n"); #endif /* check or set channel speed */ if (sc->run[0] == 0 && sc->run[1] == 0) { sc->speed = envy24ht_setspeed(sc, ch->speed); sc->caps[0].minspeed = sc->caps[0].maxspeed = sc->speed; sc->caps[1].minspeed = sc->caps[1].maxspeed = sc->speed; } else if (ch->speed != 0 && ch->speed != sc->speed) { error = -1; goto fail; } if (ch->speed == 0) ch->channel->speed = sc->speed; /* start or enable channel */ sc->run[slot]++; if (sc->run[slot] == 1) { /* first channel */ ch->offset = 0; sc->blk[slot] = ch->blk; } else { ptr =
envy24ht_gethwptr(sc, ch->dir); ch->offset = ((ptr / ch->blk + 1) * ch->blk % (ch->size / 4)) * 4 / ch->unit; if (ch->blk < sc->blk[slot]) sc->blk[slot] = ch->blk; } if (ch->dir == PCMDIR_PLAY) { ch->emldma(ch); envy24ht_setvolume(sc, ch->num); } envy24ht_updintr(sc, ch->dir); if (sc->run[slot] == 1) envy24ht_start(sc, ch->dir); ch->run = 1; break; case PCMTRIG_EMLDMAWR: #if(0) device_printf(sc->dev, "envy24htchan_trigger(): emldmawr\n"); #endif if (ch->run != 1) { error = -1; goto fail; } ch->emldma(ch); break; case PCMTRIG_EMLDMARD: #if(0) device_printf(sc->dev, "envy24htchan_trigger(): emldmard\n"); #endif if (ch->run != 1) { error = -1; goto fail; } ch->emldma(ch); break; case PCMTRIG_ABORT: if (ch->run) { #if(0) device_printf(sc->dev, "envy24htchan_trigger(): abort\n"); #endif ch->run = 0; sc->run[slot]--; if (ch->dir == PCMDIR_PLAY) envy24ht_mutevolume(sc, ch->num); if (sc->run[slot] == 0) { envy24ht_stop(sc, ch->dir); sc->intr[slot] = 0; } /* else if (ch->blk == sc->blk[slot]) { sc->blk[slot] = ENVY24HT_SAMPLE_NUM / 2; for (i = 0; i < ENVY24HT_CHAN_NUM; i++) { if (sc->chan[i].dir == ch->dir && sc->chan[i].run == 1 && sc->chan[i].blk < sc->blk[slot]) sc->blk[slot] = sc->chan[i].blk; } if (ch->blk != sc->blk[slot]) envy24ht_updintr(sc, ch->dir); }*/ } break; } fail: snd_mtxunlock(sc->lock); return (error); } static u_int32_t envy24htchan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t ptr, rtn; #if(0) device_printf(sc->dev, "envy24htchan_getptr()\n"); #endif snd_mtxlock(sc->lock); ptr = envy24ht_gethwptr(sc, ch->dir); rtn = ptr * ch->unit; snd_mtxunlock(sc->lock); #if(0) device_printf(sc->dev, "envy24htchan_getptr(): return %d\n", rtn); #endif return rtn; } static struct pcmchan_caps * envy24htchan_getcaps(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; struct pcmchan_caps *rtn; #if(0) device_printf(sc->dev, "envy24htchan_getcaps()\n"); #endif snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { if (sc->run[0] == 0) rtn = &envy24ht_playcaps; else rtn = &sc->caps[0]; } else { if (sc->run[1] == 0) rtn = &envy24ht_reccaps; else rtn = &sc->caps[1]; } snd_mtxunlock(sc->lock); return rtn; } static kobj_method_t envy24htchan_methods[] = { KOBJMETHOD(channel_init, envy24htchan_init), KOBJMETHOD(channel_free, envy24htchan_free), KOBJMETHOD(channel_setformat, envy24htchan_setformat), KOBJMETHOD(channel_setspeed, envy24htchan_setspeed), KOBJMETHOD(channel_setblocksize, envy24htchan_setblocksize), KOBJMETHOD(channel_trigger, envy24htchan_trigger), KOBJMETHOD(channel_getptr, envy24htchan_getptr), KOBJMETHOD(channel_getcaps, envy24htchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(envy24htchan); /* -------------------------------------------------------------------- */ /* mixer interface */ static int envy24htmixer_init(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); #if(0) device_printf(sc->dev, "envy24htmixer_init()\n"); #endif if (sc == NULL) return -1; /* set volume control rate */ snd_mtxlock(sc->lock); #if 0 envy24ht_wrmt(sc, ENVY24HT_MT_VOLRATE, 0x30, 1); /* 0x30 is default value */ #endif pcm_setflags(sc->dev, pcm_getflags(sc->dev) | SD_F_SOFTPCMVOL); mix_setdevs(m, ENVY24HT_MIX_MASK); mix_setrecdevs(m, ENVY24HT_MIX_REC_MASK); snd_mtxunlock(sc->lock); return 0; } static int envy24htmixer_reinit(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); if (sc == NULL) return -1; #if(0) device_printf(sc->dev, "envy24htmixer_reinit()\n"); #endif return 0; } static int 
envy24htmixer_uninit(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); if (sc == NULL) return -1; #if(0) device_printf(sc->dev, "envy24htmixer_uninit()\n"); #endif return 0; } static int envy24htmixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sc_info *sc = mix_getdevinfo(m); int ch = envy24ht_mixmap[dev]; int hwch; int i; if (sc == NULL) return -1; if (dev == 0 && sc->cfg->codec->setvolume == NULL) return -1; if (dev != 0 && ch == -1) return -1; hwch = envy24ht_chanmap[ch]; #if(0) device_printf(sc->dev, "envy24htmixer_set(m, %d, %d, %d)\n", dev, left, right); #endif snd_mtxlock(sc->lock); if (dev == 0) { for (i = 0; i < sc->dacn; i++) { sc->cfg->codec->setvolume(sc->dac[i], PCMDIR_PLAY, left, right); } } else { /* set volume value for hardware */ if ((sc->left[hwch] = 100 - left) > ENVY24HT_VOL_MIN) sc->left[hwch] = ENVY24HT_VOL_MUTE; if ((sc->right[hwch] = 100 - right) > ENVY24HT_VOL_MIN) sc->right[hwch] = ENVY24HT_VOL_MUTE; /* set volume for record channel and running play channel */ if (hwch > ENVY24HT_CHAN_PLAY_SPDIF || sc->chan[ch].run) envy24ht_setvolume(sc, hwch); } snd_mtxunlock(sc->lock); return right << 8 | left; } static u_int32_t envy24htmixer_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sc_info *sc = mix_getdevinfo(m); int ch = envy24ht_mixmap[src]; #if(0) device_printf(sc->dev, "envy24htmixer_setrecsrc(m, %d)\n", src); #endif if (ch > ENVY24HT_CHAN_PLAY_SPDIF) sc->src = ch; return src; } static kobj_method_t envy24htmixer_methods[] = { KOBJMETHOD(mixer_init, envy24htmixer_init), KOBJMETHOD(mixer_reinit, envy24htmixer_reinit), KOBJMETHOD(mixer_uninit, envy24htmixer_uninit), KOBJMETHOD(mixer_set, envy24htmixer_set), KOBJMETHOD(mixer_setrecsrc, envy24htmixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(envy24htmixer); /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void envy24ht_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; struct sc_chinfo *ch; u_int32_t ptr, dsize, feed; int i; #if(0) device_printf(sc->dev, "envy24ht_intr()\n"); #endif snd_mtxlock(sc->lock); if (envy24ht_checkintr(sc, PCMDIR_PLAY)) { #if(0) device_printf(sc->dev, "envy24ht_intr(): play\n"); #endif dsize = sc->psize / 4; ptr = dsize - envy24ht_rdmt(sc, ENVY24HT_MT_PCNT, 2) - 1; #if(0) device_printf(sc->dev, "envy24ht_intr(): ptr = %d-->", ptr); #endif ptr -= ptr % sc->blk[0]; feed = (ptr + dsize - sc->intr[0]) % dsize; #if(0) printf("%d intr = %d feed = %d\n", ptr, sc->intr[0], feed); #endif for (i = ENVY24HT_CHAN_PLAY_DAC1; i <= ENVY24HT_CHAN_PLAY_SPDIF; i++) { ch = &sc->chan[i]; #if(0) if (ch->run) device_printf(sc->dev, "envy24ht_intr(): chan[%d].blk = %d\n", i, ch->blk); #endif if (ch->run && ch->blk <= feed) { snd_mtxunlock(sc->lock); chn_intr(ch->channel); snd_mtxlock(sc->lock); } } sc->intr[0] = ptr; envy24ht_updintr(sc, PCMDIR_PLAY); } if (envy24ht_checkintr(sc, PCMDIR_REC)) { #if(0) device_printf(sc->dev, "envy24ht_intr(): rec\n"); #endif dsize = sc->rsize / 4; ptr = dsize - envy24ht_rdmt(sc, ENVY24HT_MT_RCNT, 2) - 1; ptr -= ptr % sc->blk[1]; feed = (ptr + dsize - sc->intr[1]) % dsize; for (i = ENVY24HT_CHAN_REC_ADC1; i <= ENVY24HT_CHAN_REC_SPDIF; i++) { ch = &sc->chan[i]; if (ch->run && ch->blk <= feed) { snd_mtxunlock(sc->lock); chn_intr(ch->channel); snd_mtxlock(sc->lock); } } sc->intr[1] = ptr; envy24ht_updintr(sc, PCMDIR_REC); } snd_mtxunlock(sc->lock); return; } /* * Probe and attach the card */ static int envy24ht_pci_probe(device_t dev) { u_int16_t sv, sd; int i; 
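/* Probe matches the Envy24HT vendor/device ID, then searches cfg_table by subsystem IDs; with no match the loop stops at the all-zero terminator entry, whose name presumably serves as the generic description. */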
#if(0) printf("envy24ht_pci_probe()\n"); #endif if (pci_get_device(dev) == PCID_ENVY24HT && pci_get_vendor(dev) == PCIV_ENVY24) { sv = pci_get_subvendor(dev); sd = pci_get_subdevice(dev); for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) { if (cfg_table[i].subvendor == sv && cfg_table[i].subdevice == sd) { break; } } device_set_desc(dev, cfg_table[i].name); #if(0) printf("envy24ht_pci_probe(): return 0\n"); #endif return 0; } else { #if(0) printf("envy24ht_pci_probe(): return ENXIO\n"); #endif return ENXIO; } } static void envy24ht_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sc_info *sc = arg; sc->paddr = segs->ds_addr; #if(0) device_printf(sc->dev, "envy24ht_dmapsetmap()\n"); if (bootverbose) { printf("envy24ht(play): setmap %lx, %lx; ", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len); } #endif envy24ht_wrmt(sc, ENVY24HT_MT_PADDR, (uint32_t)segs->ds_addr, 4); envy24ht_wrmt(sc, ENVY24HT_MT_PCNT, (uint32_t)(segs->ds_len / 4 - 1), 2); } static void envy24ht_dmarsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sc_info *sc = arg; sc->raddr = segs->ds_addr; #if(0) device_printf(sc->dev, "envy24ht_dmarsetmap()\n"); if (bootverbose) { printf("envy24ht(record): setmap %lx, %lx; ", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len); } #endif envy24ht_wrmt(sc, ENVY24HT_MT_RADDR, (uint32_t)segs->ds_addr, 4); envy24ht_wrmt(sc, ENVY24HT_MT_RCNT, (uint32_t)(segs->ds_len / 4 - 1), 2); } static void envy24ht_dmafree(struct sc_info *sc) { #if(0) device_printf(sc->dev, "envy24ht_dmafree():"); printf(" sc->raddr(0x%08x)", (u_int32_t)sc->raddr); printf(" sc->paddr(0x%08x)", (u_int32_t)sc->paddr); if (sc->rbuf) printf(" sc->rbuf(0x%08x)", (u_int32_t)sc->rbuf); else printf(" sc->rbuf(null)"); if (sc->pbuf) printf(" sc->pbuf(0x%08x)\n", (u_int32_t)sc->pbuf); else printf(" sc->pbuf(null)\n"); #endif #if(0) if (sc->raddr) bus_dmamap_unload(sc->dmat, sc->rmap); if (sc->paddr) bus_dmamap_unload(sc->dmat, sc->pmap); if (sc->rbuf) bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap); if (sc->pbuf) bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap); #else bus_dmamap_unload(sc->dmat, sc->rmap); bus_dmamap_unload(sc->dmat, sc->pmap); bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap); bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap); #endif sc->raddr = sc->paddr = 0; sc->pbuf = NULL; sc->rbuf = NULL; return; } static int envy24ht_dmainit(struct sc_info *sc) { #if(0) device_printf(sc->dev, "envy24ht_dmainit()\n"); #endif /* init values */ sc->psize = ENVY24HT_PLAY_BUFUNIT * ENVY24HT_SAMPLE_NUM; sc->rsize = ENVY24HT_REC_BUFUNIT * ENVY24HT_SAMPLE_NUM; sc->pbuf = NULL; sc->rbuf = NULL; sc->paddr = sc->raddr = 0; sc->blk[0] = sc->blk[1] = 0; /* allocate DMA buffer */ #if(0) device_printf(sc->dev, "envy24ht_dmainit(): bus_dmamem_alloc(): sc->pbuf\n"); #endif if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_NOWAIT, &sc->pmap)) goto bad; #if(0) device_printf(sc->dev, "envy24ht_dmainit(): bus_dmamem_alloc(): sc->rbuf\n"); #endif if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_NOWAIT, &sc->rmap)) goto bad; #if(0) device_printf(sc->dev, "envy24ht_dmainit(): bus_dmamem_load(): sc->pmap\n"); #endif if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->psize, envy24ht_dmapsetmap, sc, BUS_DMA_NOWAIT)) goto bad; #if(0) device_printf(sc->dev, "envy24ht_dmainit(): bus_dmamem_load(): sc->rmap\n"); #endif if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->rsize, envy24ht_dmarsetmap, sc, BUS_DMA_NOWAIT)) goto bad; bzero(sc->pbuf, 
sc->psize); bzero(sc->rbuf, sc->rsize); return 0; bad: envy24ht_dmafree(sc); return ENOSPC; } static void envy24ht_putcfg(struct sc_info *sc) { device_printf(sc->dev, "system configuration\n"); printf("  SubVendorID: 0x%04x, SubDeviceID: 0x%04x\n", sc->cfg->subvendor, sc->cfg->subdevice); printf("  XIN2 Clock Source: "); switch (sc->cfg->scfg & ENVY24HT_CCSM_SCFG_XIN2) { case 0x00: printf("24.576MHz(96kHz*256)\n"); break; case 0x40: printf("49.152MHz(192kHz*256)\n"); break; case 0x80: printf("reserved\n"); break; default: printf("illegal system setting\n"); } printf("  MPU-401 UART(s) #: "); if (sc->cfg->scfg & ENVY24HT_CCSM_SCFG_MPU) printf("1\n"); else printf("not implemented\n"); switch (sc->adcn) { case 0x01: case 0x02: printf("  ADC #: "); printf("%d\n", sc->adcn); break; case 0x03: printf("  ADC #: "); printf("%d", 1); printf(" and SPDIF receiver connected\n"); break; default: printf("  no physical inputs\n"); } printf("  DAC #: "); printf("%d\n", sc->dacn); printf("  Multi-track converter type: "); if ((sc->cfg->acl & ENVY24HT_CCSM_ACL_MTC) == 0) { printf("AC'97(SDATA_OUT:"); if (sc->cfg->acl & ENVY24HT_CCSM_ACL_OMODE) printf("packed"); else printf("split"); printf(")\n"); } else { printf("I2S("); if (sc->cfg->i2s & ENVY24HT_CCSM_I2S_VOL) printf("with volume, "); if (sc->cfg->i2s & ENVY24HT_CCSM_I2S_192KHZ) printf("192KHz support, "); else if (sc->cfg->i2s & ENVY24HT_CCSM_I2S_96KHZ) printf("96KHz support, "); else printf("48KHz support, "); switch (sc->cfg->i2s & ENVY24HT_CCSM_I2S_RES) { case ENVY24HT_CCSM_I2S_16BIT: printf("16bit resolution, "); break; case ENVY24HT_CCSM_I2S_18BIT: printf("18bit resolution, "); break; case ENVY24HT_CCSM_I2S_20BIT: printf("20bit resolution, "); break; case ENVY24HT_CCSM_I2S_24BIT: printf("24bit resolution, "); break; } printf("ID#0x%x)\n", sc->cfg->i2s & ENVY24HT_CCSM_I2S_ID); } printf("  S/PDIF(IN/OUT): "); if (sc->cfg->spdif & ENVY24HT_CCSM_SPDIF_IN) printf("1/"); else printf("0/"); if (sc->cfg->spdif & ENVY24HT_CCSM_SPDIF_OUT) printf("1 "); else printf("0 "); if (sc->cfg->spdif & (ENVY24HT_CCSM_SPDIF_IN | ENVY24HT_CCSM_SPDIF_OUT)) printf("ID# 0x%02x\n", (sc->cfg->spdif & ENVY24HT_CCSM_SPDIF_ID) >> 2); printf("  GPIO(mask/dir/state): 0x%02x/0x%02x/0x%02x\n", sc->cfg->gpiomask, sc->cfg->gpiodir, sc->cfg->gpiostate); } static int envy24ht_init(struct sc_info *sc) { u_int32_t data; #if(0) int rtn; #endif int i; u_int32_t sv, sd; #if(0) device_printf(sc->dev, "envy24ht_init()\n"); #endif /* reset chip */ #if 0 envy24ht_wrcs(sc, ENVY24HT_CCS_CTL, ENVY24HT_CCS_CTL_RESET, 1); DELAY(200); envy24ht_wrcs(sc, ENVY24HT_CCS_CTL, ENVY24HT_CCS_CTL_NATIVE, 1); DELAY(200); /* legacy hardware disable */ data = pci_read_config(sc->dev, PCIR_LAC, 2); data |= PCIM_LAC_DISABLE; pci_write_config(sc->dev, PCIR_LAC, data, 2); #endif /* check system configuration */ sc->cfg = NULL; for (i = 0; cfg_table[i].subvendor != 0 || cfg_table[i].subdevice != 0; i++) { /* 1st: search configuration from table */ sv = pci_get_subvendor(sc->dev); sd = pci_get_subdevice(sc->dev); if (sv == cfg_table[i].subvendor && sd == cfg_table[i].subdevice) { #if(0) device_printf(sc->dev, "Set configuration from table\n"); #endif sc->cfg = &cfg_table[i]; break; } } if (sc->cfg == NULL) { /* 2nd: read configuration from ROM */ sc->cfg = envy24ht_rom2cfg(sc); } sc->adcn = ((sc->cfg->scfg & ENVY24HT_CCSM_SCFG_ADC) >> 2) + 1; /* needs to be fixed */ sc->dacn = (sc->cfg->scfg & ENVY24HT_CCSM_SCFG_DAC) + 1; if (1 /* bootverbose */) { envy24ht_putcfg(sc); } /* set system configuration */ envy24ht_wrcs(sc,
ENVY24HT_CCS_SCFG, sc->cfg->scfg, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_ACL, sc->cfg->acl, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_I2S, sc->cfg->i2s, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_SPDIF, sc->cfg->spdif, 1); envy24ht_gpiosetmask(sc, sc->cfg->gpiomask); envy24ht_gpiosetdir(sc, sc->cfg->gpiodir); envy24ht_gpiowr(sc, sc->cfg->gpiostate); if ((sc->cfg->subvendor == 0x3031) && (sc->cfg->subdevice == 0x4553)) { envy24ht_wri2c(sc, 0x22, 0x00, 0x07); envy24ht_wri2c(sc, 0x22, 0x04, 0x5f | 0x80); envy24ht_wri2c(sc, 0x22, 0x05, 0x5f | 0x80); } for (i = 0; i < sc->adcn; i++) { sc->adc[i] = sc->cfg->codec->create(sc->dev, sc, PCMDIR_REC, i); sc->cfg->codec->init(sc->adc[i]); } for (i = 0; i < sc->dacn; i++) { sc->dac[i] = sc->cfg->codec->create(sc->dev, sc, PCMDIR_PLAY, i); sc->cfg->codec->init(sc->dac[i]); } /* initialize DMA buffer */ #if(0) device_printf(sc->dev, "envy24ht_init(): initialize DMA buffer\n"); #endif if (envy24ht_dmainit(sc)) return ENOSPC; /* initialize status */ sc->run[0] = sc->run[1] = 0; sc->intr[0] = sc->intr[1] = 0; sc->speed = 0; sc->caps[0].fmtlist = envy24ht_playfmt; sc->caps[1].fmtlist = envy24ht_recfmt; /* set channel router */ #if 0 envy24ht_route(sc, ENVY24HT_ROUTE_DAC_1, ENVY24HT_ROUTE_CLASS_MIX, 0, 0); envy24ht_route(sc, ENVY24HT_ROUTE_DAC_SPDIF, ENVY24HT_ROUTE_CLASS_DMA, 0, 0); envy24ht_route(sc, ENVY24HT_ROUTE_DAC_SPDIF, ENVY24HT_ROUTE_CLASS_MIX, 0, 0); #endif /* set macro interrupt mask */ data = envy24ht_rdcs(sc, ENVY24HT_CCS_IMASK, 1); envy24ht_wrcs(sc, ENVY24HT_CCS_IMASK, data & ~ENVY24HT_CCS_IMASK_PMT, 1); data = envy24ht_rdcs(sc, ENVY24HT_CCS_IMASK, 1); #if(0) device_printf(sc->dev, "envy24ht_init(): CCS_IMASK-->0x%02x\n", data); #endif return 0; } static int envy24ht_alloc_resource(struct sc_info *sc) { /* allocate I/O port resource */ sc->csid = PCIR_CCS; - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->csid, RF_ACTIVE); sc->mtid = ENVY24HT_PCIR_MT; - sc->mt = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->mtid, 0, ~0, 1, RF_ACTIVE); + sc->mt = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->mtid, RF_ACTIVE); if (!sc->cs || !sc->mt) { device_printf(sc->dev, "unable to map IO port space\n"); return ENXIO; } sc->cst = rman_get_bustag(sc->cs); sc->csh = rman_get_bushandle(sc->cs); sc->mtt = rman_get_bustag(sc->mt); sc->mth = rman_get_bushandle(sc->mt); #if(0) device_printf(sc->dev, "IO port register values\nCCS: 0x%lx\nMT: 0x%lx\n", pci_read_config(sc->dev, PCIR_CCS, 4), pci_read_config(sc->dev, PCIR_MT, 4)); #endif /* allocate interrupt resource */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(sc->dev, sc->irq, INTR_MPSAFE, envy24ht_intr, sc, &sc->ih)) { device_printf(sc->dev, "unable to map interrupt\n"); return ENXIO; } /* allocate DMA resource */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev), /*alignment*/4, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_ENVY24, /*nsegments*/1, /*maxsegsz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &sc->dmat) != 0) { device_printf(sc->dev, "unable to create dma tag\n"); return ENXIO; } return 0; } static int envy24ht_pci_attach(device_t dev) { struct sc_info *sc; char status[SND_STATUSLEN]; int 
err = 0; int i; #if(0) device_printf(dev, "envy24ht_pci_attach()\n"); #endif /* get sc_info data area */ if ((sc = malloc(sizeof(*sc), M_ENVY24HT, M_NOWAIT)) == NULL) { device_printf(dev, "cannot allocate softc\n"); return ENXIO; } bzero(sc, sizeof(*sc)); sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_envy24ht softc"); sc->dev = dev; /* initialize PCI interface */ pci_enable_busmaster(dev); /* allocate resources */ err = envy24ht_alloc_resource(sc); if (err) { device_printf(dev, "unable to allocate system resources\n"); goto bad; } /* initialize card */ err = envy24ht_init(sc); if (err) { device_printf(dev, "unable to initialize the card\n"); goto bad; } /* set multi-track mixer */ mixer_init(dev, &envy24htmixer_class, sc); /* set channel information */ /* err = pcm_register(dev, sc, 5, 2 + sc->adcn); */ err = pcm_register(dev, sc, 1, 2 + sc->adcn); if (err) goto bad; sc->chnum = 0; /* for (i = 0; i < 5; i++) { */ pcm_addchan(dev, PCMDIR_PLAY, &envy24htchan_class, sc); sc->chnum++; /* } */ for (i = 0; i < 2 + sc->adcn; i++) { pcm_addchan(dev, PCMDIR_REC, &envy24htchan_class, sc); sc->chnum++; } /* set status information */ snprintf(status, SND_STATUSLEN, "at io 0x%lx:%ld,0x%lx:%ld irq %ld", rman_get_start(sc->cs), rman_get_end(sc->cs) - rman_get_start(sc->cs) + 1, rman_get_start(sc->mt), rman_get_end(sc->mt) - rman_get_start(sc->mt) + 1, rman_get_start(sc->irq)); pcm_setstatus(dev, status); return 0; bad: if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); envy24ht_dmafree(sc); if (sc->dmat) bus_dma_tag_destroy(sc->dmat); if (sc->cfg->codec->destroy != NULL) { for (i = 0; i < sc->adcn; i++) sc->cfg->codec->destroy(sc->adc[i]); for (i = 0; i < sc->dacn; i++) sc->cfg->codec->destroy(sc->dac[i]); } envy24ht_cfgfree(sc->cfg); if (sc->cs) bus_release_resource(dev, SYS_RES_IOPORT, sc->csid, sc->cs); if (sc->mt) bus_release_resource(dev, SYS_RES_IOPORT, sc->mtid, sc->mt); if (sc->lock) snd_mtxfree(sc->lock); free(sc, M_ENVY24HT); return err; } static int envy24ht_pci_detach(device_t dev) { struct sc_info *sc; int r; int i; #if(0) device_printf(dev, "envy24ht_pci_detach()\n"); #endif sc = pcm_getdevinfo(dev); if (sc == NULL) return 0; r = pcm_unregister(dev); if (r) return r; envy24ht_dmafree(sc); if (sc->cfg->codec->destroy != NULL) { for (i = 0; i < sc->adcn; i++) sc->cfg->codec->destroy(sc->adc[i]); for (i = 0; i < sc->dacn; i++) sc->cfg->codec->destroy(sc->dac[i]); } envy24ht_cfgfree(sc->cfg); bus_dma_tag_destroy(sc->dmat); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); bus_release_resource(dev, SYS_RES_IOPORT, sc->csid, sc->cs); bus_release_resource(dev, SYS_RES_IOPORT, sc->mtid, sc->mt); snd_mtxfree(sc->lock); free(sc, M_ENVY24HT); return 0; } static device_method_t envy24ht_methods[] = { /* Device interface */ DEVMETHOD(device_probe, envy24ht_pci_probe), DEVMETHOD(device_attach, envy24ht_pci_attach), DEVMETHOD(device_detach, envy24ht_pci_detach), { 0, 0 } }; static driver_t envy24ht_driver = { "pcm", envy24ht_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_envy24ht, pci, envy24ht_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_envy24ht, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_envy24ht, snd_spicds, 1, 1, 1); MODULE_VERSION(snd_envy24ht, 1); Index: head/sys/dev/sound/pci/hdspe.c =================================================================== --- head/sys/dev/sound/pci/hdspe.c (revision 295789) +++ head/sys/dev/sound/pci/hdspe.c
(revision 295790) @@ -1,397 +1,397 @@ /*- * Copyright (c) 2012 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * RME HDSPe driver for FreeBSD. * Supported cards: AIO, RayDAT. */ #include #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); static struct hdspe_channel chan_map_aio[] = { { 0, 1, "line", 1, 1 }, { 6, 7, "phone", 1, 0 }, { 8, 9, "aes", 1, 1 }, { 10, 11, "s/pdif", 1, 1 }, { 12, 16, "adat", 1, 1 }, /* Single or double speed. */ { 14, 18, "adat", 1, 1 }, /* Single speed only. */ { 13, 15, "adat", 1, 1 }, { 17, 19, "adat", 1, 1 }, { 0, 0, NULL, 0, 0 }, }; static struct hdspe_channel chan_map_rd[] = { { 0, 1, "aes", 1, 1 }, { 2, 3, "s/pdif", 1, 1 }, { 4, 5, "adat", 1, 1 }, { 6, 7, "adat", 1, 1 }, { 8, 9, "adat", 1, 1 }, { 10, 11, "adat", 1, 1 }, /* Single or double speed. */ { 12, 13, "adat", 1, 1 }, { 14, 15, "adat", 1, 1 }, { 16, 17, "adat", 1, 1 }, { 18, 19, "adat", 1, 1 }, /* Single speed only. */ { 20, 21, "adat", 1, 1 }, { 22, 23, "adat", 1, 1 }, { 24, 25, "adat", 1, 1 }, { 26, 27, "adat", 1, 1 }, { 28, 29, "adat", 1, 1 }, { 30, 31, "adat", 1, 1 }, { 32, 33, "adat", 1, 1 }, { 34, 35, "adat", 1, 1 }, { 0, 0, NULL, 0, 0 }, }; static void hdspe_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; struct sc_pcminfo *scp; device_t *devlist; int devcount, status; int i, err; snd_mtxlock(sc->lock); status = hdspe_read_1(sc, HDSPE_STATUS_REG); if (status & HDSPE_AUDIO_IRQ_PENDING) { if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) return; for (i = 0; i < devcount; i++) { scp = device_get_ivars(devlist[i]); if (scp->ih != NULL) scp->ih(scp); } hdspe_write_1(sc, HDSPE_INTERRUPT_ACK, 0); free(devlist, M_TEMP); } snd_mtxunlock(sc->lock); } static void hdspe_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { #if 0 struct sc_info *sc = (struct sc_info *)arg; device_printf(sc->dev, "hdspe_dmapsetmap()\n"); #endif } static int hdspe_alloc_resources(struct sc_info *sc) { /* Allocate resource. 
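The conversion in this hunk works because bus_alloc_resource_any(dev, type, rid, flags) is defined in sys/bus.h as bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags): for a BAR or an IRQ whose location the bus already knows, the fully wildcarded (start, end, count) triple carries no information, so the shorthand is exactly equivalent.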
*/ sc->csid = PCIR_BAR(0); - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, + &sc->csid, RF_ACTIVE); if (!sc->cs) { device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n"); return (ENXIO); } sc->cst = rman_get_bustag(sc->cs); sc->csh = rman_get_bushandle(sc->cs); /* Allocate interrupt resource. */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV, NULL, hdspe_intr, sc, &sc->ih)) { device_printf(sc->dev, "Unable to alloc interrupt resource.\n"); return (ENXIO); } /* Allocate DMA resources. */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev), /*alignment*/4, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/2 * HDSPE_DMASEGSIZE, /*nsegments*/2, /*maxsegsz*/HDSPE_DMASEGSIZE, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, /*dmatag*/&sc->dmat) != 0) { device_printf(sc->dev, "Unable to create dma tag.\n"); return (ENXIO); } sc->bufsize = HDSPE_DMASEGSIZE; /* pbuf (play buffer). */ if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_NOWAIT, &sc->pmap)) { device_printf(sc->dev, "Can't alloc pbuf.\n"); return (ENXIO); } if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->bufsize, hdspe_dmapsetmap, sc, 0)) { device_printf(sc->dev, "Can't load pbuf.\n"); return (ENXIO); } /* rbuf (rec buffer). */ if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_NOWAIT, &sc->rmap)) { device_printf(sc->dev, "Can't alloc rbuf.\n"); return (ENXIO); } if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->bufsize, hdspe_dmapsetmap, sc, 0)) { device_printf(sc->dev, "Can't load rbuf.\n"); return (ENXIO); } bzero(sc->pbuf, sc->bufsize); bzero(sc->rbuf, sc->bufsize); return (0); } static void hdspe_map_dmabuf(struct sc_info *sc) { uint32_t paddr,raddr; int i; paddr = vtophys(sc->pbuf); raddr = vtophys(sc->rbuf); for (i = 0; i < HDSPE_MAX_SLOTS * 16; i++) { hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_OUT + 4 * i, paddr + i * 4096); hdspe_write_4(sc, HDSPE_PAGE_ADDR_BUF_IN + 4 * i, raddr + i * 4096); } } static int hdspe_probe(device_t dev) { uint32_t rev; if (pci_get_vendor(dev) == PCI_VENDOR_XILINX && pci_get_device(dev) == PCI_DEVICE_XILINX_HDSPE) { rev = pci_get_revid(dev); switch (rev) { case PCI_REVISION_AIO: device_set_desc(dev, "RME HDSPe AIO"); return 0; case PCI_REVISION_RAYDAT: device_set_desc(dev, "RME HDSPe RayDAT"); return 0; } } return (ENXIO); } static int hdspe_init(struct sc_info *sc) { long long period; /* Set defaults. */ sc->ctrl_register |= HDSPM_CLOCK_MODE_MASTER; /* Set latency. */ sc->period = 32; sc->ctrl_register = hdspe_encode_latency(7); /* Set rate. */ sc->speed = HDSPE_SPEED_DEFAULT; sc->ctrl_register &= ~HDSPE_FREQ_MASK; sc->ctrl_register |= HDSPE_FREQ_MASK_DEFAULT; hdspe_write_4(sc, HDSPE_CONTROL_REG, sc->ctrl_register); switch (sc->type) { case RAYDAT: case AIO: period = HDSPE_FREQ_AIO; break; default: return (ENXIO); } /* Set DDS value. */ period /= sc->speed; hdspe_write_4(sc, HDSPE_FREQ_REG, period); /* Other settings. 
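Only the settings register is touched here; it is simply cleared to zero and written out to the card.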
*/ sc->settings_register = 0; hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register); return 0; } static int hdspe_attach(device_t dev) { struct sc_info *sc; struct sc_pcminfo *scp; struct hdspe_channel *chan_map; uint32_t rev; int i, err; #if 0 device_printf(dev, "hdspe_attach()\n"); #endif sc = device_get_softc(dev); sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_hdspe softc"); sc->dev = dev; pci_enable_busmaster(dev); rev = pci_get_revid(dev); switch (rev) { case PCI_REVISION_AIO: sc->type = AIO; chan_map = chan_map_aio; break; case PCI_REVISION_RAYDAT: sc->type = RAYDAT; chan_map = chan_map_rd; break; default: return ENXIO; } /* Allocate resources. */ err = hdspe_alloc_resources(sc); if (err) { device_printf(dev, "Unable to allocate system resources.\n"); return ENXIO; } if (hdspe_init(sc) != 0) return ENXIO; for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) { scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); scp->hc = &chan_map[i]; scp->sc = sc; scp->dev = device_add_child(dev, "pcm", -1); device_set_ivars(scp->dev, scp); } hdspe_map_dmabuf(sc); return (bus_generic_attach(dev)); } static void hdspe_dmafree(struct sc_info *sc) { bus_dmamap_unload(sc->dmat, sc->rmap); bus_dmamap_unload(sc->dmat, sc->pmap); bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap); bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap); sc->rbuf = sc->pbuf = NULL; } static int hdspe_detach(device_t dev) { struct sc_info *sc; int err; sc = device_get_softc(dev); if (sc == NULL) { device_printf(dev,"Can't detach: softc is null.\n"); return 0; } err = device_delete_children(dev); if (err) return (err); hdspe_dmafree(sc); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->dmat) bus_dma_tag_destroy(sc->dmat); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); if (sc->cs) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->cs); if (sc->lock) snd_mtxfree(sc->lock); return 0; } static device_method_t hdspe_methods[] = { DEVMETHOD(device_probe, hdspe_probe), DEVMETHOD(device_attach, hdspe_attach), DEVMETHOD(device_detach, hdspe_detach), { 0, 0 } }; static driver_t hdspe_driver = { "hdspe", hdspe_methods, PCM_SOFTC_SIZE, }; static devclass_t hdspe_devclass; DRIVER_MODULE(snd_hdspe, pci, hdspe_driver, hdspe_devclass, 0, 0); Index: head/sys/dev/sound/pci/vibes.c =================================================================== --- head/sys/dev/sound/pci/vibes.c (revision 295789) +++ head/sys/dev/sound/pci/vibes.c (revision 295790) @@ -1,945 +1,945 @@ /*- * Copyright (c) 2001 Orion Hodson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This card has the annoying habit of "clicking" when attached and * detached, haven't been able to remedy this with any combination of * muting. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); /* ------------------------------------------------------------------------- */ /* Constants */ #define SV_PCI_ID 0xca005333 #define SV_DEFAULT_BUFSZ 16384 #define SV_MIN_BLKSZ 128 #define SV_INTR_PER_BUFFER 2 #ifndef DEB #define DEB(x) /* (x) */ #endif /* ------------------------------------------------------------------------- */ /* Structures */ struct sc_info; struct sc_chinfo { struct sc_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t fmt, spd; int dir; int dma_active, dma_was_active; }; struct sc_info { device_t dev; /* DMA buffer allocator */ bus_dma_tag_t parent_dmat; /* Enhanced register resources */ struct resource *enh_reg; bus_space_tag_t enh_st; bus_space_handle_t enh_sh; int enh_type; int enh_rid; /* DMA configuration */ struct resource *dmaa_reg, *dmac_reg; bus_space_tag_t dmaa_st, dmac_st; bus_space_handle_t dmaa_sh, dmac_sh; int dmaa_type, dmac_type; int dmaa_rid, dmac_rid; /* Interrupt resources */ struct resource *irq; int irqid; void *ih; /* User configurable buffer size */ unsigned int bufsz; struct sc_chinfo rch, pch; u_int8_t rev; }; static u_int32_t sc_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps sc_caps = {8000, 48000, sc_fmt, 0}; /* ------------------------------------------------------------------------- */ /* Register Manipulations */ #define sv_direct_set(x, y, z) _sv_direct_set(x, y, z, __LINE__) static u_int8_t sv_direct_get(struct sc_info *sc, u_int8_t reg) { return bus_space_read_1(sc->enh_st, sc->enh_sh, reg); } static void _sv_direct_set(struct sc_info *sc, u_int8_t reg, u_int8_t val, int line) { u_int8_t n; bus_space_write_1(sc->enh_st, sc->enh_sh, reg, val); n = sv_direct_get(sc, reg); if (n != val) { device_printf(sc->dev, "sv_direct_set register 0x%02x %d != %d from line %d\n", reg, n, val, line); } } static u_int8_t sv_indirect_get(struct sc_info *sc, u_int8_t reg) { if (reg == SV_REG_FORMAT || reg == SV_REG_ANALOG_PWR) reg |= SV_CM_INDEX_MCE; bus_space_write_1(sc->enh_st, sc->enh_sh, SV_CM_INDEX, reg); return bus_space_read_1(sc->enh_st, sc->enh_sh, SV_CM_DATA); } #define sv_indirect_set(x, y, z) _sv_indirect_set(x, y, z, __LINE__) static void _sv_indirect_set(struct sc_info *sc, u_int8_t reg, u_int8_t val, int line) { if (reg == SV_REG_FORMAT || reg == SV_REG_ANALOG_PWR) reg |= SV_CM_INDEX_MCE; bus_space_write_1(sc->enh_st, sc->enh_sh, SV_CM_INDEX, reg); bus_space_write_1(sc->enh_st, sc->enh_sh, SV_CM_DATA, val); reg &= ~SV_CM_INDEX_MCE; if (reg != SV_REG_ADC_PLLM) { u_int8_t n; n = sv_indirect_get(sc, reg); if (n != val) { device_printf(sc->dev, "sv_indirect_set register 0x%02x %d != %d line %d\n", 
reg, n, val, line); } } } static void sv_dma_set_config(bus_space_tag_t st, bus_space_handle_t sh, u_int32_t base, u_int32_t count, u_int8_t mode) { bus_space_write_4(st, sh, SV_DMA_ADDR, base); bus_space_write_4(st, sh, SV_DMA_COUNT, count & 0xffffff); bus_space_write_1(st, sh, SV_DMA_MODE, mode); DEB(printf("base 0x%08x count %5d mode 0x%02x\n", base, count, mode)); } static u_int32_t sv_dma_get_count(bus_space_tag_t st, bus_space_handle_t sh) { return bus_space_read_4(st, sh, SV_DMA_COUNT) & 0xffffff; } /* ------------------------------------------------------------------------- */ /* Play / Record Common Interface */ static void * svchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_chinfo *ch; ch = (dir == PCMDIR_PLAY) ? &sc->pch : &sc->rch; ch->parent = sc; ch->channel = c; ch->dir = dir; if (sndbuf_alloc(b, sc->parent_dmat, 0, sc->bufsz) != 0) { DEB(printf("svchan_init failed\n")); return NULL; } ch->buffer = b; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = DSP_DEFAULT_SPEED; ch->dma_active = ch->dma_was_active = 0; return ch; } static struct pcmchan_caps * svchan_getcaps(kobj_t obj, void *data) { return &sc_caps; } static u_int32_t svchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; /* user has requested interrupts every blocksize bytes */ RANGE(blocksize, SV_MIN_BLKSZ, sc->bufsz / SV_INTR_PER_BUFFER); sndbuf_resize(ch->buffer, SV_INTR_PER_BUFFER, blocksize); DEB(printf("svchan_setblocksize: %d\n", blocksize)); return blocksize; } static int svchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; /* NB Just note format here as setting format register * generates noise if dma channel is inactive. */ ch->fmt = (AFMT_CHANNEL(format) > 1) ? SV_AFMT_STEREO : SV_AFMT_MONO; ch->fmt |= (format & AFMT_16BIT) ? SV_AFMT_S16 : SV_AFMT_U8; return 0; } static u_int32_t svchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data; RANGE(speed, 8000, 48000); ch->spd = speed; return speed; } /* ------------------------------------------------------------------------- */ /* Recording interface */ static int sv_set_recspeed(struct sc_info *sc, u_int32_t speed) { u_int32_t f_out, f_actual; u_int32_t rs, re, r, best_r = 0, r2, t, n, best_n = 0; int32_t m, best_m = 0, ms, me, err, min_err; /* This algorithm is a variant described in sonicvibes.pdf * appendix A. This search is marginally more extensive and * results in (nominally) better sample rate matching. */ f_out = SV_F_SCALE * speed; min_err = 0x7fffffff; /* Find bounds of r to examine, rs <= r <= re */ t = 80000000 / f_out; for (rs = 1; (1 << rs) < t; rs++); t = 150000000 / f_out; for (re = 1; (2 << re) < t; re++); if (re > 7) re = 7; /* Search over r, n, m */ for (r = rs; r <= re; r++) { r2 = (1 << r); for (n = 3; n < 34; n++) { m = f_out * n / (SV_F_REF / r2); ms = (m > 3) ? (m - 1) : 3; me = (m < 129) ? 
(m + 1) : 129; for (m = ms; m <= me; m++) { f_actual = m * SV_F_REF / (n * r2); if (f_actual > f_out) { err = f_actual - f_out; } else { err = f_out - f_actual; } if (err < min_err) { best_r = r; best_m = m - 2; best_n = n - 2; min_err = err; if (err == 0) break; } } } } sv_indirect_set(sc, SV_REG_ADC_PLLM, best_m); sv_indirect_set(sc, SV_REG_ADC_PLLN, SV_ADC_PLLN(best_n) | SV_ADC_PLLR(best_r)); DEB(printf("svrchan_setspeed: %d -> PLLM 0x%02x PLLNR 0x%08x\n", speed, sv_indirect_get(sc, SV_REG_ADC_PLLM), sv_indirect_get(sc, SV_REG_ADC_PLLN))); return 0; } static int svrchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t count, enable; u_int8_t v; switch(go) { case PCMTRIG_START: /* Set speed */ sv_set_recspeed(sc, ch->spd); /* Set format */ v = sv_indirect_get(sc, SV_REG_FORMAT) & ~SV_AFMT_DMAC_MSK; v |= SV_AFMT_DMAC(ch->fmt); sv_indirect_set(sc, SV_REG_FORMAT, v); /* Program DMA */ count = sndbuf_getsize(ch->buffer) / 2; /* DMAC uses words */ sv_dma_set_config(sc->dmac_st, sc->dmac_sh, sndbuf_getbufaddr(ch->buffer), count - 1, SV_DMA_MODE_AUTO | SV_DMA_MODE_RD); count = count / SV_INTR_PER_BUFFER - 1; sv_indirect_set(sc, SV_REG_DMAC_COUNT_HI, count >> 8); sv_indirect_set(sc, SV_REG_DMAC_COUNT_LO, count & 0xff); /* Enable DMA */ enable = sv_indirect_get(sc, SV_REG_ENABLE) | SV_RECORD_ENABLE; sv_indirect_set(sc, SV_REG_ENABLE, enable); ch->dma_active = 1; break; case PCMTRIG_STOP: case PCMTRIG_ABORT: enable = sv_indirect_get(sc, SV_REG_ENABLE) & ~SV_RECORD_ENABLE; sv_indirect_set(sc, SV_REG_ENABLE, enable); ch->dma_active = 0; break; } return 0; } static u_int32_t svrchan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t sz, remain; sz = sndbuf_getsize(ch->buffer); /* DMAC uses words */ remain = (sv_dma_get_count(sc->dmac_st, sc->dmac_sh) + 1) * 2; return sz - remain; } static kobj_method_t svrchan_methods[] = { KOBJMETHOD(channel_init, svchan_init), KOBJMETHOD(channel_setformat, svchan_setformat), KOBJMETHOD(channel_setspeed, svchan_setspeed), KOBJMETHOD(channel_setblocksize, svchan_setblocksize), KOBJMETHOD(channel_trigger, svrchan_trigger), KOBJMETHOD(channel_getptr, svrchan_getptr), KOBJMETHOD(channel_getcaps, svchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(svrchan); /* ------------------------------------------------------------------------- */ /* Playback interface */ static int svpchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t count, enable, speed; u_int8_t v; switch(go) { case PCMTRIG_START: /* Set speed */ speed = (ch->spd * 65536) / 48000; if (speed > 65535) speed = 65535; sv_indirect_set(sc, SV_REG_PCM_SAMPLING_HI, speed >> 8); sv_indirect_set(sc, SV_REG_PCM_SAMPLING_LO, speed & 0xff); /* Set format */ v = sv_indirect_get(sc, SV_REG_FORMAT) & ~SV_AFMT_DMAA_MSK; v |= SV_AFMT_DMAA(ch->fmt); sv_indirect_set(sc, SV_REG_FORMAT, v); /* Program DMA */ count = sndbuf_getsize(ch->buffer); sv_dma_set_config(sc->dmaa_st, sc->dmaa_sh, sndbuf_getbufaddr(ch->buffer), count - 1, SV_DMA_MODE_AUTO | SV_DMA_MODE_WR); count = count / SV_INTR_PER_BUFFER - 1; sv_indirect_set(sc, SV_REG_DMAA_COUNT_HI, count >> 8); sv_indirect_set(sc, SV_REG_DMAA_COUNT_LO, count & 0xff); /* Enable DMA */ enable = sv_indirect_get(sc, SV_REG_ENABLE); enable = (enable | SV_PLAY_ENABLE) & ~SV_PLAYBACK_PAUSE; sv_indirect_set(sc, SV_REG_ENABLE, enable); ch->dma_active = 1; break; case PCMTRIG_STOP: case PCMTRIG_ABORT: enable = 
sv_indirect_get(sc, SV_REG_ENABLE) & ~SV_PLAY_ENABLE; sv_indirect_set(sc, SV_REG_ENABLE, enable); ch->dma_active = 0; break; } return 0; } static u_int32_t svpchan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t sz, remain; sz = sndbuf_getsize(ch->buffer); /* DMAA uses bytes */ remain = sv_dma_get_count(sc->dmaa_st, sc->dmaa_sh) + 1; return (sz - remain); } static kobj_method_t svpchan_methods[] = { KOBJMETHOD(channel_init, svchan_init), KOBJMETHOD(channel_setformat, svchan_setformat), KOBJMETHOD(channel_setspeed, svchan_setspeed), KOBJMETHOD(channel_setblocksize, svchan_setblocksize), KOBJMETHOD(channel_trigger, svpchan_trigger), KOBJMETHOD(channel_getptr, svpchan_getptr), KOBJMETHOD(channel_getcaps, svchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(svpchan); /* ------------------------------------------------------------------------- */ /* Mixer support */ struct sv_mix_props { u_int8_t reg; /* Register */ u_int8_t stereo:1; /* Supports 2 channels */ u_int8_t mute:1; /* Supports muting */ u_int8_t neg:1; /* Negative gain */ u_int8_t max; /* Max gain */ u_int8_t iselect; /* Input selector */ } static const mt [SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_LINE1] = {SV_REG_AUX1, 1, 1, 1, SV_DEFAULT_MAX, SV_INPUT_AUX1}, [SOUND_MIXER_CD] = {SV_REG_CD, 1, 1, 1, SV_DEFAULT_MAX, SV_INPUT_CD}, [SOUND_MIXER_LINE] = {SV_REG_LINE, 1, 1, 1, SV_DEFAULT_MAX, SV_INPUT_LINE}, [SOUND_MIXER_MIC] = {SV_REG_MIC, 0, 1, 1, SV_MIC_MAX, SV_INPUT_MIC}, [SOUND_MIXER_SYNTH] = {SV_REG_SYNTH, 0, 1, 1, SV_DEFAULT_MAX, 0}, [SOUND_MIXER_LINE2] = {SV_REG_AUX2, 1, 1, 1, SV_DEFAULT_MAX, SV_INPUT_AUX2}, [SOUND_MIXER_VOLUME] = {SV_REG_MIX, 1, 1, 1, SV_DEFAULT_MAX, 0}, [SOUND_MIXER_PCM] = {SV_REG_PCM, 1, 1, 1, SV_PCM_MAX, 0}, [SOUND_MIXER_RECLEV] = {SV_REG_ADC_INPUT, 1, 0, 0, SV_ADC_MAX, 0}, }; static void sv_channel_gain(struct sc_info *sc, u_int32_t dev, u_int32_t gain, u_int32_t channel) { u_int8_t v; int32_t g; g = mt[dev].max * gain / 100; if (mt[dev].neg) g = mt[dev].max - g; v = sv_indirect_get(sc, mt[dev].reg + channel) & ~mt[dev].max; v |= g; if (mt[dev].mute) { if (gain == 0) { v |= SV_MUTE; } else { v &= ~SV_MUTE; } } sv_indirect_set(sc, mt[dev].reg + channel, v); } static int sv_gain(struct sc_info *sc, u_int32_t dev, u_int32_t left, u_int32_t right) { sv_channel_gain(sc, dev, left, 0); if (mt[dev].stereo) sv_channel_gain(sc, dev, right, 1); return 0; } static void sv_mix_mute_all(struct sc_info *sc) { int32_t i; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (mt[i].reg) sv_gain(sc, i, 0, 0); } } static int sv_mix_init(struct snd_mixer *m) { u_int32_t i, v; for(i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (mt[i].max) v |= (1 << i); } mix_setdevs(m, v); for(i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (mt[i].iselect) v |= (1 << i); } mix_setrecdevs(m, v); return 0; } static int sv_mix_set(struct snd_mixer *m, u_int32_t dev, u_int32_t left, u_int32_t right) { struct sc_info *sc = mix_getdevinfo(m); return sv_gain(sc, dev, left, right); } static u_int32_t sv_mix_setrecsrc(struct snd_mixer *m, u_int32_t mask) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t i, v; v = sv_indirect_get(sc, SV_REG_ADC_INPUT) & SV_INPUT_GAIN_MASK; for(i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if ((1 << i) & mask) { v |= mt[i].iselect; } } DEB(printf("sv_mix_setrecsrc: mask 0x%08x adc_input 0x%02x\n", mask, v)); sv_indirect_set(sc, SV_REG_ADC_INPUT, v); return mask; } static kobj_method_t sv_mixer_methods[] = { KOBJMETHOD(mixer_init, sv_mix_init), KOBJMETHOD(mixer_set, 
sv_mix_set), KOBJMETHOD(mixer_setrecsrc, sv_mix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(sv_mixer); /* ------------------------------------------------------------------------- */ /* Power management and reset */ static void sv_power(struct sc_info *sc, int state) { u_int8_t v; switch (state) { case 0: /* power on */ v = sv_indirect_get(sc, SV_REG_ANALOG_PWR) &~ SV_ANALOG_OFF; v |= SV_ANALOG_OFF_SRS | SV_ANALOG_OFF_SPLL; sv_indirect_set(sc, SV_REG_ANALOG_PWR, v); v = sv_indirect_get(sc, SV_REG_DIGITAL_PWR) &~ SV_DIGITAL_OFF; v |= SV_DIGITAL_OFF_SYN | SV_DIGITAL_OFF_MU | SV_DIGITAL_OFF_GP; sv_indirect_set(sc, SV_REG_DIGITAL_PWR, v); break; default: /* power off */ v = sv_indirect_get(sc, SV_REG_ANALOG_PWR) | SV_ANALOG_OFF; sv_indirect_set(sc, SV_REG_ANALOG_PWR, v); v = sv_indirect_get(sc, SV_REG_DIGITAL_PWR) | SV_DIGITAL_OFF; sv_indirect_set(sc, SV_REG_DIGITAL_PWR, SV_DIGITAL_OFF); break; } DEB(printf("Power state %d\n", state)); } static int sv_init(struct sc_info *sc) { u_int8_t v; /* Effect reset */ v = sv_direct_get(sc, SV_CM_CONTROL) & ~SV_CM_CONTROL_ENHANCED; v |= SV_CM_CONTROL_RESET; sv_direct_set(sc, SV_CM_CONTROL, v); DELAY(50); v = sv_direct_get(sc, SV_CM_CONTROL) & ~SV_CM_CONTROL_RESET; sv_direct_set(sc, SV_CM_CONTROL, v); DELAY(50); /* Set in enhanced mode */ v = sv_direct_get(sc, SV_CM_CONTROL); v |= SV_CM_CONTROL_ENHANCED; sv_direct_set(sc, SV_CM_CONTROL, v); /* Enable interrupts (UDM and MIDM are superfluous) */ v = sv_direct_get(sc, SV_CM_IMR); v &= ~(SV_CM_IMR_AMSK | SV_CM_IMR_CMSK | SV_CM_IMR_SMSK); sv_direct_set(sc, SV_CM_IMR, v); /* Select ADC PLL for ADC clock */ v = sv_indirect_get(sc, SV_REG_CLOCK_SOURCE) & ~SV_CLOCK_ALTERNATE; sv_indirect_set(sc, SV_REG_CLOCK_SOURCE, v); /* Disable loopback - binds ADC and DAC rates */ v = sv_indirect_get(sc, SV_REG_LOOPBACK) & ~SV_LOOPBACK_ENABLE; sv_indirect_set(sc, SV_REG_LOOPBACK, v); /* Disable SRS */ v = sv_indirect_get(sc, SV_REG_SRS_SPACE) | SV_SRS_DISABLED; sv_indirect_set(sc, SV_REG_SRS_SPACE, v); /* Get revision */ sc->rev = sv_indirect_get(sc, SV_REG_REVISION); return 0; } static int sv_suspend(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); sc->rch.dma_was_active = sc->rch.dma_active; svrchan_trigger(NULL, &sc->rch, PCMTRIG_ABORT); sc->pch.dma_was_active = sc->pch.dma_active; svrchan_trigger(NULL, &sc->pch, PCMTRIG_ABORT); sv_mix_mute_all(sc); sv_power(sc, 3); return 0; } static int sv_resume(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); sv_mix_mute_all(sc); sv_power(sc, 0); if (sv_init(sc) == -1) { device_printf(dev, "unable to reinitialize the card\n"); return ENXIO; } if (mixer_reinit(dev) == -1) { device_printf(dev, "unable to reinitialize the mixer\n"); return ENXIO; } if (sc->rch.dma_was_active) { svrchan_trigger(0, &sc->rch, PCMTRIG_START); } if (sc->pch.dma_was_active) { svpchan_trigger(0, &sc->pch, PCMTRIG_START); } return 0; } /* ------------------------------------------------------------------------- */ /* Resource related */ static void sv_intr(void *data) { struct sc_info *sc = data; u_int8_t status; status = sv_direct_get(sc, SV_CM_STATUS); if (status & SV_CM_STATUS_AINT) chn_intr(sc->pch.channel); if (status & SV_CM_STATUS_CINT) chn_intr(sc->rch.channel); status &= ~(SV_CM_STATUS_AINT|SV_CM_STATUS_CINT); DEB(if (status) printf("intr 0x%02x ?\n", status)); return; } static int sv_probe(device_t dev) { switch(pci_get_devid(dev)) { case SV_PCI_ID: device_set_desc(dev, "S3 Sonicvibes"); return BUS_PROBE_DEFAULT; default: return ENXIO; } } static int sv_attach(device_t dev) { struct 
sc_info *sc; rman_res_t count, midi_start, games_start; u_int32_t data; char status[SND_STATUSLEN]; u_long sdmaa, sdmac, ml, mu; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->dev = dev; pci_enable_busmaster(dev); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } sc->enh_rid = SV_PCI_ENHANCED; sc->enh_type = SYS_RES_IOPORT; sc->enh_reg = bus_alloc_resource(dev, sc->enh_type, &sc->enh_rid, 0, ~0, SV_PCI_ENHANCED_SIZE, RF_ACTIVE); if (sc->enh_reg == NULL) { device_printf(dev, "sv_attach: cannot allocate enh\n"); return ENXIO; } sc->enh_st = rman_get_bustag(sc->enh_reg); sc->enh_sh = rman_get_bushandle(sc->enh_reg); data = pci_read_config(dev, SV_PCI_DMAA, 4); DEB(printf("sv_attach: initial dmaa 0x%08x\n", data)); data = pci_read_config(dev, SV_PCI_DMAC, 4); DEB(printf("sv_attach: initial dmac 0x%08x\n", data)); /* Initialize DMA_A and DMA_C */ pci_write_config(dev, SV_PCI_DMAA, SV_PCI_DMA_EXTENDED, 4); pci_write_config(dev, SV_PCI_DMAC, 0, 4); /* Register IRQ handler */ sc->irqid = 0; - sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(dev, sc->irq, 0, sv_intr, sc, &sc->ih)) { device_printf(dev, "sv_attach: Unable to map interrupt\n"); goto fail; } sc->bufsz = pcm_getbuffersize(dev, 4096, SV_DEFAULT_BUFSZ, 65536); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegsz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &sc->parent_dmat) != 0) { device_printf(dev, "sv_attach: Unable to create dma tag\n"); goto fail; } /* Power up and initialize */ sv_mix_mute_all(sc); sv_power(sc, 0); sv_init(sc); if (mixer_init(dev, &sv_mixer_class, sc) != 0) { device_printf(dev, "sv_attach: Mixer failed to initialize\n"); goto fail; } /* XXX This is a hack, and it's ugly. Okay, the deal is this * card has two more io regions that are not available for automatic * configuration by the pci code. These need to be allocated * to be used as control registers for the DMA engines. * Unfortunately FBSD has no bus_space_foo() functions so we * have to grab port space in the region of existing resources. Go * for space between the midi and game ports. */ bus_get_resource(dev, SYS_RES_IOPORT, SV_PCI_MIDI, &midi_start, &count); bus_get_resource(dev, SYS_RES_IOPORT, SV_PCI_GAMES, &games_start, &count); if (games_start < midi_start) { ml = games_start; mu = midi_start; } else { ml = midi_start; mu = games_start; } /* Check assumptions about space availability and alignment. Depending on how the driver was loaded, either games_start > midi_start or vice versa. */ if ((mu - ml >= 0x800) || ((mu - ml) % 0x200)) { device_printf(dev, "sv_attach: resource assumptions not met " "(midi 0x%08lx, games 0x%08lx)\n", midi_start, games_start); goto fail; } sdmaa = ml + 0x40; sdmac = sdmaa + 0x40; /* Add resources to the list of pci resources for this device - from here on * they look like normal pci resources.
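Once bus_set_resource() below registers the two synthesized port ranges (DMA_A at ml + 0x40 and DMA_C another 0x40 above it), they can be allocated and released through the regular resource API like any BAR-backed range.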
*/ bus_set_resource(dev, SYS_RES_IOPORT, SV_PCI_DMAA, sdmaa, SV_PCI_DMAA_SIZE); bus_set_resource(dev, SYS_RES_IOPORT, SV_PCI_DMAC, sdmac, SV_PCI_DMAC_SIZE); /* Cache resource short-cuts for dma_a */ sc->dmaa_rid = SV_PCI_DMAA; sc->dmaa_type = SYS_RES_IOPORT; sc->dmaa_reg = bus_alloc_resource(dev, sc->dmaa_type, &sc->dmaa_rid, 0, ~0, SV_PCI_ENHANCED_SIZE, RF_ACTIVE); if (sc->dmaa_reg == NULL) { device_printf(dev, "sv_attach: cannot allocate dmaa\n"); goto fail; } sc->dmaa_st = rman_get_bustag(sc->dmaa_reg); sc->dmaa_sh = rman_get_bushandle(sc->dmaa_reg); /* Poke port into dma_a configuration, nb bit flags to enable dma */ data = pci_read_config(dev, SV_PCI_DMAA, 4) | SV_PCI_DMA_ENABLE | SV_PCI_DMA_EXTENDED; data = ((u_int32_t)sdmaa & 0xfffffff0) | (data & 0x0f); pci_write_config(dev, SV_PCI_DMAA, data, 4); DEB(printf("dmaa: 0x%x 0x%x\n", data, pci_read_config(dev, SV_PCI_DMAA, 4))); /* Cache resource short-cuts for dma_c */ sc->dmac_rid = SV_PCI_DMAC; sc->dmac_type = SYS_RES_IOPORT; sc->dmac_reg = bus_alloc_resource(dev, sc->dmac_type, &sc->dmac_rid, 0, ~0, SV_PCI_ENHANCED_SIZE, RF_ACTIVE); if (sc->dmac_reg == NULL) { device_printf(dev, "sv_attach: cannot allocate dmac\n"); goto fail; } sc->dmac_st = rman_get_bustag(sc->dmac_reg); sc->dmac_sh = rman_get_bushandle(sc->dmac_reg); /* Poke port into dma_c configuration, nb bit flags to enable dma */ data = pci_read_config(dev, SV_PCI_DMAC, 4) | SV_PCI_DMA_ENABLE | SV_PCI_DMA_EXTENDED; data = ((u_int32_t)sdmac & 0xfffffff0) | (data & 0x0f); pci_write_config(dev, SV_PCI_DMAC, data, 4); DEB(printf("dmac: 0x%x 0x%x\n", data, pci_read_config(dev, SV_PCI_DMAC, 4))); if (bootverbose) printf("Sonicvibes: revision %d.\n", sc->rev); if (pcm_register(dev, sc, 1, 1)) { device_printf(dev, "sv_attach: pcm_register fail\n"); goto fail; } pcm_addchan(dev, PCMDIR_PLAY, &svpchan_class, sc); pcm_addchan(dev, PCMDIR_REC, &svrchan_class, sc); snprintf(status, SND_STATUSLEN, "at io 0x%lx irq %ld %s", rman_get_start(sc->enh_reg), rman_get_start(sc->irq),PCM_KLDSTRING(snd_vibes)); pcm_setstatus(dev, status); DEB(printf("sv_attach: succeeded\n")); return 0; fail: if (sc->parent_dmat) bus_dma_tag_destroy(sc->parent_dmat); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); if (sc->enh_reg) bus_release_resource(dev, sc->enh_type, sc->enh_rid, sc->enh_reg); if (sc->dmaa_reg) bus_release_resource(dev, sc->dmaa_type, sc->dmaa_rid, sc->dmaa_reg); if (sc->dmac_reg) bus_release_resource(dev, sc->dmac_type, sc->dmac_rid, sc->dmac_reg); return ENXIO; } static int sv_detach(device_t dev) { struct sc_info *sc; int r; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); sv_mix_mute_all(sc); sv_power(sc, 3); bus_dma_tag_destroy(sc->parent_dmat); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); bus_release_resource(dev, sc->enh_type, sc->enh_rid, sc->enh_reg); bus_release_resource(dev, sc->dmaa_type, sc->dmaa_rid, sc->dmaa_reg); bus_release_resource(dev, sc->dmac_type, sc->dmac_rid, sc->dmac_reg); free(sc, M_DEVBUF); return 0; } static device_method_t sc_methods[] = { DEVMETHOD(device_probe, sv_probe), DEVMETHOD(device_attach, sv_attach), DEVMETHOD(device_detach, sv_detach), DEVMETHOD(device_resume, sv_resume), DEVMETHOD(device_suspend, sv_suspend), { 0, 0 } }; static driver_t sonicvibes_driver = { "pcm", sc_methods, PCM_SOFTC_SIZE }; DRIVER_MODULE(snd_vibes, pci, sonicvibes_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_vibes, sound, 
SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_vibes, 1); Index: head/sys/dev/twa/tw_osl_freebsd.c =================================================================== --- head/sys/dev/twa/tw_osl_freebsd.c (revision 295789) +++ head/sys/dev/twa/tw_osl_freebsd.c (revision 295790) @@ -1,1712 +1,1712 @@ /* * Copyright (c) 2004-07 Applied Micro Circuits Corporation. * Copyright (c) 2004-05 Vinod Kashyap. * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AMCC'S 3ware driver for 9000 series storage controllers. * * Author: Vinod Kashyap * Modifications by: Adam Radford * Modifications by: Manjunath Ranganathaiah */ /* * FreeBSD specific functions not related to CAM, and other * miscellaneous functions. */ #include #include #include #include #ifdef TW_OSL_DEBUG TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG; TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG; #endif /* TW_OSL_DEBUG */ static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands"); static d_open_t twa_open; static d_close_t twa_close; static d_ioctl_t twa_ioctl; static struct cdevsw twa_cdevsw = { .d_version = D_VERSION, .d_open = twa_open, .d_close = twa_close, .d_ioctl = twa_ioctl, .d_name = "twa", }; static devclass_t twa_devclass; /* * Function name: twa_open * Description: Called when the controller is opened. * Simply marks the controller as open. * * Input: dev -- control device corresponding to the ctlr * flags -- mode of open * fmt -- device type (character/block etc.) * proc -- current process * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); tw_osli_dbg_dprintf(5, sc, "entered"); sc->open = TW_CL_TRUE; return(0); } /* * Function name: twa_close * Description: Called when the controller is closed. * Simply marks the controller as not open. * * Input: dev -- control device corresponding to the ctlr * flags -- mode of corresponding open * fmt -- device type (character/block etc.) 
* proc -- current process * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); tw_osli_dbg_dprintf(5, sc, "entered"); sc->open = TW_CL_FALSE; return(0); } /* * Function name: twa_ioctl * Description: Called when an ioctl is posted to the controller. * Handles any OS Layer specific cmds, passes the rest * on to the Common Layer. * * Input: dev -- control device corresponding to the ctlr * cmd -- ioctl cmd * buf -- ptr to buffer in kernel memory, which is * a copy of the input buffer in user-space * flags -- mode of corresponding open * proc -- current process * Output: buf -- ptr to buffer in kernel memory, which will * be copied to the output buffer in user-space * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); TW_INT32 error; tw_osli_dbg_dprintf(5, sc, "entered"); switch (cmd) { case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH: tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru"); error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf); break; case TW_OSL_IOCTL_SCAN_BUS: /* Request CAM for a bus scan. */ tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus"); error = tw_osli_request_bus_scan(sc); break; default: tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd); error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf); break; } return(error); } static TW_INT32 twa_probe(device_t dev); static TW_INT32 twa_attach(device_t dev); static TW_INT32 twa_detach(device_t dev); static TW_INT32 twa_shutdown(device_t dev); static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op); static TW_VOID twa_pci_intr(TW_VOID *arg); static TW_VOID twa_watchdog(TW_VOID *arg); int twa_setup_intr(struct twa_softc *sc); int twa_teardown_intr(struct twa_softc *sc); static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc); static TW_VOID tw_osli_free_resources(struct twa_softc *sc); static TW_VOID twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); static TW_VOID twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); static device_method_t twa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, twa_probe), DEVMETHOD(device_attach, twa_attach), DEVMETHOD(device_detach, twa_detach), DEVMETHOD(device_shutdown, twa_shutdown), DEVMETHOD_END }; static driver_t twa_pci_driver = { "twa", twa_methods, sizeof(struct twa_softc) }; DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0); MODULE_DEPEND(twa, cam, 1, 1, 1); MODULE_DEPEND(twa, pci, 1, 1, 1); /* * Function name: twa_probe * Description: Called at driver load time. Claims 9000 ctlrs. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: <= 0 -- success * > 0 -- failure */ static TW_INT32 twa_probe(device_t dev) { static TW_UINT8 first_ctlr = 1; tw_osli_dbg_printf(3, "entered"); if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) { device_set_desc(dev, TW_OSLI_DEVICE_NAME); /* Print the driver version only once. 
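A minimal sketch of the twa_ioctl() dispatch shown above: OS Layer specific
commands are handled locally and everything else is passed down to the
Common Layer. The ex_ names and EX_IOCTL_* constants are hypothetical
stand-ins, shown here as commentary only.

    static int
    ex_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, int flags,
        struct thread *td)
    {
        struct ex_softc *sc = dev->si_drv1;   // stashed at make_dev() time

        switch (cmd) {
        case EX_IOCTL_FW_PASSTHRU:            // OS Layer handles this one
            return (ex_fw_passthru(sc, buf));
        case EX_IOCTL_SCAN_BUS:               // ... and this one
            return (ex_request_bus_scan(sc));
        default:                              // Common Layer gets the rest
            return (ex_cl_ioctl(sc, cmd, buf));
        }
    }
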
*/ if (first_ctlr) { printf("3ware device driver for 9000 series storage " "controllers, version: %s\n", TW_OSL_DRIVER_VERSION_STRING); first_ctlr = 0; } return(0); } return(ENXIO); } int twa_setup_intr(struct twa_softc *sc) { int error = 0; if (!(sc->intr_handle) && (sc->irq_res)) { error = bus_setup_intr(sc->bus_dev, sc->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, twa_pci_intr, sc, &sc->intr_handle); } return( error ); } int twa_teardown_intr(struct twa_softc *sc) { int error = 0; if ((sc->intr_handle) && (sc->irq_res)) { error = bus_teardown_intr(sc->bus_dev, sc->irq_res, sc->intr_handle); sc->intr_handle = NULL; } return( error ); } /* * Function name: twa_attach * Description: Allocates pci resources; updates sc; adds a node to the * sysctl tree to expose the driver version; makes calls * (to the Common Layer) to initialize ctlr, and to * attach to CAM. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_attach(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 bar_num; TW_INT32 bar0_offset; TW_INT32 bar_size; TW_INT32 error; tw_osli_dbg_dprintf(3, sc, "entered"); sc->ctlr_handle.osl_ctlr_ctxt = sc; /* Initialize the softc structure. */ sc->bus_dev = dev; sc->device_id = pci_get_device(dev); /* Initialize the mutexes right here. */ sc->io_lock = &(sc->io_lock_handle); mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN); sc->q_lock = &(sc->q_lock_handle); mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN); sc->sim_lock = &(sc->sim_lock_handle); mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE); sysctl_ctx_init(&sc->sysctl_ctxt); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, ""); if (sc->sysctl_tree == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2000, "Cannot add sysctl tree node", ENXIO); return(ENXIO); } SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version"); /* Force the busmaster enable bit on, in case the BIOS forgot. */ pci_enable_busmaster(dev); /* Allocate the PCI register window. */ if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM, &bar_num, &bar0_offset, &bar_size))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201F, "Can't get PCI BAR info", error); tw_osli_free_resources(sc); return(error); } sc->reg_res_id = PCIR_BARS + bar0_offset; - if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2002, "Can't allocate register window", ENXIO); tw_osli_free_resources(sc); return(ENXIO); } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); /* Allocate and register our interrupt. 
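The hunk above is the point of this change: the old call
bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE) spelled
out the default "any start, any end, count 1" range by hand, and
bus_alloc_resource_any() is the wrapper that expresses exactly that.
A minimal sketch of the same allocate-then-hook idiom for the interrupt
that follows (hypothetical ex_ names, shown as commentary):

    static int
    ex_setup_irq(device_t dev, struct ex_softc *sc)
    {
        int error;

        sc->irq_rid = 0;
        sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE);
        if (sc->irq_res == NULL)
            return (ENXIO);
        error = bus_setup_intr(dev, sc->irq_res,
            INTR_TYPE_CAM | INTR_MPSAFE,
            NULL,                             // no filter, handler only
            ex_intr, sc, &sc->intr_cookie);
        if (error != 0) {
            bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
                sc->irq_res);
            sc->irq_res = NULL;
        }
        return (error);
    }
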
*/ sc->irq_res_id = 0; - if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ, - &(sc->irq_res_id), 0, ~0, 1, + if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ, + &(sc->irq_res_id), RF_SHAREABLE | RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2003, "Can't allocate interrupt", ENXIO); tw_osli_free_resources(sc); return(ENXIO); } if ((error = twa_setup_intr(sc))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2004, "Can't set up interrupt", error); tw_osli_free_resources(sc); return(error); } if ((error = tw_osli_alloc_mem(sc))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2005, "Memory allocation failure", error); tw_osli_free_resources(sc); return(error); } /* Initialize the Common Layer for this controller. */ if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, sc->non_dma_mem, sc->dma_mem, sc->dma_mem_phys ))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2006, "Failed to initialize Common Layer/controller", error); tw_osli_free_resources(sc); return(error); } /* Create the control device. */ sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "twa%d", device_get_unit(sc->bus_dev)); sc->ctrl_dev->si_drv1 = sc; if ((error = tw_osli_cam_attach(sc))) { tw_osli_free_resources(sc); tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2007, "Failed to initialize CAM", error); return(error); } sc->watchdog_index = 0; callout_init(&(sc->watchdog_callout[0]), 1); callout_init(&(sc->watchdog_callout[1]), 1); callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle); return(0); } static TW_VOID twa_watchdog(TW_VOID *arg) { struct tw_cl_ctlr_handle *ctlr_handle = (struct tw_cl_ctlr_handle *)arg; struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt; int i; int i_need_a_reset = 0; int driver_is_active = 0; int my_watchdog_was_pending = 1234; TW_UINT64 current_time; struct tw_osli_req_context *my_req; //============================================================================== current_time = (TW_UINT64) (tw_osl_get_local_time()); for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { my_req = &(sc->req_ctx_buf[i]); if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) && (my_req->deadline) && (my_req->deadline < current_time)) { tw_cl_set_reset_needed(ctlr_handle); #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Request %d timed out! 
d = %llu, c = %llu\n", i, my_req->deadline, current_time); #else /* TW_OSL_DEBUG */ device_printf((sc)->bus_dev, "Request %d timed out!\n", i); #endif /* TW_OSL_DEBUG */ break; } } //============================================================================== i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle); i = (int) ((sc->watchdog_index++) & 1); driver_is_active = tw_cl_is_active(ctlr_handle); if (i_need_a_reset) { #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n"); #endif /* TW_OSL_DEBUG */ my_watchdog_was_pending = callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle); tw_cl_reset_ctlr(ctlr_handle); #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Watchdog reset completed!\n"); #endif /* TW_OSL_DEBUG */ } else if (driver_is_active) { my_watchdog_was_pending = callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle); } #ifdef TW_OSL_DEBUG if (i_need_a_reset || my_watchdog_was_pending) device_printf((sc)->bus_dev, "i_need_a_reset = %d, " "driver_is_active = %d, my_watchdog_was_pending = %d\n", i_need_a_reset, driver_is_active, my_watchdog_was_pending); #endif /* TW_OSL_DEBUG */ } /* * Function name: tw_osli_alloc_mem * Description: Allocates memory needed both by CL and OSL. * * Input: sc -- OSL internal controller context * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc) { struct tw_osli_req_context *req; TW_UINT32 max_sg_elements; TW_UINT32 non_dma_mem_size; TW_UINT32 dma_mem_size; TW_INT32 error; TW_INT32 i; tw_osli_dbg_dprintf(3, sc, "entered"); sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0; sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0; max_sg_elements = (sizeof(bus_addr_t) == 8) ? TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS; if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags, sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, &(sc->alignment), &(sc->sg_size_factor), &non_dma_mem_size, &dma_mem_size ))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2008, "Can't get Common Layer's memory requirements", error); return(error); } if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2009, "Can't allocate non-dma memory", ENOMEM); return(ENOMEM); } /* Create the parent dma tag. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->parent_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200A, "Can't allocate parent DMA tag", ENOMEM); return(ENOMEM); } /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). 
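The parent tag created above only captures the device-wide DMA constraints;
per-purpose child tags then inherit them, like the dma_mem tag that follows.
A compact sketch of the hierarchy (hypothetical ex_ names, shown as
commentary; argument roles noted inline):

    static int
    ex_create_tags(device_t dev, int align, bus_size_t cmd_size,
        bus_dma_tag_t *parentp, bus_dma_tag_t *cmdp)
    {
        if (bus_dma_tag_create(bus_get_dma_tag(dev),  // inherit from bus
            align, 0,                         // alignment, boundary
            BUS_SPACE_MAXADDR,                // lowaddr
            BUS_SPACE_MAXADDR,                // highaddr
            NULL, NULL,                       // filter, filterarg
            BUS_SPACE_MAXSIZE,                // maxsize
            BUS_SPACE_UNRESTRICTED,           // nsegments
            BUS_SPACE_MAXSIZE,                // maxsegsize
            0, NULL, NULL,                    // flags, lockfunc, lockarg
            parentp))
            return (ENOMEM);
        // Child tag: one physically contiguous segment for commands.
        if (bus_dma_tag_create(*parentp, align, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            cmd_size, 1, cmd_size, 0, NULL, NULL, cmdp)) {
            bus_dma_tag_destroy(*parentp);
            return (ENOMEM);
        }
        return (0);
    }
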
*/ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem_size, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->cmd_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200B, "Can't allocate DMA tag for Common Layer's " "DMA'able memory", ENOMEM); return(ENOMEM); } if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { /* Try a second time. */ if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200C, "Can't allocate DMA'able memory for the" "Common Layer", ENOMEM); return(ENOMEM); } } bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, dma_mem_size, twa_map_load_callback, &sc->dma_mem_phys, 0); /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ twa_busdma_lock, /* lockfunc */ sc->io_lock, /* lockfuncarg */ &sc->dma_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200F, "Can't allocate DMA tag for data buffers", ENOMEM); return(ENOMEM); } /* * Create a dma tag for ioctl data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ twa_busdma_lock, /* lockfunc */ sc->io_lock, /* lockfuncarg */ &sc->ioctl_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2010, "Can't allocate DMA tag for ioctl data buffers", ENOMEM); return(ENOMEM); } /* Create just one map for all ioctl request data buffers. */ if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2011, "Can't create ioctl map", ENOMEM); return(ENOMEM); } /* Initialize request queues. 
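bus_dmamap_load() above hands the controller-visible address back through a
callback; because the cmd tag has nsegments = 1 and the memory came from
bus_dmamem_alloc(), the load cannot be deferred and the callback has run by
the time the call returns. A sketch of that pattern (hypothetical ex_
names, shown as commentary):

    static void
    ex_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
        // nsegments = 1 in the tag, so nseg is 1 on success.
        if (error == 0)
            *(bus_addr_t *)arg = segs[0].ds_addr;
    }

    static int
    ex_alloc_dma_block(bus_dma_tag_t tag, bus_size_t size, void **vaddrp,
        bus_addr_t *paddrp, bus_dmamap_t *mapp)
    {
        int error;

        if (bus_dmamem_alloc(tag, vaddrp, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
            mapp))
            return (ENOMEM);
        *paddrp = 0;
        error = bus_dmamap_load(tag, *mapp, *vaddrp, size, ex_load_cb,
            paddrp, BUS_DMA_NOWAIT);
        return (error);
    }
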
*/ tw_osli_req_q_init(sc, TW_OSLI_FREE_Q); tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q); if ((sc->req_ctx_buf = (struct tw_osli_req_context *) malloc((sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2012, "Failed to allocate request packets", ENOMEM); return(ENOMEM); } bzero(sc->req_ctx_buf, sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS); for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { req = &(sc->req_ctx_buf[i]); req->ctlr = sc; if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) { tw_osli_printf(sc, "request # = %d, error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2013, "Can't create dma map", i, ENOMEM); return(ENOMEM); } /* Initialize the ioctl wakeup/ timeout mutex */ req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle); mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF); /* Insert request into the free queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); } return(0); } /* * Function name: tw_osli_free_resources * Description: Performs clean-up at the time of going down. * * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: None */ static TW_VOID tw_osli_free_resources(struct twa_softc *sc) { struct tw_osli_req_context *req; TW_INT32 error = 0; tw_osli_dbg_dprintf(3, sc, "entered"); /* Detach from CAM */ tw_osli_cam_detach(sc); if (sc->req_ctx_buf) while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) != NULL) { mtx_destroy(req->ioctl_wake_timeout_lock); if ((error = bus_dmamap_destroy(sc->dma_tag, req->dma_map))) tw_osli_dbg_dprintf(1, sc, "dmamap_destroy(dma) returned %d", error); } if ((sc->ioctl_tag) && (sc->ioctl_map)) if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map))) tw_osli_dbg_dprintf(1, sc, "dmamap_destroy(ioctl) returned %d", error); /* Free all memory allocated so far. */ if (sc->req_ctx_buf) free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS); if (sc->non_dma_mem) free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS); if (sc->dma_mem) { bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); } if (sc->cmd_tag) if ((error = bus_dma_tag_destroy(sc->cmd_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(cmd) returned %d", error); if (sc->dma_tag) if ((error = bus_dma_tag_destroy(sc->dma_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(dma) returned %d", error); if (sc->ioctl_tag) if ((error = bus_dma_tag_destroy(sc->ioctl_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(ioctl) returned %d", error); if (sc->parent_tag) if ((error = bus_dma_tag_destroy(sc->parent_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(parent) returned %d", error); /* Disconnect the interrupt handler. */ if ((error = twa_teardown_intr(sc))) tw_osli_dbg_dprintf(1, sc, "teardown_intr returned %d", error); if (sc->irq_res != NULL) if ((error = bus_release_resource(sc->bus_dev, SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))) tw_osli_dbg_dprintf(1, sc, "release_resource(irq) returned %d", error); /* Release the register window mapping. */ if (sc->reg_res != NULL) if ((error = bus_release_resource(sc->bus_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))) tw_osli_dbg_dprintf(1, sc, "release_resource(io) returned %d", error); /* Destroy the control device. 
*/ if (sc->ctrl_dev != (struct cdev *)NULL) destroy_dev(sc->ctrl_dev); if ((error = sysctl_ctx_free(&sc->sysctl_ctxt))) tw_osli_dbg_dprintf(1, sc, "sysctl_ctx_free returned %d", error); } /* * Function name: twa_detach * Description: Called when the controller is being detached from * the pci bus. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_detach(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 error; tw_osli_dbg_dprintf(3, sc, "entered"); error = EBUSY; if (sc->open) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2014, "Device open", error); goto out; } /* Shut the controller down. */ if ((error = twa_shutdown(dev))) goto out; /* Free all resources associated with this controller. */ tw_osli_free_resources(sc); error = 0; out: return(error); } /* * Function name: twa_shutdown * Description: Called at unload/shutdown time. Lets the controller * know that we are going down. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_shutdown(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 error = 0; tw_osli_dbg_dprintf(3, sc, "entered"); /* Disconnect interrupts. */ error = twa_teardown_intr(sc); /* Stop watchdog task. */ callout_drain(&(sc->watchdog_callout[0])); callout_drain(&(sc->watchdog_callout[1])); /* Disconnect from the controller. */ if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2015, "Failed to shutdown Common Layer/controller", error); } return(error); } /* * Function name: twa_busdma_lock * Description: Function to provide synchronization during busdma_swi. * * Input: lock_arg -- lock mutex sent as argument * op -- operation (lock/unlock) expected of the function * Output: None * Return value: None */ TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op) { struct mtx *lock; lock = (struct mtx *)lock_arg; switch (op) { case BUS_DMA_LOCK: mtx_lock_spin(lock); break; case BUS_DMA_UNLOCK: mtx_unlock_spin(lock); break; default: panic("Unknown operation 0x%x for twa_busdma_lock!", op); } } /* * Function name: twa_pci_intr * Description: Interrupt handler. Wrapper for twa_interrupt. * * Input: arg -- ptr to OSL internal ctlr context * Output: None * Return value: None */ static TW_VOID twa_pci_intr(TW_VOID *arg) { struct twa_softc *sc = (struct twa_softc *)arg; tw_osli_dbg_dprintf(10, sc, "entered"); tw_cl_interrupt(&(sc->ctlr_handle)); } /* * Function name: tw_osli_fw_passthru * Description: Builds a fw passthru cmd pkt, and submits it to CL. 
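*/

/*
 * twa_busdma_lock() above is the lockfunc handed to bus_dma_tag_create();
 * busdma invokes it around deferred load callbacks so that they run under
 * the same lock as the submission path.  A minimal sketch for a default
 * (non-spin) mutex follows; the stock helper busdma_lock_mutex() already
 * implements this exact shape, and the tws driver later in this change
 * passes it directly.
 */
static void
example_busdma_lock(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *mtx = arg;

	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(mtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(mtx);
		break;
	default:
		panic("example_busdma_lock: unknown op 0x%x", op);
	}
}

/*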
* * Input: sc -- ptr to OSL internal ctlr context * buf -- ptr to ioctl pkt understood by CL * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf) { struct tw_osli_req_context *req; struct tw_osli_ioctl_no_data_buf *user_buf = (struct tw_osli_ioctl_no_data_buf *)buf; TW_TIME end_time; TW_UINT32 timeout = 60; TW_UINT32 data_buf_size_adjusted; struct tw_cl_req_packet *req_pkt; struct tw_cl_passthru_req_packet *pt_req; TW_INT32 error; tw_osli_dbg_dprintf(5, sc, "ioctl: passthru"); if ((req = tw_osli_get_request(sc)) == NULL) return(EBUSY); req->req_handle.osl_req_ctxt = req; req->orig_req = buf; req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU; req_pkt = &(req->req_pkt); req_pkt->status = 0; req_pkt->tw_osl_callback = tw_osl_complete_passthru; /* Let the Common Layer retry the request on cmd queue full. */ req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY; pt_req = &(req_pkt->gen_req_pkt.pt_req); /* * Make sure that the data buffer sent to firmware is a * 512 byte multiple in size. */ data_buf_size_adjusted = (user_buf->driver_pkt.buffer_length + (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1); if ((req->length = data_buf_size_adjusted)) { if ((req->data = malloc(data_buf_size_adjusted, TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { error = ENOMEM; tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2016, "Could not alloc mem for " "fw_passthru data_buf", error); goto fw_passthru_err; } /* Copy the payload. */ if ((error = copyin((TW_VOID *)(user_buf->pdata), req->data, user_buf->driver_pkt.buffer_length)) != 0) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2017, "Could not copyin fw_passthru data_buf", error); goto fw_passthru_err; } pt_req->sgl_entries = 1; /* will be updated during mapping */ req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN | TW_OSLI_REQ_FLAGS_DATA_OUT); } else pt_req->sgl_entries = 0; /* no payload */ pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt)); pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet); if ((error = tw_osli_map_request(req))) goto fw_passthru_err; end_time = tw_osl_get_local_time() + timeout; while (req->state != TW_OSLI_REQ_STATE_COMPLETE) { mtx_lock(req->ioctl_wake_timeout_lock); req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING; error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0, "twa_passthru", timeout*hz); mtx_unlock(req->ioctl_wake_timeout_lock); if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING)) error = 0; req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; if (! error) { if (((error = req->error_code)) || ((error = (req->state != TW_OSLI_REQ_STATE_COMPLETE))) || ((error = req_pkt->status))) goto fw_passthru_err; break; } if (req_pkt->status) { error = req_pkt->status; goto fw_passthru_err; } if (error == EWOULDBLOCK) { /* Time out! 
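The data_buf_size_adjusted computation above rounds the user buffer length
up to the controller's SG granularity using the usual power-of-two identity
(len + (f - 1)) & ~(f - 1). For f = 512 and len = 1300 that gives
(1300 + 511) & ~511 = 1536, i.e. three 512-byte sectors. FreeBSD's
roundup2() macro in sys/param.h is this same expression; a sketch, valid
only when f is a power of two (shown as commentary):

    static __inline uint32_t
    ex_roundup2(uint32_t len, uint32_t f)
    {
        // For a non-power-of-two f, use howmany(len, f) * f instead.
        return ((len + (f - 1)) & ~(f - 1));
    }
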
*/ if ((!(req->error_code)) && (req->state == TW_OSLI_REQ_STATE_COMPLETE) && (!(req_pkt->status)) ) { #ifdef TW_OSL_DEBUG tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x7777, "FALSE Passthru timeout!", req); #endif /* TW_OSL_DEBUG */ error = 0; /* False error */ break; } if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) { #ifdef TW_OSL_DEBUG tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2018, "Passthru request timed out!", req); #else /* TW_OSL_DEBUG */ device_printf((sc)->bus_dev, "Passthru request timed out!\n"); #endif /* TW_OSL_DEBUG */ tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle)); } error = 0; end_time = tw_osl_get_local_time() + timeout; continue; /* * Don't touch req after a reset. It (and any * associated data) will be * unmapped by the callback. */ } /* * Either the request got completed, or we were woken up by a * signal. Calculate the new timeout, in case it was the latter. */ timeout = (end_time - tw_osl_get_local_time()); } /* End of while loop */ /* If there was a payload, copy it back. */ if ((!error) && (req->length)) if ((error = copyout(req->data, user_buf->pdata, user_buf->driver_pkt.buffer_length))) tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2019, "Could not copyout fw_passthru data_buf", error); fw_passthru_err: if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) error = EBUSY; user_buf->driver_pkt.os_status = error; /* Free resources. */ if (req->data) free(req->data, TW_OSLI_MALLOC_CLASS); tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); return(error); } /* * Function name: tw_osl_complete_passthru * Description: Called to complete passthru requests. * * Input: req_handle -- ptr to request handle * Output: None * Return value: None */ TW_VOID tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; struct tw_cl_req_packet *req_pkt = (struct tw_cl_req_packet *)(&req->req_pkt); struct twa_softc *sc = req->ctlr; tw_osli_dbg_dprintf(5, sc, "entered"); if (req->state != TW_OSLI_REQ_STATE_BUSY) { tw_osli_printf(sc, "request = %p, status = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201B, "Unposted command completed!!", req, req->state); } /* * Remove request from the busy queue. Just mark it complete. * There's no need to move it into the complete queue as we are * going to be done with it right now. */ req->state = TW_OSLI_REQ_STATE_COMPLETE; tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q); tw_osli_unmap_request(req); /* * Don't do a wake up if there was an error even before the request * was sent down to the Common Layer, and we hadn't gotten an * EINPROGRESS. The request originator will then be returned an * error, and he can do the clean-up. */ if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS))) return; if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) { /* Wake up the sleeping command originator. */ tw_osli_dbg_dprintf(5, sc, "Waking up originator of request %p", req); req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; wakeup_one(req); } else { /* * If the request completed even before mtx_sleep * was called, simply return. 
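The SLEEPING-flag handshake above pairs mtx_sleep() in the submitter with
wakeup_one() on the request address here in the completion callback. A
condensed sketch with hypothetical ex_ names, shown as commentary; unlike
the driver, it also takes the lock on the completion side, which is the
more conservative form:

    struct ex_req {
        struct mtx lock;
        int        flags;                     // EX_SLEEPING / EX_DONE
    };
    #define EX_SLEEPING 0x01
    #define EX_DONE     0x02

    static int
    ex_wait(struct ex_req *req, int timo_ticks)
    {
        int error = 0;

        mtx_lock(&req->lock);
        while (error == 0 && (req->flags & EX_DONE) == 0) {
            req->flags |= EX_SLEEPING;
            error = mtx_sleep(req, &req->lock, 0, "exwait", timo_ticks);
        }
        mtx_unlock(&req->lock);
        return (error);                       // 0, or EWOULDBLOCK on timeout
    }

    static void
    ex_complete(struct ex_req *req)
    {
        mtx_lock(&req->lock);
        req->flags |= EX_DONE;
        if (req->flags & EX_SLEEPING) {
            req->flags &= ~EX_SLEEPING;
            wakeup_one(req);                  // wake exactly one sleeper
        }
        mtx_unlock(&req->lock);
    }
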
*/ if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED) return; if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) return; tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201C, "Passthru callback called, " "and caller not sleeping", req); } } else { tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201D, "Passthru callback called for non-passthru request", req); } } /* * Function name: tw_osli_get_request * Description: Gets a request pkt from the free queue. * * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: ptr to request pkt -- success * NULL -- failure */ struct tw_osli_req_context * tw_osli_get_request(struct twa_softc *sc) { struct tw_osli_req_context *req; tw_osli_dbg_dprintf(4, sc, "entered"); /* Get a free request packet. */ req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q); /* Initialize some fields to their defaults. */ if (req) { req->req_handle.osl_req_ctxt = NULL; req->req_handle.cl_req_ctxt = NULL; req->req_handle.is_io = 0; req->data = NULL; req->length = 0; req->deadline = 0; req->real_data = NULL; req->real_length = 0; req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */ req->flags = 0; req->error_code = 0; req->orig_req = NULL; bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet)); } return(req); } /* * Function name: twa_map_load_data_callback * Description: Callback of bus_dmamap_load for the buffer associated * with data. Updates the cmd pkt (size/sgl_entries * fields, as applicable) to reflect the number of sg * elements. * * Input: arg -- ptr to OSL internal request context * segs -- ptr to a list of segment descriptors * nsegments--# of segments * error -- 0 if no errors encountered before callback, * non-zero if errors were encountered * Output: None * Return value: None */ static TW_VOID twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error) { struct tw_osli_req_context *req = (struct tw_osli_req_context *)arg; struct twa_softc *sc = req->ctlr; struct tw_cl_req_packet *req_pkt = &(req->req_pkt); tw_osli_dbg_dprintf(10, sc, "entered"); if (error == EINVAL) { req->error_code = error; return; } /* Mark the request as currently being processed. */ req->state = TW_OSLI_REQ_STATE_BUSY; /* Move the request into the busy queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); req->flags |= TW_OSLI_REQ_FLAGS_MAPPED; if (error == EFBIG) { req->error_code = error; goto out; } if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { struct tw_cl_passthru_req_packet *pt_req; if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_PREREAD); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { /* * If we're using an alignment buffer, and we're * writing data, copy the real data out. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->real_data, req->data, req->real_length); bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_PREWRITE); } pt_req = &(req_pkt->gen_req_pkt.pt_req); pt_req->sg_list = (TW_UINT8 *)segs; pt_req->sgl_entries += (nsegments - 1); error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt, &(req->req_handle)); } else { struct tw_cl_scsi_req_packet *scsi_req; if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_PREREAD); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { /* * If we're using an alignment buffer, and we're * writing data, copy the real data out. 
*/ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->real_data, req->data, req->real_length); bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_PREWRITE); } scsi_req = &(req_pkt->gen_req_pkt.scsi_req); scsi_req->sg_list = (TW_UINT8 *)segs; scsi_req->sgl_entries += (nsegments - 1); error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt, &(req->req_handle)); } out: if (error) { req->error_code = error; req_pkt->tw_osl_callback(&(req->req_handle)); /* * If the caller had been returned EINPROGRESS, and he has * registered a callback for handling completion, the callback * will never get called because we were unable to submit the * request. So, free up the request right here. */ if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS) tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); } } /* * Function name: twa_map_load_callback * Description: Callback of bus_dmamap_load for the buffer associated * with a cmd pkt. * * Input: arg -- ptr to variable to hold phys addr * segs -- ptr to a list of segment descriptors * nsegments--# of segments * error -- 0 if no errors encountered before callback, * non-zero if errors were encountered * Output: None * Return value: None */ static TW_VOID twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error) { *((bus_addr_t *)arg) = segs[0].ds_addr; } /* * Function name: tw_osli_map_request * Description: Maps a cmd pkt and data associated with it, into * DMA'able memory. * * Input: req -- ptr to request pkt * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_map_request(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; TW_INT32 error = 0; tw_osli_dbg_dprintf(10, sc, "entered"); /* If the command involves data, map that too. */ if (req->data != NULL) { /* * It's sufficient for the data pointer to be 4-byte aligned * to work with 9000. However, if 4-byte aligned addresses * are passed to bus_dmamap_load, we can get back sg elements * that are not 512-byte multiples in size. So, we will let * only those buffers that are 512-byte aligned to pass * through, and bounce the rest, so as to make sure that we * always get back sg elements that are 512-byte multiples * in size. */ if (((vm_offset_t)req->data % sc->sg_size_factor) || (req->length % sc->sg_size_factor)) { req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED; /* Save original data pointer and length. */ req->real_data = req->data; req->real_length = req->length; req->length = (req->length + (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1); req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS, M_NOWAIT); if (req->data == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201E, "Failed to allocate memory " "for bounce buffer", ENOMEM); /* Restore original data pointer and length. */ req->data = req->real_data; req->length = req->real_length; return(ENOMEM); } } /* * Map the data buffer into bus space and build the SG list. */ if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { /* Lock against multiple simultaneous ioctl calls. */ mtx_lock_spin(sc->io_lock); error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map, req->data, req->length, twa_map_load_data_callback, req, BUS_DMA_WAITOK); mtx_unlock_spin(sc->io_lock); } else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) { error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map, req->orig_req, twa_map_load_data_callback, req, BUS_DMA_WAITOK); } else { /* * There's only one CAM I/O thread running at a time. 
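The test above admits a buffer only when both its address and its length
are multiples of sg_size_factor (512 bytes for this hardware); anything
else is bounced through an aligned copy. An equivalent mask form, valid
because the factor is a power of two (hypothetical name, shown as
commentary):

    static int
    ex_needs_bounce(const void *data, size_t len, size_t factor)
    {
        // Same result as (addr % factor) || (len % factor).
        return ((((vm_offset_t)data | len) & (factor - 1)) != 0);
    }
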
* So, there's no need to hold the io_lock. */ error = bus_dmamap_load(sc->dma_tag, req->dma_map, req->data, req->length, twa_map_load_data_callback, req, BUS_DMA_WAITOK); } if (!error) error = req->error_code; else { if (error == EINPROGRESS) { /* * Specifying sc->io_lock as the lockfuncarg * in ...tag_create should protect the access * of ...FLAGS_MAPPED from the callback. */ mtx_lock_spin(sc->io_lock); if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS; tw_osli_disallow_new_requests(sc, &(req->req_handle)); mtx_unlock_spin(sc->io_lock); error = 0; } else { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x9999, "Failed to map DMA memory " "for I/O request", error); req->flags |= TW_OSLI_REQ_FLAGS_FAILED; /* Free alignment buffer if it was used. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { free(req->data, TW_OSLI_MALLOC_CLASS); /* * Restore original data pointer * and length. */ req->data = req->real_data; req->length = req->real_length; } } } } else { /* Mark the request as currently being processed. */ req->state = TW_OSLI_REQ_STATE_BUSY; /* Move the request into the busy queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) error = tw_cl_fw_passthru(&sc->ctlr_handle, &(req->req_pkt), &(req->req_handle)); else error = tw_cl_start_io(&sc->ctlr_handle, &(req->req_pkt), &(req->req_handle)); if (error) { req->error_code = error; req->req_pkt.tw_osl_callback(&(req->req_handle)); } } return(error); } /* * Function name: tw_osli_unmap_request * Description: Undoes the mapping done by tw_osli_map_request. * * Input: req -- ptr to request pkt * Output: None * Return value: None */ TW_VOID tw_osli_unmap_request(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; tw_osli_dbg_dprintf(10, sc, "entered"); /* If the command involved data, unmap that too. */ if (req->data != NULL) { if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { /* Lock against multiple simultaneous ioctl calls. */ mtx_lock_spin(sc->io_lock); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_POSTREAD); /* * If we are using a bounce buffer, and we are * reading data, copy the real data in. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->data, req->real_data, req->real_length); } if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map); mtx_unlock_spin(sc->io_lock); } else { if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_POSTREAD); /* * If we are using a bounce buffer, and we are * reading data, copy the real data in. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->data, req->real_data, req->real_length); } if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dma_tag, req->dma_map); } } /* Free alignment buffer if it was used. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { free(req->data, TW_OSLI_MALLOC_CLASS); /* Restore original data pointer and length. 
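tw_osli_map_request() and this function bracket each transfer with the
standard busdma sync protocol: PREREAD/PREWRITE before the controller may
touch the buffer, the matching POSTREAD/POSTWRITE when it is done, then the
unload. The completion half, condensed (hypothetical name, shown as
commentary; in/out mirror the DATA_IN/DATA_OUT flags):

    static void
    ex_dma_done(bus_dma_tag_t tag, bus_dmamap_t map, int in, int out)
    {
        if (in)
            bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
        if (out)
            bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(tag, map);          // releases bounce pages too
    }
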
*/ req->data = req->real_data; req->length = req->real_length; } } #ifdef TW_OSL_DEBUG TW_VOID twa_report_stats(TW_VOID); TW_VOID twa_reset_stats(TW_VOID); TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc); TW_VOID twa_print_req_info(struct tw_osli_req_context *req); /* * Function name: twa_report_stats * Description: For being called from ddb. Calls functions that print * OSL and CL internal stats for the controller. * * Input: None * Output: None * Return value: None */ TW_VOID twa_report_stats(TW_VOID) { struct twa_softc *sc; TW_INT32 i; for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { tw_osli_print_ctlr_stats(sc); tw_cl_print_ctlr_stats(&sc->ctlr_handle); } } /* * Function name: tw_osli_print_ctlr_stats * Description: For being called from ddb. Prints OSL controller stats * * Input: sc -- ptr to OSL internal controller context * Output: None * Return value: None */ TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc) { twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc); twa_printf(sc, "OSLq type current max\n"); twa_printf(sc, "free %04d %04d\n", sc->q_stats[TW_OSLI_FREE_Q].cur_len, sc->q_stats[TW_OSLI_FREE_Q].max_len); twa_printf(sc, "busy %04d %04d\n", sc->q_stats[TW_OSLI_BUSY_Q].cur_len, sc->q_stats[TW_OSLI_BUSY_Q].max_len); } /* * Function name: twa_print_req_info * Description: For being called from ddb. Calls functions that print * OSL and CL internal details for the request. * * Input: req -- ptr to OSL internal request context * Output: None * Return value: None */ TW_VOID twa_print_req_info(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; twa_printf(sc, "OSL details for request:\n"); twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n" "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n" "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n" "next_req = %p, prev_req = %p, dma_map = %p\n", req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt, req->data, req->length, req->real_data, req->real_length, req->state, req->flags, req->error_code, req->orig_req, req->link.next, req->link.prev, req->dma_map); tw_cl_print_req_info(&(req->req_handle)); } /* * Function name: twa_reset_stats * Description: For being called from ddb. * Resets some OSL controller stats. * * Input: None * Output: None * Return value: None */ TW_VOID twa_reset_stats(TW_VOID) { struct twa_softc *sc; TW_INT32 i; for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { sc->q_stats[TW_OSLI_FREE_Q].max_len = 0; sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0; tw_cl_reset_stats(&sc->ctlr_handle); } } #endif /* TW_OSL_DEBUG */ Index: head/sys/dev/tws/tws.c =================================================================== --- head/sys/dev/tws/tws.c (revision 295789) +++ head/sys/dev/tws/tws.c (revision 295790) @@ -1,922 +1,922 @@ /* * Copyright (c) 2010, LSI Corp. * All rights reserved. * Author : Manjunath Ranganathaiah * Support: freebsdraid@lsi.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. 
Neither the name of the nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver"); int tws_queue_depth = TWS_MAX_REQS; int tws_enable_msi = 0; int tws_enable_msix = 0; /* externs */ extern int tws_cam_attach(struct tws_softc *sc); extern void tws_cam_detach(struct tws_softc *sc); extern int tws_init_ctlr(struct tws_softc *sc); extern boolean tws_ctlr_ready(struct tws_softc *sc); extern void tws_turn_off_interrupts(struct tws_softc *sc); extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_request(struct tws_softc *sc, struct tws_request *req, u_int8_t q_type ); extern struct tws_request *tws_q_remove_head(struct tws_softc *sc, u_int8_t q_type ); extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id); extern boolean tws_ctlr_reset(struct tws_softc *sc); extern void tws_intr(void *arg); extern int tws_use_32bit_sgls; struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type); int tws_init_connect(struct tws_softc *sc, u_int16_t mc); void tws_send_event(struct tws_softc *sc, u_int8_t event); uint8_t tws_get_state(struct tws_softc *sc); void tws_release_request(struct tws_request *req); /* Function prototypes */ static d_open_t tws_open; static d_close_t tws_close; static d_read_t tws_read; static d_write_t tws_write; extern d_ioctl_t tws_ioctl; static int tws_init(struct tws_softc *sc); static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size); static int tws_init_aen_q(struct tws_softc *sc); static int tws_init_trace_q(struct tws_softc *sc); static int tws_setup_irq(struct tws_softc *sc); int tws_setup_intr(struct tws_softc *sc, int irqs); int tws_teardown_intr(struct tws_softc *sc); /* Character device entry points */ static struct cdevsw tws_cdevsw = { .d_version = D_VERSION, .d_open = tws_open, .d_close = tws_close, .d_read = tws_read, .d_write = tws_write, .d_ioctl = tws_ioctl, .d_name = "tws", }; /* * In the cdevsw routines, we find our softc by using the si_drv1 member * of struct cdev. We set this variable to point to our softc in our * attach routine when we create the /dev entry. 
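*/

/*
 * A minimal sketch of the si_drv1 convention just described: stash the
 * softc in the cdev at attach time and read it back in every entry
 * point.  Names are hypothetical; make_dev(9) does not fail in this
 * form, so no NULL check is needed.
 */
static struct cdev *
example_create_cdev(device_t dev, struct cdevsw *csw, void *softc)
{
	struct cdev *cdev;

	cdev = make_dev(csw, device_get_unit(dev), UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR, "example%u", device_get_unit(dev));
	cdev->si_drv1 = softc;	/* what the d_open/d_ioctl hooks read back */
	return (cdev);
}

/*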
*/ int tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, oflags); return (0); } int tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, fflag); return (0); } int tws_read(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } int tws_write(struct cdev *dev, struct uio *uio, int ioflag) { struct tws_softc *sc = dev->si_drv1; if ( sc ) TWS_TRACE_DEBUG(sc, "entry", dev, ioflag); return (0); } /* PCI Support Functions */ /* * Compare the device ID of this device against the IDs that this driver * supports. If there is a match, set the description and return success. */ static int tws_probe(device_t dev) { static u_int8_t first_ctlr = 1; if ((pci_get_vendor(dev) == TWS_VENDOR_ID) && (pci_get_device(dev) == TWS_DEVICE_ID)) { device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller"); if (first_ctlr) { printf("LSI 3ware device driver for SAS/SATA storage " "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING); first_ctlr = 0; } return(BUS_PROBE_DEFAULT); } return (ENXIO); } /* Attach function is only called if the probe is successful. */ static int tws_attach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); u_int32_t bar; int error=0,i; /* no tracing yet */ /* Look up our softc and initialize its fields. */ sc->tws_dev = dev; sc->device_id = pci_get_device(dev); sc->subvendor_id = pci_get_subvendor(dev); sc->subdevice_id = pci_get_subdevice(dev); /* Intialize mutexes */ mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF); mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF); mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF); mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE); callout_init(&sc->stats_timer, 1); if ( tws_init_trace_q(sc) == FAILURE ) printf("trace init failure\n"); /* send init event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_START); mtx_unlock(&sc->gen_lock); #if _BYTE_ORDER == _BIG_ENDIAN TWS_TRACE(sc, "BIG endian", 0, 0); #endif /* sysctl context setup */ sysctl_ctx_init(&sc->tws_clist); sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, ""); if ( sc->tws_oidp == NULL ) { tws_log(sc, SYSCTL_TREE_NODE_ADD); goto attach_fail_1; } SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp), OID_AUTO, "driver_version", CTLFLAG_RD, TWS_DRIVER_VERSION_STRING, 0, "TWS driver version"); pci_enable_busmaster(dev); bar = pci_read_config(dev, TWS_PCI_BAR0, 4); TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0); bar = pci_read_config(dev, TWS_PCI_BAR1, 4); bar = bar & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0); /* MFA base address is BAR2 register used for * push mode. 
Firmware will eventually move to * pull mode, during which this needs to change */ #ifndef TWS_PULL_MODE_ENABLE sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4); sc->mfa_base = sc->mfa_base & ~TWS_BIT2; TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0); #endif /* allocate MMIO register space */ sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */ - if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_1; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); #ifndef TWS_PULL_MODE_ENABLE /* Allocate bus space for inbound mfa */ sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */ if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_2; } sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res); sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res); #endif /* Allocate and register our interrupt. */ sc->intr_type = TWS_INTx; /* default */ if ( tws_enable_msi ) sc->intr_type = TWS_MSI; if ( tws_setup_irq(sc) == FAILURE ) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_3; } /* * Create a /dev entry for this device. The kernel will assign us * a major number automatically. We use the unit number of this * device as the minor number and name the character device * "tws". */ sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u", device_get_unit(dev)); sc->tws_cdev->si_drv1 = sc; if ( tws_init(sc) == FAILURE ) { tws_log(sc, TWS_INIT_FAILURE); goto attach_fail_4; } if ( tws_init_ctlr(sc) == FAILURE ) { tws_log(sc, TWS_CTLR_INIT_FAILURE); goto attach_fail_4; } if ((error = tws_cam_attach(sc))) { tws_log(sc, TWS_CAM_ATTACH); goto attach_fail_4; } /* send init complete event */ mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_INIT_COMPLETE); mtx_unlock(&sc->gen_lock); TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id); return(0); attach_fail_4: tws_teardown_intr(sc); destroy_dev(sc->tws_cdev); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); attach_fail_3: for(i=0;i<sc->irqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus irq res", 0, 0); } } #ifndef TWS_PULL_MODE_ENABLE attach_fail_2: #endif if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id); } attach_fail_1: mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); sysctl_ctx_free(&sc->tws_clist); return (ENXIO); } /* Detach device.
*/ static int tws_detach(device_t dev) { struct tws_softc *sc = device_get_softc(dev); int i; u_int32_t reg; TWS_TRACE_DEBUG(sc, "entry", 0, 0); mtx_lock(&sc->gen_lock); tws_send_event(sc, TWS_UNINIT_START); mtx_unlock(&sc->gen_lock); /* need to disable interrupts before detaching from cam */ tws_turn_off_interrupts(sc); /* clear door bell */ tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4); TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0); sc->obfl_q_overrun = false; tws_init_connect(sc, 1); /* Teardown the state in our softc created in our attach routine. */ /* Disconnect the interrupt handler. */ tws_teardown_intr(sc); /* Release irq resource */ for(i=0;i<sc->irqs;i++) { if ( sc->irq_res[i] ){ if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i])) TWS_TRACE(sc, "bus release irq resource", i, sc->irq_res_id[i]); } } if ( sc->intr_type == TWS_MSI ) { pci_release_msi(sc->tws_dev); } tws_cam_detach(sc); if (sc->dma_mem_phys) bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); if (sc->dma_mem) bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); if (sc->cmd_tag) bus_dma_tag_destroy(sc->cmd_tag); /* Release memory resource */ if ( sc->mfa_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id); } if ( sc->reg_res ){ if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)) TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id); } for ( i=0; i< tws_queue_depth; i++) { if (sc->reqs[i].dma_map) bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map); callout_drain(&sc->reqs[i].timeout); } callout_drain(&sc->stats_timer); free(sc->reqs, M_TWS); free(sc->sense_bufs, M_TWS); free(sc->scan_ccb, M_TWS); if (sc->ioctl_data_mem) bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map); if (sc->data_tag) bus_dma_tag_destroy(sc->data_tag); free(sc->aen_q.q, M_TWS); free(sc->trace_q.q, M_TWS); mtx_destroy(&sc->q_lock); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->gen_lock); mtx_destroy(&sc->io_lock); destroy_dev(sc->tws_cdev); sysctl_ctx_free(&sc->tws_clist); return (0); } int tws_setup_intr(struct tws_softc *sc, int irqs) { int i, error; for(i=0;i<irqs;i++) { if (!(sc->intr_handle[i])) { if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i], INTR_TYPE_CAM | INTR_MPSAFE, #if (__FreeBSD_version >= 700000) NULL, #endif tws_intr, sc, &sc->intr_handle[i]))) { tws_log(sc, SETUP_INTR_RES); return(FAILURE); } } } return(SUCCESS); } int tws_teardown_intr(struct tws_softc *sc) { int i, error; for(i=0;i<sc->irqs;i++) { if (sc->intr_handle[i]) { error = bus_teardown_intr(sc->tws_dev, sc->irq_res[i], sc->intr_handle[i]); sc->intr_handle[i] = NULL; } } return(SUCCESS); } static int tws_setup_irq(struct tws_softc *sc) { int messages; switch(sc->intr_type) { case TWS_INTx : sc->irqs = 1; sc->irq_res_id[0] = 0; sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( !
sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using legacy INTx\n"); break; case TWS_MSI : sc->irqs = 1; sc->irq_res_id[0] = 1; messages = 1; if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) { TWS_TRACE(sc, "pci alloc msi fail", 0, messages); return(FAILURE); } sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( !sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using MSI\n"); break; } return(SUCCESS); } static int tws_init(struct tws_softc *sc) { u_int32_t max_sg_elements; u_int32_t dma_mem_size; int error; u_int32_t reg; sc->seq_id = 0; if ( tws_queue_depth > TWS_MAX_REQS ) tws_queue_depth = TWS_MAX_REQS; if (tws_queue_depth < TWS_RESERVED_REQS+1) tws_queue_depth = TWS_RESERVED_REQS+1; sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false; max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ? TWS_MAX_64BIT_SG_ELEMENTS : TWS_MAX_32BIT_SG_ELEMENTS; dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) + (TWS_SECTOR_SIZE) ; if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ max_sg_elements, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->parent_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* In bound message frame requires 16byte alignment. * Outbound MF's can live with 4byte alignment - for now just * use 16 for both. */ if ( bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_IN_MF_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem_size, /* maxsize */ 1, /* numsegs */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->cmd_tag /* tag */ )) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit); return(ENOMEM); } /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */ sc->dma_mem_phys=0; error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, dma_mem_size, tws_dmamap_cmds_load_cbfn, &sc->dma_mem_phys, 0); /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (128kB). 
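tws_setup_irq() above chooses between MSI and legacy INTx: allocating one
MSI message moves the interrupt to rid 1, while INTx stays on the shared
rid 0 line. A condensed sketch (hypothetical name, shown as commentary;
assumes the pci(9) KPI from dev/pci/pcivar.h):

    static int
    ex_alloc_irq(device_t dev, struct resource **resp, int *ridp)
    {
        int count = 1;

        if (pci_alloc_msi(dev, &count) == 0)
            *ridp = 1;                        // MSI vectors use rids 1..n
        else
            *ridp = 0;                        // legacy INTx lives at rid 0
        *resp = bus_alloc_resource_any(dev, SYS_RES_IRQ, ridp,
            RF_SHAREABLE | RF_ACTIVE);
        if (*resp == NULL) {
            if (*ridp == 1)
                pci_release_msi(dev);
            return (ENXIO);
        }
        return (0);
    }
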
*/ if (bus_dma_tag_create(sc->parent_tag, /* parent */ TWS_ALIGNMENT, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TWS_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TWS_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->io_lock, /* lockfuncarg */ &sc->data_tag /* tag */)) { TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit); return(ENOMEM); } sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); if ( sc->reqs == NULL ) { TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit); return(ENOMEM); } sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS, M_WAITOK | M_ZERO); if ( sc->sense_bufs == NULL ) { TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit); return(ENOMEM); } sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO); if ( sc->scan_ccb == NULL ) { TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit); return(ENOMEM); } if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) { device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n"); return(ENOMEM); } if ( !tws_ctlr_ready(sc) ) if( !tws_ctlr_reset(sc) ) return(FAILURE); bzero(&sc->stats, sizeof(struct tws_stats)); tws_init_qs(sc); tws_turn_off_interrupts(sc); /* * enable pull mode by setting bit1. * setting bit0 to 1 will enable interrupt coalescing * will revisit. */ #ifdef TWS_PULL_MODE_ENABLE reg = tws_read_reg(sc, TWS_I2O0_CTL, 4); TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL); tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4); #endif TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL); if ( tws_init_reqs(sc, dma_mem_size) == FAILURE ) return(FAILURE); if ( tws_init_aen_q(sc) == FAILURE ) return(FAILURE); return(SUCCESS); } static int tws_init_aen_q(struct tws_softc *sc) { sc->aen_q.head=0; sc->aen_q.tail=0; sc->aen_q.depth=256; sc->aen_q.overflow=0; sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth, M_TWS, M_WAITOK | M_ZERO); if ( ! sc->aen_q.q ) return(FAILURE); return(SUCCESS); } static int tws_init_trace_q(struct tws_softc *sc) { sc->trace_q.head=0; sc->trace_q.tail=0; sc->trace_q.depth=256; sc->trace_q.overflow=0; sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth, M_TWS, M_WAITOK | M_ZERO); if ( !
static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size) { struct tws_command_packet *cmd_buf; cmd_buf = (struct tws_command_packet *)sc->dma_mem; int i; bzero(cmd_buf, dma_mem_size); TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0); mtx_lock(&sc->q_lock); for ( i=0; i< tws_queue_depth; i++) { if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) { /* log an ENOMEM failure msg here */ mtx_unlock(&sc->q_lock); return(FAILURE); } sc->reqs[i].cmd_pkt = &cmd_buf[i]; sc->sense_bufs[i].hdr = &cmd_buf[i].hdr; sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys + (i * sizeof(struct tws_command_packet)); sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys + sizeof(struct tws_command_header) + (i * sizeof(struct tws_command_packet)); sc->reqs[i].request_id = i; sc->reqs[i].sc = sc; sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128; callout_init(&sc->reqs[i].timeout, 1); sc->reqs[i].state = TWS_REQ_STATE_FREE; if ( i >= TWS_RESERVED_REQS ) tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q); } mtx_unlock(&sc->q_lock); return(SUCCESS); } static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs, int nseg, int error) { /* printf("command load done \n"); */ *((bus_addr_t *)arg) = segs[0].ds_addr; } void tws_send_event(struct tws_softc *sc, u_int8_t event) { mtx_assert(&sc->gen_lock, MA_OWNED); TWS_TRACE_DEBUG(sc, "received event ", 0, event); switch (event) { case TWS_INIT_START: sc->tws_state = TWS_INIT; break; case TWS_INIT_COMPLETE: if (sc->tws_state != TWS_INIT) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state); } else { sc->tws_state = TWS_ONLINE; } break; case TWS_RESET_START: /* We can transition to reset state from any state except reset */ if (sc->tws_state != TWS_RESET) { sc->tws_prev_state = sc->tws_state; sc->tws_state = TWS_RESET; } break; case TWS_RESET_COMPLETE: if (sc->tws_state != TWS_RESET) { device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state); } else { sc->tws_state = sc->tws_prev_state; } break; case TWS_SCAN_FAILURE: if (sc->tws_state != TWS_ONLINE) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state); } else { sc->tws_state = TWS_OFFLINE; } break; case TWS_UNINIT_START: if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) { device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state); } else { sc->tws_state = TWS_UNINIT; } break; } } uint8_t tws_get_state(struct tws_softc *sc) { return((u_int8_t)sc->tws_state); } /* Called during system shutdown after sync. */ static int tws_shutdown(device_t dev) { struct tws_softc *sc = device_get_softc(dev); TWS_TRACE_DEBUG(sc, "entry", 0, 0); tws_turn_off_interrupts(sc); tws_init_connect(sc, 1); return (0); } /* * Device suspend routine. */ static int tws_suspend(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } /* * Device resume routine. */ static int tws_resume(device_t dev) { struct tws_softc *sc = device_get_softc(dev); if ( sc ) TWS_TRACE_DEBUG(sc, "entry", 0, 0); return (0); } struct tws_request * tws_get_request(struct tws_softc *sc, u_int16_t type) { struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ?
&sc->q_lock : &sc->gen_lock); struct tws_request *r = NULL; mtx_lock(my_mutex); if (type == TWS_REQ_TYPE_SCSI_IO) { r = tws_q_remove_head(sc, TWS_FREE_Q); } else { if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) { r = &sc->reqs[type]; } } if ( r ) { bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache)); r->data = NULL; r->length = 0; r->type = type; r->flags = TWS_DIR_UNKNOWN; r->error_code = TWS_REQ_RET_INVALID; r->cb = NULL; r->ccb_ptr = NULL; callout_stop(&r->timeout); r->next = r->prev = NULL; r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY); } mtx_unlock(my_mutex); return(r); } void tws_release_request(struct tws_request *req) { struct tws_softc *sc = req->sc; TWS_TRACE_DEBUG(sc, "entry", sc, 0); mtx_lock(&sc->q_lock); tws_q_insert_tail(sc, req, TWS_FREE_Q); mtx_unlock(&sc->q_lock); } static device_method_t tws_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tws_probe), DEVMETHOD(device_attach, tws_attach), DEVMETHOD(device_detach, tws_detach), DEVMETHOD(device_shutdown, tws_shutdown), DEVMETHOD(device_suspend, tws_suspend), DEVMETHOD(device_resume, tws_resume), DEVMETHOD_END }; static driver_t tws_driver = { "tws", tws_methods, sizeof(struct tws_softc) }; static devclass_t tws_devclass; /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */ DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0); MODULE_DEPEND(tws, cam, 1, 1, 1); MODULE_DEPEND(tws, pci, 1, 1, 1); TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth); TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi); Index: head/sys/isa/isa_common.c =================================================================== --- head/sys/isa/isa_common.c (revision 295789) +++ head/sys/isa/isa_common.c (revision 295790) @@ -1,1125 +1,1125 @@ /*- * Copyright (c) 1999 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Modifications for Intel architecture by Garrett A. Wollman. 
* Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Parts of the ISA bus implementation common to all architectures. */ #include __FBSDID("$FreeBSD$"); #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include static int isa_print_child(device_t bus, device_t dev); static MALLOC_DEFINE(M_ISADEV, "isadev", "ISA device"); static int isa_running; /* * At 'probe' time, we add all the devices which we know about to the * bus. The generic attach routine will probe and attach them if they * are alive. */ static int isa_probe(device_t dev) { device_set_desc(dev, "ISA bus"); isa_init(dev); /* Allow machdep code to initialise */ return (0); } extern device_t isa_bus_device; static int isa_attach(device_t dev) { /* * Arrange for isa_probe_children(dev) to be called later. XXX */ isa_bus_device = dev; return (0); } /* * Find a working set of memory regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_memory(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NMEM]; /* * First clear out any existing resource definitions. 
*/ for (i = 0; i < ISA_NMEM; i++) { bus_delete_resource(child, SYS_RES_MEMORY, i); res[i] = NULL; } success = 1; result->ic_nmem = config->ic_nmem; for (i = 0; i < config->ic_nmem; i++) { uint32_t start, end, size, align; size = config->ic_mem[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_mem[i].ir_start = 0; result->ic_mem[i].ir_end = 0; result->ic_mem[i].ir_size = 0; result->ic_mem[i].ir_align = 0; continue; } for (start = config->ic_mem[i].ir_start, end = config->ic_mem[i].ir_end, align = config->ic_mem[i].ir_align; start + size - 1 <= end && start + size > start; start += MAX(align, 1)) { bus_set_resource(child, SYS_RES_MEMORY, i, start, size); - res[i] = bus_alloc_resource(child, - SYS_RES_MEMORY, &i, 0, ~0, 1, + res[i] = bus_alloc_resource_any(child, + SYS_RES_MEMORY, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_mem[i].ir_start = start; result->ic_mem[i].ir_end = start + size - 1; result->ic_mem[i].ir_size = size; result->ic_mem[i].ir_align = align; break; } } /* * If we didn't find a place for memory range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NMEM; i++) { if (res[i]) bus_release_resource(child, SYS_RES_MEMORY, i, res[i]); } return (success); } /* * Find a working set of port regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_port(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NPORT]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NPORT; i++) { bus_delete_resource(child, SYS_RES_IOPORT, i); res[i] = NULL; } success = 1; result->ic_nport = config->ic_nport; for (i = 0; i < config->ic_nport; i++) { uint32_t start, end, size, align; size = config->ic_port[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_port[i].ir_start = 0; result->ic_port[i].ir_end = 0; result->ic_port[i].ir_size = 0; result->ic_port[i].ir_align = 0; continue; } for (start = config->ic_port[i].ir_start, end = config->ic_port[i].ir_end, align = config->ic_port[i].ir_align; start + size - 1 <= end; start += align) { bus_set_resource(child, SYS_RES_IOPORT, i, start, size); - res[i] = bus_alloc_resource(child, - SYS_RES_IOPORT, &i, 0, ~0, 1, + res[i] = bus_alloc_resource_any(child, + SYS_RES_IOPORT, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_port[i].ir_start = start; result->ic_port[i].ir_end = start + size - 1; result->ic_port[i].ir_size = size; result->ic_port[i].ir_align = align; break; } } /* * If we didn't find a place for port range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NPORT; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IOPORT, i, res[i]); } return (success); } /* * Return the index of the first bit in the mask (or -1 if mask is empty). */ static int find_first_bit(uint32_t mask) { return (ffs(mask) - 1); } /* * Return the index of the next bit in the mask, or -1 if there are no more. */ static int find_next_bit(uint32_t mask, int bit) { bit++; while (bit < 32 && !(mask & (1 << bit))) bit++; if (bit != 32) return (bit); return (-1); }
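/*
 * The bus_alloc_resource() -> bus_alloc_resource_any() conversions in this
 * change are mechanical: bus_alloc_resource_any() is a convenience inline
 * from <sys/bus.h> that requests the full range of a resource already
 * described with bus_set_resource(), so a call such as
 *
 *	res = bus_alloc_resource_any(child, SYS_RES_IOPORT, &i, flags);
 *
 * expands to
 *
 *	res = bus_alloc_resource(child, SYS_RES_IOPORT, &i, 0, ~0, 1, flags);
 */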
/* * Find a working set of irqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * irqs was found. */ static int isa_find_irq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NIRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NIRQ; i++) { bus_delete_resource(child, SYS_RES_IRQ, i); res[i] = NULL; } success = 1; result->ic_nirq = config->ic_nirq; for (i = 0; i < config->ic_nirq; i++) { uint32_t mask = config->ic_irqmask[i]; int irq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_irqmask[i] = 0; continue; } for (irq = find_first_bit(mask); irq != -1; irq = find_next_bit(mask, irq)) { bus_set_resource(child, SYS_RES_IRQ, i, irq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_IRQ, &i, 0 /* !RF_ACTIVE */ ); if (res[i]) { result->ic_irqmask[i] = (1 << irq); break; } } /* * If we didn't find a place for irq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NIRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IRQ, i, res[i]); } return (success); } /* * Find a working set of drqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * drqs was found. */ static int isa_find_drq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NDRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NDRQ; i++) { bus_delete_resource(child, SYS_RES_DRQ, i); res[i] = NULL; } success = 1; result->ic_ndrq = config->ic_ndrq; for (i = 0; i < config->ic_ndrq; i++) { uint32_t mask = config->ic_drqmask[i]; int drq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_drqmask[i] = 0; continue; } for (drq = find_first_bit(mask); drq != -1; drq = find_next_bit(mask, drq)) { bus_set_resource(child, SYS_RES_DRQ, i, drq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_DRQ, &i, 0 /* !RF_ACTIVE */); if (res[i]) { result->ic_drqmask[i] = (1 << drq); break; } } /* * If we didn't find a place for drq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NDRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_DRQ, i, res[i]); } return (success); } /* * Attempt to find a working set of resources for a device. Return * non-zero if a working configuration is found. */ static int isa_assign_resources(device_t child) { struct isa_device *idev = DEVTOISA(child); struct isa_config_entry *ice; struct isa_config *cfg; const char *reason; reason = "Empty ISA id_configs"; cfg = malloc(sizeof(struct isa_config), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) return(0); TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { reason = "memory"; if (!isa_find_memory(child, &ice->ice_config, cfg)) continue; reason = "port"; if (!isa_find_port(child, &ice->ice_config, cfg)) continue; reason = "irq"; if (!isa_find_irq(child, &ice->ice_config, cfg)) continue; reason = "drq"; if (!isa_find_drq(child, &ice->ice_config, cfg)) continue; /* * A working configuration was found; enable the device * with this configuration. */ reason = "no callback"; if (idev->id_config_cb) { idev->id_config_cb(idev->id_config_arg, cfg, 1); free(cfg, M_TEMP); return (1); } }
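/*
 * Falling out of the loop means no candidate configuration could be
 * placed (or the one that could had no config callback registered);
 * reason still names whichever resource class sank the last attempt.
 */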
/* * Disable the device. */ bus_print_child_header(device_get_parent(child), child); printf(" can't assign resources (%s)\n", reason); if (bootverbose) isa_print_child(device_get_parent(child), child); bzero(cfg, sizeof (*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); device_disable(child); free(cfg, M_TEMP); return (0); } /* * Claim any unallocated resources to keep other devices from using * them. */ static void isa_claim_resources(device_t dev, device_t child) { struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; int rid; STAILQ_FOREACH(rle, rl, link) { if (!rle->res) { rid = rle->rid; resource_list_alloc(rl, dev, child, rle->type, &rid, 0ul, ~0ul, 1, 0); } } } /* * Called after other devices have initialised to probe for ISA devices. */ void isa_probe_children(device_t dev) { struct isa_device *idev; device_t *children, child; struct isa_config *cfg; int nchildren, i; /* * Create all the non-hinted children by calling drivers' * identify methods. */ bus_generic_probe(dev); if (device_get_children(dev, &children, &nchildren)) return; /* * First disable all PnP devices so that they don't get * matched by legacy probes. */ if (bootverbose) printf("isa_probe_children: disabling PnP devices\n"); cfg = malloc(sizeof(*cfg), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) { free(children, M_TEMP); return; } for (i = 0; i < nchildren; i++) { idev = DEVTOISA(children[i]); bzero(cfg, sizeof(*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); } free(cfg, M_TEMP); /* * Next, probe all the PnP BIOS devices so they can subsume any * hints. */ for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (idev->id_order > ISA_ORDER_PNPBIOS) continue; if (!TAILQ_EMPTY(&idev->id_configs) && !isa_assign_resources(child)) continue; if (device_probe_and_attach(child) == 0) isa_claim_resources(dev, child); } free(children, M_TEMP); /* * Next, enumerate hinted devices and probe all non-PnP devices so * that they claim their resources first. */ bus_enumerate_hinted_children(dev); if (device_get_children(dev, &children, &nchildren)) return; if (bootverbose) printf("isa_probe_children: probing non-PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || !TAILQ_EMPTY(&idev->id_configs)) continue; device_probe_and_attach(child); } /* * Finally, assign resources to PnP devices and probe them. */ if (bootverbose) printf("isa_probe_children: probing PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || TAILQ_EMPTY(&idev->id_configs)) continue; if (isa_assign_resources(child)) { device_probe_and_attach(child); isa_claim_resources(dev, child); } } free(children, M_TEMP); isa_running = 1; } /* * Add a new child with default ivars.
*/ static device_t isa_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct isa_device *idev; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); idev = malloc(sizeof(struct isa_device), M_ISADEV, M_NOWAIT | M_ZERO); if (!idev) return (0); resource_list_init(&idev->id_resources); TAILQ_INIT(&idev->id_configs); idev->id_order = order; device_set_ivars(child, idev); return (child); } static int isa_print_all_resources(device_t dev) { struct isa_device *idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; int retval = 0; if (STAILQ_FIRST(rl) || device_get_flags(dev)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%ld"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); #ifdef ISAPNP if (idev->id_vendorid) retval += printf(" pnpid %s", pnp_eisaformat(idev->id_vendorid)); #endif return (retval); } static int isa_print_child(device_t bus, device_t dev) { int retval = 0; retval += bus_print_child_header(bus, dev); retval += isa_print_all_resources(dev); retval += bus_print_child_footer(bus, dev); return (retval); } static void isa_probe_nomatch(device_t dev, device_t child) { if (bootverbose) { bus_print_child_header(dev, child); printf(" failed to probe"); isa_print_all_resources(child); bus_print_child_footer(dev, child); } return; } static int isa_read_ivar(device_t bus, device_t dev, int index, uintptr_t * result) { struct isa_device* idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; switch (index) { case ISA_IVAR_PORT_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORT_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORTSIZE_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_PORTSIZE_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MADDR_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MADDR_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MEMSIZE_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MEMSIZE_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_IRQ_0: rle = resource_list_find(rl, SYS_RES_IRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_IRQ_1: rle = resource_list_find(rl, SYS_RES_IRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_0: rle = resource_list_find(rl, SYS_RES_DRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_1: rle = resource_list_find(rl, SYS_RES_DRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_VENDORID: *result = idev->id_vendorid; break; case ISA_IVAR_SERIAL: *result = idev->id_serial; break; case ISA_IVAR_LOGICALID: *result = idev->id_logicalid; 
break; case ISA_IVAR_COMPATID: *result = idev->id_compatid; break; case ISA_IVAR_CONFIGATTR: *result = idev->id_config_attr; break; case ISA_IVAR_PNP_CSN: *result = idev->id_pnp_csn; break; case ISA_IVAR_PNP_LDN: *result = idev->id_pnp_ldn; break; case ISA_IVAR_PNPBIOS_HANDLE: *result = idev->id_pnpbios_handle; break; default: return (ENOENT); } return (0); } static int isa_write_ivar(device_t bus, device_t dev, int index, uintptr_t value) { struct isa_device* idev = DEVTOISA(dev); switch (index) { case ISA_IVAR_PORT_0: case ISA_IVAR_PORT_1: case ISA_IVAR_PORTSIZE_0: case ISA_IVAR_PORTSIZE_1: case ISA_IVAR_MADDR_0: case ISA_IVAR_MADDR_1: case ISA_IVAR_MEMSIZE_0: case ISA_IVAR_MEMSIZE_1: case ISA_IVAR_IRQ_0: case ISA_IVAR_IRQ_1: case ISA_IVAR_DRQ_0: case ISA_IVAR_DRQ_1: return (EINVAL); case ISA_IVAR_VENDORID: idev->id_vendorid = value; break; case ISA_IVAR_SERIAL: idev->id_serial = value; break; case ISA_IVAR_LOGICALID: idev->id_logicalid = value; break; case ISA_IVAR_COMPATID: idev->id_compatid = value; break; case ISA_IVAR_CONFIGATTR: idev->id_config_attr = value; break; default: return (ENOENT); } return (0); } /* * Free any resources which the driver missed or which we were holding for * it (see isa_probe_children). */ static void isa_child_detached(device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } static void isa_driver_added(device_t dev, driver_t *driver) { device_t *children; int nchildren, i; /* * Don't do anything if drivers are dynamically * added during autoconfiguration (cf. ymf724), * since that would end up calling identify * twice. */ if (!isa_running) return; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { device_t child = children[i]; struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; if (device_get_state(child) != DS_NOTPRESENT) continue; if (!device_is_enabled(child)) continue; /* * Free resources which we were holding on behalf of * the device.
*/ STAILQ_FOREACH(rle, &idev->id_resources, link) { if (rle->res) resource_list_release(rl, dev, child, rle->type, rle->rid, rle->res); } if (TAILQ_FIRST(&idev->id_configs)) if (!isa_assign_resources(child)) continue; device_probe_and_attach(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } free(children, M_TEMP); } static int isa_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= ISA_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= ISA_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= ISA_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= ISA_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); return (0); } static struct resource_list * isa_get_resource_list (device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (!rl) return (NULL); return (rl); } static int isa_add_config(device_t dev, device_t child, int priority, struct isa_config *config) { struct isa_device* idev = DEVTOISA(child); struct isa_config_entry *newice, *ice; newice = malloc(sizeof *ice, M_DEVBUF, M_NOWAIT); if (!newice) return (ENOMEM); newice->ice_priority = priority; newice->ice_config = *config; TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { if (ice->ice_priority > priority) break; } if (ice) TAILQ_INSERT_BEFORE(ice, newice, ice_link); else TAILQ_INSERT_TAIL(&idev->id_configs, newice, ice_link); return (0); } static void isa_set_config_callback(device_t dev, device_t child, isa_config_cb *fn, void *arg) { struct isa_device* idev = DEVTOISA(child); idev->id_config_cb = fn; idev->id_config_arg = arg; } static int isa_pnp_probe(device_t dev, device_t child, struct isa_pnp_id *ids) { struct isa_device* idev = DEVTOISA(child); if (!idev->id_vendorid) return (ENOENT); while (ids && ids->ip_id) { /* * Really ought to support >1 compat id per device. 
*/ if (idev->id_logicalid == ids->ip_id || idev->id_compatid == ids->ip_id) { if (ids->ip_desc) device_set_desc(child, ids->ip_desc); return (0); } ids++; } return (ENXIO); } static int isa_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { #ifdef ISAPNP struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) snprintf(buf, buflen, "pnpid=%s", pnp_eisaformat(idev->id_vendorid)); #endif return (0); } static int isa_child_location_str(device_t bus, device_t child, char *buf, size_t buflen) { #if 0 /* id_pnphandle isn't there yet */ struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) snprintf(buf, buflen, "pnphandle=%d", idev->id_pnphandle); #endif /* Nothing here yet */ *buf = '\0'; return (0); } static device_method_t isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isa_probe), DEVMETHOD(device_attach, isa_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, isa_add_child), DEVMETHOD(bus_print_child, isa_print_child), DEVMETHOD(bus_probe_nomatch, isa_probe_nomatch), DEVMETHOD(bus_read_ivar, isa_read_ivar), DEVMETHOD(bus_write_ivar, isa_write_ivar), DEVMETHOD(bus_child_detached, isa_child_detached), DEVMETHOD(bus_driver_added, isa_driver_added), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_resource_list,isa_get_resource_list), DEVMETHOD(bus_alloc_resource, isa_alloc_resource), DEVMETHOD(bus_release_resource, isa_release_resource), DEVMETHOD(bus_set_resource, isa_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_child_pnpinfo_str, isa_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, isa_child_location_str), DEVMETHOD(bus_hinted_child, isa_hinted_child), DEVMETHOD(bus_hint_device_unit, isa_hint_device_unit), /* ISA interface */ DEVMETHOD(isa_add_config, isa_add_config), DEVMETHOD(isa_set_config_callback, isa_set_config_callback), DEVMETHOD(isa_pnp_probe, isa_pnp_probe), { 0, 0 } }; DEFINE_CLASS_0(isa, isa_driver, isa_methods, 0); devclass_t isa_devclass; /* * ISA can be attached to a PCI-ISA bridge, or other locations on some * platforms. */ DRIVER_MODULE(isa, isab, isa_driver, isa_devclass, 0, 0); DRIVER_MODULE(isa, eisab, isa_driver, isa_devclass, 0, 0); MODULE_VERSION(isa, 1); /* * Code common to ISA bridges. */ devclass_t isab_devclass; int isab_attach(device_t dev) { device_t child; child = device_add_child(dev, "isa", 0); if (child != NULL) return (bus_generic_attach(dev)); return (ENXIO); } Index: head/sys/isa/vga_isa.c =================================================================== --- head/sys/isa/vga_isa.c (revision 295789) +++ head/sys/isa/vga_isa.c (revision 295790) @@ -1,382 +1,382 @@ /*- * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_vga.h" #include "opt_fb.h" #include "opt_syscons.h" /* should be removed in the future, XXX */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #include #endif #include #include #include #include static void vga_suspend(device_t dev) { vga_softc_t *sc; int nbytes; sc = device_get_softc(dev); /* Save the video state across the suspend. */ if (sc->state_buf != NULL) goto save_palette; nbytes = vidd_save_state(sc->adp, NULL, 0); if (nbytes <= 0) goto save_palette; sc->state_buf = malloc(nbytes, M_TEMP, M_NOWAIT); if (sc->state_buf == NULL) goto save_palette; if (bootverbose) device_printf(dev, "saving %d bytes of video state\n", nbytes); if (vidd_save_state(sc->adp, sc->state_buf, nbytes) != 0) { device_printf(dev, "failed to save state (nbytes=%d)\n", nbytes); free(sc->state_buf, M_TEMP); sc->state_buf = NULL; } save_palette: /* Save the color palette across the suspend. 
*/ if (sc->pal_buf != NULL) return; sc->pal_buf = malloc(256 * 3, M_TEMP, M_NOWAIT); if (sc->pal_buf == NULL) return; if (bootverbose) device_printf(dev, "saving color palette\n"); if (vidd_save_palette(sc->adp, sc->pal_buf) != 0) { device_printf(dev, "failed to save palette\n"); free(sc->pal_buf, M_TEMP); sc->pal_buf = NULL; } } static void vga_resume(device_t dev) { vga_softc_t *sc; sc = device_get_softc(dev); if (sc->state_buf != NULL) { if (vidd_load_state(sc->adp, sc->state_buf) != 0) device_printf(dev, "failed to reload state\n"); free(sc->state_buf, M_TEMP); sc->state_buf = NULL; } if (sc->pal_buf != NULL) { if (vidd_load_palette(sc->adp, sc->pal_buf) != 0) device_printf(dev, "failed to reload palette\n"); free(sc->pal_buf, M_TEMP); sc->pal_buf = NULL; } } #define VGA_SOFTC(unit) \ ((vga_softc_t *)devclass_get_softc(isavga_devclass, unit)) static devclass_t isavga_devclass; #ifdef FB_INSTALL_CDEV static d_open_t isavga_open; static d_close_t isavga_close; static d_read_t isavga_read; static d_write_t isavga_write; static d_ioctl_t isavga_ioctl; static d_mmap_t isavga_mmap; static struct cdevsw isavga_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = isavga_open, .d_close = isavga_close, .d_read = isavga_read, .d_write = isavga_write, .d_ioctl = isavga_ioctl, .d_mmap = isavga_mmap, .d_name = VGA_DRIVER_NAME, }; #endif /* FB_INSTALL_CDEV */ static void isavga_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, VGA_DRIVER_NAME, 0); } static int isavga_probe(device_t dev) { video_adapter_t adp; int error; /* No pnp support */ if (isa_get_vendorid(dev)) return (ENXIO); error = vga_probe_unit(device_get_unit(dev), &adp, device_get_flags(dev)); if (error == 0) { device_set_desc(dev, "Generic ISA VGA"); bus_set_resource(dev, SYS_RES_IOPORT, 0, adp.va_io_base, adp.va_io_size); bus_set_resource(dev, SYS_RES_MEMORY, 0, adp.va_mem_base, adp.va_mem_size); #if 0 isa_set_port(dev, adp.va_io_base); isa_set_portsize(dev, adp.va_io_size); isa_set_maddr(dev, adp.va_mem_base); isa_set_msize(dev, adp.va_mem_size); #endif } return (error); } static int isavga_attach(device_t dev) { vga_softc_t *sc; int unit; int rid; int error; unit = device_get_unit(dev); sc = device_get_softc(dev); rid = 0; - bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0, ~0, 0, RF_ACTIVE | RF_SHAREABLE); + bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE); rid = 0; - bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, - 0, ~0, 0, RF_ACTIVE | RF_SHAREABLE); + bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE | RF_SHAREABLE); error = vga_attach_unit(unit, sc, device_get_flags(dev)); if (error) return (error); #ifdef FB_INSTALL_CDEV /* attach a virtual frame buffer device */ error = fb_attach(VGA_MKMINOR(unit), sc->adp, &isavga_cdevsw); if (error) return (error); #endif /* FB_INSTALL_CDEV */ if (0 && bootverbose) vidd_diag(sc->adp, bootverbose); #if 0 /* experimental */ device_add_child(dev, "fb", -1); bus_generic_attach(dev); #endif return (0); } static int isavga_suspend(device_t dev) { int error; error = bus_generic_suspend(dev); if (error != 0) return (error); vga_suspend(dev); return (error); } static int isavga_resume(device_t dev) { vga_resume(dev); return (bus_generic_resume(dev)); } #ifdef FB_INSTALL_CDEV static int isavga_open(struct cdev *dev, int flag, int mode, struct thread *td) { return (vga_open(dev, VGA_SOFTC(VGA_UNIT(dev)), flag, mode, td)); } static int isavga_close(struct cdev *dev, int flag, int mode, struct thread *td) { 
return (vga_close(dev, VGA_SOFTC(VGA_UNIT(dev)), flag, mode, td)); } static int isavga_read(struct cdev *dev, struct uio *uio, int flag) { return (vga_read(dev, VGA_SOFTC(VGA_UNIT(dev)), uio, flag)); } static int isavga_write(struct cdev *dev, struct uio *uio, int flag) { return (vga_write(dev, VGA_SOFTC(VGA_UNIT(dev)), uio, flag)); } static int isavga_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { return (vga_ioctl(dev, VGA_SOFTC(VGA_UNIT(dev)), cmd, arg, flag, td)); } static int isavga_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { return (vga_mmap(dev, VGA_SOFTC(VGA_UNIT(dev)), offset, paddr, prot, memattr)); } #endif /* FB_INSTALL_CDEV */ static device_method_t isavga_methods[] = { DEVMETHOD(device_identify, isavga_identify), DEVMETHOD(device_probe, isavga_probe), DEVMETHOD(device_attach, isavga_attach), DEVMETHOD(device_suspend, isavga_suspend), DEVMETHOD(device_resume, isavga_resume), DEVMETHOD_END }; static driver_t isavga_driver = { VGA_DRIVER_NAME, isavga_methods, sizeof(vga_softc_t), }; DRIVER_MODULE(vga, isa, isavga_driver, isavga_devclass, 0, 0); static devclass_t vgapm_devclass; static void vgapm_identify(driver_t *driver, device_t parent) { if (device_get_flags(parent) != 0) device_add_child(parent, "vgapm", 0); } static int vgapm_probe(device_t dev) { device_set_desc(dev, "VGA suspend/resume"); device_quiet(dev); return (BUS_PROBE_DEFAULT); } static int vgapm_attach(device_t dev) { bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int vgapm_suspend(device_t dev) { device_t vga_dev; int error; error = bus_generic_suspend(dev); if (error != 0) return (error); vga_dev = devclass_get_device(isavga_devclass, 0); if (vga_dev == NULL) return (0); vga_suspend(vga_dev); return (0); } static int vgapm_resume(device_t dev) { device_t vga_dev; vga_dev = devclass_get_device(isavga_devclass, 0); if (vga_dev != NULL) vga_resume(vga_dev); return (bus_generic_resume(dev)); } static device_method_t vgapm_methods[] = { DEVMETHOD(device_identify, vgapm_identify), DEVMETHOD(device_probe, vgapm_probe), DEVMETHOD(device_attach, vgapm_attach), DEVMETHOD(device_suspend, vgapm_suspend), DEVMETHOD(device_resume, vgapm_resume), { 0, 0 } }; static driver_t vgapm_driver = { "vgapm", vgapm_methods, 0 }; DRIVER_MODULE(vgapm, vgapci, vgapm_driver, vgapm_devclass, 0, 0); Index: head/sys/mips/sibyte/ata_zbbus.c =================================================================== --- head/sys/mips/sibyte/ata_zbbus.c (revision 295789) +++ head/sys/mips/sibyte/ata_zbbus.c (revision 295790) @@ -1,170 +1,170 @@ /*- * Copyright (c) 2009 Neelkanth Natu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include __FBSDID("$FreeBSD$"); static int ata_zbbus_probe(device_t dev) { return (ata_probe(dev)); } static int ata_zbbus_attach(device_t dev) { int i, rid, regshift, regoffset; struct ata_channel *ch; struct resource *io; ch = device_get_softc(dev); if (ch->attached) return (0); ch->attached = 1; rid = 0; - io = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); + io = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (io == NULL) return (ENXIO); /* * SWARM needs an address shift of 5 when accessing ATA registers. * * For example, an access to register 4 actually needs an address * of (4 << 5) to be output on the generic bus. */ regshift = 5; resource_int_value(device_get_name(dev), device_get_unit(dev), "regshift", &regshift); if (regshift && bootverbose) device_printf(dev, "using a register shift of %d\n", regshift); regoffset = 0x1F0; resource_int_value(device_get_name(dev), device_get_unit(dev), "regoffset", &regoffset); if (regoffset && bootverbose) { device_printf(dev, "using a register offset of 0x%0x\n", regoffset); } /* set up the ATA register addresses */ for (i = ATA_DATA; i <= ATA_COMMAND; ++i) { ch->r_io[i].res = io; ch->r_io[i].offset = (regoffset + i) << regshift; } ch->r_io[ATA_CONTROL].res = io; ch->r_io[ATA_CONTROL].offset = (regoffset + ATA_CTLOFFSET) << regshift; ch->r_io[ATA_IDX_ADDR].res = io; /* XXX what is this used for */ ata_default_registers(dev); /* initialize softc for this channel */ ch->unit = 0; ch->flags |= ATA_USE_16BIT; ata_generic_hw(dev); return (ata_attach(dev)); } static int ata_zbbus_detach(device_t dev) { int error; struct ata_channel *ch = device_get_softc(dev); if (!ch->attached) return (0); ch->attached = 0; error = ata_detach(dev); bus_release_resource(dev, SYS_RES_MEMORY, 0, ch->r_io[ATA_IDX_ADDR].res); return (error); } static int ata_zbbus_suspend(device_t dev) { struct ata_channel *ch = device_get_softc(dev); if (!ch->attached) return (0); return (ata_suspend(dev)); } static int ata_zbbus_resume(device_t dev) { struct ata_channel *ch = device_get_softc(dev); if (!ch->attached) return (0); return (ata_resume(dev)); } static device_method_t ata_zbbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_zbbus_probe), DEVMETHOD(device_attach, ata_zbbus_attach), DEVMETHOD(device_detach, ata_zbbus_detach), DEVMETHOD(device_suspend, ata_zbbus_suspend), DEVMETHOD(device_resume, ata_zbbus_resume), { 0, 0 } }; static driver_t ata_zbbus_driver = { "ata", ata_zbbus_methods, sizeof(struct ata_channel) }; DRIVER_MODULE(ata, zbbus, ata_zbbus_driver, ata_devclass, 0, 0);
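/*
 * With the default regoffset of 0x1F0 and SWARM's regshift of 5, the
 * offsets computed in ata_zbbus_attach() work out to, for example:
 *
 *	ATA_DATA    (register 0): (0x1F0 + 0) << 5 = 0x3E00
 *	ATA_COMMAND (register 7): (0x1F0 + 7) << 5 = 0x3EE0
 *
 * so consecutive ATA registers sit 32 bytes apart on the generic bus.
 */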