Index: head/share/man/man4/isp.4 =================================================================== --- head/share/man/man4/isp.4 (revision 291220) +++ head/share/man/man4/isp.4 (revision 291221) @@ -1,216 +1,214 @@ .\" Copyright (c) 2009-2015 Alexander Motin .\" Copyright (c) 2006 Marcus Alves Grando .\" Copyright (c) 1998-2001 Matthew Jacob, for NASA/Ames Research Center .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd November 22, 2015 .Dt ISP 4 .Os .Sh NAME .Nm isp .Nd Qlogic based SPI and FibreChannel SCSI Host Adapters .Sh SYNOPSIS To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device scbus" .Cd "device isp" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent isp_load="YES" .Ed .Sh DESCRIPTION This driver provides access to .Tn SPI or .Tn FibreChannel SCSI devices. .Pp SPI supports initiator mode for Ultra SCSI and wide mode transactions for .Tn SCSI , Ultra2 LVD (1080, 1280), and Ultra3 LVD (10160, 12160). .Pp Fibre Channel supports initiator and target modes of FCP SCSI profile, utilizing Class 3 and Class 2 (2200 and later) connections. Support is available for Public and Private loops, Point-to-Point and Fabric connections. .Sh FIRMWARE Firmware loading is supported if the .Xr ispfw 4 module is loaded. It is strongly recommended that you use the firmware available from .Xr ispfw 4 as it is the most likely to have been tested with this driver. .Sh HARDWARE Cards supported by the .Nm driver include: .Bl -tag -width xxxxxx -offset indent .It Qlogic 1000 Fast Wide, Ultra Fast Wide cards, Single Ended or Differential SBus cards. .It Qlogic 1020 Fast Wide and Differential Fast Wide SCSI PCI cards. .It Qlogic 1040 Ultra Wide and Differential Ultra Wide SCSI PCI cards. Also known as the DEC KZPBA-CA (single ended) and KZPBA-CB (HVD differential). .It Qlogic 1080 LVD Ultra2 Wide SCSI PCI cards. .It Qlogic 10160 LVD Ultra3 Wide SCSI PCI cards. .It Qlogic 1240 Dual Bus Ultra Wide and Differential Ultra Wide SCSI PCI cards. 
.It Qlogic 1280 Dual Bus LVD Ultra2 Wide SCSI PCI cards. .It Qlogic 12160 Dual Bus LVD Ultra3 Wide SCSI PCI cards. .It Qlogic 210X Copper and Optical Fibre Channel Arbitrated Loop PCI cards (single, dual). .It Qlogic 220X Copper and Optical Fibre Channel Arbitrated Loop PCI cards (single, dual, quad). .It Qlogic 2300 Optical 2Gb Fibre Channel PCI cards. .It Qlogic 2312 Optical 2Gb Fibre Channel PCI cards. .It Qlogic 234X Optical 2Gb Fibre Channel PCI cards (2312 chipset, single and dual attach). .It Qlogic 2322 Optical 2Gb Fibre Channel PCIe cards. .It Qlogic 200 Dell branded version of the QLogic 2312. .It Qlogic 2422 Optical 4Gb Fibre Channel PCI cards. .It Qlogic 2432 Optical 4Gb Fibre Channel PCIe cards. .It Qlogic 2532 Optical 8Gb Fibre Channel PCIe cards. .El .Sh CONFIGURATION OPTIONS Target mode support for Fibre Channel adapters may be enabled with the .Pp .Cd options ISP_TARGET_MODE .Pp option. .Sh BOOT OPTIONS The following options are switchable by setting values in .Pa /boot/device.hints . .Pp They are: .Bl -tag -width indent -.It Va hint.isp.0.disable -A hint value to disable driver in kernel. .It Va hint.isp.0.fwload_disable A hint value to disable loading of firmware .Xr ispfw 4 . .It Va hint.isp.0.prefer_memmap A hint value to use PCI memory space instead of I/O space access for. .It Va hint.isp.0.prefer_iomap A hint value to use PCI I/O space instead of Memory space access for. .It Va hint.isp.0.ignore_nvram A hint value to ignore board NVRAM settings for. Otherwise use NVRAM settings. .It Va hint.isp.0.fullduplex A hint value to set full duplex mode. .It Va hint.isp.0.topology A hint value to select topology of connection. Supported values are: .Pp .Bl -tag -width ".Li lport-only" -compact .It Li lport Prefer loopback and fallback to point to point. .It Li nport Prefer point to point and fallback to loopback. .It Li lport-only Loopback only. .It Li nport-only Point to point only. .El .It Va hint.isp.0.portwwn This should be the full 64 bit World Wide Port Name you would like to use, overriding the value in NVRAM for the card. .It Va hint.isp.0.nodewwn This should be the full 64 bit World Wide Node Name you would like to use, overriding the value in NVRAM for the card. .It Va hint.isp.0.iid A hint to override or set the Initiator ID or Loop ID. For Fibre Channel cards in Local Loop topologies it is .Ar strongly recommended that you set this value to non-zero. .It Va hint.isp.0.role A hint to define default role for isp instance (0 -- none, 1 -- target, 2 -- initiator, 3 -- both). .It Va hint.isp.0.debug A hint value for a driver debug level (see the file .Pa /usr/src/sys/dev/isp/ispvar.h for the values. .It Va hint.isp.0.vports A hint to create specified number of additional virtual ports. .El .Sh SYSCTL OPTIONS .Bl -tag -width indent .It Va dev.isp.N.loop_down_limit This value says how long to wait in seconds after loop has gone down before giving up and expiring all of the devices that were visible. The default is 300 seconds (5 minutes). A separate (nonadjustable) timeout is used when booting to not stop booting on lack of FC connectivity. .It Va dev.isp.N.gone_device_time This value says how long to wait for devices to reappear if they (temporarily) disappear due to loop or fabric events. While this timeout is running, I/O to those devices will simply be held. .It Va dev.isp.N.wwnn This is the readonly World Wide Node Name value for this port. .It Va dev.isp.N.wwpn This is the readonly World Wide Port Name value for this port. 
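.Pp
As an illustration of the boot options above, a hypothetical
.Pa /boot/device.hints
fragment for the first adapter could look like this (the values are
examples only, not recommendations):
.Bd -literal -offset indent
hint.isp.0.topology="nport-only"
hint.isp.0.portwwn="w50000000aaaa0001"
hint.isp.0.role="2"
.Ed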
.El .Sh SEE ALSO .Xr da 4 , .Xr intro 4 , .Xr ispfw 4 , .Xr sa 4 , .Xr scsi 4 , .Xr gmultipath 8 .Sh AUTHORS The .Nm driver was written by .An Matthew Jacob originally for NetBSD at NASA/Ames Research Center. Some later improvement was done by .An Alexander Motin Aq Mt mav@FreeBSD.org . .Sh BUGS The driver currently ignores some NVRAM settings. .Pp Fabric support for 2100 cards has been so problematic, and these cards are so old now that it is just not worth your time to try it. Index: head/sys/dev/isp/isp_freebsd.h =================================================================== --- head/sys/dev/isp/isp_freebsd.h (revision 291220) +++ head/sys/dev/isp/isp_freebsd.h (revision 291221) @@ -1,756 +1,755 @@ /* $FreeBSD$ */ /*- * Qlogic ISP SCSI Host Adapter FreeBSD Wrapper Definitions * * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _ISP_FREEBSD_H #define _ISP_FREEBSD_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ddb.h" #include "opt_isp.h" #define ISP_PLATFORM_VERSION_MAJOR 7 #define ISP_PLATFORM_VERSION_MINOR 10 /* * Efficiency- get rid of SBus code && tests unless we need them. 
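 * (ISP_SBUS_SUPPORTED below is non-zero only when building for
 * sparc64.)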
*/ #ifdef __sparc64__ #define ISP_SBUS_SUPPORTED 1 #else #define ISP_SBUS_SUPPORTED 0 #endif #define ISP_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE #define N_XCMDS 64 #define XCMD_SIZE 512 struct ispsoftc; typedef union isp_ecmd { union isp_ecmd * next; uint8_t data[XCMD_SIZE]; } isp_ecmd_t; isp_ecmd_t * isp_get_ecmd(struct ispsoftc *); void isp_put_ecmd(struct ispsoftc *, isp_ecmd_t *); #ifdef ISP_TARGET_MODE /* Not quite right, but there was no bump for this change */ #if __FreeBSD_version < 225469 #define SDFIXED(x) (&x) #else #define SDFIXED(x) ((struct scsi_sense_data_fixed *)(&x)) #endif #define ISP_TARGET_FUNCTIONS 1 #define ATPDPSIZE 4096 #define ATPDPHASHSIZE 32 #define ATPDPHASH(x) ((((x) >> 24) ^ ((x) >> 16) ^ ((x) >> 8) ^ (x)) & \ ((ATPDPHASHSIZE) - 1)) #include typedef struct atio_private_data { LIST_ENTRY(atio_private_data) next; uint32_t orig_datalen; uint32_t bytes_xfered; uint32_t bytes_in_transit; uint32_t tag; /* typically f/w RX_ID */ uint32_t lun; uint32_t nphdl; uint32_t sid; uint32_t portid; uint16_t rxid; /* wire rxid */ uint16_t oxid; /* wire oxid */ uint16_t word3; /* PRLI word3 params */ uint16_t ctcnt; /* number of CTIOs currently active */ uint8_t seqno; /* CTIO sequence number */ uint32_t srr_notify_rcvd : 1, cdb0 : 8, sendst : 1, dead : 1, tattr : 3, state : 3; void * ests; /* * The current SRR notify copy */ uint8_t srr[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ void * srr_ccb; uint32_t nsrr; } atio_private_data_t; #define ATPD_STATE_FREE 0 #define ATPD_STATE_ATIO 1 #define ATPD_STATE_CAM 2 #define ATPD_STATE_CTIO 3 #define ATPD_STATE_LAST_CTIO 4 #define ATPD_STATE_PDON 5 #define ATPD_CCB_OUTSTANDING 16 #define ATPD_SEQ_MASK 0x7f #define ATPD_SEQ_NOTIFY_CAM 0x80 #define ATPD_SET_SEQNO(hdrp, atp) ((isphdr_t *)hdrp)->rqs_seqno &= ~ATPD_SEQ_MASK, ((isphdr_t *)hdrp)->rqs_seqno |= (atp)->seqno #define ATPD_GET_SEQNO(hdrp) (((isphdr_t *)hdrp)->rqs_seqno & ATPD_SEQ_MASK) #define ATPD_GET_NCAM(hdrp) ((((isphdr_t *)hdrp)->rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0) typedef union inot_private_data inot_private_data_t; union inot_private_data { inot_private_data_t *next; struct { isp_notify_t nt; /* must be first! */ uint8_t data[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ uint32_t tag_id, seq_id; } rd; }; typedef struct isp_timed_notify_ack { void *isp; void *not; uint8_t data[64]; /* sb QENTRY_LEN, but order of definitions is wrong */ struct callout timer; } isp_tna_t; TAILQ_HEAD(isp_ccbq, ccb_hdr); typedef struct tstate { SLIST_ENTRY(tstate) next; lun_id_t ts_lun; struct cam_path *owner; struct isp_ccbq waitq; /* waiting CCBs */ struct ccb_hdr_slist atios; struct ccb_hdr_slist inots; uint32_t hold; uint16_t atio_count; uint16_t inot_count; inot_private_data_t * restart_queue; inot_private_data_t * ntfree; inot_private_data_t ntpool[ATPDPSIZE]; LIST_HEAD(, atio_private_data) atfree; LIST_HEAD(, atio_private_data) atused[ATPDPHASHSIZE]; atio_private_data_t atpool[ATPDPSIZE]; } tstate_t; #define LUN_HASH_SIZE 32 #define LUN_HASH_FUNC(lun) ((lun) & (LUN_HASH_SIZE - 1)) #endif /* * Per command info. 
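 * One of these is attached to every active CCB through ISP_PCMD()
 * (ccb_h.spriv_ptr1); entries come from the pcmd_pool/pcmd_free list
 * in isposinfo and hold the DMA map, the per-command watchdog callout
 * and the sense-length/command-reference-number bookkeeping.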
*/ struct isp_pcmd { struct isp_pcmd * next; bus_dmamap_t dmap; /* dma map for this command */ struct ispsoftc * isp; /* containing isp */ struct callout wdog; /* watchdog timer */ uint32_t datalen; /* data length for this command (target mode only) */ uint8_t totslen; /* sense length on status response */ uint8_t cumslen; /* sense length on status response */ uint8_t crn; /* command reference number */ }; #define ISP_PCMD(ccb) (ccb)->ccb_h.spriv_ptr1 #define PISP_PCMD(ccb) ((struct isp_pcmd *)ISP_PCMD(ccb)) /* * Per nexus info. */ struct isp_nexus { uint64_t lun; /* LUN for target */ uint32_t tgt; /* TGT for target */ uint8_t crnseed; /* next command reference number */ struct isp_nexus *next; }; #define NEXUS_HASH_WIDTH 32 #define INITIAL_NEXUS_COUNT MAX_FC_TARG #define NEXUS_HASH(tgt, lun) ((tgt + lun) % NEXUS_HASH_WIDTH) /* * Per channel information */ SLIST_HEAD(tslist, tstate); struct isp_fc { struct cam_sim *sim; struct cam_path *path; struct ispsoftc *isp; struct proc *kproc; bus_dma_tag_t tdmat; bus_dmamap_t tdmap; uint64_t def_wwpn; uint64_t def_wwnn; uint32_t loop_down_time; uint32_t loop_down_limit; uint32_t gone_device_time; /* * Per target/lun info- just to keep a per-ITL nexus crn count */ struct isp_nexus *nexus_hash[NEXUS_HASH_WIDTH]; struct isp_nexus *nexus_free_list; uint32_t simqfrozen : 3, default_id : 8, hysteresis : 8, def_role : 2, /* default role */ gdt_running : 1, loop_dead : 1, fcbsy : 1, ready : 1; struct callout ldt; /* loop down timer */ struct callout gdt; /* gone device timer */ struct task ltask; struct task gtask; #ifdef ISP_TARGET_MODE struct tslist lun_hash[LUN_HASH_SIZE]; #if defined(DEBUG) unsigned int inject_lost_data_frame; #endif #endif int num_threads; }; struct isp_spi { struct cam_sim *sim; struct cam_path *path; uint32_t simqfrozen : 3, iid : 4; #ifdef ISP_TARGET_MODE struct tslist lun_hash[LUN_HASH_SIZE]; #endif int num_threads; }; struct isposinfo { /* * Linkage, locking, and identity */ struct mtx lock; device_t dev; struct cdev * cdev; struct intr_config_hook ehook; struct cam_devq * devq; /* * Firmware pointer */ const struct firmware * fw; /* * DMA related sdtuff */ bus_space_tag_t bus_tag; bus_dma_tag_t dmat; bus_space_handle_t bus_handle; bus_dma_tag_t cdmat; bus_dmamap_t cdmap; /* * Command and transaction related related stuff */ struct isp_pcmd * pcmd_pool; struct isp_pcmd * pcmd_free; uint32_t #ifdef ISP_TARGET_MODE tmwanted : 1, tmbusy : 1, #else : 2, #endif sixtyfourbit : 1, /* sixtyfour bit platform */ timer_active : 1, autoconf : 1, ehook_active : 1, - disabled : 1, mbox_sleeping : 1, mbox_sleep_ok : 1, mboxcmd_done : 1, mboxbsy : 1; struct callout tmo; /* general timer */ /* * misc- needs to be sorted better XXXXXX */ int framesize; int exec_throttle; int cont_max; bus_addr_t ecmd_dma; isp_ecmd_t * ecmd_base; isp_ecmd_t * ecmd_free; /* * Per-type private storage... 
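 * pc.ptr points at an array of isp_nchan isp_fc or isp_spi structures
 * allocated by the bus front end (e.g. isp_pci_attach()); the
 * ISP_FC_PC()/ISP_SPI_PC() macros below index into it by channel.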
*/ union { struct isp_fc *fc; struct isp_spi *spi; void *ptr; } pc; int is_exiting; }; #define ISP_FC_PC(isp, chan) (&(isp)->isp_osinfo.pc.fc[(chan)]) #define ISP_SPI_PC(isp, chan) (&(isp)->isp_osinfo.pc.spi[(chan)]) #define ISP_GET_PC(isp, chan, tag, rslt) \ if (IS_SCSI(isp)) { \ rslt = ISP_SPI_PC(isp, chan)-> tag; \ } else { \ rslt = ISP_FC_PC(isp, chan)-> tag; \ } #define ISP_GET_PC_ADDR(isp, chan, tag, rp) \ if (IS_SCSI(isp)) { \ rp = &ISP_SPI_PC(isp, chan)-> tag; \ } else { \ rp = &ISP_FC_PC(isp, chan)-> tag; \ } #define ISP_SET_PC(isp, chan, tag, val) \ if (IS_SCSI(isp)) { \ ISP_SPI_PC(isp, chan)-> tag = val; \ } else { \ ISP_FC_PC(isp, chan)-> tag = val; \ } #define FCP_NEXT_CRN isp_fcp_next_crn #define isp_lock isp_osinfo.lock #define isp_bus_tag isp_osinfo.bus_tag #define isp_bus_handle isp_osinfo.bus_handle /* * Locking macros... */ #define ISP_LOCK(isp) mtx_lock(&(isp)->isp_osinfo.lock) #define ISP_UNLOCK(isp) mtx_unlock(&(isp)->isp_osinfo.lock) #define ISP_ASSERT_LOCKED(isp) mtx_assert(&(isp)->isp_osinfo.lock, MA_OWNED) /* * Required Macros/Defines */ #define ISP_FC_SCRLEN 0x1000 #define ISP_MEMZERO(a, b) memset(a, 0, b) #define ISP_MEMCPY memcpy #define ISP_SNPRINTF snprintf #define ISP_DELAY(x) DELAY(x) #if __FreeBSD_version < 1000029 #define ISP_SLEEP(isp, x) msleep(&(isp)->isp_osinfo.is_exiting, \ &(isp)->isp_osinfo.lock, 0, "isp_sleep", ((x) + tick - 1) / tick) #else #define ISP_SLEEP(isp, x) msleep_sbt(&(isp)->isp_osinfo.is_exiting, \ &(isp)->isp_osinfo.lock, 0, "isp_sleep", (x) * SBT_1US, 0, 0) #endif #define ISP_MIN imin #ifndef DIAGNOSTIC #define ISP_INLINE __inline #else #define ISP_INLINE #endif #define NANOTIME_T struct timespec #define GET_NANOTIME nanotime #define GET_NANOSEC(x) ((x)->tv_sec * 1000000000 + (x)->tv_nsec) #define NANOTIME_SUB isp_nanotime_sub #define MAXISPREQUEST(isp) ((IS_FC(isp) || IS_ULTRA2(isp))? 
1024 : 256) #define MEMORYBARRIER(isp, type, offset, size, chan) \ switch (type) { \ case SYNC_SFORDEV: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(fc->tdmat, fc->tdmap, \ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ break; \ } \ case SYNC_REQUEST: \ bus_dmamap_sync(isp->isp_osinfo.cdmat, \ isp->isp_osinfo.cdmap, \ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_SFORCPU: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(fc->tdmat, fc->tdmap, \ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \ break; \ } \ case SYNC_RESULT: \ bus_dmamap_sync(isp->isp_osinfo.cdmat, \ isp->isp_osinfo.cdmap, \ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); \ break; \ case SYNC_REG: \ bus_space_barrier(isp->isp_osinfo.bus_tag, \ isp->isp_osinfo.bus_handle, offset, size, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); \ break; \ default: \ break; \ } #define MEMORYBARRIERW(isp, type, offset, size, chan) \ switch (type) { \ case SYNC_SFORDEV: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(fc->tdmat, fc->tdmap, \ BUS_DMASYNC_PREWRITE); \ break; \ } \ case SYNC_REQUEST: \ bus_dmamap_sync(isp->isp_osinfo.cdmat, \ isp->isp_osinfo.cdmap, BUS_DMASYNC_PREWRITE); \ break; \ case SYNC_SFORCPU: \ { \ struct isp_fc *fc = ISP_FC_PC(isp, chan); \ bus_dmamap_sync(fc->tdmat, fc->tdmap, \ BUS_DMASYNC_POSTWRITE); \ break; \ } \ case SYNC_RESULT: \ bus_dmamap_sync(isp->isp_osinfo.cdmat, \ isp->isp_osinfo.cdmap, BUS_DMASYNC_POSTWRITE); \ break; \ case SYNC_REG: \ bus_space_barrier(isp->isp_osinfo.bus_tag, \ isp->isp_osinfo.bus_handle, offset, size, \ BUS_SPACE_BARRIER_WRITE); \ break; \ default: \ break; \ } #define MBOX_ACQUIRE isp_mbox_acquire #define MBOX_WAIT_COMPLETE isp_mbox_wait_complete #define MBOX_NOTIFY_COMPLETE isp_mbox_notify_done #define MBOX_RELEASE isp_mbox_release #define FC_SCRATCH_ACQUIRE isp_fc_scratch_acquire #define FC_SCRATCH_RELEASE(isp, chan) isp->isp_osinfo.pc.fc[chan].fcbsy = 0 #ifndef SCSI_GOOD #define SCSI_GOOD SCSI_STATUS_OK #endif #ifndef SCSI_CHECK #define SCSI_CHECK SCSI_STATUS_CHECK_COND #endif #ifndef SCSI_BUSY #define SCSI_BUSY SCSI_STATUS_BUSY #endif #ifndef SCSI_QFULL #define SCSI_QFULL SCSI_STATUS_QUEUE_FULL #endif #define XS_T struct ccb_scsiio #define XS_DMA_ADDR_T bus_addr_t #define XS_GET_DMA64_SEG(a, b, c) \ { \ ispds64_t *d = a; \ bus_dma_segment_t *e = b; \ uint32_t f = c; \ e += f; \ d->ds_base = DMA_LO32(e->ds_addr); \ d->ds_basehi = DMA_HI32(e->ds_addr); \ d->ds_count = e->ds_len; \ } #define XS_GET_DMA_SEG(a, b, c) \ { \ ispds_t *d = a; \ bus_dma_segment_t *e = b; \ uint32_t f = c; \ e += f; \ d->ds_base = DMA_LO32(e->ds_addr); \ d->ds_count = e->ds_len; \ } #define XS_ISP(ccb) cam_sim_softc(xpt_path_sim((ccb)->ccb_h.path)) #define XS_CHANNEL(ccb) cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)) #define XS_TGT(ccb) (ccb)->ccb_h.target_id #define XS_LUN(ccb) (ccb)->ccb_h.target_lun #define XS_CDBP(ccb) \ (((ccb)->ccb_h.flags & CAM_CDB_POINTER)? 
\ (ccb)->cdb_io.cdb_ptr : (ccb)->cdb_io.cdb_bytes) #define XS_CDBLEN(ccb) (ccb)->cdb_len #define XS_XFRLEN(ccb) (ccb)->dxfer_len #define XS_TIME(ccb) (ccb)->ccb_h.timeout #define XS_GET_RESID(ccb) (ccb)->resid #define XS_SET_RESID(ccb, r) (ccb)->resid = r #define XS_STSP(ccb) (&(ccb)->scsi_status) #define XS_SNSP(ccb) (&(ccb)->sense_data) #define XS_TOT_SNSLEN(ccb) ccb->sense_len #define XS_CUR_SNSLEN(ccb) (ccb->sense_len - ccb->sense_resid) #define XS_SNSKEY(ccb) (scsi_get_sense_key(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_SNSASC(ccb) (scsi_get_asc(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_SNSASCQ(ccb) (scsi_get_ascq(&(ccb)->sense_data, \ ccb->sense_len - ccb->sense_resid, 1)) #define XS_TAG_P(ccb) \ (((ccb)->ccb_h.flags & CAM_TAG_ACTION_VALID) && \ (ccb)->tag_action != CAM_TAG_ACTION_NONE) #define XS_TAG_TYPE(ccb) \ ((ccb->tag_action == MSG_SIMPLE_Q_TAG)? REQFLAG_STAG : \ ((ccb->tag_action == MSG_HEAD_OF_Q_TAG)? REQFLAG_HTAG : REQFLAG_OTAG)) #define XS_SETERR(ccb, v) (ccb)->ccb_h.status &= ~CAM_STATUS_MASK, \ (ccb)->ccb_h.status |= v # define HBA_NOERROR CAM_REQ_INPROG # define HBA_BOTCH CAM_UNREC_HBA_ERROR # define HBA_CMDTIMEOUT CAM_CMD_TIMEOUT # define HBA_SELTIMEOUT CAM_SEL_TIMEOUT # define HBA_TGTBSY CAM_SCSI_STATUS_ERROR # define HBA_BUSRESET CAM_SCSI_BUS_RESET # define HBA_ABORTED CAM_REQ_ABORTED # define HBA_DATAOVR CAM_DATA_RUN_ERR # define HBA_ARQFAIL CAM_AUTOSENSE_FAIL #define XS_ERR(ccb) ((ccb)->ccb_h.status & CAM_STATUS_MASK) #define XS_NOERR(ccb) (((ccb)->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) #define XS_INITERR(ccb) XS_SETERR(ccb, CAM_REQ_INPROG), ccb->sense_resid = ccb->sense_len #define XS_SAVE_SENSE(xs, sense_ptr, totslen, slen) do { \ uint32_t tlen = slen; \ if (tlen > (xs)->sense_len) \ tlen = (xs)->sense_len; \ PISP_PCMD(xs)->totslen = imin((xs)->sense_len, totslen); \ PISP_PCMD(xs)->cumslen = tlen; \ memcpy(&(xs)->sense_data, sense_ptr, tlen); \ (xs)->sense_resid = (xs)->sense_len - tlen; \ (xs)->ccb_h.status |= CAM_AUTOSNS_VALID; \ } while (0) #define XS_SENSE_APPEND(xs, xsnsp, xsnsl) do { \ uint32_t off = PISP_PCMD(xs)->cumslen; \ uint8_t *ptr = &((uint8_t *)(&(xs)->sense_data))[off]; \ uint32_t amt = imin(xsnsl, PISP_PCMD(xs)->totslen - off); \ if (amt) { \ memcpy(ptr, xsnsp, amt); \ (xs)->sense_resid -= amt; \ PISP_PCMD(xs)->cumslen += amt; \ } \ } while (0) #define XS_SENSE_VALID(xs) (((xs)->ccb_h.status & CAM_AUTOSNS_VALID) != 0) #define DEFAULT_FRAMESIZE(isp) isp->isp_osinfo.framesize #define DEFAULT_EXEC_THROTTLE(isp) isp->isp_osinfo.exec_throttle #define DEFAULT_ROLE(isp, chan) \ (IS_FC(isp)? ISP_FC_PC(isp, chan)->def_role : ISP_ROLE_INITIATOR) #define DEFAULT_IID(isp, chan) isp->isp_osinfo.pc.spi[chan].iid #define DEFAULT_LOOPID(x, chan) isp->isp_osinfo.pc.fc[chan].default_id #define DEFAULT_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 0, 1) #define DEFAULT_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 0, 0) #define ACTIVE_NODEWWN(isp, chan) isp_default_wwn(isp, chan, 1, 1) #define ACTIVE_PORTWWN(isp, chan) isp_default_wwn(isp, chan, 1, 0) #if BYTE_ORDER == BIG_ENDIAN #ifdef ISP_SBUS_SUPPORTED #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap16(s) #define ISP_IOXPUT_32(isp, s, d) \ *(d) = (isp->isp_bustype == ISP_BT_SBUS)? s : bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? 
\ *((uint16_t *)s) : bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) \ d = (isp->isp_bustype == ISP_BT_SBUS)? \ *((uint32_t *)s) : bswap32(*((uint32_t *)s)) #else /* ISP_SBUS_SUPPORTED */ #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOXPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOXGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOXGET_16(isp, s, d) d = bswap16(*((uint16_t *)s)) #define ISP_IOXGET_32(isp, s, d) d = bswap32(*((uint32_t *)s)) #endif #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) *rp = bswap16(*rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) *rp = bswap32(*rp) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)s)) #define ISP_IOZGET_16(isp, s, d) d = (*((uint16_t *)s)) #define ISP_IOZGET_32(isp, s, d) d = (*((uint32_t *)s)) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = s #define ISP_IOZPUT_32(isp, s, d) *(d) = s #else #define ISP_IOXPUT_8(isp, s, d) *(d) = s #define ISP_IOXPUT_16(isp, s, d) *(d) = s #define ISP_IOXPUT_32(isp, s, d) *(d) = s #define ISP_IOXGET_8(isp, s, d) d = *(s) #define ISP_IOXGET_16(isp, s, d) d = *(s) #define ISP_IOXGET_32(isp, s, d) d = *(s) #define ISP_SWIZZLE_NVRAM_WORD(isp, rp) #define ISP_SWIZZLE_NVRAM_LONG(isp, rp) #define ISP_IOZPUT_8(isp, s, d) *(d) = s #define ISP_IOZPUT_16(isp, s, d) *(d) = bswap16(s) #define ISP_IOZPUT_32(isp, s, d) *(d) = bswap32(s) #define ISP_IOZGET_8(isp, s, d) d = (*((uint8_t *)(s))) #define ISP_IOZGET_16(isp, s, d) d = bswap16(*((uint16_t *)(s))) #define ISP_IOZGET_32(isp, s, d) d = bswap32(*((uint32_t *)(s))) #endif #define ISP_SWAP16(isp, s) bswap16(s) #define ISP_SWAP32(isp, s) bswap32(s) /* * Includes of common header files */ #include #include #include /* * isp_osinfo definiitions && shorthand */ #define SIMQFRZ_RESOURCE 0x1 #define SIMQFRZ_LOOPDOWN 0x2 #define SIMQFRZ_TIMED 0x4 #define isp_dev isp_osinfo.dev /* * prototypes for isp_pci && isp_freebsd to share */ extern int isp_attach(ispsoftc_t *); extern int isp_detach(ispsoftc_t *); extern void isp_uninit(ispsoftc_t *); extern uint64_t isp_default_wwn(ispsoftc_t *, int, int, int); /* * driver global data */ extern int isp_announced; extern int isp_fabric_hysteresis; extern int isp_loop_down_limit; extern int isp_gone_device_time; extern int isp_quickboot_time; /* * Platform private flags */ /* * Platform Library Functions */ void isp_prt(ispsoftc_t *, int level, const char *, ...) __printflike(3, 4); void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...) __printflike(4, 5); uint64_t isp_nanotime_sub(struct timespec *, struct timespec *); int isp_mbox_acquire(ispsoftc_t *); void isp_mbox_wait_complete(ispsoftc_t *, mbreg_t *); void isp_mbox_notify_done(ispsoftc_t *); void isp_mbox_release(ispsoftc_t *); int isp_fc_scratch_acquire(ispsoftc_t *, int); int isp_mstohz(int); void isp_platform_intr(void *); void isp_common_dmateardown(ispsoftc_t *, struct ccb_scsiio *, uint32_t); void isp_fcp_reset_crn(ispsoftc_t *, int, uint32_t, int); int isp_fcp_next_crn(ispsoftc_t *, uint8_t *, XS_T *); /* * Platform Version specific defines */ #define BUS_DMA_ROOTARG(x) bus_get_dma_tag(x) #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ busdma_lock_mutex, &isp->isp_osinfo.lock, z) #define isp_setup_intr bus_setup_intr #define isp_sim_alloc(a, b, c, d, e, f, g, h) \ cam_sim_alloc(a, b, c, d, e, &(d)->isp_osinfo.lock, f, g, h) #define ISP_PATH_PRT(i, l, p, ...) 
\ if ((l) == ISP_LOGALL || ((l)& (i)->isp_dblev) != 0) { \ xpt_print(p, __VA_ARGS__); \ } /* * Platform specific inline functions */ /* * ISP General Library functions */ #include #endif /* _ISP_FREEBSD_H */ Index: head/sys/dev/isp/isp_pci.c =================================================================== --- head/sys/dev/isp/isp_pci.c (revision 291220) +++ head/sys/dev/isp/isp_pci.c (revision 291221) @@ -1,2028 +1,2008 @@ /*- * Copyright (c) 1997-2008 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. * FreeBSD Version. 
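 * The PCI front end registers itself with DRIVER_MODULE(isp, pci, ...)
 * and declares MODULE_DEPEND()s on cam and firmware; isp_pci_probe()
 * and isp_pci_attach() do the bus-specific setup before handing the
 * softc to the common isp_attach().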
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __sparc64__ #include #include #endif #include static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_pci_mbxdma(ispsoftc_t *); static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *); static void isp_pci_reset0(ispsoftc_t *); static void isp_pci_reset1(ispsoftc_t *); static void isp_pci_dumpregs(ispsoftc_t *, const char *); static struct ispmdvec mdvec = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_1080 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_12160 = { isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_2100 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2200 = { isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2300 = { isp_pci_rd_isr_2300, isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2400 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2400, isp_pci_wr_reg_2400, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; static struct ispmdvec mdvec_2500 = { isp_pci_rd_isr_2400, isp_pci_rd_reg_2400, isp_pci_wr_reg_2400, isp_pci_mbxdma, isp_pci_dmasetup, isp_common_dmateardown, isp_pci_reset0, isp_pci_reset1, NULL }; #ifndef PCIM_CMD_INVEN #define PCIM_CMD_INVEN 0x10 #endif #ifndef PCIM_CMD_BUSMASTEREN #define PCIM_CMD_BUSMASTEREN 0x0004 #endif #ifndef PCIM_CMD_PERRESPEN #define PCIM_CMD_PERRESPEN 0x0040 #endif #ifndef PCIM_CMD_SEREN #define PCIM_CMD_SEREN 0x0100 #endif #ifndef PCIM_CMD_INTX_DISABLE #define PCIM_CMD_INTX_DISABLE 0x0400 #endif #ifndef PCIR_COMMAND #define PCIR_COMMAND 0x04 #endif #ifndef PCIR_CACHELNSZ #define PCIR_CACHELNSZ 0x0c #endif #ifndef PCIR_LATTIMER #define PCIR_LATTIMER 0x0d #endif #ifndef PCIR_ROMADDR #define PCIR_ROMADDR 0x30 #endif #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1020 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1080 #define 
PCI_PRODUCT_QLOGIC_ISP1080 0x1080 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP10160 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP12160 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1240 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1280 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2100 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2200 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2300 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2312 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2322 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2422 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2432 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2532 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP6312 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP6322 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP5432 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432 #endif #define PCI_QLOGIC_ISP5432 \ ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1020 \ ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1080 \ ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP10160 \ ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP12160 \ ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1240 \ ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1280 \ ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2100 \ ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2200 \ ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2300 \ ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2312 \ ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2322 \ ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2422 \ ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2432 \ ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2532 \ ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP6312 \ ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP6322 \ ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) /* * Odd case for some AMI raid cards... We need to *not* attach to this. 
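 * isp_pci_probe() returns ENXIO for ISP12160 boards whose PCI
 * subvendor ID matches this value, so the driver never claims them.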
*/ #define AMI_RAID_SUBVENDOR_ID 0x101e #define IO_MAP_REG 0x10 #define MEM_MAP_REG 0x14 #define PCI_DFLT_LTNCY 0x40 #define PCI_DFLT_LNSZ 0x10 static int isp_pci_probe (device_t); static int isp_pci_attach (device_t); static int isp_pci_detach (device_t); #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev struct isp_pcisoftc { ispsoftc_t pci_isp; device_t pci_dev; struct resource * regs; void * irq; int iqd; int rtp; int rgd; void * ih; int16_t pci_poff[_NREG_BLKS]; bus_dma_tag_t dmat; int msicount; }; static device_method_t isp_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_pci_probe), DEVMETHOD(device_attach, isp_pci_attach), DEVMETHOD(device_detach, isp_pci_detach), { 0, 0 } }; static driver_t isp_pci_driver = { "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); MODULE_DEPEND(isp, cam, 1, 1, 1); MODULE_DEPEND(isp, firmware, 1, 1, 1); static int isp_nvports = 0; static int isp_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP1020: device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1080: device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1240: device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1280: device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP10160: device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP12160: if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { return (ENXIO); } device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP2100: device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2200: device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2300: device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2312: device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2322: device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2422: device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2432: device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2532: device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP5432: device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP6312: device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP6322: device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); break; default: return (ENXIO); } if (isp_announced == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); isp_announced++; } /* * XXXX: Here is where we might load the f/w module * XXXX: (or increase a reference count to it). */ return (BUS_PROBE_DEFAULT); } static void isp_get_generic_options(device_t dev, ispsoftc_t *isp) { int tval; - /* - * Figure out if we're supposed to skip this one. 
- */ tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) { - device_printf(dev, "disabled at user request\n"); - isp->isp_osinfo.disabled = 1; - return; - } - - tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NONVRAM; } tval = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval); if (tval) { isp->isp_dblev = tval; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) { isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; } tval = -1; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval); if (tval > 0 && tval <= 254) { isp_nvports = tval; } tval = 7; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval); isp_quickboot_time = tval; } static void isp_get_pci_options(device_t dev, int *m1, int *m2) { int tval; /* * Which we should try first - memory mapping or i/o mapping? * * We used to try memory first followed by i/o on alpha, otherwise * the reverse, but we should just try memory first all the time now. */ *m1 = PCIM_CMD_MEMEN; *m2 = PCIM_CMD_PORTEN; tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) { *m1 = PCIM_CMD_PORTEN; *m2 = PCIM_CMD_MEMEN; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) { *m1 = PCIM_CMD_MEMEN; *m2 = PCIM_CMD_PORTEN; } } static void isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp) { const char *sptr; int tval = 0; char prefix[12], name[16]; if (chan == 0) prefix[0] = 0; else snprintf(prefix, sizeof(prefix), "chan%d.", chan); snprintf(name, sizeof(name), "%siid", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval)) { if (IS_FC(isp)) { ISP_FC_PC(isp, chan)->default_id = 109 - chan; } else { #ifdef __sparc64__ ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev); #else ISP_SPI_PC(isp, chan)->iid = 7; #endif } } else { if (IS_FC(isp)) { ISP_FC_PC(isp, chan)->default_id = tval - chan; } else { ISP_SPI_PC(isp, chan)->iid = tval; } isp->isp_confopts |= ISP_CFG_OWNLOOPID; } if (IS_SCSI(isp)) return; tval = -1; snprintf(name, sizeof(name), "%srole", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval) == 0) { switch (tval) { case ISP_ROLE_NONE: case ISP_ROLE_INITIATOR: case ISP_ROLE_TARGET: case ISP_ROLE_BOTH: device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval); break; default: tval = -1; break; } } if (tval == -1) { tval = ISP_DEFAULT_ROLES; } ISP_FC_PC(isp, chan)->def_role = tval; tval = 0; snprintf(name, sizeof(name), "%sfullduplex", prefix); if (resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; } sptr = 0; snprintf(name, sizeof(name), "%stopology", prefix); if (resource_string_value(device_get_name(dev), device_get_unit(dev), name, (const char **) &sptr) == 0 && sptr != 0) { if (strcmp(sptr, "lport") == 0) { isp->isp_confopts |= ISP_CFG_LPORT; } else if (strcmp(sptr, "nport") == 0) { isp->isp_confopts |= ISP_CFG_NPORT; } else if (strcmp(sptr, "lport-only") == 0) { isp->isp_confopts |= ISP_CFG_LPORT_ONLY; } else if 
(strcmp(sptr, "nport-only") == 0) { isp->isp_confopts |= ISP_CFG_NPORT_ONLY; } } tval = 0; snprintf(name, sizeof(name), "%snofctape", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval) { isp->isp_confopts |= ISP_CFG_NOFCTAPE; } tval = 0; snprintf(name, sizeof(name), "%sfctape", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval) { isp->isp_confopts &= ~ISP_CFG_NOFCTAPE; isp->isp_confopts |= ISP_CFG_FCTAPE; } /* * Because the resource_*_value functions can neither return * 64 bit integer values, nor can they be directly coerced * to interpret the right hand side of the assignment as * you want them to interpret it, we have to force WWN * hint replacement to specify WWN strings with a leading * 'w' (e..g w50000000aaaa0001). Sigh. */ sptr = 0; snprintf(name, sizeof(name), "%sportwwn", prefix); tval = resource_string_value(device_get_name(dev), device_get_unit(dev), name, (const char **) &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) { device_printf(dev, "mangled portwwn hint '%s'\n", sptr); ISP_FC_PC(isp, chan)->def_wwpn = 0; } } sptr = 0; snprintf(name, sizeof(name), "%snodewwn", prefix); tval = resource_string_value(device_get_name(dev), device_get_unit(dev), name, (const char **) &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) { device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); ISP_FC_PC(isp, chan)->def_wwnn = 0; } } tval = 0; snprintf(name, sizeof(name), "%shysteresis", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "name", &tval); if (tval >= 0 && tval < 256) { ISP_FC_PC(isp, chan)->hysteresis = tval; } else { ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis; } tval = -1; snprintf(name, sizeof(name), "%sloop_down_limit", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval >= 0 && tval < 0xffff) { ISP_FC_PC(isp, chan)->loop_down_limit = tval; } else { ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit; } tval = -1; snprintf(name, sizeof(name), "%sgone_device_time", prefix); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), name, &tval); if (tval >= 0 && tval < 0xffff) { ISP_FC_PC(isp, chan)->gone_device_time = tval; } else { ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time; } } static int isp_pci_attach(device_t dev) { int i, m1, m2, locksetup = 0; uint32_t data, cmd, linesz, did; struct isp_pcisoftc *pcs; ispsoftc_t *isp; size_t psize, xsize; char fwname[32]; pcs = device_get_softc(dev); if (pcs == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(pcs, 0, sizeof (*pcs)); pcs->pci_dev = dev; isp = &pcs->pci_isp; isp->isp_dev = dev; isp->isp_nchan = 1; if (sizeof (bus_addr_t) > 4) isp->isp_osinfo.sixtyfourbit = 1; /* * Get Generic Options */ isp_nvports = 0; isp_get_generic_options(dev, isp); - - /* - * Check to see if options have us disabled - */ - if (isp->isp_osinfo.disabled) { - /* - * But return zero to preserve unit numbering - */ - return (0); - } /* * Get PCI options- which in this case are just mapping preferences. 
*/ isp_get_pci_options(dev, &m1, &m2); linesz = PCI_DFLT_LNSZ; pcs->irq = pcs->regs = NULL; pcs->rgd = pcs->rtp = pcs->iqd = 0; pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE); if (pcs->regs == NULL) { pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; pcs->rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE); } if (pcs->regs == NULL) { device_printf(dev, "unable to map any ports\n"); goto bad; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory"); } isp->isp_bus_tag = rman_get_bustag(pcs->regs); isp->isp_bus_handle = rman_get_bushandle(pcs->regs); pcs->pci_dev = dev; pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; switch (pci_get_devid(dev)) { case PCI_QLOGIC_ISP1020: did = 0x1040; isp->isp_mdvec = &mdvec; isp->isp_type = ISP_HA_SCSI_UNKNOWN; break; case PCI_QLOGIC_ISP1080: did = 0x1080; isp->isp_mdvec = &mdvec_1080; isp->isp_type = ISP_HA_SCSI_1080; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP1240: did = 0x1080; isp->isp_mdvec = &mdvec_1080; isp->isp_type = ISP_HA_SCSI_1240; isp->isp_nchan = 2; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP1280: did = 0x1080; isp->isp_mdvec = &mdvec_1080; isp->isp_type = ISP_HA_SCSI_1280; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP10160: did = 0x12160; isp->isp_mdvec = &mdvec_12160; isp->isp_type = ISP_HA_SCSI_10160; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP12160: did = 0x12160; isp->isp_nchan = 2; isp->isp_mdvec = &mdvec_12160; isp->isp_type = ISP_HA_SCSI_12160; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; break; case PCI_QLOGIC_ISP2100: did = 0x2100; isp->isp_mdvec = &mdvec_2100; isp->isp_type = ISP_HA_FC_2100; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; if (pci_get_revid(dev) < 3) { /* * XXX: Need to get the actual revision * XXX: number of the 2100 FB. At any rate, * XXX: lower cache line size for early revision * XXX; boards. 
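			 * XXX: (linesz is forced to 1 here and later
			 * XXX: written to PCIR_CACHELNSZ when the cache
			 * XXX: line size register is checked below.)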
*/ linesz = 1; } break; case PCI_QLOGIC_ISP2200: did = 0x2200; isp->isp_mdvec = &mdvec_2200; isp->isp_type = ISP_HA_FC_2200; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; break; case PCI_QLOGIC_ISP2300: did = 0x2300; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2300; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2312: case PCI_QLOGIC_ISP6312: did = 0x2300; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2312; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2322: case PCI_QLOGIC_ISP6322: did = 0x2322; isp->isp_mdvec = &mdvec_2300; isp->isp_type = ISP_HA_FC_2322; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF; break; case PCI_QLOGIC_ISP2422: case PCI_QLOGIC_ISP2432: did = 0x2400; isp->isp_nchan += isp_nvports; isp->isp_mdvec = &mdvec_2400; isp->isp_type = ISP_HA_FC_2400; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; case PCI_QLOGIC_ISP2532: did = 0x2500; isp->isp_nchan += isp_nvports; isp->isp_mdvec = &mdvec_2500; isp->isp_type = ISP_HA_FC_2500; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; case PCI_QLOGIC_ISP5432: did = 0x2500; isp->isp_mdvec = &mdvec_2500; isp->isp_type = ISP_HA_FC_2500; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF; break; default: device_printf(dev, "unknown device type\n"); goto bad; break; } isp->isp_revision = pci_get_revid(dev); if (IS_FC(isp)) { psize = sizeof (fcparam); xsize = sizeof (struct isp_fc); } else { psize = sizeof (sdparam); xsize = sizeof (struct isp_spi); } psize *= isp->isp_nchan; xsize *= isp->isp_nchan; isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_param == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_osinfo.pc.ptr == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } /* * Now that we know who we are (roughly) get/set specific options */ for (i = 0; i < isp->isp_nchan; i++) { isp_get_specific_options(dev, i, isp); } isp->isp_osinfo.fw = NULL; if (isp->isp_osinfo.fw == NULL) { snprintf(fwname, sizeof (fwname), "isp_%04x", did); isp->isp_osinfo.fw = firmware_get(fwname); } if (isp->isp_osinfo.fw != NULL) { isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname); isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; if (IS_2300(isp)) { /* per QLogic errata */ cmd &= ~PCIM_CMD_INVEN; } if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { cmd &= ~PCIM_CMD_INTX_DISABLE; } if (IS_24XX(isp)) { cmd &= ~PCIM_CMD_INTX_DISABLE; } pci_write_config(dev, PCIR_COMMAND, cmd, 2); /* * Make sure the Cache Line Size register is set sensibly. */ data = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data); data = linesz; pci_write_config(dev, PCIR_CACHELNSZ, data, 1); } /* * Make sure the Latency Timer is sane. 
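	 * Anything below PCI_DFLT_LTNCY (0x40) is raised to that value.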
*/ data = pci_read_config(dev, PCIR_LATTIMER, 1); if (data < PCI_DFLT_LTNCY) { data = PCI_DFLT_LTNCY; isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data); pci_write_config(dev, PCIR_LATTIMER, data, 1); } /* * Make sure we've disabled the ROM. */ data = pci_read_config(dev, PCIR_ROMADDR, 4); data &= ~1; pci_write_config(dev, PCIR_ROMADDR, data, 4); /* * Do MSI * * NB: MSI-X needs to be disabled for the 2432 (PCI-Express) */ if (IS_24XX(isp) || IS_2322(isp)) { pcs->msicount = pci_msi_count(dev); if (pcs->msicount > 1) { pcs->msicount = 1; } if (pci_alloc_msi(dev, &pcs->msicount) == 0) { pcs->iqd = 1; } else { pcs->iqd = 0; } } pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE); if (pcs->irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); locksetup++; if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } /* * Last minute checks... */ if (IS_23XX(isp) || IS_24XX(isp)) { isp->isp_port = pci_get_function(dev); } /* * Make sure we're in reset state. */ ISP_LOCK(isp); if (isp_reinit(isp, 1) != 0) { ISP_UNLOCK(isp); goto bad; } ISP_UNLOCK(isp); if (isp_attach(isp)) { ISP_LOCK(isp); isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } return (0); bad: if (pcs->ih) { (void) bus_teardown_intr(dev, pcs->irq, pcs->ih); } if (locksetup) { mtx_destroy(&isp->isp_osinfo.lock); } if (pcs->irq) { (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq); } if (pcs->msicount) { pci_release_msi(dev); } if (pcs->regs) { (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs); } if (pcs->pci_isp.isp_param) { free(pcs->pci_isp.isp_param, M_DEVBUF); pcs->pci_isp.isp_param = NULL; } if (pcs->pci_isp.isp_osinfo.pc.ptr) { free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); pcs->pci_isp.isp_osinfo.pc.ptr = NULL; } return (ENXIO); } static int isp_pci_detach(device_t dev) { struct isp_pcisoftc *pcs; ispsoftc_t *isp; int status; pcs = device_get_softc(dev); if (pcs == NULL) { return (ENXIO); } isp = (ispsoftc_t *) pcs; status = isp_detach(isp); if (status) return (status); ISP_LOCK(isp); isp_uninit(isp); if (pcs->ih) { (void) bus_teardown_intr(dev, pcs->irq, pcs->ih); } ISP_UNLOCK(isp); mtx_destroy(&isp->isp_osinfo.lock); (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq); if (pcs->msicount) { pci_release_msi(dev); } (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs); /* * XXX: THERE IS A LOT OF LEAKAGE HERE */ if (pcs->pci_isp.isp_param) { free(pcs->pci_isp.isp_param, M_DEVBUF); pcs->pci_isp.isp_param = NULL; } if (pcs->pci_isp.isp_osinfo.pc.ptr) { free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF); pcs->pci_isp.isp_osinfo.pc.ptr = NULL; } return (0); } #define IspVirt2Off(a, x) \ (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ _BLK_REG_SHFT] + ((x) & 0xfff)) #define BXR2(isp, off) \ bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) #define BXW2(isp, off, v) \ bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v) #define BXR4(isp, off) \ bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off) #define BXW4(isp, off, v) \ bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v) static ISP_INLINE int isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) { uint32_t val0, val1; int i = 0; do { val0 = BXR2(isp, IspVirt2Off(isp, off)); val1 = BXR2(isp, IspVirt2Off(isp, off)); } 
while (val0 != val1 && ++i < 1000); if (val0 != val1) { return (1); } *rp = val0; return (0); } static int isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint16_t isr, sema; if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { return (0); } if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { return (0); } } else { isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); } isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); isr &= INT_PENDING_MASK(isp); sema &= BIU_SEMA_LOCK; if (isr == 0 && sema == 0) { return (0); } *isrp = isr; if ((*semap = sema) != 0) { if (IS_2100(isp)) { if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) { return (0); } } else { *info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); } } return (1); } static int isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint32_t hccr, r2hisr; if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { *isrp = 0; return (0); } r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) { case ISPR2HST_ROM_MBX_OK: case ISPR2HST_ROM_MBX_FAIL: case ISPR2HST_MBX_OK: case ISPR2HST_MBX_FAIL: case ISPR2HST_ASYNC_EVENT: *semap = 1; break; case ISPR2HST_RIO_16: *info = ASYNC_RIO16_1; *semap = 1; return (1); case ISPR2HST_FPOST: *info = ASYNC_CMD_CMPLT; *semap = 1; return (1); case ISPR2HST_FPOST_CTIO: *info = ASYNC_CTIO_DONE; *semap = 1; return (1); case ISPR2HST_RSPQ_UPDATE: *semap = 0; break; default: hccr = ISP_READ(isp, HCCR); if (hccr & HCCR_PAUSE) { ISP_WRITE(isp, HCCR, HCCR_RESET); isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR)); ISP_WRITE(isp, BIU_ICR, 0); } else { isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); } return (0); } *info = (r2hisr >> 16); return (1); } static int isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint32_t r2hisr; r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); if ((r2hisr & BIU_R2HST_INTR) == 0) { *isrp = 0; return (0); } switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) { case ISPR2HST_ROM_MBX_OK: case ISPR2HST_ROM_MBX_FAIL: case ISPR2HST_MBX_OK: case ISPR2HST_MBX_FAIL: case ISPR2HST_ASYNC_EVENT: *semap = 1; break; case ISPR2HST_RSPQ_UPDATE: case ISPR2HST_RSPQ_UPDATE2: case ISPR2HST_ATIO_UPDATE: case ISPR2HST_ATIO_RSPQ_UPDATE: case ISPR2HST_ATIO_UPDATE2: *semap = 0; break; default: ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); return (0); } *info = (r2hisr >> 16); return (1); } static uint32_t isp_pci_rd_reg(ispsoftc_t *isp, int regoff) { uint16_t rv; int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. 
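		 * SXP registers are only reachable while BIU_PCI_CONF1_SXP
		 * is set in BIU_CONF1, so the previous BIU_CONF1 contents
		 * are saved here and restored once the access is done.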
*/ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } return (rv); } static void isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) { int oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. */ oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } } static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) { uint32_t rv, oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. */ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } rv = BXR2(isp, IspVirt2Off(isp, regoff)); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } return (rv); } static void isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) { int oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { uint32_t tc; /* * We will assume that someone has paused the RISC processor. 
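 * On the 1080-style chips BIU_CONF1 also chooses between the two SXP
 * banks and the DMA registers, so clear the DMA select and pick SXP
 * bank 0 or 1 for the duration of the access.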
*/ oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc | BIU_PCI1080_CONF1_DMA); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); if (oc) { BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); } } static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) { uint32_t rv; int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: return (BXR2(isp, IspVirt2Off(isp, regoff))); case SXP_BLOCK: isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); return (0xffffffff); case RISC_BLOCK: isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); return (0xffffffff); case DMA_BLOCK: isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); return (0xffffffff); default: isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); return (0xffffffff); } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSLO: rv = BXR4(isp, IspVirt2Off(isp, regoff)); break; case BIU2400_R2HSTSHI: rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; break; default: isp_prt(isp, ISP_LOGERR, "isp_pci_rd_reg_2400: unknown offset %x", regoff); rv = 0xffffffff; break; } return (rv); } static void isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) { int block = regoff & _BLK_REG_MASK; switch (block) { case BIU_BLOCK: break; case MBOX_BLOCK: BXW2(isp, IspVirt2Off(isp, regoff), val); MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); return; case SXP_BLOCK: isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); return; case RISC_BLOCK: isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); return; case DMA_BLOCK: isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); return; default: isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", regoff); break; } switch (regoff) { case BIU2400_FLASH_ADDR: case BIU2400_FLASH_DATA: case BIU2400_ICR: case BIU2400_ISR: case BIU2400_CSR: case BIU2400_REQINP: case BIU2400_REQOUTP: case BIU2400_RSPINP: case BIU2400_RSPOUTP: case BIU2400_PRI_REQINP: case BIU2400_PRI_REQOUTP: case BIU2400_ATIO_RSPINP: case BIU2400_ATIO_RSPOUTP: case BIU2400_HCCR: case BIU2400_GPIOD: case BIU2400_GPIOE: case BIU2400_HSEMA: BXW4(isp, IspVirt2Off(isp, regoff), val); #ifdef MEMORYBARRIERW if (regoff == BIU2400_REQINP || regoff == BIU2400_RSPOUTP || regoff == BIU2400_PRI_REQINP || regoff == BIU2400_ATIO_RSPOUTP) MEMORYBARRIERW(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1) else #endif MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1); break; default: isp_prt(isp, ISP_LOGERR, "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); break; } } struct imush { ispsoftc_t 
*isp; caddr_t vbase; int chan; int error; }; static void imc(void *, bus_dma_segment_t *, int, int); static void imc1(void *, bus_dma_segment_t *, int, int); static void imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; isp_ecmd_t *ecmd; if (error) { imushp->error = error; return; } if (nseg != 1) { imushp->error = EINVAL; return; } isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len); imushp->isp->isp_rquest = imushp->vbase; imushp->isp->isp_rquest_dma = segs->ds_addr; segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp)); imushp->isp->isp_result_dma = segs->ds_addr; imushp->isp->isp_result = imushp->vbase; segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp)); if (imushp->isp->isp_type >= ISP_HA_FC_2200) { imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr; imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase; imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free; for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) { if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) { ecmd->next = NULL; } else { ecmd->next = ecmd + 1; } } } #ifdef ISP_TARGET_MODE segs->ds_addr += (N_XCMDS * XCMD_SIZE); imushp->vbase += (N_XCMDS * XCMD_SIZE); if (IS_24XX(imushp->isp)) { imushp->isp->isp_atioq_dma = segs->ds_addr; imushp->isp->isp_atioq = imushp->vbase; } #endif } static void imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; return; } if (nseg != 1) { imushp->error = EINVAL; return; } isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len); FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr; FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase; } static int isp_pci_mbxdma(ispsoftc_t *isp) { caddr_t base; uint32_t len, nsegs; int i, error, cmap = 0; bus_size_t slim; /* segment size */ bus_addr_t llim; /* low limit of unavailable dma */ bus_addr_t hlim; /* high limit of unavailable dma */ struct imush im; /* * Already been here? If so, leave... 
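 * (isp_rquest is only set once these DMA areas have been built, so a
 * non-NULL value means the work below was already done.)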
*/ if (isp->isp_rquest) { return (0); } ISP_UNLOCK(isp); if (isp->isp_maxcmds == 0) { isp_prt(isp, ISP_LOGERR, "maxcmds not set"); ISP_LOCK(isp); return (1); } hlim = BUS_SPACE_MAXADDR; if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { if (sizeof (bus_size_t) > 4) { slim = (bus_size_t) (1ULL << 32); } else { slim = (bus_size_t) (1UL << 31); } llim = BUS_SPACE_MAXADDR; } else { llim = BUS_SPACE_MAXADDR_32BIT; slim = (1UL << 24); } len = isp->isp_maxcmds * sizeof (struct isp_pcmd); isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_osinfo.pcmd_pool == NULL) { isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds"); ISP_LOCK(isp); return (1); } if (isp->isp_osinfo.sixtyfourbit) { nsegs = ISP_NSEG64_MAX; } else { nsegs = ISP_NSEG_MAX; } if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); return (1); } len = sizeof (isp_hdl_t) * isp->isp_maxcmds; isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_xflist == NULL) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); return (1); } for (len = 0; len < isp->isp_maxcmds - 1; len++) { isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1]; } isp->isp_xffree = isp->isp_xflist; #ifdef ISP_TARGET_MODE len = sizeof (isp_hdl_t) * isp->isp_maxcmds; isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_tgtlist == NULL) { free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); return (1); } for (len = 0; len < isp->isp_maxcmds - 1; len++) { isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1]; } isp->isp_tgtfree = isp->isp_tgtlist; #endif /* * Allocate and map the request and result queues (and ATIO queue * if we're a 2400 supporting target mode), and a region for * external dma addressable command/status structures (23XX and * later). */ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); #ifdef ISP_TARGET_MODE if (IS_24XX(isp)) { len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); } #endif if (isp->isp_type >= ISP_HA_FC_2200) { len += (N_XCMDS * XCMD_SIZE); } /* * Create a tag for the control spaces. We don't always need this * to be 32 bits, but we do this for simplicity and speed's sake. 
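 * The single allocation below is carved up by imc(): request queue,
 * then result queue, then the external command/status area on newer
 * FC chips, and finally the ATIO queue when target mode is compiled
 * in for a 24XX.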
*/ if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) { isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif ISP_LOCK(isp); return (1); } if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif ISP_LOCK(isp); return (1); } im.isp = isp; im.chan = 0; im.vbase = base; im.error = 0; bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error); goto bad; } if (IS_FC(isp)) { for (cmap = 0; cmap < isp->isp_nchan; cmap++) { struct isp_fc *fc = ISP_FC_PC(isp, cmap); if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) { goto bad; } if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) { bus_dma_tag_destroy(fc->tdmat); goto bad; } im.isp = isp; im.chan = cmap; im.vbase = base; im.error = 0; bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0); if (im.error) { bus_dmamem_free(fc->tdmat, base, fc->tdmap); bus_dma_tag_destroy(fc->tdmat); goto bad; } if (!IS_2100(isp)) { for (i = 0; i < INITIAL_NEXUS_COUNT; i++) { struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO); if (n == NULL) { while (fc->nexus_free_list) { n = fc->nexus_free_list; fc->nexus_free_list = n->next; free(n, M_DEVBUF); } goto bad; } n->next = fc->nexus_free_list; fc->nexus_free_list = n; } } } } for (i = 0; i < isp->isp_maxcmds; i++) { struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); if (error) { isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); while (--i >= 0) { bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); } goto bad; } callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); if (i == isp->isp_maxcmds-1) { pcmd->next = NULL; } else { pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; } } isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; ISP_LOCK(isp); return (0); bad: while (--cmap >= 0) { struct isp_fc *fc = ISP_FC_PC(isp, cmap); bus_dmamap_unload(fc->tdmat, fc->tdmap); bus_dmamem_free(fc->tdmat, base, fc->tdmap); bus_dma_tag_destroy(fc->tdmat); while (fc->nexus_free_list) { struct isp_nexus *n = fc->nexus_free_list; fc->nexus_free_list = n->next; free(n, M_DEVBUF); } } if (isp->isp_rquest_dma != 0) bus_dmamap_unload(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap); bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap); bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_xflist, M_DEVBUF); #ifdef ISP_TARGET_MODE free(isp->isp_tgtlist, M_DEVBUF); #endif free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); isp->isp_rquest = NULL; ISP_LOCK(isp); return (1); } typedef struct { ispsoftc_t *isp; void *cmd_token; void *rq; /* original request */ int error; bus_size_t mapsize; } mush_t; #define 
MUSHERR_NOQENTRIES -2 #ifdef ISP_TARGET_MODE static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); static void tdma2(void *, bus_dma_segment_t *, int, int); static void tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) { mush_t *mp; mp = (mush_t *)arg; mp->mapsize = mapsize; tdma2(arg, dm_segs, nseg, error); } static void tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; isp_ddir_t ddir; ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; if (nseg) { if (isp->isp_osinfo.sixtyfourbit) { if (nseg >= ISP_NSEG64_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); mp->error = EFAULT; return; } if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) { rq->req_header.rqs_entry_type = RQSTYPE_CTIO3; } } else { if (nseg >= ISP_NSEG_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); mp->error = EFAULT; return; } } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); ddir = ISP_TO_DEVICE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); ddir = ISP_FROM_DEVICE; } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len); switch (error) { case CMD_EAGAIN: mp->error = MUSHERR_NOQENTRIES; case CMD_QUEUED: break; default: mp->error = EIO; } } #endif static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); static void dma2(void *, bus_dma_segment_t *, int, int); static void dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) { mush_t *mp; mp = (mush_t *)arg; mp->mapsize = mapsize; dma2(arg, dm_segs, nseg, error); } static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; isp_ddir_t ddir; ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; if (nseg) { if (isp->isp_osinfo.sixtyfourbit) { if (nseg >= ISP_NSEG64_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); mp->error = EFAULT; return; } if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { rq->req_header.rqs_entry_type = RQSTYPE_A64; } } else { if (nseg >= ISP_NSEG_MAX) { isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); mp->error = EFAULT; return; } } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); ddir = ISP_FROM_DEVICE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); ddir = ISP_TO_DEVICE; } else { ddir = ISP_NOXFR; } } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map); switch (error) 
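	/* Map the CMD_* result from isp_send_cmd() onto the callback's error field. */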
{ case CMD_EAGAIN: mp->error = MUSHERR_NOQENTRIES; break; case CMD_QUEUED: break; default: mp->error = EIO; break; } } static int isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) { mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); int error; mp = &mush; mp->isp = isp; mp->cmd_token = csio; mp->rq = ff; mp->error = 0; mp->mapsize = 0; #ifdef ISP_TARGET_MODE if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { eptr = tdma2; eptr2 = tdma2_2; } else #endif { eptr = dma2; eptr2 = dma2_2; } error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, (union ccb *)csio, eptr, mp, 0); if (error == EINPROGRESS) { bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); mp->error = EINVAL; isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { csio->ccb_h.status = CAM_REQ_TOO_BIG; } else if (mp->error == EINVAL) { csio->ccb_h.status = CAM_REQ_INVALID; } else { csio->ccb_h.status = CAM_UNREC_HBA_ERROR; } return (retval); } return (CMD_QUEUED); } static void isp_pci_reset0(ispsoftc_t *isp) { ISP_DISABLE_INTS(isp); } static void isp_pci_reset1(ispsoftc_t *isp) { if (!IS_24XX(isp)) { /* Make sure the BIOS is disabled */ isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); } /* and enable interrupts */ ISP_ENABLE_INTS(isp); } static void isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); if (IS_SCSI(isp)) printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); else printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); if (IS_SCSI(isp)) { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); printf(" PCI Status Command/Status=%x\n", pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); } Index: head/sys/dev/isp/isp_sbus.c =================================================================== --- head/sys/dev/isp/isp_sbus.c (revision 291220) +++ head/sys/dev/isp/isp_sbus.c (revision 291221) @@ -1,714 +1,701 @@ /*- * Copyright (c) 1997-2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * SBus specific probe and attach routines for Qlogic ISP SCSI adapters. * FreeBSD Version. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static uint32_t isp_sbus_rd_reg(ispsoftc_t *, int); static void isp_sbus_wr_reg(ispsoftc_t *, int, uint32_t); static int isp_sbus_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); static int isp_sbus_mbxdma(ispsoftc_t *); static int isp_sbus_dmasetup(ispsoftc_t *, XS_T *, void *); static void isp_sbus_reset0(ispsoftc_t *); static void isp_sbus_reset1(ispsoftc_t *); static void isp_sbus_dumpregs(ispsoftc_t *, const char *); static struct ispmdvec mdvec = { isp_sbus_rd_isr, isp_sbus_rd_reg, isp_sbus_wr_reg, isp_sbus_mbxdma, isp_sbus_dmasetup, isp_common_dmateardown, isp_sbus_reset0, isp_sbus_reset1, isp_sbus_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static int isp_sbus_probe (device_t); static int isp_sbus_attach (device_t); static int isp_sbus_detach (device_t); #define ISP_SBD(isp) ((struct isp_sbussoftc *)isp)->sbus_dev struct isp_sbussoftc { ispsoftc_t sbus_isp; device_t sbus_dev; struct resource * regs; void * irq; int iqd; int rgd; void * ih; int16_t sbus_poff[_NREG_BLKS]; sdparam sbus_param; struct isp_spi sbus_spi; struct ispmdvec sbus_mdvec; }; static device_method_t isp_sbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_sbus_probe), DEVMETHOD(device_attach, isp_sbus_attach), DEVMETHOD(device_detach, isp_sbus_detach), { 0, 0 } }; static driver_t isp_sbus_driver = { "isp", isp_sbus_methods, sizeof (struct isp_sbussoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, sbus, isp_sbus_driver, isp_devclass, 0, 0); MODULE_DEPEND(isp, cam, 1, 1, 1); MODULE_DEPEND(isp, firmware, 1, 1, 1); static int isp_sbus_probe(device_t dev) { int found = 0; const char *name = ofw_bus_get_name(dev); if (strcmp(name, "SUNW,isp") == 0 || strcmp(name, "QLGC,isp") == 0 || strcmp(name, "ptisp") == 0 || strcmp(name, "PTI,ptisp") == 0) { found++; } if (!found) return (ENXIO); if (isp_announced == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); isp_announced++; } return (0); } static int isp_sbus_attach(device_t dev) { int tval, isp_debug, role, ispburst, 
default_id; struct isp_sbussoftc *sbs; ispsoftc_t *isp = NULL; int locksetup = 0; int ints_setup = 0; sbs = device_get_softc(dev); if (sbs == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } sbs->sbus_dev = dev; sbs->sbus_mdvec = mdvec; - /* - * Figure out if we're supposed to skip this one. - * If we are, we actually go to ISP_ROLE_NONE. - */ - - tval = 0; - if (resource_int_value(device_get_name(dev), device_get_unit(dev), - "disable", &tval) == 0 && tval) { - device_printf(dev, "device is disabled\n"); - /* but return 0 so the !$)$)*!$*) unit isn't reused */ - return (0); - } - role = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &role) == 0 && ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { device_printf(dev, "setting role to 0x%x\n", role); } else { role = ISP_DEFAULT_ROLES; } sbs->irq = sbs->regs = NULL; sbs->rgd = sbs->iqd = 0; sbs->regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sbs->rgd, RF_ACTIVE); if (sbs->regs == NULL) { device_printf(dev, "unable to map registers\n"); goto bad; } sbs->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; sbs->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF; sbs->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF; sbs->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF; sbs->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; isp = &sbs->sbus_isp; isp->isp_bus_tag = rman_get_bustag(sbs->regs); isp->isp_bus_handle = rman_get_bushandle(sbs->regs); isp->isp_mdvec = &sbs->sbus_mdvec; isp->isp_bustype = ISP_BT_SBUS; isp->isp_type = ISP_HA_SCSI_UNKNOWN; isp->isp_param = &sbs->sbus_param; isp->isp_osinfo.pc.ptr = &sbs->sbus_spi; isp->isp_revision = 0; /* XXX */ isp->isp_dev = dev; isp->isp_nchan = 1; if (IS_FC(isp)) ISP_FC_PC(isp, 0)->def_role = role; /* * Get the clock frequency and convert it from HZ to MHz, * rounding up. This defaults to 25MHz if there isn't a * device specific one in the OFW device tree. */ sbs->sbus_mdvec.dv_clock = (sbus_get_clockfreq(dev) + 500000)/1000000; /* * Now figure out what the proper burst sizes, etc., to use. * Unfortunately, there is no ddi_dma_burstsizes here which * walks up the tree finding the limiting burst size node (if * any). We just use what's here for isp. */ ispburst = sbus_get_burstsz(dev); if (ispburst == 0) { ispburst = SBUS_BURST_32 - 1; } sbs->sbus_mdvec.dv_conf1 = 0; if (ispburst & (1 << 5)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32; } else if (ispburst & (1 << 4)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16; } else if (ispburst & (1 << 3)) { sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8; } if (sbs->sbus_mdvec.dv_conf1) { sbs->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE; } /* * We don't trust NVRAM on SBus cards */ isp->isp_confopts |= ISP_CFG_NONVRAM; /* * Mark things if we're a PTI SBus adapter. 
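 * The isp_ptisp flag lets the shared code treat those boards
 * differently where needed.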
*/ if (strcmp("PTI,ptisp", ofw_bus_get_name(dev)) == 0 || strcmp("ptisp", ofw_bus_get_name(dev)) == 0) { SDPARAM(isp, 0)->isp_ptisp = 1; } isp->isp_osinfo.fw = firmware_get("isp_1000"); if (isp->isp_osinfo.fw) { union { const void *cp; uint16_t *sp; } stupid; stupid.cp = isp->isp_osinfo.fw->data; isp->isp_mdvec->dv_ispfw = stupid.sp; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } default_id = -1; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval) == 0) { default_id = tval; isp->isp_confopts |= ISP_CFG_OWNLOOPID; } if (default_id == -1) { default_id = OF_getscsinitid(dev); } ISP_SPI_PC(isp, 0)->iid = default_id; isp_debug = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &isp_debug); /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); locksetup++; sbs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sbs->iqd, RF_ACTIVE | RF_SHAREABLE); if (sbs->irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } if (isp_setup_intr(dev, sbs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &sbs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } ints_setup++; /* * Set up logging levels. */ if (isp_debug) { isp->isp_dblev = isp_debug; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) { isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; } /* * Make sure we're in reset state. */ ISP_LOCK(isp); if (isp_reinit(isp, 1) != 0) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } ISP_UNLOCK(isp); if (isp_attach(isp)) { ISP_LOCK(isp); isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } return (0); bad: if (sbs && ints_setup) { (void) bus_teardown_intr(dev, sbs->irq, sbs->ih); } if (sbs && sbs->irq) { bus_release_resource(dev, SYS_RES_IRQ, sbs->iqd, sbs->irq); } if (locksetup && isp) { mtx_destroy(&isp->isp_osinfo.lock); } if (sbs->regs) { (void) bus_release_resource(dev, SYS_RES_MEMORY, sbs->rgd, sbs->regs); } return (ENXIO); } static int isp_sbus_detach(device_t dev) { struct isp_sbussoftc *sbs; ispsoftc_t *isp; int status; sbs = device_get_softc(dev); if (sbs == NULL) { return (ENXIO); } isp = (ispsoftc_t *) sbs; status = isp_detach(isp); if (status) return (status); ISP_LOCK(isp); isp_uninit(isp); if (sbs->ih) { (void) bus_teardown_intr(dev, sbs->irq, sbs->ih); } ISP_UNLOCK(isp); mtx_destroy(&isp->isp_osinfo.lock); (void) bus_release_resource(dev, SYS_RES_IRQ, sbs->iqd, sbs->irq); (void) bus_release_resource(dev, SYS_RES_MEMORY, sbs->rgd, sbs->regs); return (0); } #define IspVirt2Off(a, x) \ (((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \ _BLK_REG_SHFT] + ((x) & 0xff)) #define BXR2(sbc, off) \ bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) static int isp_sbus_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info) { uint16_t isr, sema; isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR)); sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA)); isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); isr &= INT_PENDING_MASK(isp); sema &= BIU_SEMA_LOCK; if (isr == 0 && sema == 0) { return (0); } *isrp = isr; if ((*semap = sema) != 0) *info = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0)); return (1); } static uint32_t isp_sbus_rd_reg(ispsoftc_t *isp, int regoff) { uint16_t rval; struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff 
& 0xff); rval = bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, offset); isp_prt(isp, ISP_LOGDEBUG3, "isp_sbus_rd_reg(off %x) = %x", regoff, rval); return (rval); } static void isp_sbus_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) { struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); isp_prt(isp, ISP_LOGDEBUG3, "isp_sbus_wr_reg(off %x) = %x", regoff, val); bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, offset, val); MEMORYBARRIER(isp, SYNC_REG, offset, 2, -1); } struct imush { ispsoftc_t *isp; int error; }; static void imc(void *, bus_dma_segment_t *, int, int); static void imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; } else { ispsoftc_t *isp =imushp->isp; bus_addr_t addr = segs->ds_addr; isp->isp_rquest_dma = addr; addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); isp->isp_result_dma = addr; } } static int isp_sbus_mbxdma(ispsoftc_t *isp) { caddr_t base; uint32_t len; int i, error; struct imush im; /* * Already been here? If so, leave... */ if (isp->isp_rquest) { return (0); } ISP_UNLOCK(isp); len = sizeof (struct isp_pcmd) * isp->isp_maxcmds; isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_osinfo.pcmd_pool == NULL) { isp_prt(isp, ISP_LOGERR, "cannot alloc pcmd pool"); ISP_LOCK(isp); return (1); } len = sizeof (isp_hdl_t *) * isp->isp_maxcmds; isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_xflist == NULL) { isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); ISP_LOCK(isp); return (1); } for (len = 0; len < isp->isp_maxcmds - 1; len++) { isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1]; } isp->isp_xffree = isp->isp_xflist; len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_SBD(isp)), 1, BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, ISP_NSEG_MAX, BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.dmat)) { isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return(1); } /* * Allocate and map the request, result queues, plus FC scratch area. 
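 * (The SBus adapters handled here are SPI-only, so only the request
 * and result queues are actually laid out below.)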
*/ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL, len, 1, BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.cdmat)) { isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return (1); } if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); free(isp->isp_xflist, M_DEVBUF); ISP_LOCK(isp); return (1); } for (i = 0; i < isp->isp_maxcmds; i++) { struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); if (error) { isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); while (--i >= 0) { bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap); } goto bad; } callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); if (i == isp->isp_maxcmds-1) { pcmd->next = NULL; } else { pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; } } isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; im.isp = isp; im.error = 0; bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error); goto bad; } isp->isp_rquest = base; base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); isp->isp_result = base; ISP_LOCK(isp); return (0); bad: bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap); bus_dma_tag_destroy(isp->isp_osinfo.cdmat); free(isp->isp_xflist, M_DEVBUF); free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); isp->isp_rquest = NULL; ISP_LOCK(isp); return (1); } typedef struct { ispsoftc_t *isp; void *cmd_token; void *rq; /* original request */ int error; bus_size_t mapsize; } mush_t; #define MUSHERR_NOQENTRIES -2 static void dma2(void *, bus_dma_segment_t *, int, int); static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; ispsoftc_t *isp; struct ccb_scsiio *csio; isp_ddir_t ddir; ispreq_t *rq; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; isp = mp->isp; rq = mp->rq; if (nseg) { if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); ddir = ISP_FROM_DEVICE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); ddir = ISP_TO_DEVICE; } else { ddir = ISP_NOXFR; } } else { dm_segs = NULL; nseg = 0; ddir = ISP_NOXFR; } if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, NULL) != CMD_QUEUED) { mp->error = MUSHERR_NOQENTRIES; } } static int isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) { mush_t mush, *mp; void (*eptr)(void *, bus_dma_segment_t *, int, int); int error; mp = &mush; mp->isp = isp; mp->cmd_token = csio; mp->rq = ff; mp->error = 0; mp->mapsize = 0; eptr = dma2; error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, (union ccb *)csio, eptr, mp, 0); if (error == EINPROGRESS) { bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); mp->error = EINVAL; isp_prt(isp, 
ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { XS_SETERR(csio, CAM_REQ_TOO_BIG); } else if (mp->error == EINVAL) { XS_SETERR(csio, CAM_REQ_INVALID); } else { XS_SETERR(csio, CAM_UNREC_HBA_ERROR); } return (retval); } return (CMD_QUEUED); } static void isp_sbus_reset0(ispsoftc_t *isp) { ISP_DISABLE_INTS(isp); } static void isp_sbus_reset1(ispsoftc_t *isp) { ISP_ENABLE_INTS(isp); } static void isp_sbus_dumpregs(ispsoftc_t *isp, const char *msg) { if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); }