diff --git a/sys/dev/isp/isp.c b/sys/dev/isp/isp.c index 351caaefed97..7797a97a6fbb 100644 --- a/sys/dev/isp/isp.c +++ b/sys/dev/isp/isp.c @@ -1,5365 +1,5368 @@ /* $FreeBSD$ */ /* * Machine and OS Independent (well, as best as possible) * code for the Qlogic ISP SCSI adapters. * * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob * Feral Software * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Inspiration and ideas about this driver are from Erik Moe's Linux driver * (qlogicisp.c) and Dave Miller's SBus version of same (qlogicisp.c). Some * ideas dredged from the Solaris driver. */ /* * Include header file appropriate for platform we're building on. 
*/ #ifdef __NetBSD__ #include #endif #ifdef __FreeBSD__ #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif /* * General defines */ #define MBOX_DELAY_COUNT 1000000 / 100 /* * Local static data */ static const char warnlun[] = "WARNING- cannot determine Expanded LUN capability- limiting to one LUN"; static const char portshift[] = "Target %d Loop ID 0x%x (Port 0x%x) => Loop 0x%x (Port 0x%x)"; static const char portdup[] = "Target %d duplicates Target %d- killing off both"; static const char retained[] = "Retaining Loop ID 0x%x for Target %d (Port 0x%x)"; static const char lretained[] = "Retained login of Target %d (Loop ID 0x%x) Port 0x%x"; static const char plogout[] = "Logging out Target %d at Loop ID 0x%x (Port 0x%x)"; static const char plogierr[] = "Command Error in PLOGI for Port 0x%x (0x%x)"; static const char nopdb[] = "Could not get PDB for Device @ Port 0x%x"; static const char pdbmfail1[] = "PDB Loop ID info for Device @ Port 0x%x does not match up (0x%x)"; static const char pdbmfail2[] = "PDB Port info for Device @ Port 0x%x does not match up (0x%x)"; static const char ldumped[] = "Target %d (Loop ID 0x%x) Port 0x%x dumped after login info mismatch"; static const char notresp[] = "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d"; static const char xact1[] = "HBA attempted queued transaction with disconnect not set for %d.%d.%d"; static const char xact2[] = "HBA attempted queued transaction to target routine %d on target %d bus %d"; static const char xact3[] = "HBA attempted queued cmd for %d.%d.%d when queueing disabled"; static const char pskip[] = "SCSI phase skipped for target %d.%d.%d"; static const char topology[] = "Loop ID %d, AL_PA 0x%x, Port ID 0x%x, Loop State 0x%x, Topology '%s'"; static const char finmsg[] = "(%d.%d.%d): FIN dl%d resid %d STS 0x%x SKEY %c XS_ERR=0x%x"; /* * Local function prototypes. 
*/ static int isp_parse_async __P((struct ispsoftc *, int)); static int isp_handle_other_response __P((struct ispsoftc *, ispstatusreq_t *, u_int16_t *)); static void isp_parse_status __P((struct ispsoftc *, ispstatusreq_t *, XS_T *)); static void isp_fastpost_complete __P((struct ispsoftc *, u_int16_t)); static void isp_scsi_init __P((struct ispsoftc *)); static void isp_scsi_channel_init __P((struct ispsoftc *, int)); static void isp_fibre_init __P((struct ispsoftc *)); static void isp_mark_getpdb_all __P((struct ispsoftc *)); static int isp_getmap __P((struct ispsoftc *, fcpos_map_t *)); static int isp_getpdb __P((struct ispsoftc *, int, isp_pdb_t *)); static u_int64_t isp_get_portname __P((struct ispsoftc *, int, int)); static int isp_fclink_test __P((struct ispsoftc *, int)); static char *isp2100_fw_statename __P((int)); static int isp_pdb_sync __P((struct ispsoftc *)); static int isp_scan_loop __P((struct ispsoftc *)); static int isp_scan_fabric __P((struct ispsoftc *)); static void isp_register_fc4_type __P((struct ispsoftc *)); static void isp_fw_state __P((struct ispsoftc *)); static void isp_mboxcmd __P((struct ispsoftc *, mbreg_t *, int)); static void isp_update __P((struct ispsoftc *)); static void isp_update_bus __P((struct ispsoftc *, int)); static void isp_setdfltparm __P((struct ispsoftc *, int)); static int isp_read_nvram __P((struct ispsoftc *)); static void isp_rdnvram_word __P((struct ispsoftc *, int, u_int16_t *)); static void isp_parse_nvram_1020 __P((struct ispsoftc *, u_int8_t *)); static void isp_parse_nvram_1080 __P((struct ispsoftc *, int, u_int8_t *)); static void isp_parse_nvram_12160 __P((struct ispsoftc *, int, u_int8_t *)); static void isp_parse_nvram_2100 __P((struct ispsoftc *, u_int8_t *)); /* * Reset Hardware. * * Hit the chip over the head, download new f/w if available and set it running. * * Locking done elsewhere. 
*/ void isp_reset(struct ispsoftc *isp) { mbreg_t mbs; int loops, i, touched, dodnld = 1; char *revname = "????"; isp->isp_state = ISP_NILSTATE; /* * Basic types (SCSI, FibreChannel and PCI or SBus) * have been set in the MD code. We figure out more * here. * * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and do other settings for the 2100. */ /* * Get the current running firmware revision out of the * chip before we hit it over the head (if this is our * first time through). Note that we store this as the * 'ROM' firmware revision- which it may not be. In any * case, we don't really use this yet, but we may in * the future. */ if ((touched = isp->isp_touched) == 0) { /* * First see whether or not we're sitting in the ISP PROM. * If we've just been reset, we'll have the string "ISP " * spread through outgoing mailbox registers 1-3. */ if (ISP_READ(isp, OUTMAILBOX1) != 0x4953 || ISP_READ(isp, OUTMAILBOX2) != 0x5020 || ISP_READ(isp, OUTMAILBOX3) != 0x2020) { /* * Just in case it was paused... */ ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); mbs.param[0] = MBOX_ABOUT_FIRMWARE; isp_mboxcmd(isp, &mbs, MBOX_COMMAND_ERROR); /* * This *shouldn't* fail..... */ if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { isp->isp_romfw_rev[0] = mbs.param[1]; isp->isp_romfw_rev[1] = mbs.param[2]; isp->isp_romfw_rev[2] = mbs.param[3]; } } isp->isp_touched = 1; } DISABLE_INTS(isp); /* * Put the board into PAUSE mode (so we can read the SXP registers * or write FPM/FBM registers). */ ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); if (IS_FC(isp)) { switch (isp->isp_type) { case ISP_HA_FC_2100: revname = "2100"; break; case ISP_HA_FC_2200: revname = "2200"; break; + case ISP_HA_FC_2300: + revname = "2300"; + break; default: break; } /* * While we're paused, reset the FPM module and FBM fifos. 
*/ ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } else if (IS_1240(isp)) { sdparam *sdp = isp->isp_param; revname = "1240"; isp->isp_clock = 60; sdp->isp_ultramode = 1; sdp++; sdp->isp_ultramode = 1; /* * XXX: Should probably do some bus sensing. */ } else if (IS_ULTRA2(isp)) { static const char m[] = "bus %d is in %s Mode"; u_int16_t l; sdparam *sdp = isp->isp_param; isp->isp_clock = 100; if (IS_1280(isp)) revname = "1280"; else if (IS_1080(isp)) revname = "1080"; else if (IS_12160(isp)) revname = "12160"; else revname = ""; l = ISP_READ(isp, SXP_PINS_DIFF) & ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 0, l); break; } if (IS_DUALBUS(isp)) { sdp++; l = ISP_READ(isp, SXP_PINS_DIFF|SXP_BANK1_SELECT); l &= ISP1080_MODE_MASK; switch(l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 1, l); break; } } } else { sdparam *sdp = isp->isp_param; i = ISP_READ(isp, BIU_CONF0) & BIU_CONF0_HW_MASK; switch (i) { default: isp_prt(isp, ISP_LOGALL, "Unknown Chip Type 0x%x", i); /* FALLTHROUGH */ case 1: revname = "1020"; isp->isp_type = ISP_HA_SCSI_1020; isp->isp_clock = 40; break; case 2: /* * Some 1020A chips are Ultra 
Capable, but don't * run the clock rate up for that unless told to * do so by the Ultra Capable bits being set. */ revname = "1020A"; isp->isp_type = ISP_HA_SCSI_1020A; isp->isp_clock = 40; break; case 3: revname = "1040"; isp->isp_type = ISP_HA_SCSI_1040; isp->isp_clock = 60; break; case 4: revname = "1040A"; isp->isp_type = ISP_HA_SCSI_1040A; isp->isp_clock = 60; break; case 5: revname = "1040B"; isp->isp_type = ISP_HA_SCSI_1040B; isp->isp_clock = 60; break; case 6: revname = "1040C"; isp->isp_type = ISP_HA_SCSI_1040C; isp->isp_clock = 60; break; } /* * Now, while we're at it, gather info about ultra * and/or differential mode. */ if (ISP_READ(isp, SXP_PINS_DIFF) & SXP_PINS_DIFF_MODE) { isp_prt(isp, ISP_LOGCONFIG, "Differential Mode"); sdp->isp_diffmode = 1; } else { sdp->isp_diffmode = 0; } i = ISP_READ(isp, RISC_PSR); if (isp->isp_bustype == ISP_BT_SBUS) { i &= RISC_PSR_SBUS_ULTRA; } else { i &= RISC_PSR_PCI_ULTRA; } if (i != 0) { isp_prt(isp, ISP_LOGCONFIG, "Ultra Mode Capable"); sdp->isp_ultramode = 1; /* * If we're in Ultra Mode, we have to be 60Mhz clock- * even for the SBus version. */ isp->isp_clock = 60; } else { sdp->isp_ultramode = 0; /* * Clock is known. Gronk. */ } /* * Machine dependent clock (if set) overrides * our generic determinations. */ if (isp->isp_mdvec->dv_clock) { if (isp->isp_mdvec->dv_clock < isp->isp_clock) { isp->isp_clock = isp->isp_mdvec->dv_clock; } } } /* * Clear instrumentation */ isp->isp_intcnt = isp->isp_intbogus = 0; /* * Do MD specific pre initialization */ ISP_RESET0(isp); again: /* * Hit the chip over the head with hammer, * and give the ISP a chance to recover. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_ICR, BIU_ICR_SOFT_RESET); /* * A slight delay... */ USEC_DELAY(100); /* * Clear data && control DMA engines. 
*/ ISP_WRITE(isp, CDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); ISP_WRITE(isp, DDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); } else { ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); /* * A slight delay... */ USEC_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, TDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, RDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); } /* * Wait for ISP to be ready to go... */ loops = MBOX_DELAY_COUNT; for (;;) { if (IS_SCSI(isp)) { if (!(ISP_READ(isp, BIU_ICR) & BIU_ICR_SOFT_RESET)) break; } else { if (!(ISP_READ(isp, BIU2100_CSR) & BIU2100_SOFT_RESET)) break; } USEC_DELAY(100); if (--loops < 0) { ISP_DUMPREGS(isp, "chip reset timed out"); return; } } /* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and other settings for the 2100. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_CONF1, 0); } else { ISP_WRITE(isp, BIU2100_CSR, 0); } /* * Reset RISC Processor */ ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); USEC_DELAY(100); /* Clear semaphore register (just to be sure) */ ISP_WRITE(isp, BIU_SEMA, 0); /* * Establish some initial burst rate stuff. * (only for the 1XX0 boards). This really should * be done later after fetching from NVRAM. */ if (IS_SCSI(isp)) { u_int16_t tmp = isp->isp_mdvec->dv_conf1; /* * Busted FIFO. Turn off all but burst enables. 
*/ if (isp->isp_type == ISP_HA_SCSI_1040A) { tmp &= BIU_BURST_ENABLE; } ISP_SETBITS(isp, BIU_CONF1, tmp); if (tmp & BIU_BURST_ENABLE) { ISP_SETBITS(isp, CDMA_CONF, DMA_ENABLE_BURST); ISP_SETBITS(isp, DDMA_CONF, DMA_ENABLE_BURST); } #ifdef PTI_CARDS if (((sdparam *) isp->isp_param)->isp_ultramode) { while (ISP_READ(isp, RISC_MTR) != 0x1313) { ISP_WRITE(isp, RISC_MTR, 0x1313); ISP_WRITE(isp, HCCR, HCCR_CMD_STEP); } } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } /* * PTI specific register */ ISP_WRITE(isp, RISC_EMB, DUAL_BANK) #else ISP_WRITE(isp, RISC_MTR, 0x1212); #endif } else { ISP_WRITE(isp, RISC_MTR2100, 0x1212); - if (IS_2200(isp)) { + if (IS_2200(isp) || IS_2300(isp)) { ISP_WRITE(isp, HCCR, HCCR_2X00_DISABLE_PARITY_PAUSE); } } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); /* release paused processor */ /* * Do MD specific post initialization */ ISP_RESET1(isp); /* * Wait for everything to finish firing up... */ loops = MBOX_DELAY_COUNT; while (ISP_READ(isp, OUTMAILBOX0) == MBOX_BUSY) { USEC_DELAY(100); if (--loops < 0) { isp_prt(isp, ISP_LOGERR, "MBOX_BUSY never cleared on reset"); return; } } /* * Up until this point we've done everything by just reading or * setting registers. From this point on we rely on at least *some* * kind of firmware running in the card. */ /* * Do some sanity checking. 
*/ mbs.param[0] = MBOX_NO_OP; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } if (IS_SCSI(isp)) { mbs.param[0] = MBOX_MAILBOX_REG_TEST; mbs.param[1] = 0xdead; mbs.param[2] = 0xbeef; mbs.param[3] = 0xffff; mbs.param[4] = 0x1111; mbs.param[5] = 0xa5a5; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } if (mbs.param[1] != 0xdead || mbs.param[2] != 0xbeef || mbs.param[3] != 0xffff || mbs.param[4] != 0x1111 || mbs.param[5] != 0xa5a5) { isp_prt(isp, ISP_LOGERR, "Register Test Failed (0x%x 0x%x 0x%x 0x%x 0x%x)", mbs.param[1], mbs.param[2], mbs.param[3], mbs.param[4], mbs.param[5]); return; } } /* * Download new Firmware, unless requested not to do so. * This is made slightly trickier in some cases where the * firmware of the ROM revision is newer than the revision * compiled into the driver. So, where we used to compare * versions of our f/w and the ROM f/w, now we just see * whether we have f/w at all and whether a config flag * has disabled our download. */ if ((isp->isp_mdvec->dv_ispfw == NULL) || (isp->isp_confopts & ISP_CFG_NORELOAD)) { dodnld = 0; } if (dodnld) { u_int16_t fwlen = isp->isp_mdvec->dv_ispfw[3]; for (i = 0; i < fwlen; i++) { mbs.param[0] = MBOX_WRITE_RAM_WORD; mbs.param[1] = ISP_CODE_ORG + i; mbs.param[2] = isp->isp_mdvec->dv_ispfw[i]; isp_mboxcmd(isp, &mbs, MBLOGNONE); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", i); dodnld = 0; goto again; } } /* * Verify that it downloaded correctly. */ mbs.param[0] = MBOX_VERIFY_CHECKSUM; mbs.param[1] = ISP_CODE_ORG; isp_mboxcmd(isp, &mbs, MBLOGNONE); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "Ram Checksum Failure"); return; } isp->isp_loaded_fw = 1; } else { isp->isp_loaded_fw = 0; isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); } /* * Now start it rolling. * * If we didn't actually download f/w, * we still need to (re)start it. 
*/ mbs.param[0] = MBOX_EXEC_FIRMWARE; mbs.param[1] = ISP_CODE_ORG; isp_mboxcmd(isp, &mbs, MBLOGNONE); /* give it a chance to start */ USEC_SLEEP(isp, 500); if (IS_SCSI(isp)) { /* * Set CLOCK RATE, but only if asked to. */ if (isp->isp_clock) { mbs.param[0] = MBOX_SET_CLOCK_RATE; mbs.param[1] = isp->isp_clock; isp_mboxcmd(isp, &mbs, MBLOGALL); /* we will try not to care if this fails */ } } mbs.param[0] = MBOX_ABOUT_FIRMWARE; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp_prt(isp, ISP_LOGCONFIG, "Board Revision %s, %s F/W Revision %d.%d.%d", revname, dodnld? "loaded" : "resident", mbs.param[1], mbs.param[2], mbs.param[3]); if (IS_FC(isp)) { isp_prt(isp, ISP_LOGCONFIG, "Firmware Attributes = 0x%x", mbs.param[6]); if (ISP_READ(isp, BIU2100_CSR) & BIU2100_PCI64) { isp_prt(isp, ISP_LOGCONFIG, "Installed in 64-Bit PCI slot"); } } isp->isp_fwrev[0] = mbs.param[1]; isp->isp_fwrev[1] = mbs.param[2]; isp->isp_fwrev[2] = mbs.param[3]; if (isp->isp_romfw_rev[0] || isp->isp_romfw_rev[1] || isp->isp_romfw_rev[2]) { isp_prt(isp, ISP_LOGCONFIG, "Last F/W revision was %d.%d.%d", isp->isp_romfw_rev[0], isp->isp_romfw_rev[1], isp->isp_romfw_rev[2]); } mbs.param[0] = MBOX_GET_FIRMWARE_STATUS; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_maxcmds = mbs.param[2]; isp_prt(isp, ISP_LOGINFO, "%d max I/O commands supported", mbs.param[2]); isp_fw_state(isp); /* * Set up DMA for the request and result mailboxes. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } isp->isp_state = ISP_RESETSTATE; /* * Okay- now that we have new firmware running, we now (re)set our * notion of how many luns we support. This is somewhat tricky because * if we haven't loaded firmware, we don't have an easy way of telling * how many luns we support. * * We'll make a simplifying assumption- if we loaded firmware, we * are running with expanded lun firmware, otherwise not. 
* * Expanded lun firmware gives you 32 luns for SCSI cards and * 65536 luns for Fibre Channel cards. * * Because the lun is in a a different position in the Request Queue * Entry structure for Fibre Channel with expanded lun firmware, we * can only support one lun (lun zero) when we don't know what kind * of firmware we're running. * * Note that we only do this once (the first time thru isp_reset) * because we may be called again after firmware has been loaded once * and released. */ if (touched == 0) { if (dodnld) { if (IS_SCSI(isp)) { isp->isp_maxluns = 32; } else { isp->isp_maxluns = 65536; } } else { if (IS_SCSI(isp)) { isp->isp_maxluns = 8; } else { isp_prt(isp, ISP_LOGALL, warnlun); isp->isp_maxluns = 1; } } } } /* * Initialize Parameters of Hardware to a known state. * * Locks are held before coming here. */ void isp_init(struct ispsoftc *isp) { /* * Must do this first to get defaults established. */ isp_setdfltparm(isp, 0); if (IS_DUALBUS(isp)) { isp_setdfltparm(isp, 1); } if (IS_FC(isp)) { isp_fibre_init(isp); } else { isp_scsi_init(isp); } } static void isp_scsi_init(struct ispsoftc *isp) { sdparam *sdp_chan0, *sdp_chan1; mbreg_t mbs; sdp_chan0 = isp->isp_param; sdp_chan1 = sdp_chan0; if (IS_DUALBUS(isp)) { sdp_chan1++; } /* * If we have no role (neither target nor initiator), return. */ if (isp->isp_role == ISP_ROLE_NONE) { return; } /* First do overall per-card settings. */ /* * If we have fast memory timing enabled, turn it on. */ if (sdp_chan0->isp_fast_mttr) { ISP_WRITE(isp, RISC_MTR, 0x1313); } /* * Set Retry Delay and Count. * You set both channels at the same time. */ mbs.param[0] = MBOX_SET_RETRY_COUNT; mbs.param[1] = sdp_chan0->isp_retry_count; mbs.param[2] = sdp_chan0->isp_retry_delay; mbs.param[6] = sdp_chan1->isp_retry_count; mbs.param[7] = sdp_chan1->isp_retry_delay; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ASYNC DATA SETUP time. This is very important. 
*/ mbs.param[0] = MBOX_SET_ASYNC_DATA_SETUP_TIME; mbs.param[1] = sdp_chan0->isp_async_data_setup; mbs.param[2] = sdp_chan1->isp_async_data_setup; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ACTIVE Negation State. */ mbs.param[0] = MBOX_SET_ACT_NEG_STATE; mbs.param[1] = (sdp_chan0->isp_req_ack_active_neg << 4) | (sdp_chan0->isp_data_line_active_neg << 5); mbs.param[2] = (sdp_chan1->isp_req_ack_active_neg << 4) | (sdp_chan1->isp_data_line_active_neg << 5); isp_mboxcmd(isp, &mbs, MBLOGNONE); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set active negation state (%d,%d), (%d,%d)", sdp_chan0->isp_req_ack_active_neg, sdp_chan0->isp_data_line_active_neg, sdp_chan1->isp_req_ack_active_neg, sdp_chan1->isp_data_line_active_neg); /* * But don't return. */ } /* * Set the Tag Aging limit */ mbs.param[0] = MBOX_SET_TAG_AGE_LIMIT; mbs.param[1] = sdp_chan0->isp_tag_aging; mbs.param[2] = sdp_chan1->isp_tag_aging; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set tag age limit (%d,%d)", sdp_chan0->isp_tag_aging, sdp_chan1->isp_tag_aging); return; } /* * Set selection timeout. 
*/ mbs.param[0] = MBOX_SET_SELECT_TIMEOUT; mbs.param[1] = sdp_chan0->isp_selection_timeout; mbs.param[2] = sdp_chan1->isp_selection_timeout; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* now do per-channel settings */ isp_scsi_channel_init(isp, 0); if (IS_DUALBUS(isp)) isp_scsi_channel_init(isp, 1); /* * Now enable request/response queues */ mbs.param[0] = MBOX_INIT_RES_QUEUE; mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_MSW(isp->isp_result_dma); mbs.param[3] = DMA_LSW(isp->isp_result_dma); mbs.param[4] = 0; mbs.param[5] = 0; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = mbs.param[5]; mbs.param[0] = MBOX_INIT_REQ_QUEUE; mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_MSW(isp->isp_rquest_dma); mbs.param[3] = DMA_LSW(isp->isp_rquest_dma); mbs.param[4] = 0; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; /* * Turn on Fast Posting, LVD transitions * * Ultra2 F/W always has had fast posting (and LVD transitions) * * Ultra and older (i.e., SBus) cards may not. It's just safer * to assume not for them. */ mbs.param[0] = MBOX_SET_FW_FEATURES; mbs.param[1] = 0; if (IS_ULTRA2(isp)) mbs.param[1] |= FW_FEATURE_LVD_NOTIFY; if (IS_ULTRA2(isp) || IS_1240(isp)) mbs.param[1] |= FW_FEATURE_FAST_POST; if (mbs.param[1] != 0) { u_int16_t sfeat = mbs.param[1]; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGINFO, "Enabled FW features (0x%x)", sfeat); } } /* * Let the outer layers decide whether to issue a SCSI bus reset. */ isp->isp_state = ISP_INITSTATE; } static void isp_scsi_channel_init(struct ispsoftc *isp, int channel) { sdparam *sdp; mbreg_t mbs; int tgt; sdp = isp->isp_param; sdp += channel; /* * Set (possibly new) Initiator ID. 
*/ mbs.param[0] = MBOX_SET_INIT_SCSI_ID; mbs.param[1] = (channel << 7) | sdp->isp_initiator_id; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp_prt(isp, ISP_LOGINFO, "Initiator ID is %d on Channel %d", sdp->isp_initiator_id, channel); /* * Set current per-target parameters to a safe minimum. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { int lun; u_int16_t sdf; if (sdp->isp_devparam[tgt].dev_enable == 0) { continue; } #ifndef ISP_TARGET_MODE if (tgt == sdp->isp_initiator_id) { sdf = DPARM_DEFAULT; } else { sdf = DPARM_SAFE_DFLT; /* * It is not quite clear when this changed over so that * we could force narrow and async for 1000/1020 cards, * but assume that this is only the case for loaded * firmware. */ if (isp->isp_loaded_fw) { sdf |= DPARM_NARROW | DPARM_ASYNC; } } #else /* * The !$*!)$!$)* f/w uses the same index into some * internal table to decide how to respond to negotiations, * so if we've said "let's be safe" for ID X, and ID X * selects *us*, the negotiations will back to 'safe' * (as in narrow/async). What the f/w *should* do is * use the initiator id settings to decide how to respond. 
*/ sdf = DPARM_DEFAULT; #endif mbs.param[0] = MBOX_SET_TARGET_PARAMS; mbs.param[1] = (channel << 15) | (tgt << 8); mbs.param[2] = sdf; if ((sdf & DPARM_SYNC) == 0) { mbs.param[3] = 0; } else { mbs.param[3] = (sdp->isp_devparam[tgt].sync_offset << 8) | (sdp->isp_devparam[tgt].sync_period); } isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", channel, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); isp_mboxcmd(isp, &mbs, MBLOGNONE); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdf = DPARM_SAFE_DFLT; mbs.param[0] = MBOX_SET_TARGET_PARAMS; mbs.param[1] = (tgt << 8) | (channel << 15); mbs.param[2] = sdf; mbs.param[3] = 0; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } } /* * We don't update any information directly from the f/w * because we need to run at least one command to cause a * new state to be latched up. So, we just assume that we * converge to the values we just had set. * * Ensure that we don't believe tagged queuing is enabled yet. * It turns out that sometimes the ISP just ignores our * attempts to set parameters for devices that it hasn't * seen yet. */ sdp->isp_devparam[tgt].cur_dflags = sdf & ~DPARM_TQING; for (lun = 0; lun < (int) isp->isp_maxluns; lun++) { mbs.param[0] = MBOX_SET_DEV_QUEUE_PARAMS; mbs.param[1] = (channel << 15) | (tgt << 8) | lun; mbs.param[2] = sdp->isp_max_queue_depth; mbs.param[3] = sdp->isp_devparam[tgt].exc_throttle; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_refresh) { isp->isp_sendmarker |= (1 << channel); isp->isp_update |= (1 << channel); break; } } } /* * Fibre Channel specific initialization. * * Locks are held before coming here. 
*/ static void isp_fibre_init(struct ispsoftc *isp) { fcparam *fcp; isp_icb_t *icbp; mbreg_t mbs; int loopid; u_int64_t nwwn, pwwn; fcp = isp->isp_param; /* * Do this *before* initializing the firmware. */ isp_mark_getpdb_all(isp); fcp->isp_fwstate = FW_CONFIG_WAIT; fcp->isp_loopstate = LOOP_NIL; /* * If we have no role (neither target nor initiator), return. */ if (isp->isp_role == ISP_ROLE_NONE) { return; } loopid = DEFAULT_LOOPID(isp); icbp = (isp_icb_t *) fcp->isp_scratch; MEMZERO(icbp, sizeof (*icbp)); icbp->icb_version = ICB_VERSION1; /* * Firmware Options are either retrieved from NVRAM or * are patched elsewhere. We check them for sanity here * and make changes based on board revision, but otherwise * let others decide policy. */ /* * If this is a 2100 < revision 5, we have to turn off FAIRNESS. */ if ((isp->isp_type == ISP_HA_FC_2100) && isp->isp_revision < 5) { fcp->isp_fwoptions &= ~ICBOPT_FAIRNESS; } /* * We have to use FULL LOGIN even though it resets the loop too much * because otherwise port database entries don't get updated after * a LIP- this is a known f/w bug for 2100 f/w less than 1.17.0. */ if (ISP_FW_REVX(isp->isp_fwrev) < ISP_FW_REV(1, 17, 0)) { fcp->isp_fwoptions |= ICBOPT_FULL_LOGIN; } /* * Insist on Port Database Update Async notifications */ fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; /* * Make sure that target role reflects into fwoptions. */ if (isp->isp_role & ISP_ROLE_TARGET) { fcp->isp_fwoptions |= ICBOPT_TGT_ENABLE; } else { fcp->isp_fwoptions &= ~ICBOPT_TGT_ENABLE; } /* * Propagate all of this into the ICB structure. 
*/ icbp->icb_fwoptions = fcp->isp_fwoptions; icbp->icb_maxfrmlen = fcp->isp_maxfrmlen; if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", fcp->isp_maxfrmlen, ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_maxalloc = fcp->isp_maxalloc; if (icbp->icb_maxalloc < 1) { isp_prt(isp, ISP_LOGERR, "bad maximum allocation (%d)- using 16", fcp->isp_maxalloc); icbp->icb_maxalloc = 16; } icbp->icb_execthrottle = fcp->isp_execthrottle; if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using 16", fcp->isp_execthrottle); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } icbp->icb_retry_delay = fcp->isp_retry_delay; icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_hardaddr = loopid; /* * Right now we just set extended options to prefer point-to-point * over loop based upon some soft config options. */ - if (IS_2200(isp)) { + if (IS_2200(isp) || IS_2300(isp)) { icbp->icb_fwoptions |= ICBOPT_EXTENDED; /* * Prefer or force Point-To-Point instead Loop? 
*/ switch(isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_NPORT: icbp->icb_xfwoptions = ICBXOPT_PTP_2_LOOP; break; case ISP_CFG_NPORT_ONLY: icbp->icb_xfwoptions = ICBXOPT_PTP_ONLY; break; case ISP_CFG_LPORT_ONLY: icbp->icb_xfwoptions = ICBXOPT_LOOP_ONLY; break; default: icbp->icb_xfwoptions = ICBXOPT_LOOP_2_PTP; break; } } icbp->icb_logintime = 60; /* 60 second login timeout */ nwwn = ISP_NODEWWN(isp); pwwn = ISP_PORTWWN(isp); if (nwwn && pwwn) { icbp->icb_fwoptions |= ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, nwwn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, pwwn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((u_int32_t) (nwwn >> 32)), ((u_int32_t) (nwwn & 0xffffffff)), ((u_int32_t) (pwwn >> 32)), ((u_int32_t) (pwwn & 0xffffffff))); } else { isp_prt(isp, ISP_LOGDEBUG1, "Not using any WWNs"); icbp->icb_fwoptions &= ~(ICBOPT_BOTH_WWNS|ICBOPT_FULL_LOGIN); } icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_LSW(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_MSW(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_LSW(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_MSW(isp->isp_result_dma); isp_prt(isp, ISP_LOGDEBUG1, "isp_fibre_init: fwoptions 0x%x", fcp->isp_fwoptions); ISP_SWIZZLE_ICB(isp, icbp); /* * Init the firmware */ mbs.param[0] = MBOX_INIT_FIRMWARE; mbs.param[1] = 0; mbs.param[2] = DMA_MSW(fcp->isp_scdma); mbs.param[3] = DMA_LSW(fcp->isp_scdma); mbs.param[4] = 0; mbs.param[5] = 0; mbs.param[6] = 0; mbs.param[7] = 0; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_sendmarker = 1; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_INITSTATE; } /* * Fibre Channel Support- get the port database for the id. * * Locks are held before coming here. 
Return 0 if success,
 * else failure.
 */
static int
isp_getmap(struct ispsoftc *isp, fcpos_map_t *map)
{
	fcparam *fcp = (fcparam *) isp->isp_param;
	mbreg_t mbs;

	mbs.param[0] = MBOX_GET_FC_AL_POSITION_MAP;
	mbs.param[1] = 0;
	/* f/w DMAs the position map into the shared scratch area */
	mbs.param[2] = DMA_MSW(fcp->isp_scdma);
	mbs.param[3] = DMA_LSW(fcp->isp_scdma);
	/*
	 * Unneeded. For the 2100, except for initializing f/w, registers
	 * 4/5 have to not be written to.
	 *	mbs.param[4] = 0;
	 *	mbs.param[5] = 0;
	 *
	 */
	mbs.param[6] = 0;
	mbs.param[7] = 0;
	isp_mboxcmd(isp, &mbs, MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR);
	if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
		MEMCPY(map, fcp->isp_scratch, sizeof (fcpos_map_t));
		/* nonzero param[1] means the f/w itself provided the map */
		map->fwmap = mbs.param[1] != 0;
		return (0);
	}
	return (-1);
}

/*
 * Invalidate all cached local and fabric port database entries so
 * that they will be refetched on the next synchronization.
 */
static void
isp_mark_getpdb_all(struct ispsoftc *isp)
{
	fcparam *fcp = (fcparam *) isp->isp_param;
	int i;
	for (i = 0; i < MAX_FC_TARG; i++) {
		fcp->portdb[i].valid = fcp->portdb[i].fabric_dev = 0;
	}
}

/*
 * Fetch the firmware's port database entry for a given loop id into
 * *pdbp via the GET PORT DATABASE mailbox command. Returns 0 on
 * success, -1 on mailbox failure.
 */
static int
isp_getpdb(struct ispsoftc *isp, int id, isp_pdb_t *pdbp)
{
	fcparam *fcp = (fcparam *) isp->isp_param;
	mbreg_t mbs;

	mbs.param[0] = MBOX_GET_PORT_DB;
	mbs.param[1] = id << 8;		/* loop id goes in the high byte */
	mbs.param[2] = DMA_MSW(fcp->isp_scdma);
	mbs.param[3] = DMA_LSW(fcp->isp_scdma);
	/*
	 * Unneeded. For the 2100, except for initializing f/w, registers
	 * 4/5 have to not be written to.
	 *	mbs.param[4] = 0;
	 *	mbs.param[5] = 0;
	 *
	 */
	mbs.param[6] = 0;
	mbs.param[7] = 0;
	isp_mboxcmd(isp, &mbs, MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR);
	if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
		ISP_UNSWIZZLE_AND_COPY_PDBP(isp, pdbp, fcp->isp_scratch);
		return (0);
	}
	return (-1);
}

/*
 * Return the Port WWN (or Node WWN if 'nodename' is nonzero) for the
 * device at 'loopid', assembled from the GET PORT NAME mailbox reply
 * registers. Returns 0 if the command fails.
 */
static u_int64_t
isp_get_portname(struct ispsoftc *isp, int loopid, int nodename)
{
	u_int64_t wwn = 0;
	mbreg_t mbs;

	mbs.param[0] = MBOX_GET_PORT_NAME;
	mbs.param[1] = loopid << 8;
	if (nodename)
		mbs.param[1] |= 1;	/* bit 0 selects node name */
	isp_mboxcmd(isp, &mbs, MBLOGALL & ~MBOX_COMMAND_PARAM_ERROR);
	if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
		/*
		 * The 64 bit WWN comes back a byte at a time packed
		 * into mailbox registers 2, 3, 6 and 7.
		 */
		wwn =
		    (((u_int64_t)(mbs.param[2] & 0xff)) << 56) |
		    (((u_int64_t)(mbs.param[2] >> 8)) << 48) |
		    (((u_int64_t)(mbs.param[3] & 0xff)) << 40) |
		    (((u_int64_t)(mbs.param[3] >> 8)) << 32) |
		    (((u_int64_t)(mbs.param[6] & 0xff)) << 24) |
		    (((u_int64_t)(mbs.param[6] >> 8)) << 16) |
		    (((u_int64_t)(mbs.param[7] & 0xff)) << 8) |
		    (((u_int64_t)(mbs.param[7] >> 8)));
	}
	return (wwn);
}

/*
 * Make sure we have good FC link and know our Loop ID.
 */
static int
isp_fclink_test(struct ispsoftc *isp, int usdelay)
{
	static char *toponames[] = {
		"Private Loop",
		"FL Port",
		"N-Port to N-Port",
		"F Port",
		"F Port (no FLOGI_ACC response)"
	};
	mbreg_t mbs;
	int count, check_for_fabric;
	u_int8_t lwfs;
	fcparam *fcp;
	struct lportdb *lp;
	isp_pdb_t pdb;

	fcp = isp->isp_param;

	/*
	 * XXX: Here is where we would start a 'loop dead' timeout
	 */

	/*
	 * Wait up to N microseconds for F/W to go to a ready state.
	 */
	lwfs = FW_CONFIG_WAIT;
	count = 0;
	while (count < usdelay) {
		u_int64_t enano;
		u_int32_t wrk;
		NANOTIME_T hra, hrb;

		GET_NANOTIME(&hra);
		isp_fw_state(isp);
		if (lwfs != fcp->isp_fwstate) {
			isp_prt(isp, ISP_LOGINFO, "Firmware State <%s->%s>",
			    isp2100_fw_statename((int)lwfs),
			    isp2100_fw_statename((int)fcp->isp_fwstate));
			lwfs = fcp->isp_fwstate;
		}
		if (fcp->isp_fwstate == FW_READY) {
			break;
		}
		GET_NANOTIME(&hrb);

		/*
		 * Get the elapsed time in nanoseconds.
		 * Always guaranteed to be non-zero.
		 */
		enano = NANOTIME_SUB(&hrb, &hra);

		isp_prt(isp, ISP_LOGDEBUG1,
		    "usec%d: 0x%lx->0x%lx enano 0x%x%08x",
		    count, (long) GET_NANOSEC(&hra), (long) GET_NANOSEC(&hrb),
		    (u_int32_t)(enano >> 32), (u_int32_t)(enano & 0xffffffff));

		/*
		 * If the elapsed time is less than 1 millisecond,
		 * delay a period of time up to that millisecond of
		 * waiting.
		 *
		 * This peculiar code is an attempt to try and avoid
		 * invoking u_int64_t math support functions for some
		 * platforms where linkage is a problem.
		 */
		if (enano < (1000 * 1000)) {
			count += 1000;
			enano = (1000 * 1000) - enano;
			while (enano > (u_int64_t) 4000000000U) {
				USEC_SLEEP(isp, 4000000);
				enano -= (u_int64_t) 4000000000U;
			}
			wrk = enano;
			wrk /= 1000;
			USEC_SLEEP(isp, wrk);
		} else {
			while (enano > (u_int64_t) 4000000000U) {
				count += 4000000;
				enano -= (u_int64_t) 4000000000U;
			}
			wrk = enano;
			count += (wrk / 1000);
		}
	}

	/*
	 * If we haven't gone to 'ready' state, return.
	 */
	if (fcp->isp_fwstate != FW_READY) {
		return (-1);
	}

	/*
	 * Get our Loop ID (if possible). We really need to have it.
	 */
	mbs.param[0] = MBOX_GET_LOOP_ID;
	isp_mboxcmd(isp, &mbs, MBLOGALL);
	if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
		return (-1);
	}
	fcp->isp_loopid = mbs.param[1];
	if (IS_2200(isp) || IS_2300(isp)) {
		/*
		 * 2200 and 2300 f/w report the attached topology in
		 * mailbox register 6; clamp unexpected values.
		 */
		int topo = (int) mbs.param[6];
		if (topo < TOPO_NL_PORT || topo > TOPO_PTP_STUB)
			topo = TOPO_PTP_STUB;
		fcp->isp_topo = topo;
	} else {
		fcp->isp_topo = TOPO_NL_PORT;
	}
	fcp->isp_portid = fcp->isp_alpa = mbs.param[2] & 0xff;

	/*
	 * Check to see if we're on a fabric by trying to see if we
	 * can talk to the fabric name server. This can be a bit
	 * tricky because if we're a 2100, we should check always
	 * (in case we're connected to an server doing aliasing).
	 */
	fcp->isp_onfabric = 0;
	if (IS_2100(isp))
		check_for_fabric = 1;
	else if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_F_PORT)
		check_for_fabric = 1;
	else
		check_for_fabric = 0;
	if (check_for_fabric && isp_getpdb(isp, FL_PORT_ID, &pdb) == 0) {
		int loopid = FL_PORT_ID;
		if (IS_2100(isp)) {
			fcp->isp_topo = TOPO_FL_PORT;
		}
		if (BITS2WORD(pdb.pdb_portid_bits) == 0) {
			/*
			 * Crock.
			 */
			fcp->isp_topo = TOPO_NL_PORT;
			goto not_on_fabric;
		}
		fcp->isp_portid = mbs.param[2] | ((int) mbs.param[3] << 16);

		/*
		 * Save the Fabric controller's port database entry.
		 */
		lp = &fcp->portdb[loopid];
		lp->node_wwn =
		    (((u_int64_t)pdb.pdb_nodename[0]) << 56) |
		    (((u_int64_t)pdb.pdb_nodename[1]) << 48) |
		    (((u_int64_t)pdb.pdb_nodename[2]) << 40) |
		    (((u_int64_t)pdb.pdb_nodename[3]) << 32) |
		    (((u_int64_t)pdb.pdb_nodename[4]) << 24) |
		    (((u_int64_t)pdb.pdb_nodename[5]) << 16) |
		    (((u_int64_t)pdb.pdb_nodename[6]) << 8) |
		    (((u_int64_t)pdb.pdb_nodename[7]));
		lp->port_wwn =
		    (((u_int64_t)pdb.pdb_portname[0]) << 56) |
		    (((u_int64_t)pdb.pdb_portname[1]) << 48) |
		    (((u_int64_t)pdb.pdb_portname[2]) << 40) |
		    (((u_int64_t)pdb.pdb_portname[3]) << 32) |
		    (((u_int64_t)pdb.pdb_portname[4]) << 24) |
		    (((u_int64_t)pdb.pdb_portname[5]) << 16) |
		    (((u_int64_t)pdb.pdb_portname[6]) << 8) |
		    (((u_int64_t)pdb.pdb_portname[7]));
		lp->roles =
		    (pdb.pdb_prli_svc3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
		lp->portid = BITS2WORD(pdb.pdb_portid_bits);
		lp->loopid = pdb.pdb_loopid;
		lp->loggedin = lp->valid = 1;
		fcp->isp_onfabric = 1;
		(void) isp_async(isp, ISPASYNC_PROMENADE, &loopid);
		isp_register_fc4_type(isp);
	} else {
not_on_fabric:
		fcp->isp_onfabric = 0;
		fcp->portdb[FL_PORT_ID].valid = 0;
	}

	isp_prt(isp, ISP_LOGINFO, topology, fcp->isp_loopid, fcp->isp_alpa,
	    fcp->isp_portid, fcp->isp_loopstate, toponames[fcp->isp_topo]);

	/*
	 * Announce ourselves, too. This involves synthesizing an entry.
	 */
	if (fcp->isp_iid_set == 0) {
		fcp->isp_iid_set = 1;
		fcp->isp_iid = fcp->isp_loopid;
		lp = &fcp->portdb[fcp->isp_iid];
	} else {
		lp = &fcp->portdb[fcp->isp_iid];
		if (fcp->isp_portid != lp->portid ||
		    fcp->isp_loopid != lp->loopid ||
		    fcp->isp_nodewwn != ISP_NODEWWN(isp) ||
		    fcp->isp_portwwn != ISP_PORTWWN(isp)) {
			lp->valid = 0;
			count = fcp->isp_iid;
			(void) isp_async(isp, ISPASYNC_PROMENADE, &count);
		}
	}
	lp->loopid = fcp->isp_loopid;
	lp->portid = fcp->isp_portid;
	lp->node_wwn = ISP_NODEWWN(isp);
	lp->port_wwn = ISP_PORTWWN(isp);
	switch (isp->isp_role) {
	case ISP_ROLE_NONE:
		lp->roles = 0;
		break;
	case ISP_ROLE_TARGET:
		lp->roles = SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT;
		break;
	case ISP_ROLE_INITIATOR:
		lp->roles = SVC3_INI_ROLE >> SVC3_ROLE_SHIFT;
		break;
	case ISP_ROLE_BOTH:
		lp->roles = (SVC3_INI_ROLE|SVC3_TGT_ROLE) >> SVC3_ROLE_SHIFT;
		break;
	}
	lp->loggedin = lp->valid = 1;
	count = fcp->isp_iid;
	(void) isp_async(isp, ISPASYNC_PROMENADE, &count);
	return (0);
}

/*
 * Map an FC firmware state code to a human readable name for logging.
 */
static char *
isp2100_fw_statename(int state)
{
	switch(state) {
	case FW_CONFIG_WAIT:	return "Config Wait";
	case FW_WAIT_AL_PA:	return "Waiting for AL_PA";
	case FW_WAIT_LOGIN:	return "Wait Login";
	case FW_READY:		return "Ready";
	case FW_LOSS_OF_SYNC:	return "Loss Of Sync";
	case FW_ERROR:		return "Error";
	case FW_REINIT:		return "Re-Init";
	case FW_NON_PART:	return "Nonparticipating";
	default:		return "?????";
	}
}

/*
 * Synchronize our soft copy of the port database with what the f/w thinks
 * (with a view toward possibly for a specific target....)
 */
static int
isp_pdb_sync(struct ispsoftc *isp)
{
	struct lportdb *lp;
	fcparam *fcp = isp->isp_param;
	isp_pdb_t pdb;
	int loopid, base, lim;

	/*
	 * Make sure we're okay for doing this right now.
	 */
	if (fcp->isp_loopstate != LOOP_PDB_RCVD &&
	    fcp->isp_loopstate != LOOP_FSCAN_DONE &&
	    fcp->isp_loopstate != LOOP_LSCAN_DONE) {
		return (-1);
	}

	if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_NL_PORT ||
	    fcp->isp_topo == TOPO_N_PORT) {
		if (fcp->isp_loopstate < LOOP_LSCAN_DONE) {
			if (isp_scan_loop(isp) != 0) {
				return (-1);
			}
		}
	}
	fcp->isp_loopstate = LOOP_SYNCING_PDB;

	/*
	 * If we get this far, we've settled our differences with the f/w
	 * (for local loop device) and we can say that the loop state is ready.
	 */
	if (fcp->isp_topo == TOPO_NL_PORT) {
		fcp->loop_seen_once = 1;
		fcp->isp_loopstate = LOOP_READY;
		return (0);
	}

	/*
	 * Find all Fabric Entities that didn't make it from one scan to the
	 * next and let the world know they went away. Scan the whole database.
	 */
	for (lp = &fcp->portdb[0]; lp < &fcp->portdb[MAX_FC_TARG]; lp++) {
		if (lp->was_fabric_dev && lp->fabric_dev == 0) {
			loopid = lp - fcp->portdb;
			lp->valid = 0;	/* should already be set */
			(void) isp_async(isp, ISPASYNC_PROMENADE, &loopid);
			MEMZERO((void *) lp, sizeof (*lp));
			continue;
		}
		lp->was_fabric_dev = lp->fabric_dev;
	}

	/* skip the reserved f/w handles when attached via an FL port */
	if (fcp->isp_topo == TOPO_FL_PORT)
		base = FC_SNS_ID+1;
	else
		base = 0;

	if (fcp->isp_topo == TOPO_N_PORT)
		lim = 1;
	else
		lim = MAX_FC_TARG;

	/*
	 * Now log in any fabric devices that the outer layer has
	 * left for us to see. This seems the most sane policy
	 * for the moment.
	 */
	for (lp = &fcp->portdb[base]; lp < &fcp->portdb[lim]; lp++) {
		u_int32_t portid;
		mbreg_t mbs;

		loopid = lp - fcp->portdb;
		if (loopid >= FL_PORT_ID && loopid <= FC_SNS_ID) {
			continue;
		}

		/*
		 * Anything here?
		 */
		if (lp->port_wwn == 0) {
			continue;
		}

		/*
		 * Don't try to log into yourself.
		 */
		if ((portid = lp->portid) == fcp->isp_portid) {
			continue;
		}

		/*
		 * If we'd been logged in- see if we still are and we haven't
		 * changed. If so, no need to log ourselves out, etc..
		 *
		 * Unfortunately, our charming Qlogic f/w has decided to
		 * return a valid port database entry for a fabric device
		 * that has, in fact, gone away. And it hangs trying to
		 * log it out.
		 */
		if (lp->loggedin &&
		    isp_getpdb(isp, lp->loopid, &pdb) == 0) {
			int nrole;
			u_int64_t nwwnn, nwwpn;
			nwwnn =
			    (((u_int64_t)pdb.pdb_nodename[0]) << 56) |
			    (((u_int64_t)pdb.pdb_nodename[1]) << 48) |
			    (((u_int64_t)pdb.pdb_nodename[2]) << 40) |
			    (((u_int64_t)pdb.pdb_nodename[3]) << 32) |
			    (((u_int64_t)pdb.pdb_nodename[4]) << 24) |
			    (((u_int64_t)pdb.pdb_nodename[5]) << 16) |
			    (((u_int64_t)pdb.pdb_nodename[6]) << 8) |
			    (((u_int64_t)pdb.pdb_nodename[7]));
			nwwpn =
			    (((u_int64_t)pdb.pdb_portname[0]) << 56) |
			    (((u_int64_t)pdb.pdb_portname[1]) << 48) |
			    (((u_int64_t)pdb.pdb_portname[2]) << 40) |
			    (((u_int64_t)pdb.pdb_portname[3]) << 32) |
			    (((u_int64_t)pdb.pdb_portname[4]) << 24) |
			    (((u_int64_t)pdb.pdb_portname[5]) << 16) |
			    (((u_int64_t)pdb.pdb_portname[6]) << 8) |
			    (((u_int64_t)pdb.pdb_portname[7]));
			nrole = (pdb.pdb_prli_svc3 & SVC3_ROLE_MASK) >>
			    SVC3_ROLE_SHIFT;
			if (pdb.pdb_loopid == lp->loopid && lp->portid ==
			    (u_int32_t) BITS2WORD(pdb.pdb_portid_bits) &&
			    nwwnn == lp->node_wwn && nwwpn == lp->port_wwn &&
			    lp->roles == nrole) {
				lp->loggedin = lp->valid = 1;
				isp_prt(isp, ISP_LOGINFO, lretained,
				    (int) (lp - fcp->portdb),
				    (int) lp->loopid, lp->portid);
				continue;
			}
		}

		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_SYNCING_PDB) {
			return (-1);
		}

		/*
		 * Force a logout if we were logged in.
		 */
		if (lp->loggedin) {
			if (isp_getpdb(isp, lp->loopid, &pdb) == 0) {
				mbs.param[0] = MBOX_FABRIC_LOGOUT;
				mbs.param[1] = lp->loopid << 8;
				mbs.param[2] = 0;
				mbs.param[3] = 0;
				isp_mboxcmd(isp, &mbs, MBLOGNONE);
				lp->loggedin = 0;
				isp_prt(isp, ISP_LOGINFO, plogout,
				    (int) (lp - fcp->portdb), lp->loopid,
				    lp->portid);
			}
			lp->loggedin = 0;
			if (fcp->isp_fwstate != FW_READY ||
			    fcp->isp_loopstate != LOOP_SYNCING_PDB) {
				return (-1);
			}
		}

		/*
		 * And log in....
		 */
		loopid = lp - fcp->portdb;
		/* FL_PORT_ID doubles as the 'no loop id assigned' sentinel */
		lp->loopid = FL_PORT_ID;
		do {
			mbs.param[0] = MBOX_FABRIC_LOGIN;
			mbs.param[1] = loopid << 8;
			mbs.param[2] = portid >> 16;
			mbs.param[3] = portid & 0xffff;
			if (IS_2200(isp) || IS_2300(isp)) {
				/* only issue a PLOGI if not logged in */
				mbs.param[1] |= 0x1;
			}
			isp_mboxcmd(isp, &mbs, MBLOGALL & ~(MBOX_LOOP_ID_USED |
			    MBOX_PORT_ID_USED | MBOX_COMMAND_ERROR));
			if (fcp->isp_fwstate != FW_READY ||
			    fcp->isp_loopstate != LOOP_SYNCING_PDB) {
				return (-1);
			}
			switch (mbs.param[0]) {
			case MBOX_LOOP_ID_USED:
				/*
				 * Try the next available loop id.
				 */
				loopid++;
				break;
			case MBOX_PORT_ID_USED:
				/*
				 * This port is already logged in.
				 * Snaffle the loop id it's using if it's
				 * nonzero, otherwise we're hosed.
				 */
				if (mbs.param[1] != 0) {
					loopid = mbs.param[1];
					isp_prt(isp, ISP_LOGINFO, retained,
					    loopid, (int) (lp - fcp->portdb),
					    lp->portid);
				} else {
					loopid = MAX_FC_TARG;
					break;
				}
				/* FALLTHROUGH */
			case MBOX_COMMAND_COMPLETE:
				lp->loggedin = 1;
				lp->loopid = loopid;
				break;
			case MBOX_COMMAND_ERROR:
				isp_prt(isp, ISP_LOGINFO, plogierr,
				    portid, mbs.param[1]);
				/* FALLTHROUGH */
			case MBOX_ALL_IDS_USED:	/* We're outta IDs */
			default:
				loopid = MAX_FC_TARG;
				break;
			}
		} while (lp->loopid == FL_PORT_ID && loopid < MAX_FC_TARG);

		/*
		 * If we get here and we haven't set a Loop ID,
		 * we failed to log into this device.
		 */
		if (lp->loopid == FL_PORT_ID) {
			lp->loopid = 0;
			continue;
		}

		/*
		 * Make sure we can get the approriate port information.
		 */
		if (isp_getpdb(isp, lp->loopid, &pdb) != 0) {
			isp_prt(isp, ISP_LOGWARN, nopdb, lp->portid);
			goto dump_em;
		}

		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_SYNCING_PDB) {
			return (-1);
		}

		if (pdb.pdb_loopid != lp->loopid) {
			isp_prt(isp, ISP_LOGWARN, pdbmfail1,
			    lp->portid, pdb.pdb_loopid);
			goto dump_em;
		}

		if (lp->portid != (u_int32_t) BITS2WORD(pdb.pdb_portid_bits)) {
			isp_prt(isp, ISP_LOGWARN, pdbmfail2,
			    lp->portid, BITS2WORD(pdb.pdb_portid_bits));
			goto dump_em;
		}

		lp->roles =
		    (pdb.pdb_prli_svc3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
		lp->node_wwn =
		    (((u_int64_t)pdb.pdb_nodename[0]) << 56) |
		    (((u_int64_t)pdb.pdb_nodename[1]) << 48) |
		    (((u_int64_t)pdb.pdb_nodename[2]) << 40) |
		    (((u_int64_t)pdb.pdb_nodename[3]) << 32) |
		    (((u_int64_t)pdb.pdb_nodename[4]) << 24) |
		    (((u_int64_t)pdb.pdb_nodename[5]) << 16) |
		    (((u_int64_t)pdb.pdb_nodename[6]) << 8) |
		    (((u_int64_t)pdb.pdb_nodename[7]));
		lp->port_wwn =
		    (((u_int64_t)pdb.pdb_portname[0]) << 56) |
		    (((u_int64_t)pdb.pdb_portname[1]) << 48) |
		    (((u_int64_t)pdb.pdb_portname[2]) << 40) |
		    (((u_int64_t)pdb.pdb_portname[3]) << 32) |
		    (((u_int64_t)pdb.pdb_portname[4]) << 24) |
		    (((u_int64_t)pdb.pdb_portname[5]) << 16) |
		    (((u_int64_t)pdb.pdb_portname[6]) << 8) |
		    (((u_int64_t)pdb.pdb_portname[7]));

		/*
		 * Check to make sure this all makes sense.
		 */
		if (lp->node_wwn && lp->port_wwn) {
			lp->valid = 1;
			loopid = lp - fcp->portdb;
			(void) isp_async(isp, ISPASYNC_PROMENADE, &loopid);
			continue;
		}
dump_em:
		lp->valid = 0;
		isp_prt(isp, ISP_LOGINFO,
		    ldumped, loopid, lp->loopid, lp->portid);
		mbs.param[0] = MBOX_FABRIC_LOGOUT;
		mbs.param[1] = lp->loopid << 8;
		mbs.param[2] = 0;
		mbs.param[3] = 0;
		isp_mboxcmd(isp, &mbs, MBLOGNONE);
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_SYNCING_PDB) {
			return (-1);
		}
	}
	/*
	 * If we get here, we've for sure seen not only a valid loop
	 * but know what is or isn't on it, so mark this for usage
	 * in isp_start.
	 */
	fcp->loop_seen_once = 1;
	fcp->isp_loopstate = LOOP_READY;
	return (0);
}

/*
 * Scan the local loop: collect node/port WWNs and port database
 * entries for each loop id into a temporary table, then merge them
 * into the cached port database, notifying the outer layers (via
 * ISPASYNC_PROMENADE) of arrivals, moves and departures.
 */
static int
isp_scan_loop(struct ispsoftc *isp)
{
	struct lportdb *lp;
	fcparam *fcp = isp->isp_param;
	isp_pdb_t pdb;
	int loopid, lim, hival;

	switch (fcp->isp_topo) {
	case TOPO_NL_PORT:
		hival = FL_PORT_ID;
		break;
	case TOPO_N_PORT:
		hival = 2;
		break;
	case TOPO_FL_PORT:
		hival = FC_PORT_ID;
		break;
	default:
		fcp->isp_loopstate = LOOP_LSCAN_DONE;
		return (0);
	}
	fcp->isp_loopstate = LOOP_SCANNING_LOOP;

	/*
	 * make sure the temp port database is clean...
	 */
	MEMZERO((void *)fcp->tport, sizeof (fcp->tport));

	/*
	 * Run through the local loop ports and get port database info
	 * for each loop ID.
	 *
	 * There's a somewhat unexplained situation where the f/w passes back
	 * the wrong database entity- if that happens, just restart (up to
	 * FL_PORT_ID times).
	 */
	for (lim = loopid = 0; loopid < hival; loopid++) {
		lp = &fcp->tport[loopid];

		/*
		 * Don't even try for ourselves...
		 */
		if (loopid == fcp->isp_loopid)
			continue;

		lp->node_wwn = isp_get_portname(isp, loopid, 1);
		if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
			return (-1);
		if (lp->node_wwn == 0)
			continue;
		lp->port_wwn = isp_get_portname(isp, loopid, 0);
		if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
			return (-1);
		if (lp->port_wwn == 0) {
			lp->node_wwn = 0;
			continue;
		}

		/*
		 * Get an entry....
		 */
		if (isp_getpdb(isp, loopid, &pdb) != 0) {
			if (fcp->isp_loopstate < LOOP_SCANNING_LOOP)
				return (-1);
			continue;
		}
		if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) {
			return (-1);
		}

		/*
		 * If the returned database element doesn't match what we
		 * asked for, restart the process entirely (up to a point...).
		 */
		if (pdb.pdb_loopid != loopid) {
			loopid = 0;
			if (lim++ < hival) {
				continue;
			}
			isp_prt(isp, ISP_LOGWARN,
			    "giving up on synchronizing the port database");
			return (-1);
		}

		/*
		 * Save the pertinent info locally.
		 */
		lp->node_wwn =
		    (((u_int64_t)pdb.pdb_nodename[0]) << 56) |
		    (((u_int64_t)pdb.pdb_nodename[1]) << 48) |
		    (((u_int64_t)pdb.pdb_nodename[2]) << 40) |
		    (((u_int64_t)pdb.pdb_nodename[3]) << 32) |
		    (((u_int64_t)pdb.pdb_nodename[4]) << 24) |
		    (((u_int64_t)pdb.pdb_nodename[5]) << 16) |
		    (((u_int64_t)pdb.pdb_nodename[6]) << 8) |
		    (((u_int64_t)pdb.pdb_nodename[7]));
		lp->port_wwn =
		    (((u_int64_t)pdb.pdb_portname[0]) << 56) |
		    (((u_int64_t)pdb.pdb_portname[1]) << 48) |
		    (((u_int64_t)pdb.pdb_portname[2]) << 40) |
		    (((u_int64_t)pdb.pdb_portname[3]) << 32) |
		    (((u_int64_t)pdb.pdb_portname[4]) << 24) |
		    (((u_int64_t)pdb.pdb_portname[5]) << 16) |
		    (((u_int64_t)pdb.pdb_portname[6]) << 8) |
		    (((u_int64_t)pdb.pdb_portname[7]));
		lp->roles =
		    (pdb.pdb_prli_svc3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
		lp->portid = BITS2WORD(pdb.pdb_portid_bits);
		lp->loopid = pdb.pdb_loopid;
	}

	/*
	 * Mark all of the permanent local loop database entries as invalid
	 * (except our own entry).
	 */
	for (loopid = 0; loopid < hival; loopid++) {
		if (loopid == fcp->isp_iid) {
			fcp->portdb[loopid].valid = 1;
			fcp->portdb[loopid].loopid = fcp->isp_loopid;
			continue;
		}
		fcp->portdb[loopid].valid = 0;
	}

	/*
	 * Now merge our local copy of the port database into our saved copy.
	 * Notify the outer layers of new devices arriving.
	 */
	for (loopid = 0; loopid < hival; loopid++) {
		int i;

		/*
		 * If we don't have a non-zero Port WWN, we're not here.
		 */
		if (fcp->tport[loopid].port_wwn == 0) {
			continue;
		}

		/*
		 * Skip ourselves.
		 */
		if (loopid == fcp->isp_iid) {
			continue;
		}

		/*
		 * For the purposes of deciding whether this is the
		 * 'same' device or not, we only search for an identical
		 * Port WWN. Node WWNs may or may not be the same as
		 * the Port WWN, and there may be multiple different
		 * Port WWNs with the same Node WWN. It would be chaos
		 * to have multiple identical Port WWNs, so we don't
		 * allow that.
		 */
		for (i = 0; i < hival; i++) {
			int j;
			if (fcp->portdb[i].port_wwn == 0)
				continue;
			if (fcp->portdb[i].port_wwn !=
			    fcp->tport[loopid].port_wwn)
				continue;
			/*
			 * We found this WWN elsewhere- it's changed
			 * loopids then. We don't change it's actual
			 * position in our cached port database- we
			 * just change the actual loop ID we'd use.
			 */
			if (fcp->portdb[i].loopid != loopid) {
				isp_prt(isp, ISP_LOGINFO, portshift, i,
				    fcp->portdb[i].loopid,
				    fcp->portdb[i].portid, loopid,
				    fcp->tport[loopid].portid);
			}
			fcp->portdb[i].portid = fcp->tport[loopid].portid;
			fcp->portdb[i].loopid = loopid;
			fcp->portdb[i].valid = 1;
			fcp->portdb[i].roles = fcp->tport[loopid].roles;

			/*
			 * Now make sure this Port WWN doesn't exist elsewhere
			 * in the port database.
			 */
			for (j = i+1; j < hival; j++) {
				if (fcp->portdb[i].port_wwn !=
				    fcp->portdb[j].port_wwn) {
					continue;
				}
				isp_prt(isp, ISP_LOGWARN, portdup, j, i);
				/*
				 * Invalidate the 'old' *and* 'new' ones.
				 * This is really harsh and not quite right,
				 * but if this happens, we really don't know
				 * who is what at this point.
				 */
				fcp->portdb[i].valid = 0;
				fcp->portdb[j].valid = 0;
			}
			break;
		}

		/*
		 * If we didn't traverse the entire port database,
		 * then we found (and remapped) an existing entry.
		 * No need to notify anyone- go for the next one.
		 */
		if (i < hival) {
			continue;
		}

		/*
		 * We've not found this Port WWN anywhere. It's a new entry.
		 * See if we can leave it where it is (with target == loopid).
		 */
		if (fcp->portdb[loopid].port_wwn != 0) {
			for (lim = 0; lim < hival; lim++) {
				if (fcp->portdb[lim].port_wwn == 0)
					break;
			}
			/* "Cannot Happen" */
			if (lim == hival) {
				isp_prt(isp, ISP_LOGWARN, "Remap Overflow");
				continue;
			}
			i = lim;
		} else {
			i = loopid;
		}

		/*
		 * NB: The actual loopid we use here is loopid- we may
		 *     in fact be at a completely different index (target).
		 */
		fcp->portdb[i].loopid = loopid;
		fcp->portdb[i].port_wwn = fcp->tport[loopid].port_wwn;
		fcp->portdb[i].node_wwn = fcp->tport[loopid].node_wwn;
		fcp->portdb[i].roles = fcp->tport[loopid].roles;
		fcp->portdb[i].portid = fcp->tport[loopid].portid;
		fcp->portdb[i].valid = 1;

		/*
		 * Tell the outside world we've arrived.
		 */
		(void) isp_async(isp, ISPASYNC_PROMENADE, &i);
	}

	/*
	 * Now find all previously used targets that are now invalid and
	 * notify the outer layers that they're gone.
	 */
	for (lp = &fcp->portdb[0]; lp < &fcp->portdb[hival]; lp++) {
		if (lp->valid || lp->port_wwn == 0) {
			continue;
		}

		/*
		 * Tell the outside world we've gone
		 * away and erase our pdb entry.
		 *
		 */
		loopid = lp - fcp->portdb;
		(void) isp_async(isp, ISPASYNC_PROMENADE, &loopid);
		MEMZERO((void *) lp, sizeof (*lp));
	}
	fcp->isp_loopstate = LOOP_LSCAN_DONE;
	return (0);
}

/*
 * Walk the fabric name server with repeated Get All Next (GA_NXT)
 * subcommands until the port id we started from comes back around,
 * announcing each discovered device via ISPASYNC_FABRIC_DEV.
 */
static int
isp_scan_fabric(struct ispsoftc *isp)
{
	fcparam *fcp = isp->isp_param;
	u_int32_t portid, first_portid;
	sns_screq_t *reqp;
	sns_scrsp_t *resp;
	mbreg_t mbs;
	int hicap, first_portid_seen;

	if (fcp->isp_onfabric == 0) {
		fcp->isp_loopstate = LOOP_FSCAN_DONE;
		return (0);
	}

	/* request at the base of scratch, response 0x100 bytes in */
	reqp = (sns_screq_t *) fcp->isp_scratch;
	resp = (sns_scrsp_t *) (&((char *)fcp->isp_scratch)[0x100]);
	/*
	 * Since Port IDs are 24 bits, we can check against having seen
	 * anything yet with this value.
	 */
	first_portid = portid = fcp->isp_portid;

	fcp->isp_loopstate = LOOP_SCANNING_FABRIC;

	/* hicap bounds the walk so a looping name server can't hang us */
	for (first_portid_seen = hicap = 0; hicap < 65535; hicap++) {
		MEMZERO((void *) reqp, SNS_GAN_REQ_SIZE);
		reqp->snscb_rblen = SNS_GAN_RESP_SIZE >> 1;
		reqp->snscb_addr[RQRSP_ADDR0015] =
			DMA_LSW(fcp->isp_scdma + 0x100);
		reqp->snscb_addr[RQRSP_ADDR1631] =
			DMA_MSW(fcp->isp_scdma + 0x100);
		reqp->snscb_sblen = 6;
		reqp->snscb_data[0] = SNS_GAN;
		reqp->snscb_data[4] = portid & 0xffff;
		reqp->snscb_data[5] = (portid >> 16) & 0xff;
		ISP_SWIZZLE_SNS_REQ(isp, reqp);
		mbs.param[0] = MBOX_SEND_SNS;
		mbs.param[1] = SNS_GAN_REQ_SIZE >> 1;
		mbs.param[2] = DMA_MSW(fcp->isp_scdma);
		mbs.param[3] = DMA_LSW(fcp->isp_scdma);
		mbs.param[6] = 0;
		mbs.param[7] = 0;
		isp_mboxcmd(isp, &mbs, MBLOGALL);
		if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
			if (fcp->isp_loopstate == LOOP_SCANNING_FABRIC) {
				fcp->isp_loopstate = LOOP_PDB_RCVD;
			}
			return (-1);
		}
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate < LOOP_SCANNING_FABRIC) {
			return (-1);
		}
		ISP_UNSWIZZLE_SNS_RSP(isp, resp, SNS_GAN_RESP_SIZE >> 1);
		portid = (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));
		(void) isp_async(isp, ISPASYNC_FABRIC_DEV, resp);
		if (first_portid == portid) {
			fcp->isp_loopstate = LOOP_FSCAN_DONE;
			return (0);
		}
	}

	isp_prt(isp, ISP_LOGWARN, "broken fabric nameserver...*wheeze*...");

	/*
	 * We either have a broken name server or a huge fabric if we get here.
	 */
	fcp->isp_loopstate = LOOP_FSCAN_DONE;
	return (0);
}

/*
 * Register FCP (SCSI) as our supported FC-4 type with the fabric
 * name server (SNS Register FC-4 Types). Failure is only logged
 * implicitly by isp_mboxcmd; success is noted at debug level.
 */
static void
isp_register_fc4_type(struct ispsoftc *isp)
{
	fcparam *fcp = isp->isp_param;
	sns_screq_t *reqp;
	mbreg_t mbs;

	reqp = (sns_screq_t *) fcp->isp_scratch;
	MEMZERO((void *) reqp, SNS_RFT_REQ_SIZE);
	reqp->snscb_rblen = SNS_RFT_RESP_SIZE >> 1;
	reqp->snscb_addr[RQRSP_ADDR0015] = DMA_LSW(fcp->isp_scdma + 0x100);
	reqp->snscb_addr[RQRSP_ADDR1631] = DMA_MSW(fcp->isp_scdma + 0x100);
	reqp->snscb_sblen = 22;
	reqp->snscb_data[0] = SNS_RFT;
	reqp->snscb_data[4] = fcp->isp_portid & 0xffff;
	reqp->snscb_data[5] = (fcp->isp_portid >> 16) & 0xff;
	reqp->snscb_data[6] = 0x100;	/* SCS - FCP */
#if	0
	reqp->snscb_data[6] |= 20;	/* ISO/IEC 8802-2 LLC/SNAP */
#endif
	ISP_SWIZZLE_SNS_REQ(isp, reqp);
	mbs.param[0] = MBOX_SEND_SNS;
	mbs.param[1] = SNS_RFT_REQ_SIZE >> 1;
	mbs.param[2] = DMA_MSW(fcp->isp_scdma);
	mbs.param[3] = DMA_LSW(fcp->isp_scdma);
	mbs.param[6] = 0;
	mbs.param[7] = 0;
	isp_mboxcmd(isp, &mbs, MBLOGALL);
	if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
		isp_prt(isp, ISP_LOGDEBUG0, "Register FC4 types succeeded");
	}
}

/*
 * Start a command. Locking is assumed done in the caller.
 */

int
isp_start(XS_T *xs)
{
	struct ispsoftc *isp;
	u_int16_t iptr, optr, handle;
	union {
		ispreq_t *_reqp;
		ispreqt2_t *_t2reqp;
	} _u;
#define	reqp	_u._reqp
#define	t2reqp	_u._t2reqp
#define	UZSIZE	max(sizeof (ispreq_t), sizeof (ispreqt2_t))
	int target, i;

	XS_INITERR(xs);
	isp = XS_ISP(xs);

	/*
	 * Check to make sure we're supporting initiator role.
	 */
	if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
		XS_SETERR(xs, HBA_SELTIMEOUT);
		return (CMD_COMPLETE);
	}

	/*
	 * Now make sure we're running.
	 */

	if (isp->isp_state != ISP_RUNSTATE) {
		isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE");
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	/*
	 * Check command CDB length, etc.. We really are limited to 16 bytes
	 * for Fibre Channel, but can do up to 44 bytes in parallel SCSI,
	 * but probably only if we're running fairly new firmware (we'll
	 * let the old f/w choke on an extended command queue entry).
	 */

	if (XS_CDBLEN(xs) > (IS_FC(isp)? 16 : 44) || XS_CDBLEN(xs) == 0) {
		isp_prt(isp, ISP_LOGERR,
		    "unsupported cdb length (%d, CDB[0]=0x%x)",
		    XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff);
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	/*
	 * Check to see whether we have good firmware state still or
	 * need to refresh our port database for this target.
	 */
	target = XS_TGT(xs);
	if (IS_FC(isp)) {
		fcparam *fcp = isp->isp_param;
		struct lportdb *lp;
#ifdef	HANDLE_LOOPSTATE_IN_OUTER_LAYERS
		if (fcp->isp_fwstate != FW_READY ||
		    fcp->isp_loopstate != LOOP_READY) {
			return (CMD_RQLATER);
		}

		/*
		 * If we're not on a Fabric, we can't have a target
		 * above FL_PORT_ID-1.
		 *
		 * If we're on a fabric and *not* connected as an F-port,
		 * we can't have a target less than FC_SNS_ID+1. This
		 * keeps us from having to sort out the difference between
		 * local public loop devices and those which we might get
		 * from a switch's database.
		 */
		if (fcp->isp_onfabric == 0) {
			if (target >= FL_PORT_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
		} else {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
			if (fcp->isp_topo != TOPO_F_PORT &&
			    target < FL_PORT_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
		}
#else
		/*
		 * Check for f/w being in ready state. If the f/w
		 * isn't in ready state, then we don't know our
		 * loop ID and the f/w hasn't completed logging
		 * into all targets on the loop. If this is the
		 * case, then bounce the command. We pretend this is
		 * a SELECTION TIMEOUT error if we've never gone to
		 * FW_READY state at all- in this case we may not
		 * be hooked to a loop at all and we shouldn't hang
		 * the machine for this. Otherwise, defer this command
		 * until later.
		 */
		if (fcp->isp_fwstate != FW_READY) {
			/*
			 * Give ourselves at most a 250ms delay.
			 */
			if (isp_fclink_test(isp, 250000)) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				if (fcp->loop_seen_once) {
					return (CMD_RQLATER);
				} else {
					return (CMD_COMPLETE);
				}
			}
		}

		/*
		 * If we're not on a Fabric, we can't have a target
		 * above FL_PORT_ID-1.
		 *
		 * If we're on a fabric and *not* connected as an F-port,
		 * we can't have a target less than FC_SNS_ID+1. This
		 * keeps us from having to sort out the difference between
		 * local public loop devices and those which we might get
		 * from a switch's database.
		 */
		if (fcp->isp_onfabric == 0) {
			if (target >= FL_PORT_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
		} else {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
			if (fcp->isp_topo != TOPO_F_PORT &&
			    target < FL_PORT_ID) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				return (CMD_COMPLETE);
			}
		}

		/*
		 * If our loop state is such that we haven't yet received
		 * a "Port Database Changed" notification (after a LIP or
		 * a Loop Reset or firmware initialization), then defer
		 * sending commands for a little while, but only if we've
		 * seen a valid loop at one point (otherwise we can get
		 * stuck at initialization time).
		 */
		if (fcp->isp_loopstate < LOOP_PDB_RCVD) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			if (fcp->loop_seen_once) {
				return (CMD_RQLATER);
			} else {
				return (CMD_COMPLETE);
			}
		}

		/*
		 * If we're in the middle of loop or fabric scanning
		 * or merging the port databases, retry this command later.
		 */
		if (fcp->isp_loopstate == LOOP_SCANNING_FABRIC ||
		    fcp->isp_loopstate == LOOP_SCANNING_LOOP ||
		    fcp->isp_loopstate == LOOP_SYNCING_PDB) {
			return (CMD_RQLATER);
		}

		/*
		 * If our loop state is now such that we've just now
		 * received a Port Database Change notification, then
		 * we have to go off and (re)scan the fabric. We back
		 * out and try again later if this doesn't work.
		 */
		if (fcp->isp_loopstate == LOOP_PDB_RCVD && fcp->isp_onfabric) {
			if (isp_scan_fabric(isp)) {
				return (CMD_RQLATER);
			}
			if (fcp->isp_fwstate != FW_READY ||
			    fcp->isp_loopstate < LOOP_PDB_RCVD) {
				return (CMD_RQLATER);
			}
		}

		/*
		 * If our loop state is now such that we've just now
		 * received a Port Database Change notification, then
		 * we have to go off and (re)synchronize our port
		 * database.
		 */
		if (fcp->isp_loopstate < LOOP_READY) {
			if (isp_pdb_sync(isp)) {
				return (CMD_RQLATER);
			}
			if (fcp->isp_fwstate != FW_READY ||
			    fcp->isp_loopstate != LOOP_READY) {
				return (CMD_RQLATER);
			}
		}

		/*
		 * XXX: Here's were we would cancel any loop_dead flag
		 * XXX: also cancel in dead_loop timeout that's running
		 */
#endif

		/*
		 * Now check whether we should even think about pursuing this.
		 */
		lp = &fcp->portdb[target];
		if (lp->valid == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			return (CMD_COMPLETE);
		}
		if ((lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT)) == 0) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "Target %d does not have target service", target);
			XS_SETERR(xs, HBA_SELTIMEOUT);
			return (CMD_COMPLETE);
		}
		/*
		 * Now turn target into what the actual Loop ID is.
		 */
		target = lp->loopid;
	}

	/*
	 * Next check to see if any HBA or Device
	 * parameters need to be updated.
	 */
	if (isp->isp_update != 0) {
		isp_update(isp);
	}

	if (isp_getrqentry(isp, &iptr, &optr, (void **) &reqp)) {
		isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow");
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_EAGAIN);
	}

	/*
	 * Now see if we need to synchronize the ISP with respect to anything.
	 * We do dual duty here (cough) for synchronizing for busses other
	 * than which we got here to send a command to.
	 */
	if (isp->isp_sendmarker) {
		u_int8_t n = (IS_DUALBUS(isp)? 2: 1);
		/*
		 * Check ports to send markers for...
		 */
		for (i = 0; i < n; i++) {
			if ((isp->isp_sendmarker & (1 << i)) == 0) {
				continue;
			}
			MEMZERO((void *) reqp, sizeof (*reqp));
			reqp->req_header.rqs_entry_count = 1;
			reqp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			reqp->req_modifier = SYNC_ALL;
			reqp->req_target = i << 7;	/* insert bus number */
			ISP_SWIZZLE_REQUEST(isp, reqp);
			ISP_ADD_REQUEST(isp, iptr);

			if (isp_getrqentry(isp, &iptr, &optr, (void **)&reqp)) {
				isp_prt(isp, ISP_LOGDEBUG0,
				    "Request Queue Overflow+");
				XS_SETERR(xs, HBA_BOTCH);
				return (CMD_EAGAIN);
			}
		}
	}

	MEMZERO((void *) reqp, UZSIZE);
	reqp->req_header.rqs_entry_count = 1;
	if (IS_FC(isp)) {
		reqp->req_header.rqs_entry_type = RQSTYPE_T2RQS;
	} else {
		if (XS_CDBLEN(xs) > 12)
			reqp->req_header.rqs_entry_type = RQSTYPE_CMDONLY;
		else
			reqp->req_header.rqs_entry_type = RQSTYPE_REQUEST;
	}
	reqp->req_header.rqs_flags = 0;
	reqp->req_header.rqs_seqno = 0;
	if (IS_FC(isp)) {
		/*
		 * See comment in isp_intr
		 */
		XS_RESID(xs) = 0;

		/*
		 * Fibre Channel always requires some kind of tag.
		 * The Qlogic drivers seem be happy not to use a tag,
		 * but this breaks for some devices (IBM drives).
		 */
		if (XS_TAG_P(xs)) {
			t2reqp->req_flags = XS_TAG_TYPE(xs);
		} else {
			/*
			 * If we don't know what tag to use, use HEAD OF QUEUE
			 * for Request Sense or Ordered (for safety's sake).
			 */
			if (XS_CDBP(xs)[0] == 0x3)	/* REQUEST SENSE */
				t2reqp->req_flags = REQFLAG_HTAG;
			else
				t2reqp->req_flags = REQFLAG_OTAG;
		}
	} else {
		sdparam *sdp = (sdparam *)isp->isp_param;
		if ((sdp->isp_devparam[target].cur_dflags & DPARM_TQING) &&
		    XS_TAG_P(xs)) {
			reqp->req_flags = XS_TAG_TYPE(xs);
		}
	}
	reqp->req_target = target | (XS_CHANNEL(xs) << 7);
	if (IS_SCSI(isp)) {
		reqp->req_lun_trn = XS_LUN(xs);
		reqp->req_cdblen = XS_CDBLEN(xs);
	} else {
		if (isp->isp_maxluns > 16)
			t2reqp->req_scclun = XS_LUN(xs);
		else
			t2reqp->req_lun_trn = XS_LUN(xs);
	}
	MEMCPY(reqp->req_cdb, XS_CDBP(xs), XS_CDBLEN(xs));

	/* f/w timeout is in seconds; XS_TIME is in milliseconds */
	reqp->req_time = XS_TIME(xs) / 1000;
	if (reqp->req_time == 0 && XS_TIME(xs))
		reqp->req_time = 1;

	/*
	 * Always give a bit more leeway to commands after a bus reset.
	 * XXX: DOES NOT DISTINGUISH WHICH PORT MAY HAVE BEEN SYNCED
	 */
	if (isp->isp_sendmarker && reqp->req_time < 5) {
		reqp->req_time = 5;
	}
	if (isp_save_xs(isp, xs, &handle)) {
		isp_prt(isp, ISP_LOGDEBUG1, "out of xflist pointers");
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_EAGAIN);
	}
	reqp->req_handle = handle;
	/*
	 * Set up DMA and/or do any bus swizzling of the request entry
	 * so that the Qlogic F/W understands what is being asked of it.
	 */
	i = ISP_DMASETUP(isp, xs, reqp, &iptr, optr);
	if (i != CMD_QUEUED) {
		isp_destroy_handle(isp, handle);
		/*
		 * dmasetup sets actual error in packet, and
		 * return what we were given to return.
		 */
		return (i);
	}
	XS_SETERR(xs, HBA_NOERROR);
	isp_prt(isp, ISP_LOGDEBUG2,
	    "START cmd for %d.%d.%d cmd 0x%x datalen %ld",
	    XS_CHANNEL(xs), target, XS_LUN(xs), XS_CDBP(xs)[0],
	    (long) XS_XFRLEN(xs));
	ISP_ADD_REQUEST(isp, iptr);
	isp->isp_nactive++;
	if (isp->isp_sendmarker)
		isp->isp_sendmarker = 0;
	return (CMD_QUEUED);
#undef	reqp
#undef	t2reqp
}

/*
 * isp control
 * Locks (ints blocked) assumed held.
*/
int
isp_control(struct ispsoftc *isp, ispctl_t ctl, void *arg)
{
	XS_T *xs;
	mbreg_t mbs;
	int bus, tgt;
	u_int16_t handle;

	/*
	 * Dispatch on the control opcode. Cases that complete successfully
	 * return 0 directly; cases that fall out of the switch (mailbox
	 * command failure, unknown opcode, feature not applicable to this
	 * chip type) return -1 at the bottom.
	 */
	switch (ctl) {
	default:
		isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl);
		break;

	case ISPCTL_RESET_BUS:
		/*
		 * Issue a bus reset.
		 */
		mbs.param[0] = MBOX_BUS_RESET;
		mbs.param[2] = 0;
		if (IS_SCSI(isp)) {
			/*
			 * Use the configured bus reset delay, clamped to a
			 * minimum of 2 (units per the firmware's BUS RESET
			 * mailbox command).
			 */
			mbs.param[1] =
			    ((sdparam *) isp->isp_param)->isp_bus_reset_delay;
			if (mbs.param[1] < 2)
				mbs.param[1] = 2;
			bus = *((int *) arg);
			if (IS_DUALBUS(isp))
				mbs.param[2] = bus;
		} else {
			mbs.param[1] = 10;
			bus = 0;
		}
		/*
		 * A marker must be sent after a bus reset so outstanding
		 * command state is resynchronized with the firmware.
		 */
		isp->isp_sendmarker |= (1 << bus);
		isp_mboxcmd(isp, &mbs, MBLOGALL);
		if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "driver initiated bus reset of bus %d", bus);
		return (0);

	case ISPCTL_RESET_DEV:
		/* arg encodes bus in the high 16 bits, target in the low. */
		tgt = (*((int *) arg)) & 0xffff;
		bus = (*((int *) arg)) >> 16;
		mbs.param[0] = MBOX_ABORT_TARGET;
		mbs.param[1] = (tgt << 8) | (bus << 15);
		mbs.param[2] = 3;	/* 'delay', in seconds */
		isp_mboxcmd(isp, &mbs, MBLOGALL);
		if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
			break;
		}
		isp_prt(isp, ISP_LOGINFO,
		    "Target %d on Bus %d Reset Succeeded", tgt, bus);
		isp->isp_sendmarker |= (1 << bus);
		return (0);

	case ISPCTL_ABORT_CMD:
		/* arg is the transaction (XS_T) to abort. */
		xs = (XS_T *) arg;
		tgt = XS_TGT(xs);
		handle = isp_find_handle(isp, xs);
		if (handle == 0) {
			isp_prt(isp, ISP_LOGWARN,
			    "cannot find handle for command to abort");
			break;
		}
		bus = XS_CHANNEL(xs);
		mbs.param[0] = MBOX_ABORT;
		if (IS_FC(isp)) {
			if (isp->isp_maxluns > 16) {
				/* Expanded (SCC) LUN addressing form. */
				mbs.param[1] = tgt << 8;
				mbs.param[4] = 0;
				mbs.param[5] = 0;
				mbs.param[6] = XS_LUN(xs);
			} else {
				mbs.param[1] = tgt << 8 | XS_LUN(xs);
			}
		} else {
			mbs.param[1] =
			    (bus << 15) | (XS_TGT(xs) << 8) | XS_LUN(xs);
		}
		mbs.param[3] = 0;
		mbs.param[2] = handle;
		/* MBOX_COMMAND_ERROR is expected if it already completed. */
		isp_mboxcmd(isp, &mbs, MBLOGALL & ~MBOX_COMMAND_ERROR);
		if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
			return (0);
		}
		/*
		 * XXX: Look for command in the REQUEST QUEUE. That is,
		 * XXX: It hasen't been picked up by firmware yet.
		 */
		break;

	case ISPCTL_UPDATE_PARAMS:
		isp_update(isp);
		return (0);

	case ISPCTL_FCLINK_TEST:
		if (IS_FC(isp)) {
			/* Default link-test settle delay is 250ms. */
			int usdelay = (arg)? *((int *) arg) : 250000;
			return (isp_fclink_test(isp, usdelay));
		}
		break;

	case ISPCTL_SCAN_FABRIC:
		if (IS_FC(isp)) {
			return (isp_scan_fabric(isp));
		}
		break;

	case ISPCTL_SCAN_LOOP:
		if (IS_FC(isp)) {
			return (isp_scan_loop(isp));
		}
		break;

	case ISPCTL_PDB_SYNC:
		if (IS_FC(isp)) {
			return (isp_pdb_sync(isp));
		}
		break;

	case ISPCTL_SEND_LIP:
		if (IS_FC(isp)) {
			mbs.param[0] = MBOX_INIT_LIP;
			isp_mboxcmd(isp, &mbs, MBLOGALL);
			if (mbs.param[0] == MBOX_COMMAND_COMPLETE) {
				return (0);
			}
		}
		break;

	case ISPCTL_GET_POSMAP:
		if (IS_FC(isp) && arg) {
			return (isp_getmap(isp, arg));
		}
		break;

	case ISPCTL_RUN_MBOXCMD:
		/* arg is a caller-built mbreg_t; run it verbatim. */
		isp_mboxcmd(isp, arg, MBLOGALL);
		return(0);

#ifdef	ISP_TARGET_MODE
	case ISPCTL_TOGGLE_TMODE:
	{
		/*
		 * We don't check/set against role here- that's the
		 * responsibility for the outer layer to coordinate.
		 */
		if (IS_SCSI(isp)) {
			int param = *(int *)arg;
			mbs.param[0] = MBOX_ENABLE_TARGET_MODE;
			mbs.param[1] = param & 0xffff;
			mbs.param[2] = param >> 16;
			isp_mboxcmd(isp, &mbs, MBLOGALL);
			if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
				break;
			}
		}
		return (0);
	}
#endif
	}
	/* Fell through: failure or unsupported operation. */
	return (-1);
}

/*
 * Interrupt Service Routine(s).
 *
 * External (OS) framework has done the appropriate locking,
 * and the locking will be held throughout this function.
 */

/*
 * Limit our stack depth by sticking with the max likely number
 * of completions on a request queue at any one time.
*/
#define	MAX_REQUESTQ_COMPLETIONS	32

int
isp_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	/* Completions are gathered here and dispatched after queue scan. */
	XS_T *complist[MAX_REQUESTQ_COMPLETIONS], *xs;
	u_int16_t iptr, optr, isr, sema, junk;
	int i, nlooked = 0, ndone = 0;

	/*
	 * On the 2100, registers can read unsteadily; read twice until two
	 * consecutive reads agree (bounded at 1000 tries) before trusting
	 * the ISR and semaphore values.
	 */
	if (IS_2100(isp)) {
		i = 0;
		do {
			isr = ISP_READ(isp, BIU_ISR);
			junk = ISP_READ(isp, BIU_ISR);
		} while (isr != junk && ++i < 1000);
		if (isr != junk) {
			isp_prt(isp, ISP_LOGWARN,
			    "isr unsteady (%x, %x)", isr, junk);
		}
		i = 0;
		do {
			sema = ISP_READ(isp, BIU_SEMA);
			junk = ISP_READ(isp, BIU_SEMA);
		} while (sema != junk && ++i < 1000);
		if (sema != junk) {
			isp_prt(isp, ISP_LOGWARN,
			    "sema unsteady (%x, %x)", sema, junk);
		}
	} else {
		isr = ISP_READ(isp, BIU_ISR);
		sema = ISP_READ(isp, BIU_SEMA);
	}
	isp_prt(isp, ISP_LOGDEBUG3, "isp_intr isr %x sem %x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	isp->isp_intcnt++;
	if (isr == 0 && sema == 0) {
		/* Nothing pending: not our interrupt. */
		isp->isp_intbogus++;
		return (0);
	}

	if (sema) {
		/* Semaphore set: a mailbox event (completion or async). */
		u_int16_t mbox;

		if (IS_2100(isp)) {
			i = 0;
			do {
				mbox = ISP_READ(isp, OUTMAILBOX0);
				junk = ISP_READ(isp, OUTMAILBOX0);;
			} while (junk != mbox && ++i < 1000);
			if (mbox != junk) {
				isp_prt(isp, ISP_LOGWARN,
				    "mailbox0 unsteady (%x, %x)", mbox, junk);
				ISP_WRITE(isp, BIU_SEMA, 0);
				ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
				return (1);
			}
		} else {
			mbox = ISP_READ(isp, OUTMAILBOX0);
		}
		if (mbox & 0x4000) {
			/*
			 * Mailbox command completion (0x4000 bit set).
			 * NOTE(review): this inner 'i' shadows the outer
			 * loop counter — apparently intentional, confirm.
			 */
			int obits, i = 0;
			if ((obits = isp->isp_mboxbsy) != 0) {
				/* Harvest the out-mailboxes a waiter asked
				 * for (one bit per mailbox in obits). */
				isp->isp_mboxtmp[i++] = mbox;
				for (i = 1; i < MAX_MAILBOX; i++) {
					if ((obits & (1 << i)) == 0) {
						continue;
					}
					isp->isp_mboxtmp[i] =
					    ISP_READ(isp, MBOX_OFF(i));
				}
				MBOX_NOTIFY_COMPLETE(isp);
			} else {
				isp_prt(isp, ISP_LOGWARN,
				    "Mbox Command Async (0x%x) with no waiters",
				    mbox);
			}
		} else {
			/* Async event; may yield a fast-post handle. */
			int fhandle = isp_parse_async(isp, (int) mbox);
			isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox);
			if (fhandle > 0) {
				isp_fastpost_complete(isp, (u_int16_t) fhandle);
			}
		}
		if (IS_FC(isp) || isp->isp_state != ISP_RUNSTATE) {
			ISP_WRITE(isp, BIU_SEMA, 0);
			ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
			return (1);
		}
	}

	/*
	 * We can't be getting this now.
	 */
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_prt(isp, ISP_LOGWARN,
		    "interrupt (isr=%x, sema=%x) when not ready", isr, sema);
		ISP_WRITE(isp, INMAILBOX5, ISP_READ(isp, OUTMAILBOX5));
		ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
		ISP_WRITE(isp, BIU_SEMA, 0);
		return (1);
	}

	/*
	 * You *must* read OUTMAILBOX5 prior to clearing the RISC interrupt.
	 */
	optr = isp->isp_residx;

	if (IS_2100(isp)) {
		i = 0;
		do {
			iptr = ISP_READ(isp, OUTMAILBOX5);
			junk = ISP_READ(isp, OUTMAILBOX5);
		} while (junk != iptr && ++i < 1000);
		if (iptr != junk) {
			ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
			isp_prt(isp, ISP_LOGWARN,
			    "mailbox5 unsteady (%x, %x)", iptr, junk);
			return (1);
		}
	} else {
		iptr = ISP_READ(isp, OUTMAILBOX5);
	}

	if (sema) {
		ISP_WRITE(isp, BIU_SEMA, 0);
	}
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);

	if (optr == iptr && sema == 0) {
		/*
		 * There are a lot of these- reasons unknown- mostly on
		 * faster Alpha machines.
		 *
		 * I tried delaying after writing HCCR_CMD_CLEAR_RISC_INT to
		 * make sure the old interrupt went away (to avoid 'ringing'
		 * effects), but that didn't stop this from occurring.
		 */
		junk = ISP_READ(isp, BIU_ISR);
		isp_prt(isp, ISP_LOGDEBUG2,
		    "bogus intr- isr %x (%x) iptr %x optr %x",
		    isr, junk, iptr, optr);
		isp->isp_intbogus++;
	}

	/* Walk the response queue from our last-seen index to the chip's. */
	while (optr != iptr) {
		ispstatusreq_t *sp;
		u_int16_t oop;
		int buddaboom = 0;	/* set when entry is damaged */

		sp = (ispstatusreq_t *) ISP_QUEUE_ENTRY(isp->isp_result, optr);
		oop = optr;
		optr = ISP_NXT_QENTRY(optr, RESULT_QUEUE_LEN(isp));
		nlooked++;
		/*
		 * Do any appropriate unswizzling of what the Qlogic f/w has
		 * written into memory so it makes sense to us. This is a
		 * per-platform thing. Also includes any memory barriers.
		 */
		ISP_UNSWIZZLE_RESPONSE(isp, sp, oop);
		if (sp->req_header.rqs_entry_type != RQSTYPE_RESPONSE) {
			if (isp_handle_other_response(isp, sp, &optr) == 0) {
				MEMZERO(sp, sizeof (isphdr_t));
				continue;
			}
			/*
			 * It really has to be a bounced request just copied
			 * from the request queue to the response queue. If
			 * not, something bad has happened.
			 */
			if (sp->req_header.rqs_entry_type != RQSTYPE_REQUEST) {
				isp_prt(isp, ISP_LOGERR, notresp,
				    sp->req_header.rqs_entry_type, oop, optr,
				    nlooked);
				if (isp->isp_dblev & ISP_LOGDEBUG0) {
					isp_print_bytes(isp, "Queue Entry",
					    QENTRY_LEN, sp);
				}
				MEMZERO(sp, sizeof (isphdr_t));
				continue;
			}
			buddaboom = 1;
		}

		if (sp->req_header.rqs_flags & 0xf) {
#define	_RQS_OFLAGS	\
	~(RQSFLAG_CONTINUATION|RQSFLAG_FULL|RQSFLAG_BADHEADER|RQSFLAG_BADPACKET)
			if (sp->req_header.rqs_flags & RQSFLAG_CONTINUATION) {
				isp_prt(isp, ISP_LOGWARN,
				    "continuation segment");
				ISP_WRITE(isp, INMAILBOX5, optr);
				continue;
			}
			if (sp->req_header.rqs_flags & RQSFLAG_FULL) {
				isp_prt(isp, ISP_LOGDEBUG1,
				    "internal queues full");
				/*
				 * We'll synthesize a QUEUE FULL message below.
				 */
			}
			if (sp->req_header.rqs_flags & RQSFLAG_BADHEADER) {
				isp_prt(isp, ISP_LOGERR, "bad header flag");
				buddaboom++;
			}
			if (sp->req_header.rqs_flags & RQSFLAG_BADPACKET) {
				isp_prt(isp, ISP_LOGERR, "bad request packet");
				buddaboom++;
			}
			if (sp->req_header.rqs_flags & _RQS_OFLAGS) {
				isp_prt(isp, ISP_LOGERR,
				    "unknown flags (0x%x) in response",
				    sp->req_header.rqs_flags);
				buddaboom++;
			}
#undef	_RQS_OFLAGS
		}
		if (sp->req_handle > isp->isp_maxcmds || sp->req_handle < 1) {
			MEMZERO(sp, sizeof (isphdr_t));
			isp_prt(isp, ISP_LOGERR,
			    "bad request handle %d (type 0x%x, flags 0x%x)",
			    sp->req_handle, sp->req_header.rqs_entry_type,
			    sp->req_header.rqs_flags);
			ISP_WRITE(isp, INMAILBOX5, optr);
			continue;
		}
		xs = isp_find_xs(isp, sp->req_handle);
		if (xs == NULL) {
			MEMZERO(sp, sizeof (isphdr_t));
			isp_prt(isp, ISP_LOGERR,
			    "cannot find handle 0x%x in xflist",
			    sp->req_handle);
			ISP_WRITE(isp, INMAILBOX5, optr);
			continue;
		}
		isp_destroy_handle(isp, sp->req_handle);
		if (sp->req_status_flags & RQSTF_BUS_RESET) {
			isp->isp_sendmarker |= (1 << XS_CHANNEL(xs));
		}
		if (buddaboom) {
			XS_SETERR(xs, HBA_BOTCH);
		}

		if (IS_FC(isp) && (sp->req_scsi_status & RQCS_SV)) {
			/*
			 * Fibre Channel F/W doesn't say we got status
			 * if there's Sense Data instead. I guess they
			 * think it goes w/o saying.
			 */
			sp->req_state_flags |= RQSF_GOT_STATUS;
		}
		if (sp->req_state_flags & RQSF_GOT_STATUS) {
			*XS_STSP(xs) = sp->req_scsi_status & 0xff;
		}

		switch (sp->req_header.rqs_entry_type) {
		case RQSTYPE_RESPONSE:
			XS_SET_STATE_STAT(isp, xs, sp);
			isp_parse_status(isp, sp, xs);
			if ((XS_NOERR(xs) || XS_ERR(xs) == HBA_NOERROR) &&
			    (*XS_STSP(xs) == SCSI_BUSY)) {
				XS_SETERR(xs, HBA_TGTBSY);
			}
			if (IS_SCSI(isp)) {
				XS_RESID(xs) = sp->req_resid;
				if ((sp->req_state_flags & RQSF_GOT_STATUS) &&
				    (*XS_STSP(xs) == SCSI_CHECK) &&
				    (sp->req_state_flags & RQSF_GOT_SENSE)) {
					XS_SAVE_SENSE(xs, sp);
				}
				/*
				 * A new synchronous rate was negotiated for
				 * this target. Mark state such that we'll go
				 * look up that which has changed later.
				 */
				if (sp->req_status_flags & RQSTF_NEGOTIATION) {
					int t = XS_TGT(xs);
					sdparam *sdp = isp->isp_param;
					sdp += XS_CHANNEL(xs);
					sdp->isp_devparam[t].dev_refresh = 1;
					isp->isp_update |=
					    (1 << XS_CHANNEL(xs));
				}
			} else {
				if (sp->req_status_flags & RQSF_XFER_COMPLETE) {
					XS_RESID(xs) = 0;
				} else if (sp->req_scsi_status & RQCS_RESID) {
					XS_RESID(xs) = sp->req_resid;
				} else {
					XS_RESID(xs) = 0;
				}
				if ((sp->req_state_flags & RQSF_GOT_STATUS) &&
				    (*XS_STSP(xs) == SCSI_CHECK) &&
				    (sp->req_scsi_status & RQCS_SV)) {
					XS_SAVE_SENSE(xs, sp);
					/* solely for the benefit of debug */
					sp->req_state_flags |= RQSF_GOT_SENSE;
				}
			}
			isp_prt(isp, ISP_LOGDEBUG2,
			    "asked for %ld got resid %ld",
			    (long) XS_XFRLEN(xs), (long) sp->req_resid);
			break;
		case RQSTYPE_REQUEST:
			if (sp->req_header.rqs_flags & RQSFLAG_FULL) {
				/*
				 * Force Queue Full status.
				 */
				*XS_STSP(xs) = SCSI_QFULL;
				XS_SETERR(xs, HBA_NOERROR);
			} else if (XS_NOERR(xs)) {
				XS_SETERR(xs, HBA_BOTCH);
			}
			XS_RESID(xs) = XS_XFRLEN(xs);
			break;
		default:
			isp_prt(isp, ISP_LOGWARN,
			    "unhandled response queue type 0x%x",
			    sp->req_header.rqs_entry_type);
			if (XS_NOERR(xs)) {
				XS_SETERR(xs, HBA_BOTCH);
			}
			break;
		}

		/*
		 * Free any dma resources. As a side effect, this may
		 * also do any cache flushing necessary for data coherence.
		 */
		if (XS_XFRLEN(xs)) {
			ISP_DMAFREE(isp, xs, sp->req_handle);
		}

		if (((isp->isp_dblev & (ISP_LOGDEBUG2|ISP_LOGDEBUG3))) ||
		    ((isp->isp_dblev & ISP_LOGDEBUG1) &&
		    ((!XS_NOERR(xs)) || (*XS_STSP(xs) != SCSI_GOOD)))) {
			char skey;
			if (sp->req_state_flags & RQSF_GOT_SENSE) {
				/* Render the sense key as a hex digit. */
				skey = XS_SNSKEY(xs) & 0xf;
				if (skey < 10)
					skey += '0';
				else
					skey += 'a' - 10;
			} else if (*XS_STSP(xs) == SCSI_CHECK) {
				skey = '?';
			} else {
				skey = '.';
			}
			isp_prt(isp, ISP_LOGALL, finmsg, XS_CHANNEL(xs),
			    XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), XS_RESID(xs),
			    *XS_STSP(xs), skey, XS_ERR(xs));
		}

		if (isp->isp_nactive > 0)
			isp->isp_nactive--;
		complist[ndone++] = xs;	/* defer completion call until later */
		MEMZERO(sp, sizeof (isphdr_t));
		if (ndone == MAX_REQUESTQ_COMPLETIONS) {
			break;
		}
	}

	/*
	 * If we looked at any commands, then it's valid to find out
	 * what the outpointer is. It also is a trigger to update the
	 * ISP's notion of what we've seen so far.
	 */
	if (nlooked) {
		ISP_WRITE(isp, INMAILBOX5, optr);
		isp->isp_reqodx = ISP_READ(isp, OUTMAILBOX4);
	}

	isp->isp_residx = optr;
	/* Now run the deferred completions outside the queue scan. */
	for (i = 0; i < ndone; i++) {
		xs = complist[i];
		if (xs) {
			isp_done(xs);
		}
	}
	return (1);
}

/*
 * Support routines.
*/
/*
 * Parse an asynchronous mailbox event. Returns a fast-post handle (> 0)
 * for ASYNC_CMD_CMPLT, -1 when the chip had to be reinitialized (no point
 * continuing), and 0 otherwise.
 */
static int
isp_parse_async(struct ispsoftc *isp, int mbox)
{
	int bus;
	u_int16_t fast_post_handle = 0;

	/* Dual-bus cards report the originating bus in OUTMAILBOX6. */
	if (IS_DUALBUS(isp)) {
		bus = ISP_READ(isp, OUTMAILBOX6);
	} else {
		bus = 0;
	}

	switch (mbox) {
	case ASYNC_BUS_RESET:
		isp->isp_sendmarker |= (1 << bus);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		isp_async(isp, ISPASYNC_BUS_RESET, &bus);
		break;
	case ASYNC_SYSTEM_ERROR:
		/* OUTMAILBOX1 holds the faulting RISC address. */
		mbox = ISP_READ(isp, OUTMAILBOX1);
		isp_prt(isp, ISP_LOGERR,
		    "Internal Firmware Error @ RISC Addr 0x%x", mbox);
		ISP_DUMPREGS(isp, "Firmware Error");
		isp_reinit(isp);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, ASYNC_SYSTEM_ERROR);
#endif
		/* no point continuing after this */
		return (-1);

	case ASYNC_RQS_XFER_ERR:
		isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error");
		break;

	case ASYNC_RSP_XFER_ERR:
		isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error");
		break;

	case ASYNC_QWAKEUP:
		/*
		 * We've just been notified that the Queue has woken up.
		 * We don't need to be chatty about this- just unlatch things
		 * and move on.
		 */
		mbox = ISP_READ(isp, OUTMAILBOX4);
		break;

	case ASYNC_TIMEOUT_RESET:
		isp_prt(isp, ISP_LOGWARN,
		    "timeout initiated SCSI bus reset of bus %d", bus);
		isp->isp_sendmarker |= (1 << bus);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_DEVICE_RESET:
		isp_prt(isp, ISP_LOGINFO, "device reset on bus %d", bus);
		isp->isp_sendmarker |= (1 << bus);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_EXTMSG_UNDERRUN:
		isp_prt(isp, ISP_LOGWARN, "extended message underrun");
		break;

	case ASYNC_SCAM_INT:
		isp_prt(isp, ISP_LOGINFO, "SCAM interrupt");
		break;

	case ASYNC_HUNG_SCSI:
		isp_prt(isp, ISP_LOGERR,
		    "stalled SCSI Bus after DATA Overrun");
		/* XXX: Need to issue SCSI reset at this point */
		break;

	case ASYNC_KILLED_BUS:
		isp_prt(isp, ISP_LOGERR, "SCSI Bus reset after DATA Overrun");
		break;

	case ASYNC_BUS_TRANSIT:
		/* Bus mode transition; mode bits live in OUTMAILBOX2. */
		mbox = ISP_READ(isp, OUTMAILBOX2);
		switch (mbox & 0x1c00) {
		case SXP_PINS_LVD_MODE:
			isp_prt(isp, ISP_LOGINFO, "Transition to LVD mode");
			SDPARAM(isp)->isp_diffmode = 0;
			SDPARAM(isp)->isp_ultramode = 0;
			SDPARAM(isp)->isp_lvdmode = 1;
			break;
		case SXP_PINS_HVD_MODE:
			isp_prt(isp, ISP_LOGINFO,
			    "Transition to Differential mode");
			SDPARAM(isp)->isp_diffmode = 1;
			SDPARAM(isp)->isp_ultramode = 0;
			SDPARAM(isp)->isp_lvdmode = 0;
			break;
		case SXP_PINS_SE_MODE:
			isp_prt(isp, ISP_LOGINFO,
			    "Transition to Single Ended mode");
			SDPARAM(isp)->isp_diffmode = 0;
			SDPARAM(isp)->isp_ultramode = 1;
			SDPARAM(isp)->isp_lvdmode = 0;
			break;
		default:
			isp_prt(isp, ISP_LOGWARN,
			    "Transition to Unknown Mode 0x%x", mbox);
			break;
		}
		/*
		 * XXX: Set up to renegotiate again!
		 */
		/* Can only be for a 1080... */
		isp->isp_sendmarker |= (1 << bus);
		break;

	case ASYNC_CMD_CMPLT:
		fast_post_handle = ISP_READ(isp, OUTMAILBOX1);
		isp_prt(isp, ISP_LOGDEBUG3, "fast post completion of %u",
		    fast_post_handle);
		break;

	case ASYNC_CTIO_DONE:
#ifdef	ISP_TARGET_MODE
		/*
		 * Bus gets overloaded with the handle. Dual bus
		 * cards don't put bus# into the handle.
		 */
		bus = (ISP_READ(isp, OUTMAILBOX2) << 16) |
		    ISP_READ(isp, OUTMAILBOX1);
		isp_target_async(isp, bus, mbox);
#else
		isp_prt(isp, ISP_LOGINFO, "Fast Posting CTIO done");
#endif
		break;

	case ASYNC_LIP_OCCURRED:
		FCPARAM(isp)->isp_lipseq =
		    ISP_READ(isp, OUTMAILBOX1);
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD;
		isp->isp_sendmarker = 1;
		isp_mark_getpdb_all(isp);
		/* Notify the outer layer instead of just logging. */
		isp_async(isp, ISPASYNC_LIP, NULL);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_LOOP_UP:
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD;
		isp_mark_getpdb_all(isp);
		isp_async(isp, ISPASYNC_LOOP_UP, NULL);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_LOOP_DOWN:
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_NIL;
		isp_mark_getpdb_all(isp);
		isp_async(isp, ISPASYNC_LOOP_DOWN, NULL);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_LOOP_RESET:
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_NIL;
		isp_mark_getpdb_all(isp);
		/* Notify the outer layer instead of just logging. */
		isp_async(isp, ISPASYNC_LOOP_RESET, NULL);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		break;

	case ASYNC_PDB_CHANGED:
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_loopstate = LOOP_PDB_RCVD;
		isp_mark_getpdb_all(isp);
		isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_PDB);
		break;

	case ASYNC_CHANGE_NOTIFY:
		/*
		 * Not correct, but it will force us to rescan the loop.
		 */
		FCPARAM(isp)->isp_loopstate = LOOP_PDB_RCVD;
		isp_mark_getpdb_all(isp);
		isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_SNS);
		break;

	case ASYNC_PTPMODE:
		if (FCPARAM(isp)->isp_onfabric)
			FCPARAM(isp)->isp_topo = TOPO_F_PORT;
		else
			FCPARAM(isp)->isp_topo = TOPO_N_PORT;
		isp_mark_getpdb_all(isp);
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD;
		isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_OTHER);
#ifdef	ISP_TARGET_MODE
		isp_target_async(isp, bus, mbox);
#endif
		isp_prt(isp, ISP_LOGINFO, "Point-to-Point mode");
		break;

	case ASYNC_CONNMODE:
		mbox = ISP_READ(isp, OUTMAILBOX1);
		isp_mark_getpdb_all(isp);
		switch (mbox) {
		case ISP_CONN_LOOP:
			isp_prt(isp, ISP_LOGINFO,
			    "Point-to-Point -> Loop mode");
			break;
		case ISP_CONN_PTP:
			isp_prt(isp, ISP_LOGINFO,
			    "Loop -> Point-to-Point mode");
			break;
		case ISP_CONN_BADLIP:
			isp_prt(isp, ISP_LOGWARN,
			    "Point-to-Point -> Loop mode (BAD LIP)");
			break;
		case ISP_CONN_FATAL:
			isp_prt(isp, ISP_LOGERR, "FATAL CONNECTION ERROR");
			isp_reinit(isp);
#ifdef	ISP_TARGET_MODE
			isp_target_async(isp, bus, ASYNC_SYSTEM_ERROR);
#endif
			/* no point continuing after this */
			return (-1);
		case ISP_CONN_LOOPBACK:
			isp_prt(isp, ISP_LOGWARN,
			    "Looped Back in Point-to-Point mode");
			break;
		default:
			isp_prt(isp, ISP_LOGWARN,
			    "Unknown connection mode (0x%x)", mbox);
			break;
		}
		isp_async(isp, ISPASYNC_CHANGE_NOTIFY, ISPASYNC_CHANGE_OTHER);
		isp->isp_sendmarker = 1;
		FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
		FCPARAM(isp)->isp_loopstate = LOOP_LIP_RCVD;
		break;

	default:
		isp_prt(isp, ISP_LOGWARN, "Unknown Async Code 0x%x", mbox);
		break;
	}
	return (fast_post_handle);
}

/*
 * Handle other response entries. A pointer to the request queue output
 * index is here in case we want to eat several entries at once, although
 * this is not used currently.
*/
static int
isp_handle_other_response(struct ispsoftc *isp,
    ispstatusreq_t *sp, u_int16_t *optrp)
{
	switch (sp->req_header.rqs_entry_type) {
	case RQSTYPE_STATUS_CONT:
		isp_prt(isp, ISP_LOGINFO, "Ignored Continuation Response");
		return (0);
	case RQSTYPE_ATIO:
	case RQSTYPE_CTIO:
	case RQSTYPE_ENABLE_LUN:
	case RQSTYPE_MODIFY_LUN:
	case RQSTYPE_NOTIFY:
	case RQSTYPE_NOTIFY_ACK:
	case RQSTYPE_CTIO1:
	case RQSTYPE_ATIO2:
	case RQSTYPE_CTIO2:
	case RQSTYPE_CTIO3:
#ifdef	ISP_TARGET_MODE
		/* Target-mode entry types go to the target-mode layer. */
		return (isp_target_notify(isp, sp, optrp));
#else
		optrp = optrp;	/* silence unused-parameter warning */
		/* FALLTHROUGH */
#endif
	case RQSTYPE_REQUEST:
	default:
		/* Give the outer layer first crack at unknown entries. */
		if (isp_async(isp, ISPASYNC_UNHANDLED_RESPONSE, sp)) {
			return (0);
		}
		isp_prt(isp, ISP_LOGWARN, "Unhandled Response Type 0x%x",
		    sp->req_header.rqs_entry_type);
		return (-1);
	}
}

/*
 * Translate a response-queue completion status into an XS_T error code,
 * logging as appropriate. Cases that 'return' have fully classified the
 * command; cases that 'break' fall through to a catch-all HBA_BOTCH.
 */
static void
isp_parse_status(struct ispsoftc *isp, ispstatusreq_t *sp, XS_T *xs)
{
	switch (sp->req_completion_status & 0xff) {
	case RQCS_COMPLETE:
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_NOERROR);
		}
		return;

	case RQCS_INCOMPLETE:
		if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) {
			/* Never reached the target: selection timeout. */
			isp_prt(isp, ISP_LOGDEBUG1,
			    "Selection Timeout for %d.%d.%d",
			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
			if (XS_NOERR(xs)) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
			}
			return;
		}
		isp_prt(isp, ISP_LOGERR,
		    "command incomplete for %d.%d.%d, state 0x%x",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
		    sp->req_state_flags);
		break;

	case RQCS_DMA_ERROR:
		isp_prt(isp, ISP_LOGERR, "DMA error for command on %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_TRANSPORT_ERROR:
	{
		/* Build a readable dump of state/status flags. */
		char buf[172];
		buf[0] = 0;
		STRNCAT(buf, "states=>", sizeof buf);
		if (sp->req_state_flags & RQSF_GOT_BUS) {
			STRNCAT(buf, " GOT_BUS", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_GOT_TARGET) {
			STRNCAT(buf, " GOT_TGT", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_SENT_CDB) {
			STRNCAT(buf, " SENT_CDB", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_XFRD_DATA) {
			STRNCAT(buf, " XFRD_DATA", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_GOT_STATUS) {
			STRNCAT(buf, " GOT_STS", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_GOT_SENSE) {
			STRNCAT(buf, " GOT_SNS", sizeof buf);
		}
		if (sp->req_state_flags & RQSF_XFER_COMPLETE) {
			STRNCAT(buf, " XFR_CMPLT", sizeof buf);
		}
		STRNCAT(buf, "\nstatus=>", sizeof buf);
		if (sp->req_status_flags & RQSTF_DISCONNECT) {
			STRNCAT(buf, " Disconnect", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_SYNCHRONOUS) {
			STRNCAT(buf, " Sync_xfr", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_PARITY_ERROR) {
			STRNCAT(buf, " Parity", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_BUS_RESET) {
			STRNCAT(buf, " Bus_Reset", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_DEVICE_RESET) {
			STRNCAT(buf, " Device_Reset", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_ABORTED) {
			STRNCAT(buf, " Aborted", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_TIMEOUT) {
			STRNCAT(buf, " Timeout", sizeof buf);
		}
		if (sp->req_status_flags & RQSTF_NEGOTIATION) {
			STRNCAT(buf, " Negotiation", sizeof buf);
		}
		/*
		 * NOTE(review): buf is printed twice here (once bare, once
		 * embedded in the "transport error" message) — looks like a
		 * leftover duplicate; confirm which is intended.
		 */
		isp_prt(isp, ISP_LOGERR, "%s", buf);
		isp_prt(isp, ISP_LOGERR, "transport error for %d.%d.%d:\n%s",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), buf);
		break;
	}
	case RQCS_RESET_OCCURRED:
		isp_prt(isp, ISP_LOGWARN,
		    "bus reset destroyed command for %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		isp->isp_sendmarker |= (1 << XS_CHANNEL(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BUSRESET);
		}
		return;

	case RQCS_ABORTED:
		isp_prt(isp, ISP_LOGERR, "command aborted for %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		isp->isp_sendmarker |= (1 << XS_CHANNEL(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_ABORTED);
		}
		return;

	case RQCS_TIMEOUT:
		isp_prt(isp, ISP_LOGWARN, "command timed out for %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_CMDTIMEOUT);
		}
		return;

	case RQCS_DATA_OVERRUN:
		XS_RESID(xs) = sp->req_resid;
		isp_prt(isp, ISP_LOGERR, "data overrun for command on %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_DATAOVR);
		}
		return;

	case RQCS_COMMAND_OVERRUN:
		isp_prt(isp, ISP_LOGERR,
		    "command overrun for command on %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_STATUS_OVERRUN:
		isp_prt(isp, ISP_LOGERR,
		    "status overrun for command on %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_BAD_MESSAGE:
		isp_prt(isp, ISP_LOGERR,
		    "msg not COMMAND COMPLETE after status %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_NO_MESSAGE_OUT:
		isp_prt(isp, ISP_LOGERR,
		    "No MESSAGE OUT phase after selection on %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_EXT_ID_FAILED:
		isp_prt(isp, ISP_LOGERR, "EXTENDED IDENTIFY failed %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_IDE_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR,
		    "INITIATOR DETECTED ERROR rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_ABORT_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR, "ABORT OPERATION rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_REJECT_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR, "MESSAGE REJECT rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_NOP_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR, "NOP rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_PARITY_ERROR_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR,
		    "MESSAGE PARITY ERROR rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_DEVICE_RESET_MSG_FAILED:
		isp_prt(isp, ISP_LOGWARN,
		    "BUS DEVICE RESET rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_ID_MSG_FAILED:
		isp_prt(isp, ISP_LOGERR, "IDENTIFY rejected by %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_UNEXP_BUS_FREE:
		isp_prt(isp, ISP_LOGERR, "%d.%d.%d had an unexpected bus free",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_DATA_UNDERRUN:
		/* Underrun is not an error; just record the residual. */
		XS_RESID(xs) = sp->req_resid;
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_NOERROR);
		}
		return;

	case RQCS_XACT_ERR1:
		isp_prt(isp, ISP_LOGERR, xact1, XS_CHANNEL(xs),
		    XS_TGT(xs), XS_LUN(xs));
		break;

	case RQCS_XACT_ERR2:
		/*
		 * NOTE(review): argument order here is lun/tgt/chan, unlike
		 * the chan/tgt/lun order used elsewhere — matches the xact2
		 * format string's wording, but confirm intended.
		 */
		isp_prt(isp, ISP_LOGERR, xact2,
		    XS_LUN(xs), XS_TGT(xs), XS_CHANNEL(xs));
		break;

	case RQCS_XACT_ERR3:
		isp_prt(isp, ISP_LOGERR, xact3, XS_TGT(xs),
		    XS_LUN(xs), XS_CHANNEL(xs));
		break;

	case RQCS_BAD_ENTRY:
		isp_prt(isp, ISP_LOGERR, "Invalid IOCB entry type detected");
		break;

	case RQCS_QUEUE_FULL:
		isp_prt(isp, ISP_LOGDEBUG1,
		    "internal queues full for %d.%d.%d status 0x%x",
		    XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs), *XS_STSP(xs));
		/*
		 * If QFULL or some other status byte is set, then this
		 * isn't an error, per se.
		 */
		if (*XS_STSP(xs) != SCSI_GOOD && XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_NOERROR);
			return;
		}
		break;

	case RQCS_PHASE_SKIPPED:
		isp_prt(isp, ISP_LOGERR, pskip, XS_TGT(xs),
		    XS_LUN(xs), XS_CHANNEL(xs));
		break;

	case RQCS_ARQS_FAILED:
		isp_prt(isp, ISP_LOGERR,
		    "Auto Request Sense failed for %d.%d.%d",
		    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_ARQFAIL);
		}
		return;

	case RQCS_WIDE_FAILED:
		isp_prt(isp, ISP_LOGERR,
		    "Wide Negotiation failed for %d.%d.%d",
		    XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
		if (IS_SCSI(isp)) {
			/* Disable wide for this target and renegotiate. */
			sdparam *sdp = isp->isp_param;
			sdp += XS_CHANNEL(xs);
			sdp->isp_devparam[XS_TGT(xs)].dev_flags &= ~DPARM_WIDE;
			sdp->isp_devparam[XS_TGT(xs)].dev_update = 1;
			isp->isp_update |= (1 << XS_CHANNEL(xs));
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_NOERROR);
		}
		return;

	case RQCS_SYNCXFER_FAILED:
		isp_prt(isp, ISP_LOGERR,
		    "SDTR Message failed for target %d.%d.%d",
		    XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
		if (IS_SCSI(isp)) {
			/* Disable sync for this target and renegotiate. */
			sdparam *sdp = isp->isp_param;
			sdp += XS_CHANNEL(xs);
			sdp->isp_devparam[XS_TGT(xs)].dev_flags &= ~DPARM_SYNC;
			sdp->isp_devparam[XS_TGT(xs)].dev_update = 1;
			isp->isp_update |= (1 << XS_CHANNEL(xs));
		}
		break;

	case RQCS_LVD_BUSERR:
		isp_prt(isp, ISP_LOGERR,
		    "Bad LVD condition while talking to %d.%d.%d",
		    XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
		break;

	case RQCS_PORT_UNAVAILABLE:
		/*
		 * No such port on the loop. Moral equivalent of SELTIMEO
		 */
		isp_prt(isp, ISP_LOGINFO,
		    "Port Unavailable for target %d", XS_TGT(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		return;

	case RQCS_PORT_LOGGED_OUT:
		/*
		 * It was there (maybe)- treat as a selection timeout.
		 */
		isp_prt(isp, ISP_LOGINFO,
		    "port logout for target %d", XS_TGT(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		return;

	case RQCS_PORT_CHANGED:
		isp_prt(isp, ISP_LOGWARN,
		    "port changed for target %d", XS_TGT(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
		}
		return;

	case RQCS_PORT_BUSY:
		isp_prt(isp, ISP_LOGWARN,
		    "port busy for target %d", XS_TGT(xs));
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_TGTBSY);
		}
		return;

	default:
		isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x",
		    sp->req_completion_status);
		break;
	}
	/* Anything that broke out of the switch is an unclassified botch. */
	if (XS_NOERR(xs)) {
		XS_SETERR(xs, HBA_BOTCH);
	}
}

/*
 * Complete a command reported via the fast-post mechanism (handle comes
 * from an async mailbox event, so there is no response queue entry).
 */
static void
isp_fastpost_complete(struct ispsoftc *isp, u_int16_t fph)
{
	XS_T *xs;

	if (fph == 0) {
		return;
	}
	xs = isp_find_xs(isp, fph);
	if (xs == NULL) {
		isp_prt(isp, ISP_LOGWARN,
		    "Command for fast post handle 0x%x not found", fph);
		return;
	}
	isp_destroy_handle(isp, fph);

	/*
	 * Since we don't have a result queue entry item,
	 * we must believe that SCSI status is zero and
	 * that all data transferred.
	 */
	XS_SET_STATE_STAT(isp, xs, NULL);
	XS_RESID(xs) = 0;
	*XS_STSP(xs) = SCSI_GOOD;
	if (XS_XFRLEN(xs)) {
		ISP_DMAFREE(isp, xs, fph);
	}
	if (isp->isp_nactive)
		isp->isp_nactive--;
	isp_done(xs);
}

#define	HIBYT(x)	((x) >> 0x8)
#define	LOBYT(x)	((x) & 0xff)

/*
 * Pack the in-mailbox mask (a) and out-mailbox mask (b) for a mailbox
 * opcode into one word; each bit selects a mailbox register to write
 * before / read after the command.
 */
#define	ISPOPMAP(a, b)	(((a) << 8) | (b))

/* Per-opcode mailbox in/out register maps for the SCSI (non-FC) chips. */
static u_int16_t mbpscsi[] = {
	ISPOPMAP(0x01, 0x01),	/* 0x00: MBOX_NO_OP */
	ISPOPMAP(0x1f, 0x01),	/* 0x01: MBOX_LOAD_RAM */
	ISPOPMAP(0x03, 0x01),	/* 0x02: MBOX_EXEC_FIRMWARE */
	ISPOPMAP(0x1f, 0x01),	/* 0x03: MBOX_DUMP_RAM */
	ISPOPMAP(0x07, 0x07),	/* 0x04: MBOX_WRITE_RAM_WORD */
	ISPOPMAP(0x03, 0x07),	/* 0x05: MBOX_READ_RAM_WORD */
	ISPOPMAP(0x3f, 0x3f),	/* 0x06: MBOX_MAILBOX_REG_TEST */
	ISPOPMAP(0x03, 0x07),	/* 0x07: MBOX_VERIFY_CHECKSUM */
	ISPOPMAP(0x01, 0x4f),	/* 0x08: MBOX_ABOUT_FIRMWARE */
	ISPOPMAP(0x00, 0x00),	/* 0x09: */
	ISPOPMAP(0x00, 0x00),	/* 0x0a: */
	ISPOPMAP(0x00, 0x00),	/* 0x0b: */
	ISPOPMAP(0x00, 0x00),	/* 0x0c: */
	ISPOPMAP(0x00, 0x00),	/* 0x0d: */
	ISPOPMAP(0x01, 0x05),	/* 0x0e: MBOX_CHECK_FIRMWARE */
	ISPOPMAP(0x00, 0x00),	/* 0x0f: */
	ISPOPMAP(0x1f, 0x1f),	/* 0x10: MBOX_INIT_REQ_QUEUE */
	ISPOPMAP(0x3f, 0x3f),	/* 0x11: MBOX_INIT_RES_QUEUE */
	ISPOPMAP(0x0f, 0x0f),	/* 0x12: MBOX_EXECUTE_IOCB */
	ISPOPMAP(0x03, 0x03),	/* 0x13: MBOX_WAKE_UP */
	ISPOPMAP(0x01, 0x3f),	/* 0x14: MBOX_STOP_FIRMWARE */
	ISPOPMAP(0x0f, 0x0f),	/* 0x15: MBOX_ABORT */
	ISPOPMAP(0x03, 0x03),	/* 0x16: MBOX_ABORT_DEVICE */
	ISPOPMAP(0x07, 0x07),	/* 0x17: MBOX_ABORT_TARGET */
	ISPOPMAP(0x07, 0x07),	/* 0x18: MBOX_BUS_RESET */
	ISPOPMAP(0x03, 0x07),	/* 0x19: MBOX_STOP_QUEUE */
	ISPOPMAP(0x03, 0x07),	/* 0x1a: MBOX_START_QUEUE */
	ISPOPMAP(0x03, 0x07),	/* 0x1b: MBOX_SINGLE_STEP_QUEUE */
	ISPOPMAP(0x03, 0x07),	/* 0x1c: MBOX_ABORT_QUEUE */
	ISPOPMAP(0x03, 0x4f),	/* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */
	ISPOPMAP(0x00, 0x00),	/* 0x1e: */
	ISPOPMAP(0x01, 0x07),	/* 0x1f: MBOX_GET_FIRMWARE_STATUS */
	ISPOPMAP(0x01, 0x07),	/* 0x20: MBOX_GET_INIT_SCSI_ID */
	ISPOPMAP(0x01, 0x07),	/* 0x21: MBOX_GET_SELECT_TIMEOUT */
	ISPOPMAP(0x01, 0xc7),	/* 0x22: MBOX_GET_RETRY_COUNT */
	ISPOPMAP(0x01, 0x07),	/* 0x23: MBOX_GET_TAG_AGE_LIMIT */
	ISPOPMAP(0x01, 0x03),	/* 0x24: MBOX_GET_CLOCK_RATE */
	ISPOPMAP(0x01, 0x07),	/* 0x25: MBOX_GET_ACT_NEG_STATE */
	ISPOPMAP(0x01, 0x07),	/* 0x26: MBOX_GET_ASYNC_DATA_SETUP_TIME */
	ISPOPMAP(0x01, 0x07),	/* 0x27: MBOX_GET_PCI_PARAMS */
	ISPOPMAP(0x03, 0x4f),	/* 0x28: MBOX_GET_TARGET_PARAMS */
	ISPOPMAP(0x03, 0x0f),	/* 0x29: MBOX_GET_DEV_QUEUE_PARAMS */
	ISPOPMAP(0x01, 0x07),	/* 0x2a: MBOX_GET_RESET_DELAY_PARAMS */
	ISPOPMAP(0x00, 0x00),	/* 0x2b: */
	ISPOPMAP(0x00, 0x00),	/* 0x2c: */
	ISPOPMAP(0x00, 0x00),	/* 0x2d: */
	ISPOPMAP(0x00, 0x00),	/* 0x2e: */
	ISPOPMAP(0x00, 0x00),	/* 0x2f: */
	ISPOPMAP(0x03, 0x03),	/* 0x30: MBOX_SET_INIT_SCSI_ID */
	ISPOPMAP(0x07, 0x07),	/* 0x31: MBOX_SET_SELECT_TIMEOUT */
	ISPOPMAP(0xc7, 0xc7),	/* 0x32: MBOX_SET_RETRY_COUNT */
	ISPOPMAP(0x07, 0x07),	/* 0x33: MBOX_SET_TAG_AGE_LIMIT */
	ISPOPMAP(0x03, 0x03),	/* 0x34: MBOX_SET_CLOCK_RATE */
	ISPOPMAP(0x07, 0x07),	/* 0x35: MBOX_SET_ACT_NEG_STATE */
	ISPOPMAP(0x07, 0x07),	/* 0x36: MBOX_SET_ASYNC_DATA_SETUP_TIME */
	ISPOPMAP(0x07, 0x07),	/* 0x37: MBOX_SET_PCI_CONTROL_PARAMS */
	ISPOPMAP(0x4f, 0x4f),	/* 0x38: MBOX_SET_TARGET_PARAMS */
	ISPOPMAP(0x0f, 0x0f),	/* 0x39: MBOX_SET_DEV_QUEUE_PARAMS */
	ISPOPMAP(0x07, 0x07),	/* 0x3a: MBOX_SET_RESET_DELAY_PARAMS */
	ISPOPMAP(0x00, 0x00),	/* 0x3b: */
	ISPOPMAP(0x00, 0x00),	/* 0x3c: */
	ISPOPMAP(0x00, 0x00),	/* 0x3d: */
	ISPOPMAP(0x00, 0x00),	/* 0x3e: */
	ISPOPMAP(0x00, 0x00),	/* 0x3f: */
	ISPOPMAP(0x01, 0x03),	/* 0x40: MBOX_RETURN_BIOS_BLOCK_ADDR */
	ISPOPMAP(0x3f, 0x01),	/* 0x41: MBOX_WRITE_FOUR_RAM_WORDS */
	ISPOPMAP(0x03, 0x07),	/* 0x42: MBOX_EXEC_BIOS_IOCB */
	ISPOPMAP(0x00, 0x00),	/* 0x43: */
	ISPOPMAP(0x00, 0x00),	/* 0x44: */
	ISPOPMAP(0x03, 0x03),	/* 0x45: SET SYSTEM PARAMETER */
	ISPOPMAP(0x01, 0x03),	/* 0x46: GET SYSTEM PARAMETER */
	ISPOPMAP(0x00, 0x00),	/* 0x47: */
	ISPOPMAP(0x01, 0xcf),	/* 0x48: GET SCAM CONFIGURATION */
	ISPOPMAP(0xcf, 0xcf),	/* 0x49: SET SCAM CONFIGURATION */
	ISPOPMAP(0x03, 0x03),	/* 0x4a: MBOX_SET_FIRMWARE_FEATURES */
	ISPOPMAP(0x01, 0x03),	/* 0x4b: MBOX_GET_FIRMWARE_FEATURES */
	ISPOPMAP(0x00, 0x00),	/* 0x4c: */
	ISPOPMAP(0x00, 0x00),	/* 0x4d: */
	ISPOPMAP(0x00, 0x00),	/* 0x4e: */
	ISPOPMAP(0x00, 0x00),	/* 0x4f: */
	ISPOPMAP(0xdf, 0xdf),	/* 0x50: LOAD RAM A64 */
	ISPOPMAP(0xdf, 0xdf),	/* 0x51: DUMP RAM A64 */
	ISPOPMAP(0xdf, 0xdf),	/* 0x52: INITIALIZE REQUEST QUEUE A64 */
	ISPOPMAP(0xff, 0xff),	/* 0x53: INITIALIZE RESPONSE QUEUE A64 */
	ISPOPMAP(0xcf, 0xff),	/* 0x54: EXECUTE IOCB A64 */
	ISPOPMAP(0x07, 0x01),	/* 0x55: ENABLE TARGET MODE */
	ISPOPMAP(0x03, 0x0f),	/* 0x56: GET TARGET STATUS */
	ISPOPMAP(0x00, 0x00),	/* 0x57: */
	ISPOPMAP(0x00, 0x00),	/* 0x58: */
	ISPOPMAP(0x00, 0x00),	/* 0x59: */
	ISPOPMAP(0x03, 0x03),	/* 0x5a: SET DATA OVERRUN RECOVERY MODE */
	ISPOPMAP(0x01, 0x03),	/* 0x5b: GET DATA OVERRUN RECOVERY MODE */
	ISPOPMAP(0x0f, 0x0f),	/* 0x5c: SET HOST DATA */
	ISPOPMAP(0x01, 0x01)	/* 0x5d: GET NOST DATA */
};

#ifndef	ISP_STRIPPED
/* Human-readable names for the SCSI mailbox opcodes above (NULL = gap). */
static char *scsi_mbcmd_names[] = {
	"NO-OP",
	"LOAD RAM",
	"EXEC FIRMWARE",
	"DUMP RAM",
	"WRITE RAM WORD",
	"READ RAM WORD",
	"MAILBOX REG TEST",
	"VERIFY CHECKSUM",
	"ABOUT FIRMWARE",
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"CHECK FIRMWARE",
	NULL,
	"INIT REQUEST QUEUE",
	"INIT RESULT QUEUE",
	"EXECUTE IOCB",
	"WAKE UP",
	"STOP FIRMWARE",
	"ABORT",
	"ABORT DEVICE",
	"ABORT TARGET",
	"BUS RESET",
	"STOP QUEUE",
	"START QUEUE",
	"SINGLE STEP QUEUE",
	"ABORT QUEUE",
	"GET DEV QUEUE STATUS",
	NULL,
	"GET FIRMWARE STATUS",
	"GET INIT SCSI ID",
	"GET SELECT TIMEOUT",
	"GET RETRY COUNT",
	"GET TAG AGE LIMIT",
	"GET CLOCK RATE",
	"GET ACT NEG STATE",
	"GET ASYNC DATA SETUP TIME",
	"GET PCI PARAMS",
	"GET TARGET PARAMS",
	"GET DEV QUEUE PARAMS",
	"GET RESET DELAY PARAMS",
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"SET INIT SCSI ID",
	"SET SELECT TIMEOUT",
	"SET RETRY COUNT",
	"SET TAG AGE LIMIT",
	"SET CLOCK RATE",
	"SET ACT NEG STATE",
	"SET ASYNC DATA SETUP TIME",
	"SET PCI CONTROL PARAMS",
	"SET TARGET PARAMS",
	"SET DEV QUEUE PARAMS",
	"SET RESET DELAY PARAMS",
	NULL,
NULL, NULL, NULL, NULL, "RETURN BIOS BLOCK ADDR", "WRITE FOUR RAM WORDS", "EXEC BIOS IOCB", NULL, NULL, "SET SYSTEM PARAMETER", "GET SYSTEM PARAMETER", NULL, "GET SCAM CONFIGURATION", "SET SCAM CONFIGURATION", "SET FIRMWARE FEATURES", "GET FIRMWARE FEATURES", NULL, NULL, NULL, NULL, "LOAD RAM A64", "DUMP RAM A64", "INITIALIZE REQUEST QUEUE A64", "INITIALIZE RESPONSE QUEUE A64", "EXECUTE IOCB A64", "ENABLE TARGET MODE", "GET TARGET MODE STATE", NULL, NULL, NULL, "SET DATA OVERRUN RECOVERY MODE", "GET DATA OVERRUN RECOVERY MODE", "SET HOST DATA", "GET NOST DATA", }; #endif static u_int16_t mbpfc[] = { ISPOPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISPOPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISPOPMAP(0x03, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISPOPMAP(0xdf, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISPOPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISPOPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISPOPMAP(0xff, 0xff), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISPOPMAP(0x03, 0x05), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISPOPMAP(0x01, 0x0f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISPOPMAP(0xdf, 0x01), /* 0x09: LOAD RAM */ ISPOPMAP(0xdf, 0x01), /* 0x0a: DUMP RAM */ ISPOPMAP(0x00, 0x00), /* 0x0b: */ ISPOPMAP(0x00, 0x00), /* 0x0c: */ ISPOPMAP(0x00, 0x00), /* 0x0d: */ ISPOPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISPOPMAP(0x00, 0x00), /* 0x0f: */ ISPOPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISPOPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */ ISPOPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */ ISPOPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISPOPMAP(0x01, 0xff), /* 0x14: MBOX_STOP_FIRMWARE */ ISPOPMAP(0x4f, 0x01), /* 0x15: MBOX_ABORT */ ISPOPMAP(0x07, 0x01), /* 0x16: MBOX_ABORT_DEVICE */ ISPOPMAP(0x07, 0x01), /* 0x17: MBOX_ABORT_TARGET */ ISPOPMAP(0x03, 0x03), /* 0x18: MBOX_BUS_RESET */ ISPOPMAP(0x07, 0x05), /* 0x19: MBOX_STOP_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1a: MBOX_START_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISPOPMAP(0x07, 0x05), /* 0x1c: 
MBOX_ABORT_QUEUE */ ISPOPMAP(0x07, 0x03), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISPOPMAP(0x00, 0x00), /* 0x1e: */ ISPOPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISPOPMAP(0x01, 0x4f), /* 0x20: MBOX_GET_LOOP_ID */ ISPOPMAP(0x00, 0x00), /* 0x21: */ ISPOPMAP(0x01, 0x07), /* 0x22: MBOX_GET_RETRY_COUNT */ ISPOPMAP(0x00, 0x00), /* 0x23: */ ISPOPMAP(0x00, 0x00), /* 0x24: */ ISPOPMAP(0x00, 0x00), /* 0x25: */ ISPOPMAP(0x00, 0x00), /* 0x26: */ ISPOPMAP(0x00, 0x00), /* 0x27: */ ISPOPMAP(0x0f, 0x1), /* 0x28: MBOX_GET_FIRMWARE_OPTIONS */ ISPOPMAP(0x03, 0x07), /* 0x29: MBOX_GET_PORT_QUEUE_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x2a: */ ISPOPMAP(0x00, 0x00), /* 0x2b: */ ISPOPMAP(0x00, 0x00), /* 0x2c: */ ISPOPMAP(0x00, 0x00), /* 0x2d: */ ISPOPMAP(0x00, 0x00), /* 0x2e: */ ISPOPMAP(0x00, 0x00), /* 0x2f: */ ISPOPMAP(0x00, 0x00), /* 0x30: */ ISPOPMAP(0x00, 0x00), /* 0x31: */ ISPOPMAP(0x07, 0x07), /* 0x32: MBOX_SET_RETRY_COUNT */ ISPOPMAP(0x00, 0x00), /* 0x33: */ ISPOPMAP(0x00, 0x00), /* 0x34: */ ISPOPMAP(0x00, 0x00), /* 0x35: */ ISPOPMAP(0x00, 0x00), /* 0x36: */ ISPOPMAP(0x00, 0x00), /* 0x37: */ ISPOPMAP(0x0f, 0x01), /* 0x38: MBOX_SET_FIRMWARE_OPTIONS */ ISPOPMAP(0x0f, 0x07), /* 0x39: MBOX_SET_PORT_QUEUE_PARAMS */ ISPOPMAP(0x00, 0x00), /* 0x3a: */ ISPOPMAP(0x00, 0x00), /* 0x3b: */ ISPOPMAP(0x00, 0x00), /* 0x3c: */ ISPOPMAP(0x00, 0x00), /* 0x3d: */ ISPOPMAP(0x00, 0x00), /* 0x3e: */ ISPOPMAP(0x00, 0x00), /* 0x3f: */ ISPOPMAP(0x03, 0x01), /* 0x40: MBOX_LOOP_PORT_BYPASS */ ISPOPMAP(0x03, 0x01), /* 0x41: MBOX_LOOP_PORT_ENABLE */ ISPOPMAP(0x03, 0x07), /* 0x42: MBOX_GET_RESOURCE_COUNTS */ ISPOPMAP(0x01, 0x01), /* 0x43: MBOX_REQUEST_NON_PARTICIPATING_MODE */ ISPOPMAP(0x00, 0x00), /* 0x44: */ ISPOPMAP(0x00, 0x00), /* 0x45: */ ISPOPMAP(0x00, 0x00), /* 0x46: */ ISPOPMAP(0xcf, 0x03), /* 0x47: GET PORT_DATABASE ENHANCED */ ISPOPMAP(0x00, 0x00), /* 0x48: */ ISPOPMAP(0x00, 0x00), /* 0x49: */ ISPOPMAP(0x00, 0x00), /* 0x4a: */ ISPOPMAP(0x00, 0x00), /* 0x4b: */ ISPOPMAP(0x00, 0x00), /* 0x4c: */ 
ISPOPMAP(0x00, 0x00), /* 0x4d: */ ISPOPMAP(0x00, 0x00), /* 0x4e: */ ISPOPMAP(0x00, 0x00), /* 0x4f: */ ISPOPMAP(0x00, 0x00), /* 0x50: */ ISPOPMAP(0x00, 0x00), /* 0x51: */ ISPOPMAP(0x00, 0x00), /* 0x52: */ ISPOPMAP(0x00, 0x00), /* 0x53: */ ISPOPMAP(0xcf, 0x01), /* 0x54: EXECUTE IOCB A64 */ ISPOPMAP(0x00, 0x00), /* 0x55: */ ISPOPMAP(0x00, 0x00), /* 0x56: */ ISPOPMAP(0x00, 0x00), /* 0x57: */ ISPOPMAP(0x00, 0x00), /* 0x58: */ ISPOPMAP(0x00, 0x00), /* 0x59: */ ISPOPMAP(0x00, 0x00), /* 0x5a: */ ISPOPMAP(0x00, 0x00), /* 0x5b: */ ISPOPMAP(0x00, 0x00), /* 0x5c: */ ISPOPMAP(0x00, 0x00), /* 0x5d: */ ISPOPMAP(0x00, 0x00), /* 0x5e: */ ISPOPMAP(0x00, 0x00), /* 0x5f: */ ISPOPMAP(0xfd, 0x31), /* 0x60: MBOX_INIT_FIRMWARE */ ISPOPMAP(0x00, 0x00), /* 0x61: */ ISPOPMAP(0x01, 0x01), /* 0x62: MBOX_INIT_LIP */ ISPOPMAP(0xcd, 0x03), /* 0x63: MBOX_GET_FC_AL_POSITION_MAP */ ISPOPMAP(0xcf, 0x01), /* 0x64: MBOX_GET_PORT_DB */ ISPOPMAP(0x07, 0x01), /* 0x65: MBOX_CLEAR_ACA */ ISPOPMAP(0x07, 0x01), /* 0x66: MBOX_TARGET_RESET */ ISPOPMAP(0x07, 0x01), /* 0x67: MBOX_CLEAR_TASK_SET */ ISPOPMAP(0x07, 0x01), /* 0x68: MBOX_ABORT_TASK_SET */ ISPOPMAP(0x01, 0x07), /* 0x69: MBOX_GET_FW_STATE */ ISPOPMAP(0x03, 0xcf), /* 0x6a: MBOX_GET_PORT_NAME */ ISPOPMAP(0xcf, 0x01), /* 0x6b: MBOX_GET_LINK_STATUS */ ISPOPMAP(0x0f, 0x01), /* 0x6c: MBOX_INIT_LIP_RESET */ ISPOPMAP(0x00, 0x00), /* 0x6d: */ ISPOPMAP(0xcf, 0x03), /* 0x6e: MBOX_SEND_SNS */ ISPOPMAP(0x0f, 0x07), /* 0x6f: MBOX_FABRIC_LOGIN */ ISPOPMAP(0x03, 0x01), /* 0x70: MBOX_SEND_CHANGE_REQUEST */ ISPOPMAP(0x03, 0x03), /* 0x71: MBOX_FABRIC_LOGOUT */ ISPOPMAP(0x0f, 0x0f), /* 0x72: MBOX_INIT_LIP_LOGIN */ ISPOPMAP(0x00, 0x00), /* 0x73: */ ISPOPMAP(0x07, 0x01), /* 0x74: LOGIN LOOP PORT */ ISPOPMAP(0xcf, 0x03), /* 0x75: GET PORT/NODE NAME LIST */ ISPOPMAP(0x4f, 0x01), /* 0x76: SET VENDOR ID */ ISPOPMAP(0xcd, 0x01), /* 0x77: INITIALIZE IP MAILBOX */ ISPOPMAP(0x00, 0x00), /* 0x78: */ ISPOPMAP(0x00, 0x00), /* 0x79: */ ISPOPMAP(0x00, 0x00), /* 0x7a: */ ISPOPMAP(0x00, 
0x00), /* 0x7b: */ ISPOPMAP(0x4f, 0x03), /* 0x7c: Get ID List */ ISPOPMAP(0xcf, 0x01), /* 0x7d: SEND LFA */ ISPOPMAP(0x07, 0x01) /* 0x7e: Lun RESET */ }; #ifndef ISP_STRIPPED static char *fc_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", "LOAD RAM", "DUMP RAM", NULL, NULL, NULL, "CHECK FIRMWARE", NULL, "INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET LOOP ID", NULL, "GET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "GET FIRMWARE OPTIONS", "GET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "SET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "SET FIRMWARE OPTIONS", "SET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, "LOOP PORT BYPASS", "LOOP PORT ENABLE", "GET RESOURCE COUNTS", "REQUEST NON PARTICIPATING MODE", NULL, NULL, NULL, "GET PORT DATABASE,, ENHANCED", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "EXECUTE IOCB A64", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "INIT FIRMWARE", NULL, "INIT LIP", "GET FC-AL POSITION MAP", "GET PORT DATABASE", "CLEAR ACA", "TARGET RESET", "CLEAR TASK SET", "ABORT TASK SET", "GET FW STATE", "GET PORT NAME", "GET LINK STATUS", "INIT LIP RESET", NULL, "SEND SNS", "FABRIC LOGIN", "SEND CHANGE REQUEST", "FABRIC LOGOUT", "INIT LIP LOGIN", NULL, "LOGIN LOOP PORT", "GET PORT/NODE NAME LIST", "SET VENDOR ID", "INITIALIZE IP MAILBOX", NULL, NULL, NULL, NULL, "Get ID List", "SEND LFA", "Lun RESET" }; #endif static void isp_mboxcmd(struct ispsoftc *isp, mbreg_t *mbp, int logmask) { char *cname, *xname, tname[16], mname[16]; unsigned int lim, ibits, obits, box, opcode; u_int16_t *mcp; if (IS_FC(isp)) { mcp = mbpfc; lim = (sizeof (mbpfc) / sizeof 
(mbpfc[0])); } else { mcp = mbpscsi; lim = (sizeof (mbpscsi) / sizeof (mbpscsi[0])); } if ((opcode = mbp->param[0]) >= lim) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } ibits = HIBYT(mcp[opcode]) & NMBOX_BMASK(isp); obits = LOBYT(mcp[opcode]) & NMBOX_BMASK(isp); if (ibits == 0 && obits == 0) { mbp->param[0] = MBOX_COMMAND_PARAM_ERROR; isp_prt(isp, ISP_LOGERR, "no parameters for 0x%x", opcode); return; } /* * Get exclusive usage of mailbox registers. */ MBOX_ACQUIRE(isp); for (box = 0; box < MAX_MAILBOX; box++) { if (ibits & (1 << box)) { ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } isp->isp_mboxtmp[box] = mbp->param[box] = 0; } isp->isp_lastmbxcmd = opcode; /* * We assume that we can't overwrite a previous command. */ isp->isp_mboxbsy = obits; /* * Set Host Interrupt condition so that RISC will pick up mailbox regs. */ ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); /* * While we haven't finished the command, spin our wheels here. */ MBOX_WAIT_COMPLETE(isp); /* * Copy back output registers. */ for (box = 0; box < MAX_MAILBOX; box++) { if (obits & (1 << box)) { mbp->param[box] = isp->isp_mboxtmp[box]; } } MBOX_RELEASE(isp); if (logmask == 0 || opcode == MBOX_EXEC_FIRMWARE) { return; } #ifdef ISP_STRIPPED cname = NULL; #else cname = (IS_FC(isp))? fc_mbcmd_names[opcode] : scsi_mbcmd_names[opcode]; #endif if (cname == NULL) { cname = tname; SNPRINTF(tname, sizeof tname, "opcode %x", opcode); } /* * Just to be chatty here... 
*/ xname = NULL; switch (mbp->param[0]) { case MBOX_COMMAND_COMPLETE: break; case MBOX_INVALID_COMMAND: if (logmask & MBLOGMASK(MBOX_COMMAND_COMPLETE)) xname = "INVALID COMMAND"; break; case MBOX_HOST_INTERFACE_ERROR: if (logmask & MBLOGMASK(MBOX_HOST_INTERFACE_ERROR)) xname = "HOST INTERFACE ERROR"; break; case MBOX_TEST_FAILED: if (logmask & MBLOGMASK(MBOX_TEST_FAILED)) xname = "TEST FAILED"; break; case MBOX_COMMAND_ERROR: if (logmask & MBLOGMASK(MBOX_COMMAND_ERROR)) xname = "COMMAND ERROR"; break; case MBOX_COMMAND_PARAM_ERROR: if (logmask & MBLOGMASK(MBOX_COMMAND_PARAM_ERROR)) xname = "COMMAND PARAMETER ERROR"; break; case MBOX_LOOP_ID_USED: if (logmask & MBLOGMASK(MBOX_LOOP_ID_USED)) xname = "LOOP ID ALREADY IN USE"; break; case MBOX_PORT_ID_USED: if (logmask & MBLOGMASK(MBOX_PORT_ID_USED)) xname = "PORT ID ALREADY IN USE"; break; case MBOX_ALL_IDS_USED: if (logmask & MBLOGMASK(MBOX_ALL_IDS_USED)) xname = "ALL LOOP IDS IN USE"; break; case 0: /* special case */ xname = "TIMEOUT"; break; default: SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); xname = mname; break; } if (xname) isp_prt(isp, ISP_LOGALL, "Mailbox Command '%s' failed (%s)", cname, xname); } static void isp_fw_state(struct ispsoftc *isp) { if (IS_FC(isp)) { mbreg_t mbs; fcparam *fcp = isp->isp_param; mbs.param[0] = MBOX_GET_FW_STATE; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { fcp->isp_fwstate = mbs.param[1]; } } } static void isp_update(struct ispsoftc *isp) { int bus, upmask; for (bus = 0, upmask = isp->isp_update; upmask != 0; bus++) { if (upmask & (1 << bus)) { isp_update_bus(isp, bus); } upmask &= ~(1 << bus); } } static void isp_update_bus(struct ispsoftc *isp, int bus) { int tgt; mbreg_t mbs; sdparam *sdp; isp->isp_update &= ~(1 << bus); if (IS_FC(isp)) { /* * There are no 'per-bus' settings for Fibre Channel. 
*/ return; } sdp = isp->isp_param; sdp += bus; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { u_int16_t flags, period, offset; int get; if (sdp->isp_devparam[tgt].dev_enable == 0) { sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 0; isp_prt(isp, ISP_LOGDEBUG0, "skipping target %d bus %d update", tgt, bus); continue; } /* * If the goal is to update the status of the device, * take what's in dev_flags and try and set the device * toward that. Otherwise, if we're just refreshing the * current device state, get the current parameters. */ /* * Refresh overrides set */ if (sdp->isp_devparam[tgt].dev_refresh) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; sdp->isp_devparam[tgt].dev_refresh = 0; get = 1; } else if (sdp->isp_devparam[tgt].dev_update) { mbs.param[0] = MBOX_SET_TARGET_PARAMS; /* * Make sure dev_flags has "Renegotiate on Error" * on and "Freeze Queue on Error" off. */ sdp->isp_devparam[tgt].dev_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].dev_flags &= ~DPARM_QFRZ; mbs.param[2] = sdp->isp_devparam[tgt].dev_flags; /* * Insist that PARITY must be enabled * if SYNC or WIDE is enabled. */ if ((mbs.param[2] & (DPARM_SYNC|DPARM_WIDE)) != 0) { mbs.param[2] |= DPARM_PARITY; } if ((mbs.param[2] & DPARM_SYNC) == 0) { mbs.param[3] = 0; } else { mbs.param[3] = (sdp->isp_devparam[tgt].sync_offset << 8) | (sdp->isp_devparam[tgt].sync_period); } /* * A command completion later that has * RQSTF_NEGOTIATION set canl cause * the dev_refresh/announce cycle also. & * * Note: It is really important to update our current * flags with at least the state of TAG capabilities- * otherwise we might try and send a tagged command * when we have it all turned off. So change it here * to say that current already matches goal. 
*/ sdp->isp_devparam[tgt].cur_dflags &= ~DPARM_TQING; sdp->isp_devparam[tgt].cur_dflags |= (sdp->isp_devparam[tgt].dev_flags & DPARM_TQING); isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", bus, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 1; get = 0; } else { continue; } mbs.param[1] = (bus << 15) | (tgt << 8); isp_mboxcmd(isp, &mbs, MBLOGALL); if (get == 0) { isp->isp_sendmarker |= (1 << bus); continue; } flags = mbs.param[2]; period = mbs.param[3] & 0xff; offset = mbs.param[3] >> 8; sdp->isp_devparam[tgt].cur_dflags = flags; sdp->isp_devparam[tgt].cur_period = period; sdp->isp_devparam[tgt].cur_offset = offset; get = (bus << 16) | tgt; (void) isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, &get); } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_update || sdp->isp_devparam[tgt].dev_refresh) { isp->isp_update |= (1 << bus); break; } } } static void isp_setdfltparm(struct ispsoftc *isp, int channel) { int tgt; mbreg_t mbs; sdparam *sdp; if (IS_FC(isp)) { fcparam *fcp = (fcparam *) isp->isp_param; int nvfail; fcp += channel; if (fcp->isp_gotdparms) { return; } fcp->isp_gotdparms = 1; fcp->isp_maxfrmlen = ICB_DFLT_FRMLEN; fcp->isp_maxalloc = ICB_DFLT_ALLOC; fcp->isp_execthrottle = ISP_EXEC_THROTTLE; fcp->isp_retry_delay = ICB_DFLT_RDELAY; fcp->isp_retry_count = ICB_DFLT_RCOUNT; /* Platform specific.... 
*/ fcp->isp_loopid = DEFAULT_LOOPID(isp); fcp->isp_nodewwn = DEFAULT_NODEWWN(isp); fcp->isp_portwwn = DEFAULT_PORTWWN(isp); fcp->isp_fwoptions = 0; fcp->isp_fwoptions |= ICBOPT_FAIRNESS; fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; #ifndef ISP_NO_FASTPOST_FC fcp->isp_fwoptions |= ICBOPT_FAST_POST; #endif if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; /* * Make sure this is turned off now until we get * extended options from NVRAM */ fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; /* * Now try and read NVRAM unless told to not do so. * This will set fcparam's isp_nodewwn && isp_portwwn. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { nvfail = isp_read_nvram(isp); if (nvfail) isp->isp_confopts |= ISP_CFG_NONVRAM; } else { nvfail = 1; } /* * Set node && port to override platform set defaults * unless the nvram read failed (or none was done), * or the platform code wants to use what had been * set in the defaults. */ if (nvfail || (isp->isp_confopts & ISP_CFG_OWNWWN)) { isp_prt(isp, ISP_LOGCONFIG, "Using Node WWN 0x%08x%08x, Port WWN 0x%08x%08x", (u_int32_t) (DEFAULT_NODEWWN(isp) >> 32), (u_int32_t) (DEFAULT_NODEWWN(isp) & 0xffffffff), (u_int32_t) (DEFAULT_PORTWWN(isp) >> 32), (u_int32_t) (DEFAULT_PORTWWN(isp) & 0xffffffff)); isp->isp_confopts |= ISP_CFG_OWNWWN; ISP_NODEWWN(isp) = DEFAULT_NODEWWN(isp); ISP_PORTWWN(isp) = DEFAULT_PORTWWN(isp); } else { /* * We always start out with values derived * from NVRAM or our platform default. */ ISP_NODEWWN(isp) = fcp->isp_nodewwn; ISP_PORTWWN(isp) = fcp->isp_portwwn; } return; } sdp = (sdparam *) isp->isp_param; sdp += channel; /* * Been there, done that, got the T-shirt... */ if (sdp->isp_gotdparms) { return; } sdp->isp_gotdparms = 1; /* * Establish some default parameters. 
*/ sdp->isp_cmd_dma_burst_enable = 1; sdp->isp_data_dma_burst_enabl = 1; sdp->isp_fifo_threshold = 0; sdp->isp_initiator_id = DEFAULT_IID(isp); if (isp->isp_type >= ISP_HA_SCSI_1040) { sdp->isp_async_data_setup = 9; } else { sdp->isp_async_data_setup = 6; } sdp->isp_selection_timeout = 250; sdp->isp_max_queue_depth = MAXISPREQUEST(isp); sdp->isp_tag_aging = 8; sdp->isp_bus_reset_delay = 3; sdp->isp_retry_count = 2; sdp->isp_retry_delay = 2; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].exc_throttle = ISP_EXEC_THROTTLE; sdp->isp_devparam[tgt].dev_enable = 1; } /* * If we've not been told to avoid reading NVRAM, try and read it. * If we're successful reading it, we can return since NVRAM will * tell us the right thing to do. Otherwise, establish some reasonable * defaults. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { if (isp_read_nvram(isp) == 0) { return; } } /* * Now try and see whether we have specific values for them. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { mbs.param[0] = MBOX_GET_ACT_NEG_STATE; isp_mboxcmd(isp, &mbs, MBLOGNONE); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdp->isp_req_ack_active_neg = 1; sdp->isp_data_line_active_neg = 1; } else { sdp->isp_req_ack_active_neg = (mbs.param[1+channel] >> 4) & 0x1; sdp->isp_data_line_active_neg = (mbs.param[1+channel] >> 5) & 0x1; } } isp_prt(isp, ISP_LOGDEBUG0, "defaulting bus %d REQ/ACK Active Negation is %d", channel, sdp->isp_req_ack_active_neg); isp_prt(isp, ISP_LOGDEBUG0, "defaulting bus %d DATA Active Negation is %d", channel, sdp->isp_data_line_active_neg); /* * The trick here is to establish a default for the default (honk!) * state (dev_flags). Then try and get the current status from * the card to fill in the current state. We don't, in fact, set * the default to the SAFE default state- that's not the goal state. 
*/ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].cur_offset = 0; sdp->isp_devparam[tgt].cur_period = 0; sdp->isp_devparam[tgt].dev_flags = DPARM_DEFAULT; sdp->isp_devparam[tgt].cur_dflags = 0; /* * We default to Wide/Fast for versions less than a 1040 * (unless it's SBus). */ if ((isp->isp_bustype == ISP_BT_SBUS && isp->isp_type < ISP_HA_SCSI_1020A) || (isp->isp_bustype == ISP_BT_PCI && isp->isp_type < ISP_HA_SCSI_1040) || (isp->isp_clock && isp->isp_clock < 60) || (sdp->isp_ultramode == 0)) { sdp->isp_devparam[tgt].sync_offset = ISP_10M_SYNCPARMS >> 8; sdp->isp_devparam[tgt].sync_period = ISP_10M_SYNCPARMS & 0xff; } else if (IS_ULTRA3(isp)) { sdp->isp_devparam[tgt].sync_offset = ISP_80M_SYNCPARMS >> 8; sdp->isp_devparam[tgt].sync_period = ISP_80M_SYNCPARMS & 0xff; } else if (IS_ULTRA2(isp)) { sdp->isp_devparam[tgt].sync_offset = ISP_40M_SYNCPARMS >> 8; sdp->isp_devparam[tgt].sync_period = ISP_40M_SYNCPARMS & 0xff; } else if (IS_1240(isp)) { sdp->isp_devparam[tgt].sync_offset = ISP_20M_SYNCPARMS >> 8; sdp->isp_devparam[tgt].sync_period = ISP_20M_SYNCPARMS & 0xff; } else { sdp->isp_devparam[tgt].sync_offset = ISP_20M_SYNCPARMS_1040 >> 8; sdp->isp_devparam[tgt].sync_period = ISP_20M_SYNCPARMS_1040 & 0xff; } /* * Don't get current target parameters if we've been * told not to use NVRAM- it's really the same thing. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; mbs.param[1] = tgt << 8; isp_mboxcmd(isp, &mbs, MBLOGALL); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } sdp->isp_devparam[tgt].cur_dflags = mbs.param[2]; sdp->isp_devparam[tgt].dev_flags = mbs.param[2]; sdp->isp_devparam[tgt].cur_period = mbs.param[3] & 0xff; sdp->isp_devparam[tgt].cur_offset = mbs.param[3] >> 8; /* * The maximum period we can really see * here is 100 (decimal), or 400 ns. * For some unknown reason we sometimes * get back wildass numbers from the * boot device's parameters (alpha only). 
*/ if ((mbs.param[3] & 0xff) <= 0x64) { sdp->isp_devparam[tgt].sync_period = mbs.param[3] & 0xff; sdp->isp_devparam[tgt].sync_offset = mbs.param[3] >> 8; } /* * It is not safe to run Ultra Mode with a clock < 60. */ if (((isp->isp_clock && isp->isp_clock < 60) || (isp->isp_type < ISP_HA_SCSI_1020A)) && (sdp->isp_devparam[tgt].sync_period <= (ISP_20M_SYNCPARMS & 0xff))) { sdp->isp_devparam[tgt].sync_offset = ISP_10M_SYNCPARMS >> 8; sdp->isp_devparam[tgt].sync_period = ISP_10M_SYNCPARMS & 0xff; } } isp_prt(isp, ISP_LOGDEBUG0, "Initial bus %d tgt %d flags %x offset %x period %x", channel, tgt, sdp->isp_devparam[tgt].dev_flags, sdp->isp_devparam[tgt].sync_offset, sdp->isp_devparam[tgt].sync_period); } } /* * Re-initialize the ISP and complete all orphaned commands * with a 'botched' notice. The reset/init routines should * not disturb an already active list of commands. * * Locks held prior to coming here. */ void isp_reinit(struct ispsoftc *isp) { XS_T *xs; u_int16_t handle; isp_reset(isp); if (isp->isp_state != ISP_RESETSTATE) { isp_prt(isp, ISP_LOGERR, "isp_reinit cannot reset card"); goto skip; } isp_init(isp); if (isp->isp_role == ISP_ROLE_NONE) { goto skip; } if (isp->isp_state == ISP_INITSTATE) { isp->isp_state = ISP_RUNSTATE; } if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "isp_reinit cannot restart card"); } skip: isp->isp_nactive = 0; for (handle = 1; (int) handle <= isp->isp_maxcmds; handle++) { xs = isp_find_xs(isp, handle); if (xs == NULL) { continue; } isp_destroy_handle(isp, handle); if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, handle); XS_RESID(xs) = XS_XFRLEN(xs); } else { XS_RESID(xs) = 0; } XS_SETERR(xs, HBA_BUSRESET); isp_done(xs); } } /* * NVRAM Routines */ static int isp_read_nvram(struct ispsoftc *isp) { int i, amt; u_int8_t csum, minversion; union { u_int8_t _x[ISP2100_NVRAM_SIZE]; u_int16_t _s[ISP2100_NVRAM_SIZE>>1]; } _n; #define nvram_data _n._x #define nvram_words _n._s if (IS_FC(isp)) { amt = ISP2100_NVRAM_SIZE; minversion = 
1; } else if (IS_ULTRA2(isp)) { amt = ISP1080_NVRAM_SIZE; minversion = 0; } else { amt = ISP_NVRAM_SIZE; minversion = 2; } /* * Just read the first two words first to see if we have a valid * NVRAM to continue reading the rest with. */ for (i = 0; i < 2; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { if (isp->isp_bustype != ISP_BT_SBUS) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header"); isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]); } return (-1); } for (i = 2; i < amt>>1; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } for (csum = 0, i = 0; i < amt; i++) { csum += nvram_data[i]; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); return (-1); } if (ISP_NVRAM_VERSION(nvram_data) < minversion) { isp_prt(isp, ISP_LOGWARN, "version %d NVRAM not understood", ISP_NVRAM_VERSION(nvram_data)); return (-1); } if (IS_ULTRA3(isp)) { isp_parse_nvram_12160(isp, 0, nvram_data); isp_parse_nvram_12160(isp, 1, nvram_data); } else if (IS_1080(isp)) { isp_parse_nvram_1080(isp, 0, nvram_data); } else if (IS_1280(isp) || IS_1240(isp)) { isp_parse_nvram_1080(isp, 0, nvram_data); isp_parse_nvram_1080(isp, 1, nvram_data); } else if (IS_SCSI(isp)) { isp_parse_nvram_1020(isp, nvram_data); } else { isp_parse_nvram_2100(isp, nvram_data); } return (0); #undef nvram_data #undef nvram_words } static void isp_rdnvram_word(struct ispsoftc *isp, int wo, u_int16_t *rp) { int i, cbits; u_int16_t bit, rqst; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); USEC_DELAY(2); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); USEC_DELAY(2); if (IS_FC(isp)) { wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else if (IS_ULTRA2(isp)) { wo &= ((ISP1080_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else { wo &= ((ISP_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 6) | wo; cbits = 8; } /* * Clock 
the word select request out... */ for (i = cbits; i >= 0; i--) { if ((rqst >> i) & 1) { bit = BIU_NVRAM_SELECT | BIU_NVRAM_DATAOUT; } else { bit = BIU_NVRAM_SELECT; } ISP_WRITE(isp, BIU_NVRAM, bit); USEC_DELAY(2); ISP_WRITE(isp, BIU_NVRAM, bit | BIU_NVRAM_CLOCK); USEC_DELAY(2); ISP_WRITE(isp, BIU_NVRAM, bit); USEC_DELAY(2); } /* * Now read the result back in (bits come back in MSB format). */ *rp = 0; for (i = 0; i < 16; i++) { u_int16_t rv; *rp <<= 1; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); USEC_DELAY(2); rv = ISP_READ(isp, BIU_NVRAM); if (rv & BIU_NVRAM_DATAIN) { *rp |= 1; } USEC_DELAY(2); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); USEC_DELAY(2); } ISP_WRITE(isp, BIU_NVRAM, 0); USEC_DELAY(2); ISP_SWIZZLE_NVRAM_WORD(isp, rp); } static void isp_parse_nvram_1020(struct ispsoftc *isp, u_int8_t *nvram_data) { int i; sdparam *sdp = (sdparam *) isp->isp_param; sdp->isp_fifo_threshold = ISP_NVRAM_FIFO_THRESHOLD(nvram_data) | (ISP_NVRAM_FIFO_THRESHOLD_128(nvram_data) << 2); sdp->isp_initiator_id = ISP_NVRAM_INITIATOR_ID(nvram_data); sdp->isp_bus_reset_delay = ISP_NVRAM_BUS_RESET_DELAY(nvram_data); sdp->isp_retry_count = ISP_NVRAM_BUS_RETRY_COUNT(nvram_data); sdp->isp_retry_delay = ISP_NVRAM_BUS_RETRY_DELAY(nvram_data); sdp->isp_async_data_setup = ISP_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data); if (isp->isp_type >= ISP_HA_SCSI_1040) { if (sdp->isp_async_data_setup < 9) { sdp->isp_async_data_setup = 9; } } else { if (sdp->isp_async_data_setup != 6) { sdp->isp_async_data_setup = 6; } } sdp->isp_req_ack_active_neg = ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data); sdp->isp_data_line_active_neg = ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data); sdp->isp_data_dma_burst_enabl = ISP_NVRAM_DATA_DMA_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP_NVRAM_CMD_DMA_BURST_ENABLE(nvram_data); sdp->isp_tag_aging = ISP_NVRAM_TAG_AGE_LIMIT(nvram_data); sdp->isp_selection_timeout = ISP_NVRAM_SELECTION_TIMEOUT(nvram_data); sdp->isp_max_queue_depth = 
ISP_NVRAM_MAX_QUEUE_DEPTH(nvram_data); sdp->isp_fast_mttr = ISP_NVRAM_FAST_MTTR_ENABLE(nvram_data); for (i = 0; i < MAX_TARGETS; i++) { sdp->isp_devparam[i].dev_enable = ISP_NVRAM_TGT_DEVICE_ENABLE(nvram_data, i); sdp->isp_devparam[i].exc_throttle = ISP_NVRAM_TGT_EXEC_THROTTLE(nvram_data, i); sdp->isp_devparam[i].sync_offset = ISP_NVRAM_TGT_SYNC_OFFSET(nvram_data, i); sdp->isp_devparam[i].sync_period = ISP_NVRAM_TGT_SYNC_PERIOD(nvram_data, i); if (isp->isp_type < ISP_HA_SCSI_1040) { /* * If we're not ultra, we can't possibly * be a shorter period than this. */ if (sdp->isp_devparam[i].sync_period < 0x19) { sdp->isp_devparam[i].sync_period = 0x19; } if (sdp->isp_devparam[i].sync_offset > 0xc) { sdp->isp_devparam[i].sync_offset = 0x0c; } } else { if (sdp->isp_devparam[i].sync_offset > 0x8) { sdp->isp_devparam[i].sync_offset = 0x8; } } sdp->isp_devparam[i].dev_flags = 0; if (ISP_NVRAM_TGT_RENEG(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_RENEG; sdp->isp_devparam[i].dev_flags |= DPARM_ARQ; if (ISP_NVRAM_TGT_TQING(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_TQING; if (ISP_NVRAM_TGT_SYNC(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_SYNC; if (ISP_NVRAM_TGT_WIDE(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_WIDE; if (ISP_NVRAM_TGT_PARITY(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_PARITY; if (ISP_NVRAM_TGT_DISC(nvram_data, i)) sdp->isp_devparam[i].dev_flags |= DPARM_DISC; sdp->isp_devparam[i].cur_dflags = 0; /* we don't know */ } } static void isp_parse_nvram_1080(struct ispsoftc *isp, int bus, u_int8_t *nvram_data) { int i; sdparam *sdp = (sdparam *) isp->isp_param; sdp += bus; sdp->isp_fifo_threshold = ISP1080_NVRAM_FIFO_THRESHOLD(nvram_data); sdp->isp_initiator_id = ISP1080_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP1080_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP1080_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = 
ISP1080_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP1080_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP1080_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (i = 0; i < MAX_TARGETS; i++) { sdp->isp_devparam[i].dev_enable = ISP1080_NVRAM_TGT_DEVICE_ENABLE(nvram_data, i, bus); sdp->isp_devparam[i].exc_throttle = ISP1080_NVRAM_TGT_EXEC_THROTTLE(nvram_data, i, bus); sdp->isp_devparam[i].sync_offset = ISP1080_NVRAM_TGT_SYNC_OFFSET(nvram_data, i, bus); sdp->isp_devparam[i].sync_period = ISP1080_NVRAM_TGT_SYNC_PERIOD(nvram_data, i, bus); sdp->isp_devparam[i].dev_flags = 0; if (ISP1080_NVRAM_TGT_RENEG(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_RENEG; sdp->isp_devparam[i].dev_flags |= DPARM_ARQ; if (ISP1080_NVRAM_TGT_TQING(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_TQING; if (ISP1080_NVRAM_TGT_SYNC(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_SYNC; if (ISP1080_NVRAM_TGT_WIDE(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_WIDE; if (ISP1080_NVRAM_TGT_PARITY(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_PARITY; if (ISP1080_NVRAM_TGT_DISC(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_DISC; sdp->isp_devparam[i].cur_dflags = 0; } } static void isp_parse_nvram_12160(struct ispsoftc *isp, int bus, u_int8_t *nvram_data) { sdparam *sdp = (sdparam *) isp->isp_param; int i; sdp += bus; sdp->isp_fifo_threshold = ISP12160_NVRAM_FIFO_THRESHOLD(nvram_data); sdp->isp_initiator_id = ISP12160_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = 
ISP12160_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP12160_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP12160_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP12160_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP12160_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (i = 0; i < MAX_TARGETS; i++) { sdp->isp_devparam[i].dev_enable = ISP12160_NVRAM_TGT_DEVICE_ENABLE(nvram_data, i, bus); sdp->isp_devparam[i].exc_throttle = ISP12160_NVRAM_TGT_EXEC_THROTTLE(nvram_data, i, bus); sdp->isp_devparam[i].sync_offset = ISP12160_NVRAM_TGT_SYNC_OFFSET(nvram_data, i, bus); sdp->isp_devparam[i].sync_period = ISP12160_NVRAM_TGT_SYNC_PERIOD(nvram_data, i, bus); sdp->isp_devparam[i].dev_flags = 0; if (ISP12160_NVRAM_TGT_RENEG(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_RENEG; sdp->isp_devparam[i].dev_flags |= DPARM_ARQ; if (ISP12160_NVRAM_TGT_TQING(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_TQING; if (ISP12160_NVRAM_TGT_SYNC(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_SYNC; if (ISP12160_NVRAM_TGT_WIDE(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_WIDE; if (ISP12160_NVRAM_TGT_PARITY(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_PARITY; if (ISP12160_NVRAM_TGT_DISC(nvram_data, i, bus)) sdp->isp_devparam[i].dev_flags |= DPARM_DISC; sdp->isp_devparam[i].cur_dflags = 0; } } static void isp_parse_nvram_2100(struct ispsoftc *isp, u_int8_t *nvram_data) { fcparam *fcp = (fcparam *) isp->isp_param; u_int64_t wwn; /* * There is 
NVRAM storage for both Port and Node entities- * but the Node entity appears to be unused on all the cards * I can find. However, we should account for this being set * at some point in the future. * * Qlogic WWNs have an NAA of 2, but usually nothing shows up in * bits 48..60. In the case of the 2202, it appears that they do * use bit 48 to distinguish between the two instances on the card. * The 2204, which I've never seen, *probably* extends this method. */ wwn = ISP2100_NVRAM_PORT_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Port WWN 0x%08x%08x", (u_int32_t) (wwn >> 32), (u_int32_t) (wwn & 0xffffffff)); if ((wwn >> 60) == 0) { wwn |= (((u_int64_t) 2)<< 60); } } fcp->isp_portwwn = wwn; wwn = ISP2100_NVRAM_NODE_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Node WWN 0x%08x%08x", (u_int32_t) (wwn >> 32), (u_int32_t) (wwn & 0xffffffff)); if ((wwn >> 60) == 0) { wwn |= (((u_int64_t) 2)<< 60); } } fcp->isp_nodewwn = wwn; /* * Make sure we have both Node and Port as non-zero values. */ if (fcp->isp_nodewwn != 0 && fcp->isp_portwwn == 0) { fcp->isp_portwwn = fcp->isp_nodewwn; } else if (fcp->isp_nodewwn == 0 && fcp->isp_portwwn != 0) { fcp->isp_nodewwn = fcp->isp_portwwn; } /* * Make the Node and Port values sane if they're NAA == 2. * This means to clear bits 48..56 for the Node WWN and * make sure that there's some non-zero value in 48..56 * for the Port WWN. 
*/ if (fcp->isp_nodewwn && fcp->isp_portwwn) { if ((fcp->isp_nodewwn & (((u_int64_t) 0xfff) << 48)) != 0 && (fcp->isp_nodewwn >> 60) == 2) { fcp->isp_nodewwn &= ~((u_int64_t) 0xfff << 48); } if ((fcp->isp_portwwn & (((u_int64_t) 0xfff) << 48)) == 0 && (fcp->isp_portwwn >> 60) == 2) { fcp->isp_portwwn |= ((u_int64_t) 1 << 56); } } fcp->isp_maxalloc = ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data); fcp->isp_maxfrmlen = ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); fcp->isp_retry_delay = ISP2100_NVRAM_RETRY_DELAY(nvram_data); fcp->isp_retry_count = ISP2100_NVRAM_RETRY_COUNT(nvram_data); fcp->isp_loopid = ISP2100_NVRAM_HARDLOOPID(nvram_data); fcp->isp_execthrottle = ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data); fcp->isp_fwoptions = ISP2100_NVRAM_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "fwoptions from nvram are 0x%x", fcp->isp_fwoptions); } diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c index 2a11c54a591b..008da7b84a11 100644 --- a/sys/dev/isp/isp_freebsd.c +++ b/sys/dev/isp/isp_freebsd.c @@ -1,2366 +1,2632 @@ /* $FreeBSD$ */ /* * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. * * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include +#include +#include #include /* for use by isp_prt below */ +#include +#include +#include +static d_ioctl_t ispioctl; static void isp_intr_enable(void *); static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *); static void isp_poll(struct cam_sim *); +#if 0 static void isp_relsim(void *); +#endif static timeout_t isp_watchdog; +static void isp_kthread(void *); static void isp_action(struct cam_sim *, union ccb *); +#define ISP_CDEV_MAJOR 248 +static struct cdevsw isp_cdevsw = { + /* open */ nullopen, + /* close */ nullclose, + /* read */ noread, + /* write */ nowrite, + /* ioctl */ ispioctl, + /* poll */ nopoll, + /* mmap */ nommap, + /* strategy */ nostrategy, + /* name */ "isp", + /* maj */ ISP_CDEV_MAJOR, + /* dump */ nodump, + /* psize */ nopsize, + /* flags */ D_TAPE, +}; + static struct ispsoftc *isplist = NULL; void isp_attach(struct ispsoftc *isp) { int primary, secondary; struct ccb_setasync csa; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; /* * Establish (in case of 12X0) which bus is the primary. */ primary = 0; secondary = 1; /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(isp->isp_maxcmds); if (devq == NULL) { return; } /* * Construct our SIM entry. 
*/ sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); if (sim == NULL) { cam_simq_free(devq); return; } isp->isp_osinfo.ehook.ich_func = isp_intr_enable; isp->isp_osinfo.ehook.ich_arg = isp; if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { isp_prt(isp, ISP_LOGERR, "could not establish interrupt enable hook"); cam_sim_free(sim, TRUE); return; } if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { cam_sim_free(sim, TRUE); return; } if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, TRUE); + config_intrhook_disestablish(&isp->isp_osinfo.ehook); return; } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); isp->isp_sim = sim; isp->isp_path = path; + /* + * Create a kernel thread for fibre channel instances. We + * don't have dual channel FC cards. + */ + if (IS_FC(isp)) { + cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); + if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, + RFHIGHPID, "%s: fc_thrd", + device_get_nameunit(isp->isp_dev))) { + isp_prt(isp, ISP_LOGERR, "could not create kthread"); + xpt_bus_deregister(cam_sim_path(sim)); + cam_sim_free(sim, TRUE); + config_intrhook_disestablish(&isp->isp_osinfo.ehook); + return; + } + } + /* * If we have a second channel, construct SIM entry for that. 
*/ if (IS_DUALBUS(isp)) { sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); if (sim == NULL) { xpt_bus_deregister(cam_sim_path(isp->isp_sim)); xpt_free_path(isp->isp_path); cam_simq_free(devq); + config_intrhook_disestablish(&isp->isp_osinfo.ehook); return; } if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { xpt_bus_deregister(cam_sim_path(isp->isp_sim)); xpt_free_path(isp->isp_path); cam_sim_free(sim, TRUE); + config_intrhook_disestablish(&isp->isp_osinfo.ehook); return; } if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(isp->isp_sim)); xpt_free_path(isp->isp_path); xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, TRUE); + config_intrhook_disestablish(&isp->isp_osinfo.ehook); return; } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); isp->isp_sim2 = sim; isp->isp_path2 = path; } + + /* + * Create device nodes + */ + (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, + GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); + if (isp->isp_role != ISP_ROLE_NONE) { isp->isp_state = ISP_RUNSTATE; ENABLE_INTS(isp); } if (isplist == NULL) { isplist = isp; } else { struct ispsoftc *tmp = isplist; while (tmp->isp_osinfo.next) { tmp = tmp->isp_osinfo.next; } tmp->isp_osinfo.next = isp; } + +} + +static int +ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) +{ + struct ispsoftc *isp; + int retval = ENOTTY; + + isp = isplist; + while (isp) { + if (minor(dev) == device_get_unit(isp->isp_dev)) { + break; + } + isp = isp->isp_osinfo.next; + } + if (isp == NULL) + return (ENXIO); + + switch (cmd) { + case ISP_SDBLEV: + { + int olddblev = isp->isp_dblev; + isp->isp_dblev = *(int *)addr; + *(int *)addr = olddblev; + retval = 0; 
+ break; + } + case ISP_RESETHBA: + ISP_LOCK(isp); + isp_reinit(isp); + ISP_UNLOCK(isp); + retval = 0; + break; + case ISP_FC_RESCAN: + if (IS_FC(isp)) { + ISP_LOCK(isp); + if (isp_fc_runstate(isp, 5 * 1000000)) { + retval = EIO; + } else { + retval = 0; + } + ISP_UNLOCK(isp); + } + break; + case ISP_FC_LIP: + if (IS_FC(isp)) { + ISP_LOCK(isp); + if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { + retval = EIO; + } else { + retval = 0; + } + ISP_UNLOCK(isp); + } + break; + case ISP_FC_GETDINFO: + { + struct isp_fc_device *ifc = (struct isp_fc_device *) addr; + struct lportdb *lp; + + if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { + retval = EINVAL; + break; + } + ISP_LOCK(isp); + lp = &FCPARAM(isp)->portdb[ifc->loopid]; + if (lp->valid) { + ifc->loopid = lp->loopid; + ifc->portid = lp->portid; + ifc->node_wwn = lp->node_wwn; + ifc->port_wwn = lp->port_wwn; + retval = 0; + } else { + retval = ENODEV; + } + ISP_UNLOCK(isp); + break; + } + default: + break; + } + return (retval); } static void isp_intr_enable(void *arg) { struct ispsoftc *isp = arg; if (isp->isp_role != ISP_ROLE_NONE) { ENABLE_INTS(isp); isp->isp_osinfo.intsok = 1; } /* Release our hook so that the boot can continue. 
*/ config_intrhook_disestablish(&isp->isp_osinfo.ehook); } /* * Put the target mode functions here, because some are inlines */ #ifdef ISP_TARGET_MODE static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t); static __inline int are_any_luns_enabled(struct ispsoftc *, int); static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t); static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *); static __inline int isp_psema_sig_rqe(struct ispsoftc *); static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int); static __inline void isp_cv_signal_rqe(struct ispsoftc *, int); static __inline void isp_vsema_rqe(struct ispsoftc *); static cam_status create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **); static void destroy_lun_state(struct ispsoftc *, tstate_t *); static void isp_en_lun(struct ispsoftc *, union ccb *); static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *); static timeout_t isp_refire_putback_atio; static void isp_complete_ctio(union ccb *); static void isp_target_putback_atio(union ccb *); static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *); static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *); static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *); static int isp_handle_platform_ctio(struct ispsoftc *, void *); static __inline int is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun) { tstate_t *tptr; - ISP_LOCK(isp); tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; if (tptr == NULL) { ISP_UNLOCK(isp); return (0); } do { if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { ISP_UNLOCK(isp); return (1); } } while ((tptr = tptr->next) != NULL); - ISP_UNLOCK(isp); return (0); } static __inline int are_any_luns_enabled(struct ispsoftc *isp, int port) { int lo, hi; if (IS_DUALBUS(isp)) { lo = (port * (LUN_HASH_SIZE >> 1)); hi = lo + (LUN_HASH_SIZE >> 1); } else { lo = 0; hi = LUN_HASH_SIZE; } for (lo = 0; lo < hi; 
lo++) { if (isp->isp_osinfo.lun_hash[lo]) { return (1); } } return (0); } static __inline tstate_t * get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun) { tstate_t *tptr; - ISP_LOCK(isp); if (lun == CAM_LUN_WILDCARD) { tptr = &isp->isp_osinfo.tsdflt[bus]; tptr->hold++; - ISP_UNLOCK(isp); return (tptr); } else { tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; } if (tptr == NULL) { - ISP_UNLOCK(isp); return (NULL); } do { if (tptr->lun == lun && tptr->bus == bus) { tptr->hold++; - ISP_UNLOCK(isp); return (tptr); } } while ((tptr = tptr->next) != NULL); - ISP_UNLOCK(isp); return (tptr); } static __inline void rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr) { if (tptr->hold) tptr->hold--; } static __inline int isp_psema_sig_rqe(struct ispsoftc *isp) { - ISP_LOCK(isp); while (isp->isp_osinfo.tmflags & TM_BUSY) { isp->isp_osinfo.tmflags |= TM_WANTED; if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) { - ISP_UNLOCK(isp); return (-1); } isp->isp_osinfo.tmflags |= TM_BUSY; } - ISP_UNLOCK(isp); return (0); } static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo) { - ISP_LOCK(isp); if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) { ISP_UNLOCK(isp); return (-1); } - ISP_UNLOCK(isp); return (0); } static __inline void isp_cv_signal_rqe(struct ispsoftc *isp, int status) { isp->isp_osinfo.rstatus = status; wakeup(&isp->isp_osinfo.rstatus); } static __inline void isp_vsema_rqe(struct ispsoftc *isp) { - ISP_LOCK(isp); if (isp->isp_osinfo.tmflags & TM_WANTED) { isp->isp_osinfo.tmflags &= ~TM_WANTED; wakeup(&isp->isp_osinfo.tmflags); } isp->isp_osinfo.tmflags &= ~TM_BUSY; - ISP_UNLOCK(isp); } static cam_status create_lun_state(struct ispsoftc *isp, int bus, struct cam_path *path, tstate_t **rslt) { cam_status status; lun_id_t lun; int hfx; tstate_t *tptr, *new; lun = xpt_path_lun_id(path); if (lun < 0) { return (CAM_LUN_INVALID); } if (is_lun_enabled(isp, bus, lun)) { return (CAM_LUN_ALRDY_ENA); } new = (tstate_t *) 
malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (new == NULL) { return (CAM_RESRC_UNAVAIL); } status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), xpt_path_target_id(path), xpt_path_lun_id(path)); if (status != CAM_REQ_CMP) { free(new, M_DEVBUF); return (status); } new->bus = bus; new->lun = lun; SLIST_INIT(&new->atios); SLIST_INIT(&new->inots); new->hold = 1; hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); - ISP_LOCK(isp); tptr = isp->isp_osinfo.lun_hash[hfx]; if (tptr == NULL) { isp->isp_osinfo.lun_hash[hfx] = new; } else { while (tptr->next) tptr = tptr->next; tptr->next = new; } - ISP_UNLOCK(isp); *rslt = new; return (CAM_REQ_CMP); } static __inline void destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr) { int hfx; tstate_t *lw, *pw; hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); - ISP_LOCK(isp); if (tptr->hold) { - ISP_UNLOCK(isp); return; } pw = isp->isp_osinfo.lun_hash[hfx]; if (pw == NULL) { - ISP_UNLOCK(isp); return; } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { isp->isp_osinfo.lun_hash[hfx] = pw->next; } else { lw = pw; pw = lw->next; while (pw) { if (pw->lun == tptr->lun && pw->bus == tptr->bus) { lw->next = pw->next; break; } lw = pw; pw = pw->next; } if (pw == NULL) { ISP_UNLOCK(isp); return; } } free(tptr, M_DEVBUF); - ISP_UNLOCK(isp); } +/* + * we enter with our locks held. + */ static void isp_en_lun(struct ispsoftc *isp, union ccb *ccb) { const char lfmt[] = "Lun now %sabled for target mode on channel %d"; struct ccb_en_lun *cel = &ccb->cel; tstate_t *tptr; u_int16_t rstat; - int bus, frozen = 0; + int bus, cmd, frozen = 0; lun_id_t lun; target_id_t tgt; bus = XS_CHANNEL(ccb) & 0x1; tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; /* * Do some sanity checking first. 
*/ if ((lun != CAM_LUN_WILDCARD) && (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } if (IS_SCSI(isp)) { sdparam *sdp = isp->isp_param; sdp += bus; if (tgt != CAM_TARGET_WILDCARD && tgt != sdp->isp_initiator_id) { ccb->ccb_h.status = CAM_TID_INVALID; return; } } else { if (tgt != CAM_TARGET_WILDCARD && tgt != FCPARAM(isp)->isp_iid) { ccb->ccb_h.status = CAM_TID_INVALID; return; } } if (tgt == CAM_TARGET_WILDCARD) { if (lun != CAM_LUN_WILDCARD) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } } /* * If Fibre Channel, stop and drain all activity to this bus. */ #if 0 if (IS_FC(isp)) { ISP_LOCK(isp); frozen = 1; xpt_freeze_simq(isp->isp_sim, 1); isp->isp_osinfo.drain = 1; while (isp->isp_osinfo.drain) { - (void) msleep(&isp->isp_osinfo.drain, - &isp->isp_osinfo.lock, PRIBIO, - "ispdrain", 10 * hz); + (void) msleep(&isp->isp_osinfo.drain, &isp->isp_lock, + PRIBIO, "ispdrain", 10 * hz); } ISP_UNLOCK(isp); } #endif /* * Check to see if we're enabling on fibre channel and * don't yet have a notion of who the heck we are (no * loop yet). */ if (IS_FC(isp) && cel->enable && (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) { fcparam *fcp = isp->isp_param; int rv; - ISP_LOCK(isp); rv = isp_fc_runstate(isp, 2 * 1000000); - ISP_UNLOCK(isp); if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate != LOOP_READY) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGWARN, "could not get a good port database read"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } } /* * Next check to see whether this is a target/lun wildcard action. * * If so, we enable/disable target mode but don't do any lun enabling. 
*/ if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) { int av = bus << 31; tptr = &isp->isp_osinfo.tsdflt[bus]; if (cel->enable) { if (isp->isp_osinfo.tmflags & (1 << bus)) { ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } ccb->ccb_h.status = xpt_create_path(&tptr->owner, NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (ccb->ccb_h.status != CAM_REQ_CMP) { - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } SLIST_INIT(&tptr->atios); SLIST_INIT(&tptr->inots); av |= ENABLE_TARGET_FLAG; - ISP_LOCK(isp); av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); if (av) { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_free_path(tptr->owner); - ISP_UNLOCK(isp); - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } isp->isp_osinfo.tmflags |= (1 << bus); - ISP_UNLOCK(isp); } else { if ((isp->isp_osinfo.tmflags & (1 << bus)) == 0) { ccb->ccb_h.status = CAM_LUN_INVALID; - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } if (are_any_luns_enabled(isp, bus)) { ccb->ccb_h.status = CAM_SCSI_BUSY; - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } - ISP_LOCK(isp); av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); if (av) { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; - ISP_UNLOCK(isp); - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } isp->isp_osinfo.tmflags &= ~(1 << bus); - ISP_UNLOCK(isp); ccb->ccb_h.status = CAM_REQ_CMP; } xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGINFO, "Target Mode %sabled on channel 
%d", (cel->enable) ? "en" : "dis", bus); - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } return; } /* * We can move along now... */ - if (frozen) + if (frozen) { + ISPLOCK_2_CAMLOCK(isp); xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } if (cel->enable) { ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); if (ccb->ccb_h.status != CAM_REQ_CMP) { return; } } else { tptr = get_lun_statep(isp, bus, lun); if (tptr == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } } if (isp_psema_sig_rqe(isp)) { rls_lun_statep(isp, tptr); if (cel->enable) destroy_lun_state(isp, tptr); ccb->ccb_h.status = CAM_REQ_CMP_ERR; return; } - ISP_LOCK(isp); if (cel->enable) { u_int32_t seq = isp->isp_osinfo.rollinfo++; + int c, n, ulun = lun; + + cmd = RQSTYPE_ENABLE_LUN; + c = DFLT_CMND_CNT; + n = DFLT_INOT_CNT; + if (IS_FC(isp) && lun != 0) { + cmd = RQSTYPE_MODIFY_LUN; + n = 0; + /* + * For SCC firmware, we only deal with setting + * (enabling or modifying) lun 0. 
+ */ + ulun = 0; + } rstat = LUN_ERR; - if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) { + if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed"); goto out; } if (isp_cv_wait_timed_rqe(isp, 30 * hz)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, - "wait for ENABLE LUN timed out"); + "wait for ENABLE/MODIFY LUN timed out"); goto out; } rstat = isp->isp_osinfo.rstatus; if (rstat != LUN_OK) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, - "ENABLE LUN returned 0x%x", rstat); + "ENABLE/MODIFY LUN returned 0x%x", rstat); goto out; } } else { + int c, n, ulun = lun; u_int32_t seq; - seq = isp->isp_osinfo.rollinfo++; rstat = LUN_ERR; + seq = isp->isp_osinfo.rollinfo++; + cmd = -RQSTYPE_MODIFY_LUN; - if (isp_lun_cmd(isp, -RQSTYPE_MODIFY_LUN, bus, tgt, lun, seq)) { + c = DFLT_CMND_CNT; + n = DFLT_INOT_CNT; + if (IS_FC(isp) && lun != 0) { + n = 0; + /* + * For SCC firmware, we only deal with setting + * (enabling or modifying) lun 0. 
+ */ + ulun = 0; + } + if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); goto out; } if (isp_cv_wait_timed_rqe(isp, 30 * hz)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, "wait for MODIFY LUN timed out"); goto out; } rstat = isp->isp_osinfo.rstatus; if (rstat != LUN_OK) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, "MODIFY LUN returned 0x%x", rstat); goto out; } - rstat = LUN_ERR; + if (IS_FC(isp) && lun) { + goto out; + } + seq = isp->isp_osinfo.rollinfo++; - if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, bus, tgt, lun, seq)) { + rstat = LUN_ERR; + cmd = -RQSTYPE_ENABLE_LUN; + if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed"); goto out; } if (isp_cv_wait_timed_rqe(isp, 30 * hz)) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGERR, - "wait for ENABLE LUN timed out"); + "wait for DISABLE LUN timed out"); goto out; } rstat = isp->isp_osinfo.rstatus; if (rstat != LUN_OK) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGWARN, - "ENABLE LUN returned 0x%x", rstat); + "DISABLE LUN returned 0x%x", rstat); goto out; } } out: isp_vsema_rqe(isp); - ISP_UNLOCK(isp); if (rstat != LUN_OK) { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGWARN, "lun %sable failed", (cel->enable) ? "en" : "dis"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; rls_lun_statep(isp, tptr); if (cel->enable) destroy_lun_state(isp, tptr); } else { xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGINFO, lfmt, (cel->enable) ? 
"en" : "dis", bus); rls_lun_statep(isp, tptr); if (cel->enable == 0) { destroy_lun_state(isp, tptr); } ccb->ccb_h.status = CAM_REQ_CMP; } } static cam_status isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb) { tstate_t *tptr; struct ccb_hdr_slist *lp; struct ccb_hdr *curelm; int found; union ccb *accb = ccb->cab.abort_ccb; if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { if (IS_FC(isp) && (accb->ccb_h.target_id != ((fcparam *) isp->isp_param)->isp_loopid)) { return (CAM_PATH_INVALID); } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != ((sdparam *) isp->isp_param)->isp_initiator_id)) { return (CAM_PATH_INVALID); } } tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun); if (tptr == NULL) { return (CAM_PATH_INVALID); } if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &tptr->atios; } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { lp = &tptr->inots; } else { rls_lun_statep(isp, tptr); return (CAM_UA_ABORT); } curelm = SLIST_FIRST(lp); found = 0; if (curelm == &accb->ccb_h) { found = 1; SLIST_REMOVE_HEAD(lp, sim_links.sle); } else { while(curelm != NULL) { struct ccb_hdr *nextelm; nextelm = SLIST_NEXT(curelm, sim_links.sle); if (nextelm == &accb->ccb_h) { found = 1; SLIST_NEXT(curelm, sim_links.sle) = SLIST_NEXT(nextelm, sim_links.sle); break; } curelm = nextelm; } } rls_lun_statep(isp, tptr); if (found) { accb->ccb_h.status = CAM_REQ_ABORTED; return (CAM_REQ_CMP); } return(CAM_PATH_INVALID); } static cam_status isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb) { void *qe; struct ccb_scsiio *cso = &ccb->csio; u_int16_t *hp, save_handle; u_int16_t iptr, optr; if (isp_getrqentry(isp, &iptr, &optr, &qe)) { xpt_print_path(ccb->ccb_h.path); printf("Request Queue Overflow in isp_target_start_ctio\n"); return (CAM_RESRC_UNAVAIL); } bzero(qe, QENTRY_LEN); /* * We're either moving data or completing a command here. 
*/ if (IS_FC(isp)) { struct ccb_accept_tio *atiop; ct2_entry_t *cto = qe; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_iid = cso->init_id; if (isp->isp_maxluns <= 16) { cto->ct_lun = ccb->ccb_h.target_lun; } /* * Start with a residual based on what the original datalength * was supposed to be. Basically, we ignore what CAM has set * for residuals. The data transfer routines will knock off * the residual for each byte actually moved- and also will * be responsible for setting the underrun flag. */ /* HACK! HACK! */ if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) { cto->ct_resid = atiop->ccb_h.spriv_field0; } cto->ct_rxid = cso->tag_id; if (cso->dxfer_len == 0) { cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA; if (ccb->ccb_h.flags & CAM_SEND_STATUS) { cto->ct_flags |= CT2_SENDSTATUS; cto->rsp.m1.ct_scsi_status = cso->scsi_status; } if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) { int m = min(cso->sense_len, MAXRESPLEN); bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m); cto->rsp.m1.ct_senselen = m; cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; } } else { cto->ct_flags |= CT2_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { cto->ct_flags |= CT2_SENDSTATUS; cto->rsp.m0.ct_scsi_status = cso->scsi_status; } /* * If we're sending data and status back together, * we can't also send back sense data as well. 
*/ ccb->ccb_h.flags &= ~CAM_SEND_SENSE; } if (cto->ct_flags & CT2_SENDSTATUS) { isp_prt(isp, ISP_LOGTDEBUG1, "CTIO2[%x] SCSI STATUS 0x%x datalength %u", cto->ct_rxid, cso->scsi_status, cto->ct_resid); } if (cto->ct_flags & CT2_SENDSTATUS) cto->ct_flags |= CT2_CCINCR; cto->ct_timeout = 10; hp = &cto->ct_syshandle; } else { ct_entry_t *cto = qe; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; cto->ct_iid = cso->init_id; cto->ct_iid |= XS_CHANNEL(ccb) << 7; cto->ct_tgt = ccb->ccb_h.target_id; cto->ct_lun = ccb->ccb_h.target_lun; cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id); if (AT_HAS_TAG(cso->tag_id)) { cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id); cto->ct_flags |= CT_TQAE; } if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { cto->ct_flags |= CT_NODISC; } if (cso->dxfer_len == 0) { cto->ct_flags |= CT_NO_DATA; } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT_DATA_IN; } else { cto->ct_flags |= CT_DATA_OUT; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { cto->ct_flags |= CT_SENDSTATUS; cto->ct_scsi_status = cso->scsi_status; cto->ct_resid = cso->resid; } if (cto->ct_flags & CT_SENDSTATUS) { isp_prt(isp, ISP_LOGTDEBUG1, "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x", cto->ct_fwhandle, cso->scsi_status, cso->resid, cso->tag_id); } cto->ct_timeout = 10; hp = &cto->ct_syshandle; ccb->ccb_h.flags &= ~CAM_SEND_SENSE; - if (cto->ct_flags & CT_SENDSTATUS) + if (cto->ct_flags & CT_SENDSTATUS) cto->ct_flags |= CT_CCINCR; } if (isp_save_xs(isp, (XS_T *)ccb, hp)) { xpt_print_path(ccb->ccb_h.path); printf("No XFLIST pointers for isp_target_start_ctio\n"); return (CAM_RESRC_UNAVAIL); } /* * Call the dma setup routines for this entry (and any subsequent * CTIOs) if there's data to move, and then tell the f/w it's got * new things to play with. As with isp_start's usage of DMA setup, * any swizzling is done in the machine dependent layer. 
Because * of this, we put the request onto the queue area first in native * format. */ save_handle = *hp; switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) { case CMD_QUEUED: ISP_ADD_REQUEST(isp, iptr); return (CAM_REQ_INPROG); case CMD_EAGAIN: ccb->ccb_h.status = CAM_RESRC_UNAVAIL; isp_destroy_handle(isp, save_handle); return (CAM_RESRC_UNAVAIL); default: isp_destroy_handle(isp, save_handle); return (XS_ERR(ccb)); } } static void isp_refire_putback_atio(void *arg) { int s = splcam(); isp_target_putback_atio(arg); splx(s); } static void isp_target_putback_atio(union ccb *ccb) { struct ispsoftc *isp; struct ccb_scsiio *cso; u_int16_t iptr, optr; void *qe; isp = XS_ISP(ccb); if (isp_getrqentry(isp, &iptr, &optr, &qe)) { (void) timeout(isp_refire_putback_atio, ccb, 10); isp_prt(isp, ISP_LOGWARN, "isp_target_putback_atio: Request Queue Overflow"); return; } bzero(qe, QENTRY_LEN); cso = &ccb->csio; if (IS_FC(isp)) { at2_entry_t *at = qe; at->at_header.rqs_entry_type = RQSTYPE_ATIO2; at->at_header.rqs_entry_count = 1; if (isp->isp_maxluns > 16) { at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; } else { at->at_lun = (uint8_t) ccb->ccb_h.target_lun; } at->at_status = CT_OK; at->at_rxid = cso->tag_id; ISP_SWIZ_ATIO2(isp, qe, qe); } else { at_entry_t *at = qe; at->at_header.rqs_entry_type = RQSTYPE_ATIO; at->at_header.rqs_entry_count = 1; at->at_iid = cso->init_id; at->at_iid |= XS_CHANNEL(ccb) << 7; at->at_tgt = cso->ccb_h.target_id; at->at_lun = cso->ccb_h.target_lun; at->at_status = CT_OK; at->at_tag_val = AT_GET_TAG(cso->tag_id); at->at_handle = AT_GET_HANDLE(cso->tag_id); ISP_SWIZ_ATIO(isp, qe, qe); } ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe); ISP_ADD_REQUEST(isp, iptr); isp_complete_ctio(ccb); } static void isp_complete_ctio(union ccb *ccb) { struct ispsoftc *isp = XS_ISP(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { ccb->ccb_h.status |= CAM_REQ_CMP; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if (isp->isp_osinfo.simqfrozen & 
SIMQFRZ_RESOURCE) { isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; if (isp->isp_osinfo.simqfrozen == 0) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq"); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } else { isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen"); } } else { isp_prt(isp, ISP_LOGWARN, "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen); } } xpt_done(ccb); } /* * Handle ATIO stuff that the generic code can't. * This means handling CDBs. */ static int isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep) { tstate_t *tptr; int status, bus; struct ccb_accept_tio *atiop; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, - * we're still connected on the SCSI bus - i.e. the initiator - * did not set DiscPriv in the identify message. We don't care - * about this so it's ignored. + * we're still connected on the SCSI bus. */ status = aep->at_status; if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { /* * Bus Phase Sequence error. We should have sense data * suggested by the f/w. I'm not sure quite yet what * to do about this for CAM. */ isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } if ((status & ~QLTM_SVALID) != AT_CDB) { - isp_prt(isp, - ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", + isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } - bus = aep->at_iid >> 7; - aep->at_iid &= 0x7f; + bus = GET_BUS_VAL(aep->at_iid); tptr = get_lun_statep(isp, bus, aep->at_lun); if (tptr == NULL) { tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); } if (tptr == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. 
We'll give back a BUSY status * instead. This works out okay because the only * time we should, in fact, get this, is in the * case that somebody configured us without the * blackhole driver, so they get what they deserve. */ isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a QUEUE FULL status * instead. This works out okay because the only time we * should, in fact, get this, is in the case that we've * run out of ATIOS. */ xpt_print_path(tptr->owner); isp_prt(isp, ISP_LOGWARN, "no ATIOS for lun %d from initiator %d on channel %d", - aep->at_lun, aep->at_iid, bus); + aep->at_lun, GET_IID_VAL(aep->at_iid), bus); rls_lun_statep(isp, tptr); if (aep->at_flags & AT_TQAE) isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); else isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); if (tptr == &isp->isp_osinfo.tsdflt[bus]) { atiop->ccb_h.target_id = aep->at_tgt; atiop->ccb_h.target_lun = aep->at_lun; } if (aep->at_flags & AT_NODISC) { atiop->ccb_h.flags = CAM_DIS_DISCONNECT; } else { atiop->ccb_h.flags = 0; } if (status & QLTM_SVALID) { size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); atiop->sense_len = amt; MEMCPY(&atiop->sense_data, aep->at_sense, amt); } else { atiop->sense_len = 0; } - atiop->init_id = aep->at_iid & 0x7f; + atiop->init_id = GET_IID_VAL(aep->at_iid); atiop->cdb_len = aep->at_cdblen; MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); atiop->ccb_h.status = CAM_CDB_RECVD; /* * Construct a tag 'id' based upon tag value (which may be 0..255) * and the handle (which we have to preserve). 
*/ AT_MAKE_TAGID(atiop->tag_id, aep); if (aep->at_flags & AT_TQAE) { atiop->tag_action = aep->at_tag_type; atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; } xpt_done((union ccb*)atiop); isp_prt(isp, ISP_LOGTDEBUG1, - "ATIO[%x] CDB=0x%x iid%d->lun%d tag 0x%x ttype 0x%x %s", - aep->at_handle, aep->at_cdb[0] & 0xff, aep->at_iid, aep->at_lun, - aep->at_tag_val & 0xff, aep->at_tag_type, - (aep->at_flags & AT_NODISC)? "nondisc" : "disconnecting"); + "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", + aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), + GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, + aep->at_tag_type, (aep->at_flags & AT_NODISC)? + "nondisc" : "disconnecting"); rls_lun_statep(isp, tptr); return (0); } static int isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep) { lun_id_t lun; tstate_t *tptr; struct ccb_accept_tio *atiop; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. */ if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } if (isp->isp_maxluns > 16) { lun = aep->at_scclun; } else { lun = aep->at_lun; } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); } if (tptr == NULL) { /* * What we'd like to know is whether or not we have a listener * upstream that really hasn't configured yet. If we do, then * we can give a more sensible reply here. If not, then we can * reject this out of hand. * * Choices for what to send were * * Not Ready, Unit Not Self-Configured Yet * (0x2,0x3e,0x00) * * for the former and * * Illegal Request, Logical Unit Not Supported * (0x5,0x25,0x00) * * for the latter. 
* * We used to decide whether there was at least one listener * based upon whether the black hole driver was configured. * However, recent config(8) changes have made this hard to do * at this time. * */ u_int32_t ccode = SCSI_STATUS_BUSY; /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a BUSY status * instead. This works out okay because the only * time we should, in fact, get this, is in the * case that somebody configured us without the * blackhole driver, so they get what they deserve. */ isp_endcmd(isp, aep, ccode, 0); return (0); } atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a QUEUE FULL status * instead. This works out okay because the only time we * should, in fact, get this, is in the case that we've * run out of ATIOS. */ xpt_print_path(tptr->owner); isp_prt(isp, ISP_LOGWARN, "no ATIOS for lun %d from initiator %d", lun, aep->at_iid); rls_lun_statep(isp, tptr); if (aep->at_flags & AT_TQAE) isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); else isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return (0); } SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); if (tptr == &isp->isp_osinfo.tsdflt[0]) { atiop->ccb_h.target_id = ((fcparam *)isp->isp_param)->isp_loopid; atiop->ccb_h.target_lun = lun; } /* * We don't get 'suggested' sense data as we do with SCSI cards. 
*/ atiop->sense_len = 0; atiop->init_id = aep->at_iid; atiop->cdb_len = ATIO2_CDBLEN; MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = aep->at_rxid; switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { case ATIO2_TC_ATTR_SIMPLEQ: atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case ATIO2_TC_ATTR_HEADOFQ: atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case ATIO2_TC_ATTR_ORDERED: atiop->tag_action = MSG_ORDERED_Q_TAG; break; case ATIO2_TC_ATTR_ACAQ: /* ?? */ case ATIO2_TC_ATTR_UNTAGGED: default: atiop->tag_action = 0; break; } if (atiop->tag_action != 0) { atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; } /* * Preserve overall command datalength in private field. */ atiop->ccb_h.spriv_field0 = aep->at_datalen; xpt_done((union ccb*)atiop); isp_prt(isp, ISP_LOGTDEBUG1, "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u", aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid, lun, aep->at_taskflags, aep->at_datalen); rls_lun_statep(isp, tptr); return (0); } static int isp_handle_platform_ctio(struct ispsoftc *isp, void *arg) { union ccb *ccb; int sentstatus, ok, notify_cam, resid = 0; /* * CTIO and CTIO2 are close enough.... */ ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle); KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio")); isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle); if (IS_FC(isp)) { ct2_entry_t *ct = arg; sentstatus = ct->ct_flags & CT2_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { ccb->ccb_h.status |= CAM_SENT_SENSE; } isp_prt(isp, ISP_LOGTDEBUG1, "CTIO2[%x] sts 0x%x flg 0x%x sns %d %s", ct->ct_rxid, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, sentstatus? 
"FIN" : "MID"); notify_cam = ct->ct_header.rqs_seqno & 0x1; - if (ct->ct_flags & CT2_DATAMASK) + if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { resid = ct->ct_resid; + } } else { ct_entry_t *ct = arg; sentstatus = ct->ct_flags & CT_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; isp_prt(isp, ISP_LOGTDEBUG1, "CTIO[%x] tag %x iid %x tgt %d lun %d sts 0x%x flg %x %s", ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_tgt, ct->ct_lun, ct->ct_status, ct->ct_flags, sentstatus? "FIN" : "MID"); /* * We *ought* to be able to get back to the original ATIO * here, but for some reason this gets lost. It's just as * well because it's squirrelled away as part of periph * private data. * * We can live without it as long as we continue to use * the auto-replenish feature for CTIOs. */ notify_cam = ct->ct_header.rqs_seqno & 0x1; - if (ct->ct_status & QLTM_SVALID) { + if (ct->ct_status & QLTM_SVALID) { char *sp = (char *)ct; sp += CTIO_SENSE_OFFSET; ccb->csio.sense_len = min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN); MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } - if (ct->ct_flags & CT_DATAMASK) + if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { resid = ct->ct_resid; + } } ccb->csio.resid += resid; /* * We're here either because intermediate data transfers are done * and/or the final status CTIO (which may have joined with a * Data Transfer) is done. * * In any case, for this platform, the upper layers figure out * what to do next, so all we do here is collect status and * pass information along. Any DMA handles have already been * freed. */ if (notify_cam == 0) { isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO done"); return (0); } isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO done (resid %d)", (sentstatus)? 
" FINAL " : "MIDTERM ", ccb->csio.resid); if (!ok) { isp_target_putback_atio(ccb); } else { isp_complete_ctio(ccb); } return (0); } #endif static void isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; struct ispsoftc *isp; sim = (struct cam_sim *)cbarg; isp = (struct ispsoftc *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: if (IS_SCSI(isp)) { u_int16_t oflags, nflags; sdparam *sdp = isp->isp_param; int tgt; tgt = xpt_path_target_id(path); ISP_LOCK(isp); sdp += cam_sim_bus(sim); #ifndef ISP_TARGET_MODE if (tgt == sdp->isp_initiator_id) { nflags = DPARM_DEFAULT; } else { nflags = DPARM_SAFE_DFLT; if (isp->isp_loaded_fw) { nflags |= DPARM_NARROW | DPARM_ASYNC; } } #else nflags = DPARM_DEFAULT; #endif oflags = sdp->isp_devparam[tgt].dev_flags; sdp->isp_devparam[tgt].dev_flags = nflags; sdp->isp_devparam[tgt].dev_update = 1; isp->isp_update |= (1 << cam_sim_bus(sim)); (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL); sdp->isp_devparam[tgt].dev_flags = oflags; ISP_UNLOCK(isp); } break; default: isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); break; } } static void isp_poll(struct cam_sim *sim) { struct ispsoftc *isp = cam_sim_softc(sim); ISP_LOCK(isp); (void) isp_intr(isp); ISP_UNLOCK(isp); } +#if 0 static void isp_relsim(void *arg) { struct ispsoftc *isp = arg; ISP_LOCK(isp); if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) { int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED; isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED; if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { xpt_release_simq(isp->isp_sim, 1); isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq"); } } ISP_UNLOCK(isp); } +#endif static void isp_watchdog(void *arg) { XS_T *xs = arg; struct ispsoftc *isp = XS_ISP(xs); u_int32_t handle; /* * We've decided this command is dead. Make sure we're not trying * to kill a command that's already dead by getting it's handle and * and seeing whether it's still alive. 
*/ ISP_LOCK(isp); handle = isp_find_handle(isp, xs); if (handle) { u_int16_t r; if (XS_CMD_DONE_P(xs)) { isp_prt(isp, ISP_LOGDEBUG1, "watchdog found done cmd (handle 0x%x)", handle); ISP_UNLOCK(isp); return; } if (XS_CMD_WDOG_P(xs)) { isp_prt(isp, ISP_LOGDEBUG2, "recursive watchdog (handle 0x%x)", handle); ISP_UNLOCK(isp); return; } XS_CMD_S_WDOG(xs); r = ISP_READ(isp, BIU_ISR); if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) { isp_prt(isp, ISP_LOGDEBUG2, "watchdog cleanup (%x, %x)", handle, r); xpt_done((union ccb *) xs); } else if (XS_CMD_GRACE_P(xs)) { /* * Make sure the command is *really* dead before we * release the handle (and DMA resources) for reuse. */ (void) isp_control(isp, ISPCTL_ABORT_CMD, arg); /* * After this point, the comamnd is really dead. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, handle); } isp_destroy_handle(isp, handle); xpt_print_path(xs->ccb_h.path); isp_prt(isp, ISP_LOGWARN, "watchdog timeout (%x, %x)", handle, r); XS_SETERR(xs, CAM_CMD_TIMEOUT); XS_CMD_C_WDOG(xs); isp_done(xs); } else { u_int16_t iptr, optr; ispreq_t *mp; XS_CMD_C_WDOG(xs); xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz); if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) { ISP_UNLOCK(isp); return; } XS_CMD_S_GRACE(xs); MEMZERO((void *) mp, sizeof (*mp)); mp->req_header.rqs_entry_count = 1; mp->req_header.rqs_entry_type = RQSTYPE_MARKER; mp->req_modifier = SYNC_ALL; mp->req_target = XS_CHANNEL(xs) << 7; ISP_SWIZZLE_REQUEST(isp, mp); ISP_ADD_REQUEST(isp, iptr); } } else { isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command"); } ISP_UNLOCK(isp); } +#ifdef ISP_SMPLOCK +static void +isp_kthread(void *arg) +{ + int wasfrozen; + struct ispsoftc *isp = arg; + + mtx_lock(&isp->isp_lock); + for (;;) { + isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state"); + while (isp_fc_runstate(isp, 2 * 1000000) != 0) { +#if 0 + msleep(&lbolt, &isp->isp_lock, + PRIBIO, "isp_fcthrd", 0); +#else + msleep(isp_kthread, &isp->isp_lock, + PRIBIO, "isp_fcthrd", hz); 
+#endif + } + wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; + isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; + if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { + isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq"); + ISPLOCK_2_CAMLOCK(isp); + xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } + cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock); + } +} +#else +static void +isp_kthread(void *arg) +{ + int wasfrozen; + struct ispsoftc *isp = arg; + + mtx_lock(&Giant); + for (;;) { + isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state"); + while (isp_fc_runstate(isp, 2 * 1000000) != 0) { + tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz); + } + wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; + isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; + if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { + isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq"); + ISPLOCK_2_CAMLOCK(isp); + xpt_release_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } + tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "isp_fc_worker", 0); + } +} +#endif static void isp_action(struct cam_sim *sim, union ccb *ccb) { int bus, tgt, error; struct ispsoftc *isp; struct ccb_trans_settings *cts; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); isp = (struct ispsoftc *)cam_sim_softc(sim); ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = isp; if (isp->isp_state != ISP_RUNSTATE && ccb->ccb_h.func_code == XPT_SCSI_IO) { - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); isp_init(isp); if (isp->isp_state != ISP_INITSTATE) { ISP_UNLOCK(isp); /* * Lie. Say it was a selection timeout. 
*/ ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } isp->isp_state = ISP_RUNSTATE; - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); } isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); + switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } #ifdef DIAGNOSTIC if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { ccb->ccb_h.status = CAM_PATH_INVALID; } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { ccb->ccb_h.status = CAM_PATH_INVALID; } if (ccb->ccb_h.status == CAM_PATH_INVALID) { isp_prt(isp, ISP_LOGERR, "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", ccb->ccb_h.target_id, ccb->ccb_h.target_lun); xpt_done(ccb); break; } #endif ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); error = isp_start((XS_T *) ccb); - ISP_UNLOCK(isp); switch (error) { case CMD_QUEUED: ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { u_int64_t ticks = (u_int64_t) hz; if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) ticks = 60 * 1000 * ticks; else ticks = ccb->ccb_h.timeout * hz; ticks = ((ticks + 999) / 1000) + hz + hz; if (ticks >= 0x80000000) { isp_prt(isp, ISP_LOGERR, "timeout overflow"); ticks = 0x80000000; } ccb->ccb_h.timeout_ch = timeout(isp_watchdog, (caddr_t)ccb, (int)ticks); } else { callout_handle_init(&ccb->ccb_h.timeout_ch); } + ISPLOCK_2_CAMLOCK(isp); break; case CMD_RQLATER: +#ifdef ISP_SMPLOCK + cv_signal(&isp->isp_osinfo.kthread_cv); +#else + wakeup(&isp->isp_osinfo.kthread_cv); +#endif if (isp->isp_osinfo.simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG2, "RQLATER freeze simq"); +#if 0 isp->isp_osinfo.simqfrozen |= SIMQFRZ_TIMED; timeout(isp_relsim, isp, 500); +#else 
+ isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; +#endif + ISPLOCK_2_CAMLOCK(isp); xpt_freeze_simq(sim, 1); + } else { + ISPLOCK_2_CAMLOCK(isp); } XS_SETERR(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); break; case CMD_EAGAIN: if (isp->isp_osinfo.simqfrozen == 0) { xpt_freeze_simq(sim, 1); isp_prt(isp, ISP_LOGDEBUG2, "EAGAIN freeze simq"); } isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; XS_SETERR(ccb, CAM_REQUEUE_REQ); + ISPLOCK_2_CAMLOCK(isp); xpt_done(ccb); break; case CMD_COMPLETE: - ISP_LOCK(isp); isp_done((struct ccb_scsiio *) ccb); - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); break; default: isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__); XS_SETERR(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); + ISPLOCK_2_CAMLOCK(isp); } break; #ifdef ISP_TARGET_MODE case XPT_EN_LUN: /* Enable LUN as a target */ + CAMLOCK_2_ISPLOCK(isp); isp_en_lun(isp, ccb); + ISPLOCK_2_CAMLOCK(isp); xpt_done(ccb); break; case XPT_NOTIFY_ACK: /* recycle notify ack */ case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); if (tptr == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); break; } ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = isp; - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); } else { SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); } - ISP_UNLOCK(isp); rls_lun_statep(isp, tptr); ccb->ccb_h.status = CAM_REQ_INPROG; + ISPLOCK_2_CAMLOCK(isp); break; } case XPT_CONT_TARGET_IO: { - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); ccb->ccb_h.status = isp_target_start_ctio(isp, ccb); if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (isp->isp_osinfo.simqfrozen == 0) { xpt_freeze_simq(sim, 1); xpt_print_path(ccb->ccb_h.path); isp_prt(isp, ISP_LOGINFO, 
"XPT_CONT_TARGET_IO freeze simq"); } isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE; XS_SETERR(ccb, CAM_REQUEUE_REQ); + ISPLOCK_2_CAMLOCK(isp); xpt_done(ccb); } else { + ISPLOCK_2_CAMLOCK(isp); ccb->ccb_h.status |= CAM_SIM_QUEUED; } - ISP_UNLOCK(isp); break; } #endif case XPT_RESET_DEV: /* BDR the specified SCSI device */ bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); tgt = ccb->ccb_h.target_id; tgt |= (bus << 16); - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { union ccb *accb = ccb->cab.abort_ccb; + CAMLOCK_2_ISPLOCK(isp); switch (accb->ccb_h.func_code) { #ifdef ISP_TARGET_MODE case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); break; case XPT_CONT_TARGET_IO: isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); ccb->ccb_h.status = CAM_UA_ABORT; break; #endif case XPT_SCSI_IO: - ISP_LOCK(isp); error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); - ISP_UNLOCK(isp); if (error) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } + ISPLOCK_2_CAMLOCK(isp); xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ cts = &ccb->cts; tgt = cts->ccb_h.target_id; - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); if (IS_SCSI(isp)) { sdparam *sdp = isp->isp_param; u_int16_t *dptr; bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); sdp += bus; #if 0 if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) dptr = &sdp->isp_devparam[tgt].cur_dflags; else dptr = &sdp->isp_devparam[tgt].dev_flags; #else /* * We always update (internally) from dev_flags * so any request to change settings just gets * vectored to that location. 
*/ dptr = &sdp->isp_devparam[tgt].dev_flags; #endif /* * Note that these operations affect the * the goal flags (dev_flags)- not * the current state flags. Then we mark * things so that the next operation to * this HBA will cause the update to occur. */ if (cts->valid & CCB_TRANS_DISC_VALID) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { *dptr |= DPARM_DISC; } else { *dptr &= ~DPARM_DISC; } } if (cts->valid & CCB_TRANS_TQ_VALID) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { *dptr |= DPARM_TQING; } else { *dptr &= ~DPARM_TQING; } } if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { switch (cts->bus_width) { case MSG_EXT_WDTR_BUS_16_BIT: *dptr |= DPARM_WIDE; break; default: *dptr &= ~DPARM_WIDE; } } /* * Any SYNC RATE of nonzero and SYNC_OFFSET * of nonzero will cause us to go to the * selected (from NVRAM) maximum value for * this device. At a later point, we'll * allow finer control. */ if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && (cts->sync_offset > 0)) { *dptr |= DPARM_SYNC; } else { *dptr &= ~DPARM_SYNC; } *dptr |= DPARM_SAFE_DFLT; isp_prt(isp, ISP_LOGDEBUG0, "%d.%d set %s period 0x%x offset 0x%x flags 0x%x", bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? "current" : "user", sdp->isp_devparam[tgt].sync_period, sdp->isp_devparam[tgt].sync_offset, sdp->isp_devparam[tgt].dev_flags); sdp->isp_devparam[tgt].dev_update = 1; isp->isp_update |= (1 << bus); } - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tgt = cts->ccb_h.target_id; if (IS_FC(isp)) { /* * a lot of normal SCSI things don't make sense. */ cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; /* * How do you measure the width of a high * speed serial bus? Well, in bytes. * * Offset and period make no sense, though, so we set * (above) a 'base' transfer speed to be gigabit. 
*/ cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } else { sdparam *sdp = isp->isp_param; u_int16_t dval, pval, oval; int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); + CAMLOCK_2_ISPLOCK(isp); sdp += bus; if (cts->flags & CCB_TRANS_CURRENT_SETTINGS) { - ISP_LOCK(isp); sdp->isp_devparam[tgt].dev_refresh = 1; isp->isp_update |= (1 << bus); (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL); - ISP_UNLOCK(isp); dval = sdp->isp_devparam[tgt].cur_dflags; oval = sdp->isp_devparam[tgt].cur_offset; pval = sdp->isp_devparam[tgt].cur_period; } else { dval = sdp->isp_devparam[tgt].dev_flags; oval = sdp->isp_devparam[tgt].sync_offset; pval = sdp->isp_devparam[tgt].sync_period; } - ISP_LOCK(isp); cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); if (dval & DPARM_DISC) { cts->flags |= CCB_TRANS_DISC_ENB; } if (dval & DPARM_TQING) { cts->flags |= CCB_TRANS_TAG_ENB; } if (dval & DPARM_WIDE) { cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } cts->valid = CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; if ((dval & DPARM_SYNC) && oval != 0) { cts->sync_period = pval; cts->sync_offset = oval; cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; } - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); isp_prt(isp, ISP_LOGDEBUG0, "%d.%d get %s period 0x%x offset 0x%x flags 0x%x", bus, tgt, (cts->flags & CCB_TRANS_CURRENT_SETTINGS)? 
"current" : "user", pval, oval, dval); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t secs_per_cylinder; u_int32_t size_mb; ccg = &ccb->ccg; if (ccg->block_size == 0) { isp_prt(isp, ISP_LOGERR, "%d.%d XPT_CALC_GEOMETRY block size 0?", ccg->ccb_h.target_id, ccg->ccb_h.target_lun); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); if (size_mb > 1024) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified bus */ bus = cam_sim_bus(sim); - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); error = isp_control(isp, ISPCTL_RESET_BUS, &bus); - ISP_UNLOCK(isp); + ISPLOCK_2_CAMLOCK(isp); if (error) ccb->ccb_h.status = CAM_REQ_CMP_ERR; else { if (cam_sim_bus(sim) && isp->isp_path2 != NULL) xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); else if (isp->isp_path != NULL) xpt_async(AC_BUS_RESET, isp->isp_path, NULL); ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; #ifdef ISP_TARGET_MODE cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; #else cpi->target_sprt = 0; #endif cpi->hba_eng_cnt = 0; cpi->max_target = ISP_MAX_TARGETS(isp) - 1; cpi->max_lun = ISP_MAX_LUNS(isp) - 1; cpi->bus_id = cam_sim_bus(sim); if (IS_FC(isp)) { cpi->hba_misc = PIM_NOBUSRESET; /* * Because our loop ID can shift from time to time, * make our initiator ID out of range of our bus. */ cpi->initiator_id = cpi->max_target + 1; /* * Set base transfer capabilities for Fibre Channel. 
* Technically not correct because we don't know * what media we're running on top of- but we'll * look good if we always say 100MB/s. */ cpi->base_transfer_speed = 100000; cpi->hba_inquiry = PI_TAG_ABLE; } else { sdparam *sdp = isp->isp_param; sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->hba_misc = 0; cpi->initiator_id = sdp->isp_initiator_id; cpi->base_transfer_speed = 3300; } strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) void isp_done(struct ccb_scsiio *sccb) { struct ispsoftc *isp = XS_ISP(sccb); if (XS_NOERR(sccb)) XS_SETERR(sccb, CAM_REQ_CMP); if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { sccb->ccb_h.status &= ~CAM_STATUS_MASK; if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } else { sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } } sccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { sccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(sccb->ccb_h.path, 1); if (sccb->scsi_status != SCSI_STATUS_OK) isp_prt(isp, ISP_LOGDEBUG2, "freeze devq %d.%d %x %x", sccb->ccb_h.target_id, sccb->ccb_h.target_lun, sccb->ccb_h.status, sccb->scsi_status); } } /* * If we were frozen waiting resources, clear that we were frozen * waiting for resources. If we are no longer frozen, and the devq * isn't frozen, mark the completing CCB to have the XPT layer * release the simq. 
*/ if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) { isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE; if (isp->isp_osinfo.simqfrozen == 0) { if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { isp_prt(isp, ISP_LOGDEBUG2, "isp_done->relsimq"); sccb->ccb_h.status |= CAM_RELEASE_SIMQ; } else { isp_prt(isp, ISP_LOGDEBUG2, "isp_done->devq frozen"); } } else { isp_prt(isp, ISP_LOGDEBUG2, "isp_done -> simqfrozen = %x", isp->isp_osinfo.simqfrozen); } } if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_print_path(sccb->ccb_h.path); isp_prt(isp, ISP_LOGINFO, "cam completion status 0x%x", sccb->ccb_h.status); } XS_CMD_S_DONE(sccb); if (XS_CMD_WDOG_P(sccb) == 0) { untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); if (XS_CMD_GRACE_P(sccb)) { isp_prt(isp, ISP_LOGDEBUG2, "finished command on borrowed time"); } XS_CMD_S_CLEAR(sccb); - ISP_UNLOCK(isp); -#ifdef ISP_SMPLOCK - mtx_lock(&Giant); - xpt_done((union ccb *) sccb); - mtx_unlock(&Giant); -#else + ISPLOCK_2_CAMLOCK(isp); xpt_done((union ccb *) sccb); -#endif - ISP_LOCK(isp); + CAMLOCK_2_ISPLOCK(isp); } } int isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg) { int bus, rv = 0; switch (cmd) { case ISPASYNC_NEW_TGT_PARAMS: { int flags, tgt; sdparam *sdp = isp->isp_param; struct ccb_trans_settings neg; struct cam_path *tmppath; tgt = *((int *)arg); bus = (tgt >> 16) & 0xffff; tgt &= 0xffff; sdp += bus; if (xpt_create_path(&tmppath, NULL, cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); rv = -1; break; } flags = sdp->isp_devparam[tgt].cur_dflags; neg.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; if (flags & DPARM_DISC) { neg.flags |= CCB_TRANS_DISC_ENB; } if (flags & DPARM_TQING) { neg.flags |= CCB_TRANS_TAG_ENB; } neg.valid |= CCB_TRANS_BUS_WIDTH_VALID; neg.bus_width = (flags & DPARM_WIDE)? 
MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; neg.sync_period = sdp->isp_devparam[tgt].cur_period; neg.sync_offset = sdp->isp_devparam[tgt].cur_offset; if (flags & DPARM_SYNC) { neg.valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; } isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, neg.sync_period, neg.sync_offset, flags); xpt_setup_ccb(&neg.ccb_h, tmppath, 1); + ISPLOCK_2_CAMLOCK(isp); xpt_async(AC_TRANSFER_NEG, tmppath, &neg); + CAMLOCK_2_ISPLOCK(isp); xpt_free_path(tmppath); break; } case ISPASYNC_BUS_RESET: bus = *((int *)arg); isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); if (bus > 0 && isp->isp_path2) { + ISPLOCK_2_CAMLOCK(isp); xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); + CAMLOCK_2_ISPLOCK(isp); } else if (isp->isp_path) { + ISPLOCK_2_CAMLOCK(isp); xpt_async(AC_BUS_RESET, isp->isp_path, NULL); + CAMLOCK_2_ISPLOCK(isp); } break; + case ISPASYNC_LIP: + if (isp->isp_path) { + if (isp->isp_osinfo.simqfrozen == 0) { + isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq"); + ISPLOCK_2_CAMLOCK(isp); + xpt_freeze_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } + isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; + } + isp_prt(isp, ISP_LOGINFO, "LIP Received"); + break; + case ISPASYNC_LOOP_RESET: + if (isp->isp_path) { + if (isp->isp_osinfo.simqfrozen == 0) { + isp_prt(isp, ISP_LOGDEBUG0, + "Loop Reset freeze simq"); + ISPLOCK_2_CAMLOCK(isp); + xpt_freeze_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); + } + isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; + } + isp_prt(isp, ISP_LOGINFO, "Loop Reset Received"); + break; case ISPASYNC_LOOP_DOWN: if (isp->isp_path) { if (isp->isp_osinfo.simqfrozen == 0) { - isp_prt(isp, ISP_LOGDEBUG2, + isp_prt(isp, ISP_LOGDEBUG0, "loop down freeze simq"); + ISPLOCK_2_CAMLOCK(isp); xpt_freeze_simq(isp->isp_sim, 1); + CAMLOCK_2_ISPLOCK(isp); } isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; } isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); break; case 
ISPASYNC_LOOP_UP: - if (isp->isp_path) { - int wasfrozen = - isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN; - isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN; - if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) { - xpt_release_simq(isp->isp_sim, 1); - isp_prt(isp, ISP_LOGDEBUG2, - "loop up release simq"); - } - } + /* + * Now we just note that Loop has come up. We don't + * actually do anything because we're waiting for a + * Change Notify before activating the FC cleanup + * thread to look at the state of the loop again. + */ isp_prt(isp, ISP_LOGINFO, "Loop UP"); break; case ISPASYNC_PROMENADE: { const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; static const char *roles[4] = { "(none)", "Target", "Initiator", "Target/Initiator" }; fcparam *fcp = isp->isp_param; int tgt = *((int *) arg); struct lportdb *lp = &fcp->portdb[tgt]; isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, roles[lp->roles & 0x3], (lp->valid)? 
"Arrived" : "Departed", (u_int32_t) (lp->port_wwn >> 32), (u_int32_t) (lp->port_wwn & 0xffffffffLL), (u_int32_t) (lp->node_wwn >> 32), (u_int32_t) (lp->node_wwn & 0xffffffffLL)); break; } case ISPASYNC_CHANGE_NOTIFY: if (arg == (void *) 1) { isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed"); } else { isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed"); } +#ifdef ISP_SMPLOCK + cv_signal(&isp->isp_osinfo.kthread_cv); +#else + wakeup(&isp->isp_osinfo.kthread_cv); +#endif break; case ISPASYNC_FABRIC_DEV: { int target, lrange; struct lportdb *lp = NULL; char *pt; sns_ganrsp_t *resp = (sns_ganrsp_t *) arg; u_int32_t portid; u_int64_t wwpn, wwnn; fcparam *fcp = isp->isp_param; portid = (((u_int32_t) resp->snscb_port_id[0]) << 16) | (((u_int32_t) resp->snscb_port_id[1]) << 8) | (((u_int32_t) resp->snscb_port_id[2])); wwpn = (((u_int64_t)resp->snscb_portname[0]) << 56) | (((u_int64_t)resp->snscb_portname[1]) << 48) | (((u_int64_t)resp->snscb_portname[2]) << 40) | (((u_int64_t)resp->snscb_portname[3]) << 32) | (((u_int64_t)resp->snscb_portname[4]) << 24) | (((u_int64_t)resp->snscb_portname[5]) << 16) | (((u_int64_t)resp->snscb_portname[6]) << 8) | (((u_int64_t)resp->snscb_portname[7])); wwnn = (((u_int64_t)resp->snscb_nodename[0]) << 56) | (((u_int64_t)resp->snscb_nodename[1]) << 48) | (((u_int64_t)resp->snscb_nodename[2]) << 40) | (((u_int64_t)resp->snscb_nodename[3]) << 32) | (((u_int64_t)resp->snscb_nodename[4]) << 24) | (((u_int64_t)resp->snscb_nodename[5]) << 16) | (((u_int64_t)resp->snscb_nodename[6]) << 8) | (((u_int64_t)resp->snscb_nodename[7])); if (portid == 0 || wwpn == 0) { break; } switch (resp->snscb_port_type) { case 1: pt = " N_Port"; break; case 2: pt = " NL_Port"; break; case 3: pt = "F/NL_Port"; break; case 0x7f: pt = " Nx_Port"; break; case 0x81: pt = " F_port"; break; case 0x82: pt = " FL_Port"; break; case 0x84: pt = " E_port"; break; default: pt = "?"; break; } isp_prt(isp, ISP_LOGINFO, "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x", pt, 
portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn), ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn)); /* * We're only interested in SCSI_FCP types (for now) */ if ((resp->snscb_fc4_types[2] & 1) == 0) { break; } if (fcp->isp_topo != TOPO_F_PORT) lrange = FC_SNS_ID+1; else lrange = 0; /* * Is it already in our list? */ for (target = lrange; target < MAX_FC_TARG; target++) { if (target >= FL_PORT_ID && target <= FC_SNS_ID) { continue; } lp = &fcp->portdb[target]; if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) { lp->fabric_dev = 1; break; } } if (target < MAX_FC_TARG) { break; } for (target = lrange; target < MAX_FC_TARG; target++) { if (target >= FL_PORT_ID && target <= FC_SNS_ID) { continue; } lp = &fcp->portdb[target]; if (lp->port_wwn == 0) { break; } } if (target == MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "no more space for fabric devices"); break; } lp->node_wwn = wwnn; lp->port_wwn = wwpn; lp->portid = portid; lp->fabric_dev = 1; break; } #ifdef ISP_TARGET_MODE case ISPASYNC_TARGET_MESSAGE: { tmd_msg_t *mp = arg; isp_prt(isp, ISP_LOGDEBUG2, "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x", mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt, (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval, mp->nt_msg[0]); break; } case ISPASYNC_TARGET_EVENT: { tmd_event_t *ep = arg; isp_prt(isp, ISP_LOGDEBUG2, "bus %d event code 0x%x", ep->ev_bus, ep->ev_event); break; } case ISPASYNC_TARGET_ACTION: switch (((isphdr_t *)arg)->rqs_entry_type) { default: isp_prt(isp, ISP_LOGWARN, "event 0x%x for unhandled target action", ((isphdr_t *)arg)->rqs_entry_type); break; case RQSTYPE_ATIO: rv = isp_handle_platform_atio(isp, (at_entry_t *) arg); break; case RQSTYPE_ATIO2: rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); break; case RQSTYPE_CTIO2: case RQSTYPE_CTIO: rv = isp_handle_platform_ctio(isp, arg); break; case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status); break; } break; #endif default: isp_prt(isp, 
ISP_LOGERR, "unknown isp_async event %d", cmd); rv = -1; break; } return (rv); } /* * Locks are held before coming here. */ void isp_uninit(struct ispsoftc *isp) { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); DISABLE_INTS(isp); } void isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...) { va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } printf("%s: ", device_get_nameunit(isp->isp_dev)); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } diff --git a/sys/dev/isp/isp_freebsd.h b/sys/dev/isp/isp_freebsd.h index 4598db764528..0e25d91bc3ec 100644 --- a/sys/dev/isp/isp_freebsd.h +++ b/sys/dev/isp/isp_freebsd.h @@ -1,422 +1,400 @@ /* $FreeBSD$ */ /* * Qlogic ISP SCSI Host Adapter FreeBSD Wrapper Definitions (CAM version) * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _ISP_FREEBSD_H #define _ISP_FREEBSD_H #define ISP_PLATFORM_VERSION_MAJOR 5 #define ISP_PLATFORM_VERSION_MINOR 7 /* * We're not ready for primetime yet */ -#if 0 -#if ((ISP_PLATFORM_VERSION_MAJOR * 10) + ISP_PLATFORM_VERSION_MINOR) >= 54 #define ISP_SMPLOCK 1 -#endif -#endif - #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ddb.h" #include "opt_isp.h" +#define HANDLE_LOOPSTATE_IN_OUTER_LAYERS 1 + typedef void ispfwfunc __P((int, int, int, const u_int16_t **)); #ifdef ISP_TARGET_MODE typedef struct tstate { struct tstate *next; struct cam_path *owner; struct ccb_hdr_slist atios; struct ccb_hdr_slist inots; lun_id_t lun; int bus; u_int32_t hold; } tstate_t; #define LUN_HASH_SIZE 32 #define LUN_HASH_FUNC(isp, port, lun) \ ((IS_DUALBUS(isp)) ? 
\ (((lun) & ((LUN_HASH_SIZE >> 1) - 1)) << (port)) : \ ((lun) & (LUN_HASH_SIZE - 1))) #endif struct isposinfo { struct ispsoftc * next; u_int64_t default_port_wwn; u_int64_t default_node_wwn; device_t dev; struct cam_sim *sim; struct cam_path *path; struct cam_sim *sim2; struct cam_path *path2; struct intr_config_hook ehook; u_int8_t mboxwaiting; u_int8_t simqfrozen; u_int8_t drain; u_int8_t intsok; -#ifdef ISP_SMPLOCK struct mtx lock; -#else - volatile u_int32_t islocked; - int splsaved; -#endif + struct cv kthread_cv; + struct proc *kproc; #ifdef ISP_TARGET_MODE #define TM_WANTED 0x80 #define TM_BUSY 0x40 #define TM_TMODE_ENABLED 0x03 u_int8_t tmflags; u_int8_t rstatus; u_int16_t rollinfo; tstate_t tsdflt[2]; /* two busses */ tstate_t *lun_hash[LUN_HASH_SIZE]; #endif }; +#define isp_lock isp_osinfo.lock + /* * Locking macros... */ #ifdef ISP_SMPLOCK -#define ISP_LOCK(x) mtx_lock(&(x)->isp_osinfo.lock) -#define ISP_UNLOCK(x) mtx_unlock(&(x)->isp_osinfo.lock) +#define ISP_LOCK(x) mtx_lock(&(x)->isp_lock) +#define ISP_UNLOCK(x) mtx_unlock(&(x)->isp_lock) +#define ISPLOCK_2_CAMLOCK(isp) \ + mtx_unlock(&(isp)->isp_lock); mtx_lock(&Giant) +#define CAMLOCK_2_ISPLOCK(isp) \ + mtx_unlock(&Giant); mtx_lock(&(isp)->isp_lock) #else -#define ISP_LOCK isp_lock -#define ISP_UNLOCK isp_unlock +#define ISP_LOCK(x) +#define ISP_UNLOCK(x) +#define ISPLOCK_2_CAMLOCK(x) +#define CAMLOCK_2_ISPLOCK(x) #endif - /* * Required Macros/Defines */ #define INLINE __inline #define ISP2100_SCRLEN 0x400 #define MEMZERO bzero #define MEMCPY(dst, src, amt) bcopy((src), (dst), (amt)) #define SNPRINTF snprintf #define STRNCAT strncat #define USEC_DELAY DELAY #define USEC_SLEEP(isp, x) \ if (isp->isp_osinfo.intsok) \ ISP_UNLOCK(isp); \ DELAY(x); \ if (isp->isp_osinfo.intsok) \ ISP_LOCK(isp) #define NANOTIME_T struct timespec #define GET_NANOTIME nanotime #define GET_NANOSEC(x) ((x)->tv_sec * 1000000000 + (x)->tv_nsec) #define NANOTIME_SUB nanotime_sub #define MAXISPREQUEST(isp) 256 #ifdef __alpha__ 
#define MEMORYBARRIER(isp, type, offset, size) alpha_mb() #else #define MEMORYBARRIER(isp, type, offset, size) #endif #define MBOX_ACQUIRE(isp) #define MBOX_WAIT_COMPLETE isp_mbox_wait_complete #define MBOX_NOTIFY_COMPLETE(isp) \ if (isp->isp_osinfo.mboxwaiting) { \ isp->isp_osinfo.mboxwaiting = 0; \ wakeup(&isp->isp_osinfo.mboxwaiting); \ } \ isp->isp_mboxbsy = 0 #define MBOX_RELEASE(isp) #ifndef SCSI_GOOD #define SCSI_GOOD SCSI_STATUS_OK #endif #ifndef SCSI_CHECK #define SCSI_CHECK SCSI_STATUS_CHECK_COND #endif #ifndef SCSI_BUSY #define SCSI_BUSY SCSI_STATUS_BUSY #endif #ifndef SCSI_QFULL #define SCSI_QFULL SCSI_STATUS_QUEUE_FULL #endif #define XS_T struct ccb_scsiio #define XS_ISP(ccb) ((struct ispsoftc *) (ccb)->ccb_h.spriv_ptr1) #define XS_CHANNEL(ccb) cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)) #define XS_TGT(ccb) (ccb)->ccb_h.target_id #define XS_LUN(ccb) (ccb)->ccb_h.target_lun #define XS_CDBP(ccb) \ (((ccb)->ccb_h.flags & CAM_CDB_POINTER)? \ (ccb)->cdb_io.cdb_ptr : (ccb)->cdb_io.cdb_bytes) #define XS_CDBLEN(ccb) (ccb)->cdb_len #define XS_XFRLEN(ccb) (ccb)->dxfer_len #define XS_TIME(ccb) (ccb)->ccb_h.timeout #define XS_RESID(ccb) (ccb)->resid #define XS_STSP(ccb) (&(ccb)->scsi_status) #define XS_SNSP(ccb) (&(ccb)->sense_data) #define XS_SNSLEN(ccb) \ imin((sizeof((ccb)->sense_data)), ccb->sense_len) #define XS_SNSKEY(ccb) ((ccb)->sense_data.flags & 0xf) #define XS_TAG_P(ccb) \ (((ccb)->ccb_h.flags & CAM_TAG_ACTION_VALID) && \ (ccb)->tag_action != CAM_TAG_ACTION_NONE) #define XS_TAG_TYPE(ccb) \ ((ccb->tag_action == MSG_SIMPLE_Q_TAG)? REQFLAG_STAG : \ ((ccb->tag_action == MSG_HEAD_OF_Q_TAG)? 
REQFLAG_HTAG : REQFLAG_OTAG)) #define XS_SETERR(ccb, v) (ccb)->ccb_h.status &= ~CAM_STATUS_MASK, \ (ccb)->ccb_h.status |= v, \ (ccb)->ccb_h.spriv_field0 |= ISP_SPRIV_ERRSET # define HBA_NOERROR CAM_REQ_INPROG # define HBA_BOTCH CAM_UNREC_HBA_ERROR # define HBA_CMDTIMEOUT CAM_CMD_TIMEOUT # define HBA_SELTIMEOUT CAM_SEL_TIMEOUT # define HBA_TGTBSY CAM_SCSI_STATUS_ERROR # define HBA_BUSRESET CAM_SCSI_BUS_RESET # define HBA_ABORTED CAM_REQ_ABORTED # define HBA_DATAOVR CAM_DATA_RUN_ERR # define HBA_ARQFAIL CAM_AUTOSENSE_FAIL #define XS_ERR(ccb) ((ccb)->ccb_h.status & CAM_STATUS_MASK) #define XS_NOERR(ccb) \ (((ccb)->ccb_h.spriv_field0 & ISP_SPRIV_ERRSET) == 0 || \ ((ccb)->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) #define XS_INITERR(ccb) \ XS_SETERR(ccb, CAM_REQ_INPROG), (ccb)->ccb_h.spriv_field0 = 0 #define XS_SAVE_SENSE(xs, sp) \ (xs)->ccb_h.status |= CAM_AUTOSNS_VALID, \ bcopy(sp->req_sense_data, &(xs)->sense_data, \ imin(XS_SNSLEN(xs), sp->req_sense_len)) #define XS_SET_STATE_STAT(a, b, c) #define DEFAULT_IID(x) 7 #define DEFAULT_LOOPID(x) 109 #define DEFAULT_NODEWWN(isp) (isp)->isp_osinfo.default_node_wwn #define DEFAULT_PORTWWN(isp) (isp)->isp_osinfo.default_port_wwn #define ISP_NODEWWN(isp) FCPARAM(isp)->isp_nodewwn #define ISP_PORTWWN(isp) FCPARAM(isp)->isp_portwwn #define ISP_UNSWIZZLE_AND_COPY_PDBP(isp, dest, src) \ if((void *)src != (void *)dest) bcopy(src, dest, sizeof (isp_pdb_t)) #define ISP_SWIZZLE_ICB(a, b) #define ISP_SWIZZLE_REQUEST(a, b) #define ISP_UNSWIZZLE_RESPONSE(a, b, c) #define ISP_SWIZZLE_SNS_REQ(a, b) #define ISP_UNSWIZZLE_SNS_RSP(a, b, c) #define ISP_SWIZZLE_NVRAM_WORD(isp, x) /* * Includes of common header files */ #include #include #include /* * isp_osinfo definiitions && shorthand */ #define SIMQFRZ_RESOURCE 0x1 #define SIMQFRZ_LOOPDOWN 0x2 #define SIMQFRZ_TIMED 0x4 #define isp_sim isp_osinfo.sim #define isp_path isp_osinfo.path #define isp_sim2 isp_osinfo.sim2 #define isp_path2 isp_osinfo.path2 #define isp_dev isp_osinfo.dev /* * 
prototypes for isp_pci && isp_freebsd to share */ extern void isp_attach(struct ispsoftc *); extern void isp_uninit(struct ispsoftc *); /* * Platform private flags */ #define ISP_SPRIV_ERRSET 0x1 #define ISP_SPRIV_INWDOG 0x2 #define ISP_SPRIV_GRACE 0x4 #define ISP_SPRIV_DONE 0x8 #define XS_CMD_S_WDOG(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_INWDOG #define XS_CMD_C_WDOG(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_INWDOG #define XS_CMD_WDOG_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_INWDOG) #define XS_CMD_S_GRACE(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_GRACE #define XS_CMD_C_GRACE(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_GRACE #define XS_CMD_GRACE_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_GRACE) #define XS_CMD_S_DONE(sccb) (sccb)->ccb_h.spriv_field0 |= ISP_SPRIV_DONE #define XS_CMD_C_DONE(sccb) (sccb)->ccb_h.spriv_field0 &= ~ISP_SPRIV_DONE #define XS_CMD_DONE_P(sccb) ((sccb)->ccb_h.spriv_field0 & ISP_SPRIV_DONE) #define XS_CMD_S_CLEAR(sccb) (sccb)->ccb_h.spriv_field0 = 0 /* * Platform specific inline functions */ -#ifndef ISP_SMPLOCK -static INLINE void isp_lock(struct ispsoftc *); -static INLINE void -isp_lock(struct ispsoftc *isp) -{ - int s = splcam(); - if (isp->isp_osinfo.islocked++ == 0) { - isp->isp_osinfo.splsaved = s; - } else { - splx(s); - } -} - -static INLINE void isp_unlock(struct ispsoftc *); -static INLINE void -isp_unlock(struct ispsoftc *isp) -{ - if (isp->isp_osinfo.islocked) { - if (--isp->isp_osinfo.islocked == 0) { - splx(isp->isp_osinfo.splsaved); - } - } -} -#endif static INLINE void isp_mbox_wait_complete(struct ispsoftc *); static INLINE void isp_mbox_wait_complete(struct ispsoftc *isp) { if (isp->isp_osinfo.intsok) { isp->isp_osinfo.mboxwaiting = 1; #ifdef ISP_SMPLOCK (void) msleep(&isp->isp_osinfo.mboxwaiting, - &isp->isp_osinfo.lock, PRIBIO, "isp_mboxwaiting", 10 * hz); + &isp->isp_lock, PRIBIO, "isp_mboxwaiting", 10 * hz); #else (void) tsleep(&isp->isp_osinfo.mboxwaiting, PRIBIO, "isp_mboxwaiting", 10 * 
hz); #endif if (isp->isp_mboxbsy != 0) { isp_prt(isp, ISP_LOGWARN, "Interrupting Mailbox Command (0x%x) Timeout", isp->isp_lastmbxcmd); isp->isp_mboxbsy = 0; } isp->isp_osinfo.mboxwaiting = 0; } else { int j; for (j = 0; j < 60 * 10000; j++) { if (isp_intr(isp) == 0) { USEC_DELAY(500); } if (isp->isp_mboxbsy == 0) { break; } } if (isp->isp_mboxbsy != 0) { isp_prt(isp, ISP_LOGWARN, "Polled Mailbox Command (0x%x) Timeout", isp->isp_lastmbxcmd); } } } static INLINE u_int64_t nanotime_sub(struct timespec *, struct timespec *); static INLINE u_int64_t nanotime_sub(struct timespec *b, struct timespec *a) { u_int64_t elapsed; struct timespec x = *b; timespecsub(&x, a); elapsed = GET_NANOSEC(&x); if (elapsed == 0) elapsed++; return (elapsed); } static INLINE char *strncat(char *, const char *, size_t); static INLINE char * strncat(char *d, const char *s, size_t c) { char *t = d; if (c) { while (*d) d++; while ((*d++ = *s++)) { if (--c == 0) { *d = '\0'; break; } } } return (t); } /* * Common inline functions */ #include #endif /* _ISP_FREEBSD_H */ diff --git a/sys/dev/isp/isp_ioctl.h b/sys/dev/isp/isp_ioctl.h new file mode 100644 index 000000000000..5ed8a91cc49b --- /dev/null +++ b/sys/dev/isp/isp_ioctl.h @@ -0,0 +1,76 @@ +/* $FreeBSD$ */ +/* + * Copyright (c) 2001 by Matthew Jacob + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * Alternatively, this software may be distributed under the terms of the + * the GNU Public License ("GPL", Library, Version 2). 
+ * + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Matthew Jacob #include #include #include #include #include #include #include #include #include #include #include #include #include static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int)); static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t)); static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int)); static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t)); static int isp_pci_mbxdma __P((struct ispsoftc *)); static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t)); static void isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int16_t)); static void isp_pci_reset1 __P((struct ispsoftc *)); static void isp_pci_dumpregs __P((struct ispsoftc *, const char *)); #ifndef ISP_CODE_ORG #define ISP_CODE_ORG 0x1000 #endif static struct ispmdvec mdvec = { isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown, NULL, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_1080 = { isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown, NULL, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 
}; static struct ispmdvec mdvec_12160 = { isp_pci_rd_reg_1080, isp_pci_wr_reg_1080, isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown, NULL, isp_pci_reset1, isp_pci_dumpregs, NULL, BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 }; static struct ispmdvec mdvec_2100 = { isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown, NULL, isp_pci_reset1, isp_pci_dumpregs }; static struct ispmdvec mdvec_2200 = { isp_pci_rd_reg, isp_pci_wr_reg, isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown, NULL, isp_pci_reset1, isp_pci_dumpregs }; #ifndef PCIM_CMD_INVEN #define PCIM_CMD_INVEN 0x10 #endif #ifndef PCIM_CMD_BUSMASTEREN #define PCIM_CMD_BUSMASTEREN 0x0004 #endif #ifndef PCIM_CMD_PERRESPEN #define PCIM_CMD_PERRESPEN 0x0040 #endif #ifndef PCIM_CMD_SEREN #define PCIM_CMD_SEREN 0x0100 #endif #ifndef PCIR_COMMAND #define PCIR_COMMAND 0x04 #endif #ifndef PCIR_CACHELNSZ #define PCIR_CACHELNSZ 0x0c #endif #ifndef PCIR_LATTIMER #define PCIR_LATTIMER 0x0d #endif #ifndef PCIR_ROMADDR #define PCIR_ROMADDR 0x30 #endif #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1020 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1080 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP12160 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1240 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP1280 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2100 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP2200 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 #endif #define PCI_QLOGIC_ISP1020 \ ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1080 \ ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP12160 \ ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1240 \ 
((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP1280 \ ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2100 \ ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) #define PCI_QLOGIC_ISP2200 \ ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) /* * Odd case for some AMI raid cards... We need to *not* attach to this. */ #define AMI_RAID_SUBVENDOR_ID 0x101e #define IO_MAP_REG 0x10 #define MEM_MAP_REG 0x14 #define PCI_DFLT_LTNCY 0x40 #define PCI_DFLT_LNSZ 0x10 static int isp_pci_probe (device_t); static int isp_pci_attach (device_t); struct isp_pcisoftc { struct ispsoftc pci_isp; device_t pci_dev; struct resource * pci_reg; bus_space_tag_t pci_st; bus_space_handle_t pci_sh; void * ih; int16_t pci_poff[_NREG_BLKS]; bus_dma_tag_t parent_dmat; bus_dma_tag_t cntrol_dmat; bus_dmamap_t cntrol_dmap; bus_dmamap_t *dmaps; }; ispfwfunc *isp_get_firmware_p = NULL; static device_method_t isp_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isp_pci_probe), DEVMETHOD(device_attach, isp_pci_attach), { 0, 0 } }; static void isp_pci_intr __P((void *)); static driver_t isp_pci_driver = { "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) }; static devclass_t isp_devclass; DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); MODULE_VERSION(isp, 1); static int isp_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP1020: device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1080: device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1240: device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP1280: device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); break; case PCI_QLOGIC_ISP12160: if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { return (ENXIO); } device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); break; case 
PCI_QLOGIC_ISP2100: device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); break; case PCI_QLOGIC_ISP2200: device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); break; default: return (ENXIO); } if (device_get_unit(dev) == 0 && bootverbose) { printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " "Core Version %d.%d\n", ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); } /* * XXXX: Here is where we might load the f/w module * XXXX: (or increase a reference count to it). */ return (0); } static int isp_pci_attach(device_t dev) { struct resource *regs, *irq; int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; u_int32_t data, cmd, linesz, psize, basetype; struct isp_pcisoftc *pcs; struct ispsoftc *isp = NULL; struct ispmdvec *mdvp; bus_size_t lim; char *sptr; -#ifdef ISP_SMPLOCK int locksetup = 0; -#endif /* * Figure out if we're supposed to skip this one. * If we are, we actually go to ISP_ROLE_NONE. */ tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) { device_printf(dev, "device is disabled\n"); /* but return 0 so the !$)$)*!$*) unit isn't reused */ return (0); } role = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &role) == 0 && ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { device_printf(dev, "setting role to 0x%x\n", role); } else { #ifdef ISP_TARGET_MODE role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; #else role = ISP_DEFAULT_ROLES; #endif } pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcs == NULL) { device_printf(dev, "cannot allocate softc\n"); return (ENOMEM); } /* * Figure out which we should try first - memory mapping or i/o mapping? 
*/ #ifdef __alpha__ m1 = PCIM_CMD_MEMEN; m2 = PCIM_CMD_PORTEN; #else m1 = PCIM_CMD_PORTEN; m2 = PCIM_CMD_MEMEN; #endif tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) { m1 = PCIM_CMD_PORTEN; m2 = PCIM_CMD_MEMEN; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) { m1 = PCIM_CMD_MEMEN; m2 = PCIM_CMD_PORTEN; } linesz = PCI_DFLT_LNSZ; irq = regs = NULL; rgd = rtp = iqd = 0; cmd = pci_read_config(dev, PCIR_COMMAND, 1); if (cmd & m1) { rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); } if (regs == NULL && (cmd & m2)) { rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); } if (regs == NULL) { device_printf(dev, "unable to map any ports\n"); goto bad; } if (bootverbose) device_printf(dev, "using %s space register mapping\n", (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); pcs->pci_dev = dev; pcs->pci_reg = regs; pcs->pci_st = rman_get_bustag(regs); pcs->pci_sh = rman_get_bushandle(regs); pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; mdvp = &mdvec; basetype = ISP_HA_SCSI_UNKNOWN; psize = sizeof (sdparam); lim = BUS_SPACE_MAXSIZE_32BIT; if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { mdvp = &mdvec; basetype = ISP_HA_SCSI_UNKNOWN; psize = sizeof (sdparam); lim = BUS_SPACE_MAXSIZE_24BIT; } if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { mdvp = &mdvec_1080; basetype = ISP_HA_SCSI_1080; psize = sizeof (sdparam); pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; } if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { mdvp = &mdvec_1080; basetype = ISP_HA_SCSI_1240; psize = 2 * sizeof (sdparam); pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; } if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { mdvp = &mdvec_1080; basetype = ISP_HA_SCSI_1280; psize = 2 * sizeof (sdparam); pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; } if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { mdvp = &mdvec_12160; basetype = ISP_HA_SCSI_12160; psize = 2 * sizeof (sdparam); pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF; } if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { mdvp = &mdvec_2100; basetype = ISP_HA_FC_2100; psize = sizeof (fcparam); pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; if (pci_get_revid(dev) < 3) { /* * XXX: Need to get the actual revision * XXX: number of the 2100 FB. At any rate, * XXX: lower cache line size for early revision * XXX; boards. 
*/ linesz = 1; } } if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { mdvp = &mdvec_2200; basetype = ISP_HA_FC_2200; psize = sizeof (fcparam); pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF; } isp = &pcs->pci_isp; isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); if (isp->isp_param == NULL) { device_printf(dev, "cannot allocate parameter data\n"); goto bad; } isp->isp_mdvec = mdvp; isp->isp_type = basetype; isp->isp_revision = pci_get_revid(dev); isp->isp_role = role; isp->isp_dev = dev; /* * Try and find firmware for this device. */ if (isp_get_firmware_p) { int device = (int) pci_get_device(dev); #ifdef ISP_TARGET_MODE (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); #else (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); #endif } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER * are set. */ cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; pci_write_config(dev, PCIR_COMMAND, cmd, 1); /* * Make sure the Cache Line Size register is set sensibly. */ data = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (data != linesz) { data = PCI_DFLT_LNSZ; isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); pci_write_config(dev, PCIR_CACHELNSZ, data, 1); } /* * Make sure the Latency Timer is sane. */ data = pci_read_config(dev, PCIR_LATTIMER, 1); if (data < PCI_DFLT_LTNCY) { data = PCI_DFLT_LTNCY; isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); pci_write_config(dev, PCIR_LATTIMER, data, 1); } /* * Make sure we've disabled the ROM. 
*/ data = pci_read_config(dev, PCIR_ROMADDR, 4); data &= ~1; pci_write_config(dev, PCIR_ROMADDR, data, 4); if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, lim + 1, 255, lim, 0, &pcs->parent_dmat) != 0) { device_printf(dev, "could not create master dma tag\n"); free(isp->isp_param, M_DEVBUF); free(pcs, M_DEVBUF); return (ENXIO); } iqd = 0; irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NORELOAD; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_NONVRAM; } tval = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) { isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; } sptr = 0; if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", &sptr) == 0 && sptr != 0) { if (strcmp(sptr, "lport") == 0) { isp->isp_confopts |= ISP_CFG_LPORT; } else if (strcmp(sptr, "nport") == 0) { isp->isp_confopts |= ISP_CFG_NPORT; } else if (strcmp(sptr, "lport-only") == 0) { isp->isp_confopts |= ISP_CFG_LPORT_ONLY; } else if (strcmp(sptr, "nport-only") == 0) { isp->isp_confopts |= ISP_CFG_NPORT_ONLY; } } /* * Because the resource_*_value functions can neither return * 64 bit integer values, nor can they be directly coerced * to interpret the right hand side of the assignment as * you want them to interpret it, we have to force WWN * hint replacement to specify WWN strings with a leading * 'w' (e..g w50000000aaaa0001). Sigh. 
*/ sptr = 0; tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { device_printf(dev, "mangled portwwn hint '%s'\n", sptr); isp->isp_osinfo.default_port_wwn = 0; } else { isp->isp_confopts |= ISP_CFG_OWNWWN; } } if (isp->isp_osinfo.default_port_wwn == 0) { isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; } sptr = 0; tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", &sptr); if (tval == 0 && sptr != 0 && *sptr++ == 'w') { char *eptr = 0; isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); isp->isp_osinfo.default_node_wwn = 0; } else { isp->isp_confopts |= ISP_CFG_OWNWWN; } } if (isp->isp_osinfo.default_node_wwn == 0) { isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; } isp_debug = 0; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &isp_debug); -#ifdef ISP_SMPLOCK /* Make sure the lock is set up. */ mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF); locksetup++; - if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY, - isp_pci_intr, isp, &pcs->ih)) { - device_printf(dev, "could not setup interrupt\n"); - goto bad; - } +#ifdef ISP_SMPLOCK +#define INTR_FLAGS INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY #else - if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_ENTROPY, - isp_pci_intr, isp, &pcs->ih)) { +#define INTR_FLAGS INTR_TYPE_CAM | INTR_ENTROPY +#endif + if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } -#endif /* * Set up logging levels. 
*/ if (isp_debug) { isp->isp_dblev = isp_debug; } else { isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; } if (bootverbose) isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; /* * Make sure we're in reset state. */ ISP_LOCK(isp); isp_reset(isp); if (isp->isp_state != ISP_RESETSTATE) { ISP_UNLOCK(isp); goto bad; } isp_init(isp); if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } isp_attach(isp); if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { isp_uninit(isp); ISP_UNLOCK(isp); goto bad; } /* * XXXX: Here is where we might unload the f/w module * XXXX: (or decrease the reference count to it). */ ISP_UNLOCK(isp); return (0); bad: if (pcs && pcs->ih) { (void) bus_teardown_intr(dev, irq, pcs->ih); } -#ifdef ISP_SMPLOCK if (locksetup && isp) { mtx_destroy(&isp->isp_osinfo.lock); } -#endif if (irq) { (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); } if (regs) { (void) bus_release_resource(dev, rtp, rgd, regs); } if (pcs) { if (pcs->pci_isp.isp_param) free(pcs->pci_isp.isp_param, M_DEVBUF); free(pcs, M_DEVBUF); } /* * XXXX: Here is where we might unload the f/w module * XXXX: (or decrease the reference count to it). */ return (ENXIO); } static void isp_pci_intr(void *arg) { struct ispsoftc *isp = arg; ISP_LOCK(isp); (void) isp_intr(isp); ISP_UNLOCK(isp); } static u_int16_t isp_pci_rd_reg(isp, regoff) struct ispsoftc *isp; int regoff; { u_int16_t rv; struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; int offset, oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. 
*/ oldconf = isp_pci_rd_reg(isp, BIU_CONF1); isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP); } offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { isp_pci_wr_reg(isp, BIU_CONF1, oldconf); } return (rv); } static void isp_pci_wr_reg(isp, regoff, val) struct ispsoftc *isp; int regoff; u_int16_t val; { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; int offset, oldconf = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { /* * We will assume that someone has paused the RISC processor. */ oldconf = isp_pci_rd_reg(isp, BIU_CONF1); isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP); } offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val); if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { isp_pci_wr_reg(isp, BIU_CONF1, oldconf); } } static u_int16_t isp_pci_rd_reg_1080(isp, regoff) struct ispsoftc *isp; int regoff; { u_int16_t rv, oc = 0; struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; int offset; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { u_int16_t tc; /* * We will assume that someone has paused the RISC processor. 
*/ oc = isp_pci_rd_reg(isp, BIU_CONF1); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; isp_pci_wr_reg(isp, BIU_CONF1, tc); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = isp_pci_rd_reg(isp, BIU_CONF1); isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA); } offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset); if (oc) { isp_pci_wr_reg(isp, BIU_CONF1, oc); } return (rv); } static void isp_pci_wr_reg_1080(isp, regoff, val) struct ispsoftc *isp; int regoff; u_int16_t val; { struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; int offset, oc = 0; if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { u_int16_t tc; /* * We will assume that someone has paused the RISC processor. */ oc = isp_pci_rd_reg(isp, BIU_CONF1); tc = oc & ~BIU_PCI1080_CONF1_DMA; if (regoff & SXP_BANK1_SELECT) tc |= BIU_PCI1080_CONF1_SXP1; else tc |= BIU_PCI1080_CONF1_SXP0; isp_pci_wr_reg(isp, BIU_CONF1, tc); } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { oc = isp_pci_rd_reg(isp, BIU_CONF1); isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA); } offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; offset += (regoff & 0xff); bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val); if (oc) { isp_pci_wr_reg(isp, BIU_CONF1, oc); } } static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int)); static void isp_map_result __P((void *, bus_dma_segment_t *, int, int)); static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int)); struct imush { struct ispsoftc *isp; int error; }; static void isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; } else { imushp->isp->isp_rquest_dma = segs->ds_addr; } } static void 
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; } else { imushp->isp->isp_result_dma = segs->ds_addr; } } static void isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct imush *imushp = (struct imush *) arg; if (error) { imushp->error = error; } else { fcparam *fcp = imushp->isp->isp_param; fcp->isp_scdma = segs->ds_addr; } } static int isp_pci_mbxdma(struct ispsoftc *isp) { struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; caddr_t base; u_int32_t len; int i, error; bus_size_t lim; struct imush im; /* * Already been here? If so, leave... */ if (isp->isp_rquest) { return (0); } len = sizeof (XS_T **) * isp->isp_maxcmds; isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); if (isp->isp_xflist == NULL) { isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); return (1); } len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); if (pci->dmaps == NULL) { isp_prt(isp, ISP_LOGERR, "can't alloc dma maps"); free(isp->isp_xflist, M_DEVBUF); return (1); } if (IS_FC(isp) || IS_ULTRA2(isp)) lim = BUS_SPACE_MAXADDR + 1; else lim = BUS_SPACE_MAXADDR_24BIT + 1; /* * Allocate and map the request, result queues, plus FC scratch area. 
*/ len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); if (IS_FC(isp)) { len += ISP2100_SCRLEN; } if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) { isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces"); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); return (1); } if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base, BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) { isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); return (1); } isp->isp_rquest = base; im.isp = isp; im.error = 0; bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading dma map for DMA request queue", im.error); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); isp->isp_rquest = NULL; return (1); } isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); im.error = 0; bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading dma map for DMA result queue", im.error); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); isp->isp_rquest = NULL; return (1); } for (i = 0; i < isp->isp_maxcmds; i++) { error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]); if (error) { isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); isp->isp_rquest = NULL; return (1); } } if (IS_FC(isp)) { fcparam *fcp = (fcparam *) isp->isp_param; fcp->isp_scratch = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) + ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); im.error = 0; 
bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0); if (im.error) { isp_prt(isp, ISP_LOGERR, "error %d loading FC scratch area", im.error); free(isp->isp_xflist, M_DEVBUF); free(pci->dmaps, M_DEVBUF); isp->isp_rquest = NULL; return (1); } } return (0); } typedef struct { struct ispsoftc *isp; void *cmd_token; void *rq; u_int16_t *iptrp; u_int16_t optr; u_int error; } mush_t; #define MUSHERR_NOQENTRIES -2 #ifdef ISP_TARGET_MODE /* * We need to handle DMA for target mode differently from initiator mode. * * DMA mapping and construction and submission of CTIO Request Entries * and rendevous for completion are very tightly coupled because we start * out by knowing (per platform) how much data we have to move, but we * don't know, up front, how many DMA mapping segments will have to be used * cover that data, so we don't know how many CTIO Request Entries we * will end up using. Further, for performance reasons we may want to * (on the last CTIO for Fibre Channel), send status too (if all went well). * * The standard vector still goes through isp_pci_dmasetup, but the callback * for the DMA mapping routines comes here instead with the whole transfer * mapped and a pointer to a partially filled in already allocated request * queue entry. We finish the job. 
*/ static void tdma_mk(void *, bus_dma_segment_t *, int, int); static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); #define STATUS_WITH_DATA 1 static void tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; struct ccb_scsiio *csio; struct isp_pcisoftc *pci; bus_dmamap_t *dp; u_int8_t scsi_status; ct_entry_t *cto; u_int16_t handle; u_int32_t totxfr, sflags; int nctios, send_status; int32_t resid; int i, j; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; cto = mp->rq; cto->ct_xfrlen = 0; cto->ct_seg_count = 0; cto->ct_header.rqs_entry_count = 1; MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); if (nseg == 0) { cto->ct_header.rqs_seqno = 1; isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, cto->ct_tag_val, cto->ct_flags, cto->ct_status, cto->ct_scsi_status, cto->ct_resid); ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto); ISP_SWIZ_CTIO(mp->isp, cto, cto); return; } nctios = nseg / ISP_RQDSEG; if (nseg % ISP_RQDSEG) { nctios++; } /* * Check to see that we don't overflow. */ for (i = 0, j = *mp->iptrp; i < nctios; i++) { j = ISP_NXT_QENTRY(j, RQUEST_QUEUE_LEN(isp)); if (j == mp->optr) { isp_prt(mp->isp, ISP_LOGWARN, "Request Queue Overflow [tdma_mk]"); mp->error = MUSHERR_NOQENTRIES; return; } } /* * Save syshandle, and potentially any SCSI status, which we'll * reinsert on the last CTIO we're going to send. */ handle = cto->ct_syshandle; cto->ct_syshandle = 0; cto->ct_header.rqs_seqno = 0; send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; if (send_status) { sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); /* * Preserve residual. */ resid = cto->ct_resid; /* * Save actual SCSI status. 
*/ scsi_status = cto->ct_scsi_status; #ifndef STATUS_WITH_DATA sflags |= CT_NO_DATA; /* * We can't do a status at the same time as a data CTIO, so * we need to synthesize an extra CTIO at this level. */ nctios++; #endif } else { sflags = scsi_status = resid = 0; } totxfr = cto->ct_resid = 0; cto->ct_scsi_status = 0; pci = (struct isp_pcisoftc *)mp->isp; dp = &pci->dmaps[isp_handle_index(handle)]; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); } while (nctios--) { int seglim; seglim = nseg; if (seglim) { int seg; if (seglim > ISP_RQDSEG) seglim = ISP_RQDSEG; for (seg = 0; seg < seglim; seg++, nseg--) { /* * Unlike normal initiator commands, we don't * do any swizzling here. */ cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; cto->ct_xfrlen += dm_segs->ds_len; totxfr += dm_segs->ds_len; dm_segs++; } cto->ct_seg_count = seg; } else { /* * This case should only happen when we're sending an * extra CTIO with final status. */ if (send_status == 0) { isp_prt(mp->isp, ISP_LOGWARN, "tdma_mk ran out of segments"); mp->error = EINVAL; return; } } /* * At this point, the fields ct_lun, ct_iid, ct_tagval, * ct_tagtype, and ct_timeout have been carried over * unchanged from what our caller had set. * * The dataseg fields and the seg_count fields we just got * through setting. The data direction we've preserved all * along and only clear it if we're now sending status. */ if (nctios == 0) { /* * We're the last in a sequence of CTIOs, so mark * this CTIO and save the handle to the CCB such that * when this CTIO completes we can free dma resources * and do whatever else we need to do to finish the * rest of the command. We *don't* give this to the * firmware to work on- the caller will do that. 
*/ cto->ct_syshandle = handle; cto->ct_header.rqs_seqno = 1; if (send_status) { cto->ct_scsi_status = scsi_status; cto->ct_flags |= sflags; cto->ct_resid = resid; } if (send_status) { isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO[%x] lun%d iid %d tag %x ct_flags %x " "scsi status %x resid %d", cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, cto->ct_tag_val, cto->ct_flags, cto->ct_scsi_status, cto->ct_resid); } else { isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, cto->ct_tag_val, cto->ct_flags); } ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto); ISP_SWIZ_CTIO(mp->isp, cto, cto); } else { ct_entry_t *octo = cto; /* * Make sure syshandle fields are clean */ cto->ct_syshandle = 0; cto->ct_header.rqs_seqno = 0; isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO[%x] lun%d for ID%d ct_flags 0x%x", cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags); ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto); /* * Get a new CTIO */ cto = (ct_entry_t *) ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); j = *mp->iptrp; *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); if (*mp->iptrp == mp->optr) { isp_prt(mp->isp, ISP_LOGTDEBUG0, "Queue Overflow in tdma_mk"); mp->error = MUSHERR_NOQENTRIES; return; } /* * Fill in the new CTIO with info from the old one. 
*/ cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; cto->ct_fwhandle = octo->ct_fwhandle; cto->ct_header.rqs_flags = 0; cto->ct_lun = octo->ct_lun; cto->ct_iid = octo->ct_iid; cto->ct_reserved2 = octo->ct_reserved2; cto->ct_tgt = octo->ct_tgt; cto->ct_flags = octo->ct_flags; cto->ct_status = 0; cto->ct_scsi_status = 0; cto->ct_tag_val = octo->ct_tag_val; cto->ct_tag_type = octo->ct_tag_type; cto->ct_xfrlen = 0; cto->ct_resid = 0; cto->ct_timeout = octo->ct_timeout; cto->ct_seg_count = 0; MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); /* * Now swizzle the old one for the consumption * of the chip and give it to the firmware to * work on while we do the next. */ ISP_SWIZ_CTIO(mp->isp, octo, octo); ISP_ADD_REQUEST(mp->isp, j); } } } static void tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; struct ccb_scsiio *csio; struct isp_pcisoftc *pci; bus_dmamap_t *dp; ct2_entry_t *cto; u_int16_t scsi_status, send_status, send_sense, handle; u_int32_t totxfr, datalen; u_int8_t sense[QLTM_SENSELEN]; - int nctios; + int nctios, j; mp = (mush_t *) arg; if (error) { mp->error = error; return; } csio = mp->cmd_token; cto = mp->rq; if (nseg == 0) { if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { isp_prt(mp->isp, ISP_LOGWARN, "dma2_tgt_fc, a status CTIO2 without MODE1 " "set (0x%x)", cto->ct_flags); mp->error = EINVAL; return; } cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno = 1; /* ct_syshandle contains the handle set by caller */ /* * We preserve ct_lun, ct_iid, ct_rxid. We set the data * flags to NO DATA and clear relative offset flags. * We preserve the ct_resid and the response area. 
*/ cto->ct_flags |= CT2_NO_DATA; if (cto->ct_resid > 0) - cto->ct_flags |= CT2_DATA_UNDER; + cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; else if (cto->ct_resid < 0) - cto->ct_flags |= CT2_DATA_OVER; + cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; cto->ct_seg_count = 0; cto->ct_reloff = 0; ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto); isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, cto->ct_resid); ISP_SWIZ_CTIO2(isp, cto, cto); return; } if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { isp_prt(mp->isp, ISP_LOGWARN, "dma2_tgt_fc, a data CTIO2 without MODE0 set " "(0x%x)", cto->ct_flags); mp->error = EINVAL; return; } nctios = nseg / ISP_RQDSEG_T2; if (nseg % ISP_RQDSEG_T2) { nctios++; } /* * Save the handle, status, reloff, and residual. We'll reinsert the * handle into the last CTIO2 we're going to send, and reinsert status * and residual (and possibly sense data) if that's to be sent as well. * * We preserve ct_reloff and adjust it for each data CTIO2 we send past * the first one. This is needed so that the FCP DATA IUs being sent * out have the correct offset (they can arrive at the other end out * of order). */ handle = cto->ct_syshandle; cto->ct_syshandle = 0; + send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0; - if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) { - cto->ct_flags &= ~CT2_SENDSTATUS; + if (send_status) { + cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR); /* * Preserve residual, which is actually the total count. */ datalen = cto->ct_resid; /* * Save actual SCSI status. We'll reinsert the * CT2_SNSLEN_VALID later if appropriate. 
*/ scsi_status = cto->rsp.m0.ct_scsi_status & 0xff; send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID; /* * If we're sending status and have a CHECK CONDTION and * have sense data, we send one more CTIO2 with just the * status and sense data. The upper layers have stashed * the sense data in the dataseg structure for us. */ if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND && send_sense) { bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN); nctios++; } } else { scsi_status = send_sense = datalen = 0; } totxfr = cto->ct_resid = 0; cto->rsp.m0.ct_scsi_status = 0; - bzero(&cto->rsp, sizeof (cto->rsp)); + MEMZERO(&cto->rsp, sizeof (cto->rsp)); pci = (struct isp_pcisoftc *)mp->isp; dp = &pci->dmaps[isp_handle_index(handle)]; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); } while (nctios--) { int seg, seglim; seglim = nseg; if (seglim) { if (seglim > ISP_RQDSEG_T2) seglim = ISP_RQDSEG_T2; for (seg = 0; seg < seglim; seg++) { cto->rsp.m0.ct_dataseg[seg].ds_base = dm_segs->ds_addr; cto->rsp.m0.ct_dataseg[seg].ds_count = dm_segs->ds_len; cto->rsp.m0.ct_xfrlen += dm_segs->ds_len; totxfr += dm_segs->ds_len; dm_segs++; } cto->ct_seg_count = seg; } else { /* * This case should only happen when we're sending a * synthesized MODE1 final status with sense data. */ if (send_sense == 0) { isp_prt(mp->isp, ISP_LOGWARN, "dma2_tgt_fc ran out of segments, " "no SENSE DATA"); mp->error = EINVAL; return; } } /* * At this point, the fields ct_lun, ct_iid, ct_rxid, * ct_timeout have been carried over unchanged from what * our caller had set. * * The field ct_reloff is either what the caller set, or * what we've added to below. * * The dataseg fields and the seg_count fields we just got * through setting. The data direction we've preserved all * along and only clear it if we're sending a MODE1 status * as the last CTIO. 
* */ if (nctios == 0) { /* * We're the last in a sequence of CTIO2s, so mark this * CTIO2 and save the handle to the CCB such that when * this CTIO2 completes we can free dma resources and * do whatever else we need to do to finish the rest * of the command. */ cto->ct_syshandle = handle; cto->ct_header.rqs_seqno = 1; if (send_status) { + /* + * Get 'real' residual and set flags based + * on it. + */ + cto->ct_resid = datalen - totxfr; if (send_sense) { - bcopy(sense, cto->rsp.m1.ct_resp, + MEMCPY(cto->rsp.m1.ct_resp, sense, QLTM_SENSELEN); cto->rsp.m1.ct_senselen = QLTM_SENSELEN; scsi_status |= CT2_SNSLEN_VALID; cto->rsp.m1.ct_scsi_status = scsi_status; cto->ct_flags &= CT2_FLAG_MMASK; cto->ct_flags |= CT2_FLAG_MODE1 | - CT2_NO_DATA| CT2_SENDSTATUS; + CT2_NO_DATA | CT2_SENDSTATUS | + CT2_CCINCR; + if (cto->ct_resid > 0) + cto->rsp.m1.ct_scsi_status |= + CT2_DATA_UNDER; + else if (cto->ct_resid < 0) + cto->rsp.m1.ct_scsi_status |= + CT2_DATA_OVER; } else { cto->rsp.m0.ct_scsi_status = scsi_status; - cto->ct_flags |= CT2_SENDSTATUS; + cto->ct_flags |= + CT2_SENDSTATUS | CT2_CCINCR; + if (cto->ct_resid > 0) + cto->rsp.m0.ct_scsi_status |= + CT2_DATA_UNDER; + else if (cto->ct_resid < 0) + cto->rsp.m0.ct_scsi_status |= + CT2_DATA_OVER; } - /* - * Get 'real' residual and set flags based - * on it. 
- */ - cto->ct_resid = datalen - totxfr; - if (cto->ct_resid > 0) - cto->ct_flags |= CT2_DATA_UNDER; - else if (cto->ct_resid < 0) - cto->ct_flags |= CT2_DATA_OVER; } ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto); isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x" " ssts 0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, cto->ct_resid); ISP_SWIZ_CTIO2(isp, cto, cto); } else { ct2_entry_t *octo = cto; /* * Make sure handle fields are clean */ cto->ct_syshandle = 0; cto->ct_header.rqs_seqno = 0; ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto); isp_prt(mp->isp, ISP_LOGTDEBUG1, "CTIO2[%x] lun %d->iid%d flgs 0x%x", cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, cto->ct_flags); /* * Get a new CTIO2 */ cto = (ct2_entry_t *) ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); + j = *mp->iptrp; *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); if (*mp->iptrp == mp->optr) { isp_prt(mp->isp, ISP_LOGWARN, "Queue Overflow in dma2_tgt_fc"); mp->error = MUSHERR_NOQENTRIES; return; } /* * Fill in the new CTIO2 with info from the old one. */ cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_flags = 0; /* ct_header.rqs_seqno && ct_syshandle done later */ cto->ct_fwhandle = octo->ct_fwhandle; cto->ct_lun = octo->ct_lun; cto->ct_iid = octo->ct_iid; cto->ct_rxid = octo->ct_rxid; cto->ct_flags = octo->ct_flags; cto->ct_status = 0; cto->ct_resid = 0; cto->ct_timeout = octo->ct_timeout; cto->ct_seg_count = 0; /* * Adjust the new relative offset by the amount which * is recorded in the data segment of the old CTIO2 we * just finished filling out. 
*/ cto->ct_reloff += octo->rsp.m0.ct_xfrlen; - bzero(&cto->rsp, sizeof (cto->rsp)); - ISP_SWIZ_CTIO2(isp, cto, cto); + MEMZERO(&cto->rsp, sizeof (cto->rsp)); + ISP_SWIZ_CTIO2(isp, octo, octo); + ISP_ADD_REQUEST(mp->isp, j); } } } #endif static void dma2 __P((void *, bus_dma_segment_t *, int, int)); static void dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { mush_t *mp; struct ccb_scsiio *csio; struct isp_pcisoftc *pci; bus_dmamap_t *dp; bus_dma_segment_t *eseg; ispreq_t *rq; ispcontreq_t *crq; int seglim, datalen; mp = (mush_t *) arg; if (error) { mp->error = error; return; } if (nseg < 1) { isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); mp->error = EFAULT; return; } csio = mp->cmd_token; rq = mp->rq; pci = (struct isp_pcisoftc *)mp->isp; dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); } datalen = XS_XFRLEN(csio); /* * We're passed an initial partially filled in entry that * has most fields filled in except for data transfer * related values. * * Our job is to fill in the initial request queue entry and * then to start allocating and filling in continuation entries * until we've covered the entire transfer. 
*/ if (IS_FC(mp->isp)) { seglim = ISP_RQDSEG_T2; ((ispreqt2_t *)rq)->req_totalcnt = datalen; if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; } else { ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; } } else { if (csio->cdb_len > 12) { seglim = 0; } else { seglim = ISP_RQDSEG; } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { rq->req_flags |= REQFLAG_DATA_IN; } else { rq->req_flags |= REQFLAG_DATA_OUT; } } eseg = dm_segs + nseg; while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { if (IS_FC(mp->isp)) { ispreqt2_t *rq2 = (ispreqt2_t *)rq; rq2->req_dataseg[rq2->req_seg_count].ds_base = dm_segs->ds_addr; rq2->req_dataseg[rq2->req_seg_count].ds_count = dm_segs->ds_len; } else { rq->req_dataseg[rq->req_seg_count].ds_base = dm_segs->ds_addr; rq->req_dataseg[rq->req_seg_count].ds_count = dm_segs->ds_len; } datalen -= dm_segs->ds_len; #if 0 if (IS_FC(mp->isp)) { ispreqt2_t *rq2 = (ispreqt2_t *)rq; device_printf(mp->isp->isp_dev, "seg0[%d] cnt 0x%x paddr 0x%08x\n", rq->req_seg_count, rq2->req_dataseg[rq2->req_seg_count].ds_count, rq2->req_dataseg[rq2->req_seg_count].ds_base); } else { device_printf(mp->isp->isp_dev, "seg0[%d] cnt 0x%x paddr 0x%08x\n", rq->req_seg_count, rq->req_dataseg[rq->req_seg_count].ds_count, rq->req_dataseg[rq->req_seg_count].ds_base); } #endif rq->req_seg_count++; dm_segs++; } while (datalen > 0 && dm_segs != eseg) { crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); if (*mp->iptrp == mp->optr) { isp_prt(mp->isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); mp->error = MUSHERR_NOQENTRIES; return; } rq->req_header.rqs_entry_count++; bzero((void *)crq, sizeof (*crq)); crq->req_header.rqs_entry_count = 1; crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; seglim = 0; while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { crq->req_dataseg[seglim].ds_base = dm_segs->ds_addr; 
crq->req_dataseg[seglim].ds_count = dm_segs->ds_len; #if 0 device_printf(mp->isp->isp_dev, "seg%d[%d] cnt 0x%x paddr 0x%08x\n", rq->req_header.rqs_entry_count-1, seglim, crq->req_dataseg[seglim].ds_count, crq->req_dataseg[seglim].ds_base); #endif rq->req_seg_count++; dm_segs++; seglim++; datalen -= dm_segs->ds_len; } } } static int isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, u_int16_t *iptrp, u_int16_t optr) { struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; bus_dmamap_t *dp = NULL; mush_t mush, *mp; void (*eptr) __P((void *, bus_dma_segment_t *, int, int)); #ifdef ISP_TARGET_MODE if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { if (IS_FC(isp)) { eptr = tdma_mkfc; } else { eptr = tdma_mk; } if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { mp = &mush; mp->isp = isp; mp->cmd_token = csio; mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ mp->iptrp = iptrp; mp->optr = optr; mp->error = 0; (*eptr)(mp, NULL, 0, 0); goto exit; } } else #endif eptr = dma2; /* * NB: if we need to do request queue entry swizzling, * NB: this is where it would need to be done for cmds * NB: that move no data. For commands that move data, * NB: swizzling would take place in those functions. */ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) { rq->req_seg_count = 1; return (CMD_QUEUED); } /* * Do a virtual grapevine step to collect info for * the callback dma allocation that we have to use... 
*/ mp = &mush; mp->isp = isp; mp->cmd_token = csio; mp->rq = rq; mp->iptrp = iptrp; mp->optr = optr; mp->error = 0; if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { int error, s; dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; s = splsoftvm(); error = bus_dmamap_load(pci->parent_dmat, *dp, csio->data_ptr, csio->dxfer_len, eptr, mp, 0); if (error == EINPROGRESS) { bus_dmamap_unload(pci->parent_dmat, *dp); mp->error = EINVAL; isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); } else if (error && mp->error == 0) { #ifdef DIAGNOSTIC isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); #endif mp->error = error; } splx(s); } else { /* Pointer to physical buffer */ struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; (*eptr)(mp, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported"); mp->error = EINVAL; } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { isp_prt(isp, ISP_LOGERR, "Virtual segment addresses unsupported"); mp->error = EINVAL; } else { /* Just use the segments provided */ segs = (struct bus_dma_segment *) csio->data_ptr; (*eptr)(mp, segs, csio->sglist_cnt, 0); } } #ifdef ISP_TARGET_MODE exit: #endif if (mp->error) { int retval = CMD_COMPLETE; if (mp->error == MUSHERR_NOQENTRIES) { retval = CMD_EAGAIN; } else if (mp->error == EFBIG) { XS_SETERR(csio, CAM_REQ_TOO_BIG); } else if (mp->error == EINVAL) { XS_SETERR(csio, CAM_REQ_INVALID); } else { XS_SETERR(csio, CAM_UNREC_HBA_ERROR); } return (retval); } else { /* * Check to see if we weren't cancelled while sleeping on * getting DMA resources... 
*/ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (dp) { bus_dmamap_unload(pci->parent_dmat, *dp); } return (CMD_COMPLETE); } return (CMD_QUEUED); } } static void isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) { struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)]; if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pci->parent_dmat, *dp); } static void isp_pci_reset1(struct ispsoftc *isp) { /* Make sure the BIOS is disabled */ isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); /* and enable interrupts */ ENABLE_INTS(isp); } static void isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) { struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; if (msg) printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); else printf("%s:\n", device_get_nameunit(isp->isp_dev)); if (IS_SCSI(isp)) printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); else printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); if (IS_SCSI(isp)) { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), ISP_READ(isp, CDMA_FIFO_STS)); printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), ISP_READ(isp, DDMA_FIFO_STS)); printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", ISP_READ(isp, SXP_INTERRUPT), ISP_READ(isp, SXP_GROSS_ERR), ISP_READ(isp, SXP_PINS_CTRL)); ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } printf(" mbox regs: %x %x %x %x %x\n", ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, 
OUTMAILBOX3), ISP_READ(isp, OUTMAILBOX4)); printf(" PCI Status Command/Status=%x\n", pci_read_config(pci->pci_dev, PCIR_COMMAND, 1)); } diff --git a/sys/dev/isp/isp_target.c b/sys/dev/isp/isp_target.c index 518b4cbbeebb..86ab25b36347 100644 --- a/sys/dev/isp/isp_target.c +++ b/sys/dev/isp/isp_target.c @@ -1,1191 +1,1162 @@ /* $FreeBSD$ */ /* * Machine and OS Independent Target Mode Code for the Qlogic SCSI/FC adapters. * * Copyright (c) 1999, 2000, 2001 by Matthew Jacob * All rights reserved. * mjacob@feral.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Include header file appropriate for platform we're building on. 
*/ #ifdef __NetBSD__ #include #endif #ifdef __FreeBSD__ #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef ISP_TARGET_MODE static const char atiocope[] = "ATIO returned for lun %d because it was in the middle of Bus Device Reset"; static const char atior[] = "ATIO returned for lun %d from initiator %d because a Bus Reset occurred"; static void isp_got_msg(struct ispsoftc *, int, in_entry_t *); static void isp_got_msg_fc(struct ispsoftc *, int, in_fcentry_t *); static void isp_notify_ack(struct ispsoftc *, void *); static void isp_handle_atio(struct ispsoftc *, at_entry_t *); static void isp_handle_atio2(struct ispsoftc *, at2_entry_t *); static void isp_handle_ctio(struct ispsoftc *, ct_entry_t *); static void isp_handle_ctio2(struct ispsoftc *, ct2_entry_t *); /* * The Qlogic driver gets an interrupt to look at response queue entries. * Some of these are status completions for initiatior mode commands, but * if target mode is enabled, we get a whole wad of response queue entries * to be handled here. * * Basically the split into 3 main groups: Lun Enable/Modification responses, * SCSI Command processing, and Immediate Notification events. * * You start by writing a request queue entry to enable target mode (and * establish some resource limitations which you can modify later). * The f/w responds with a LUN ENABLE or LUN MODIFY response with * the status of this action. If the enable was successful, you can expect... * * Response queue entries with SCSI commands encapsulate show up in an ATIO * (Accept Target IO) type- sometimes with enough info to stop the command at * this level. Ultimately the driver has to feed back to the f/w's request * queue a sequence of CTIOs (continue target I/O) that describe data to * be moved and/or status to be sent) and finally finishing with sending * to the f/w's response queue an ATIO which then completes the handshake * with the f/w for that command. 
There's a lot of variations on this theme, * including flags you can set in the CTIO for the Qlogic 2X00 fibre channel * cards that 'auto-replenish' the f/w's ATIO count, but this is the basic * gist of it. * * The third group that can show up in the response queue are Immediate * Notification events. These include things like notifications of SCSI bus * resets, or Bus Device Reset messages or other messages received. This * a classic oddbins area. It can get a little weird because you then turn * around and acknowledge the Immediate Notify by writing an entry onto the * request queue and then the f/w turns around and gives you an acknowledgement * to *your* acknowledgement on the response queue (the idea being to let * the f/w tell you when the event is *really* over I guess). * */ /* * A new response queue entry has arrived. The interrupt service code * has already swizzled it into the platform dependent from canonical form. * * Because of the way this driver is designed, unfortunately most of the * actual synchronization work has to be done in the platform specific * code- we have no synchroniation primitives in the common code. 
*/ int isp_target_notify(struct ispsoftc *isp, void *vptr, u_int16_t *optrp) { u_int16_t status, seqid; union { at_entry_t *atiop; at2_entry_t *at2iop; ct_entry_t *ctiop; ct2_entry_t *ct2iop; lun_entry_t *lunenp; in_entry_t *inotp; in_fcentry_t *inot_fcp; na_entry_t *nackp; na_fcentry_t *nack_fcp; isphdr_t *hp; void * *vp; #define atiop unp.atiop #define at2iop unp.at2iop #define ctiop unp.ctiop #define ct2iop unp.ct2iop #define lunenp unp.lunenp #define inotp unp.inotp #define inot_fcp unp.inot_fcp #define nackp unp.nackp #define nack_fcp unp.nack_fcp #define hdrp unp.hp } unp; int bus, rval = 0; unp.vp = vptr; ISP_TDQE(isp, "isp_target_notify", (int) *optrp, vptr); switch(hdrp->rqs_entry_type) { case RQSTYPE_ATIO: isp_handle_atio(isp, atiop); break; case RQSTYPE_CTIO: isp_handle_ctio(isp, ctiop); break; case RQSTYPE_ATIO2: isp_handle_atio2(isp, at2iop); break; case RQSTYPE_CTIO2: isp_handle_ctio2(isp, ct2iop); break; case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: (void) isp_async(isp, ISPASYNC_TARGET_ACTION, vptr); break; case RQSTYPE_NOTIFY: /* * Either the ISP received a SCSI message it can't * handle, or it's returning an Immed. Notify entry * we sent. We can send Immed. Notify entries to * increment the firmware's resource count for them * (we set this initially in the Enable Lun entry). 
*/ bus = 0; if (IS_FC(isp)) { status = inot_fcp->in_status; seqid = inot_fcp->in_seqid; } else { status = inotp->in_status & 0xff; seqid = inotp->in_seqid; if (IS_DUALBUS(isp)) { bus = (inotp->in_iid & 0x80) >> 7; inotp->in_iid &= ~0x80; } } isp_prt(isp, ISP_LOGTDEBUG1, "Immediate Notify, status=0x%x seqid=0x%x", status, seqid); switch (status) { case IN_RESET: (void) isp_async(isp, ISPASYNC_BUS_RESET, &bus); break; case IN_MSG_RECEIVED: case IN_IDE_RECEIVED: if (IS_FC(isp)) { isp_got_msg_fc(isp, bus, vptr); } else { isp_got_msg(isp, bus, vptr); } break; case IN_RSRC_UNAVAIL: isp_prt(isp, ISP_LOGWARN, "Firmware out of ATIOs"); break; case IN_ABORT_TASK: isp_prt(isp, ISP_LOGWARN, "Abort Task for Initiator %d RX_ID 0x%x", inot_fcp->in_iid, seqid); break; case IN_PORT_LOGOUT: isp_prt(isp, ISP_LOGWARN, "Port Logout for Initiator %d RX_ID 0x%x", inot_fcp->in_iid, seqid); break; case IN_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "Port Changed for Initiator %d RX_ID 0x%x", inot_fcp->in_iid, seqid); break; case IN_GLOBAL_LOGO: isp_prt(isp, ISP_LOGWARN, "All ports logged out"); break; default: isp_prt(isp, ISP_LOGERR, "bad status (0x%x) in isp_target_notify", status); break; } isp_notify_ack(isp, vptr); break; case RQSTYPE_NOTIFY_ACK: /* * The ISP is acknowledging our acknowledgement of an * Immediate Notify entry for some asynchronous event. 
*/ if (IS_FC(isp)) { isp_prt(isp, ISP_LOGTDEBUG1, "Notify Ack status=0x%x seqid 0x%x", nack_fcp->na_status, nack_fcp->na_seqid); } else { isp_prt(isp, ISP_LOGTDEBUG1, "Notify Ack event 0x%x status=0x%x seqid 0x%x", nackp->na_event, nackp->na_status, nackp->na_seqid); } break; default: isp_prt(isp, ISP_LOGERR, "Unknown entry type 0x%x in isp_target_notify", hdrp->rqs_entry_type); rval = -1; break; } #undef atiop #undef at2iop #undef ctiop #undef ct2iop #undef lunenp #undef inotp #undef inot_fcp #undef nackp #undef nack_fcp #undef hdrp return (rval); } /* * Toggle (on/off) target mode for bus/target/lun * * The caller has checked for overlap and legality. * * Note that not all of bus, target or lun can be paid attention to. * Note also that this action will not be complete until the f/w writes * response entry. The caller is responsible for synchronizing this. */ int isp_lun_cmd(struct ispsoftc *isp, int cmd, int bus, int tgt, int lun, - u_int32_t opaque) + int cmd_cnt, int inot_cnt, u_int32_t opaque) { lun_entry_t el; u_int16_t iptr, optr; void *outp; MEMZERO(&el, sizeof (el)); if (IS_DUALBUS(isp)) { el.le_rsvd = (bus & 0x1) << 7; } - el.le_cmd_count = DFLT_CMD_CNT; - el.le_in_count = DFLT_INOTIFY; + el.le_cmd_count = cmd_cnt; + el.le_in_count = inot_cnt; if (cmd == RQSTYPE_ENABLE_LUN) { if (IS_SCSI(isp)) { el.le_flags = LUN_TQAE|LUN_DISAD; el.le_cdb6len = 12; el.le_cdb7len = 12; } } else if (cmd == -RQSTYPE_ENABLE_LUN) { cmd = RQSTYPE_ENABLE_LUN; el.le_cmd_count = 0; el.le_in_count = 0; } else if (cmd == -RQSTYPE_MODIFY_LUN) { cmd = RQSTYPE_MODIFY_LUN; el.le_ops = LUN_CCDECR | LUN_INDECR; } else { el.le_ops = LUN_CCINCR | LUN_ININCR; } el.le_header.rqs_entry_type = cmd; el.le_header.rqs_entry_count = 1; el.le_reserved = opaque; if (IS_SCSI(isp)) { el.le_tgt = tgt; el.le_lun = lun; } else if (isp->isp_maxluns <= 16) { el.le_lun = lun; } el.le_timeout = 2; if (isp_getrqentry(isp, &iptr, &optr, &outp)) { isp_prt(isp, ISP_LOGWARN, "Request Queue Overflow in 
isp_lun_cmd"); return (-1); } ISP_SWIZ_ENABLE_LUN(isp, outp, &el); ISP_TDQE(isp, "isp_lun_cmd", (int) optr, &el); ISP_ADD_REQUEST(isp, iptr); return (0); } int isp_target_put_entry(struct ispsoftc *isp, void *ap) { void *outp; u_int16_t iptr, optr; u_int8_t etype = ((isphdr_t *) ap)->rqs_entry_type; if (isp_getrqentry(isp, &iptr, &optr, &outp)) { isp_prt(isp, ISP_LOGWARN, "Request Queue Overflow in isp_target_put_entry"); return (-1); } switch (etype) { case RQSTYPE_ATIO: ISP_SWIZ_ATIO(isp, outp, ap); break; case RQSTYPE_ATIO2: ISP_SWIZ_ATIO2(isp, outp, ap); break; case RQSTYPE_CTIO: ISP_SWIZ_CTIO(isp, outp, ap); break; case RQSTYPE_CTIO2: ISP_SWIZ_CTIO2(isp, outp, ap); break; default: isp_prt(isp, ISP_LOGERR, "Unknown type 0x%x in isp_put_entry", etype); return (-1); } ISP_TDQE(isp, "isp_target_put_entry", (int) optr, ap);; ISP_ADD_REQUEST(isp, iptr); return (0); } int isp_target_put_atio(struct ispsoftc *isp, void *arg) { union { at_entry_t _atio; at2_entry_t _atio2; } atun; MEMZERO(&atun, sizeof atun); if (IS_FC(isp)) { at2_entry_t *aep = arg; atun._atio2.at_header.rqs_entry_type = RQSTYPE_ATIO2; atun._atio2.at_header.rqs_entry_count = 1; if (isp->isp_maxluns > 16) { atun._atio2.at_scclun = (u_int16_t) aep->at_scclun; } else { atun._atio2.at_lun = (u_int8_t) aep->at_scclun; } atun._atio2.at_status = CT_OK; } else { at_entry_t *aep = arg; atun._atio.at_header.rqs_entry_type = RQSTYPE_ATIO; atun._atio.at_header.rqs_entry_count = 1; atun._atio.at_handle = aep->at_handle; atun._atio.at_iid = aep->at_iid; atun._atio.at_tgt = aep->at_tgt; atun._atio.at_lun = aep->at_lun; atun._atio.at_tag_type = aep->at_tag_type; atun._atio.at_tag_val = aep->at_tag_val; atun._atio.at_status = (aep->at_flags & AT_TQAE); atun._atio.at_status |= CT_OK; } return (isp_target_put_entry(isp, &atun)); } /* * Command completion- both for handling cases of no resources or * no blackhole driver, or other cases where we have to, inline, * finish the command sanely, or for normal command 
completion. * * The 'completion' code value has the scsi status byte in the low 8 bits. * If status is a CHECK CONDITION and bit 8 is nonzero, then bits 12..15 have * the sense key and bits 16..23 have the ASCQ and bits 24..31 have the ASC * values. * * NB: the key, asc, ascq, cannot be used for parallel SCSI as it doesn't * NB: inline SCSI sense reporting. As such, we lose this information. XXX. * * For both parallel && fibre channel, we use the feature that does * an automatic resource autoreplenish so we don't have then later do * put of an atio to replenish the f/w's resource count. */ int isp_endcmd(struct ispsoftc *isp, void *arg, u_int32_t code, u_int16_t hdl) { int sts; union { ct_entry_t _ctio; ct2_entry_t _ctio2; } un; MEMZERO(&un, sizeof un); sts = code & 0xff; if (IS_FC(isp)) { at2_entry_t *aep = arg; ct2_entry_t *cto = &un._ctio2; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_iid = aep->at_iid; if (isp->isp_maxluns <= 16) { cto->ct_lun = aep->at_lun; } cto->ct_rxid = aep->at_rxid; cto->rsp.m1.ct_scsi_status = sts & 0xff; cto->ct_flags = CT2_SENDSTATUS | CT2_NO_DATA | CT2_FLAG_MODE1; if (hdl == 0) { cto->ct_flags |= CT2_CCINCR; } if (aep->at_datalen) { cto->ct_resid = aep->at_datalen; - cto->ct_flags |= CT2_DATA_UNDER; + cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if ((sts & 0xff) == SCSI_CHECK && (sts & ECMD_SVALID)) { cto->rsp.m1.ct_resp[0] = 0xf0; cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf; cto->rsp.m1.ct_resp[7] = 8; cto->rsp.m1.ct_resp[12] = (code >> 24) & 0xff; cto->rsp.m1.ct_resp[13] = (code >> 16) & 0xff; cto->rsp.m1.ct_senselen = 16; - cto->ct_flags |= CT2_SNSLEN_VALID; + cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; } cto->ct_syshandle = hdl; } else { at_entry_t *aep = arg; ct_entry_t *cto = &un._ctio; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; cto->ct_fwhandle = aep->at_handle; cto->ct_iid = aep->at_iid; cto->ct_tgt = aep->at_tgt; cto->ct_lun = 
aep->at_lun; cto->ct_tag_type = aep->at_tag_type; cto->ct_tag_val = aep->at_tag_val; if (aep->at_flags & AT_TQAE) { cto->ct_flags |= CT_TQAE; } cto->ct_flags = CT_SENDSTATUS | CT_NO_DATA; if (hdl == 0) { cto->ct_flags |= CT_CCINCR; } cto->ct_scsi_status = sts; cto->ct_syshandle = hdl; } return (isp_target_put_entry(isp, &un)); } void isp_target_async(struct ispsoftc *isp, int bus, int event) { tmd_event_t evt; tmd_msg_t msg; switch (event) { /* * These three we handle here to propagate an effective bus reset * upstream, but these do not require any immediate notify actions * so we return when done. */ case ASYNC_LIP_OCCURRED: case ASYNC_LOOP_UP: case ASYNC_LOOP_DOWN: evt.ev_bus = bus; evt.ev_event = event; (void) isp_async(isp, ISPASYNC_TARGET_EVENT, &evt); return; case ASYNC_LOOP_RESET: case ASYNC_BUS_RESET: case ASYNC_TIMEOUT_RESET: if (IS_FC(isp)) { return; /* we'll be getting an inotify instead */ } evt.ev_bus = bus; evt.ev_event = event; (void) isp_async(isp, ISPASYNC_TARGET_EVENT, &evt); break; case ASYNC_DEVICE_RESET: /* * Bus Device Reset resets a specific target, so * we pass this as a synthesized message. */ MEMZERO(&msg, sizeof msg); if (IS_FC(isp)) { msg.nt_iid = FCPARAM(isp)->isp_loopid; } else { msg.nt_iid = SDPARAM(isp)->isp_initiator_id; } msg.nt_bus = bus; msg.nt_msg[0] = MSG_BUS_DEV_RESET; (void) isp_async(isp, ISPASYNC_TARGET_MESSAGE, &msg); break; default: isp_prt(isp, ISP_LOGERR, "isp_target_async: unknown event 0x%x", event); break; } if (isp->isp_state == ISP_RUNSTATE) isp_notify_ack(isp, NULL); } /* * Process a received message. 
* The ISP firmware can handle most messages, there are only * a few that we need to deal with: * - abort: clean up the current command * - abort tag and clear queue */ static void isp_got_msg(struct ispsoftc *isp, int bus, in_entry_t *inp) { u_int8_t status = inp->in_status & ~QLTM_SVALID; if (status == IN_IDE_RECEIVED || status == IN_MSG_RECEIVED) { tmd_msg_t msg; MEMZERO(&msg, sizeof (msg)); msg.nt_bus = bus; msg.nt_iid = inp->in_iid; msg.nt_tgt = inp->in_tgt; msg.nt_lun = inp->in_lun; msg.nt_tagtype = inp->in_tag_type; msg.nt_tagval = inp->in_tag_val; MEMCPY(msg.nt_msg, inp->in_msg, IN_MSGLEN); (void) isp_async(isp, ISPASYNC_TARGET_MESSAGE, &msg); } else { isp_prt(isp, ISP_LOGERR, "unknown immediate notify status 0x%x", inp->in_status); } } /* * Synthesize a message from the task management flags in a FCP_CMND_IU. */ static void isp_got_msg_fc(struct ispsoftc *isp, int bus, in_fcentry_t *inp) { static const char f1[] = "%s from iid %d lun %d seq 0x%x"; static const char f2[] = "unknown %s 0x%x lun %d iid %d task flags 0x%x seq 0x%x\n"; if (inp->in_status != IN_MSG_RECEIVED) { isp_prt(isp, ISP_LOGINFO, f2, "immediate notify status", inp->in_status, inp->in_lun, inp->in_iid, inp->in_task_flags, inp->in_seqid); } else { tmd_msg_t msg; MEMZERO(&msg, sizeof (msg)); msg.nt_bus = bus; msg.nt_iid = inp->in_iid; if (isp->isp_maxluns > 16) { msg.nt_lun = inp->in_scclun; } else { msg.nt_lun = inp->in_lun; } msg.nt_tagval = inp->in_seqid; if (inp->in_task_flags & TASK_FLAGS_ABORT_TASK) { isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK", inp->in_iid, inp->in_lun, inp->in_seqid); msg.nt_msg[0] = MSG_ABORT_TAG; } else if (inp->in_task_flags & TASK_FLAGS_CLEAR_TASK_SET) { isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", inp->in_iid, inp->in_lun, inp->in_seqid); msg.nt_msg[0] = MSG_CLEAR_QUEUE; } else if (inp->in_task_flags & TASK_FLAGS_TARGET_RESET) { isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", inp->in_iid, inp->in_lun, inp->in_seqid); msg.nt_msg[0] = MSG_BUS_DEV_RESET; } else if 
(inp->in_task_flags & TASK_FLAGS_CLEAR_ACA) { isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", inp->in_iid, inp->in_lun, inp->in_seqid); /* ???? */ msg.nt_msg[0] = MSG_REL_RECOVERY; } else if (inp->in_task_flags & TASK_FLAGS_TERMINATE_TASK) { isp_prt(isp, ISP_LOGINFO, f1, "TERMINATE TASK", inp->in_iid, inp->in_lun, inp->in_seqid); msg.nt_msg[0] = MSG_TERM_IO_PROC; } else { isp_prt(isp, ISP_LOGWARN, f2, "task flag", inp->in_status, inp->in_lun, inp->in_iid, inp->in_task_flags, inp->in_seqid); } if (msg.nt_msg[0]) { (void) isp_async(isp, ISPASYNC_TARGET_MESSAGE, &msg); } } } static void isp_notify_ack(struct ispsoftc *isp, void *arg) { char storage[QENTRY_LEN]; u_int16_t iptr, optr; void *outp; if (isp_getrqentry(isp, &iptr, &optr, &outp)) { isp_prt(isp, ISP_LOGWARN, "Request Queue Overflow For isp_notify_ack"); return; } MEMZERO(storage, QENTRY_LEN); if (IS_FC(isp)) { na_fcentry_t *na = (na_fcentry_t *) storage; if (arg) { in_fcentry_t *inp = arg; MEMCPY(storage, arg, sizeof (isphdr_t)); na->na_iid = inp->in_iid; if (isp->isp_maxluns > 16) { na->na_lun = inp->in_scclun; } else { na->na_lun = inp->in_lun; } na->na_task_flags = inp->in_task_flags; na->na_seqid = inp->in_seqid; na->na_flags = NAFC_RCOUNT; if (inp->in_status == IN_RESET) { na->na_flags |= NAFC_RST_CLRD; } } else { na->na_flags = NAFC_RST_CLRD; } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; ISP_SWIZ_NOT_ACK_FC(isp, outp, na); } else { na_entry_t *na = (na_entry_t *) storage; if (arg) { in_entry_t *inp = arg; MEMCPY(storage, arg, sizeof (isphdr_t)); na->na_iid = inp->in_iid; na->na_lun = inp->in_lun; na->na_tgt = inp->in_tgt; na->na_seqid = inp->in_seqid; if (inp->in_status == IN_RESET) { na->na_event = NA_RST_CLRD; } } else { na->na_event = NA_RST_CLRD; } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; ISP_SWIZ_NOT_ACK(isp, outp, na); } ISP_TDQE(isp, "isp_notify_ack", (int) optr, storage); ISP_ADD_REQUEST(isp, iptr); } static void 
isp_handle_atio(struct ispsoftc *isp, at_entry_t *aep) { int lun; lun = aep->at_lun; /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ switch(aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO for disabled lun %d", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO for lun %d because of command count" " overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and no * not increment it. Therefore we should never get * this status here. */ isp_prt(isp, ISP_LOGERR, atiocope, lun); break; case AT_CDB: /* Got a CDB */ case AT_PHASE_ERROR: /* Bus Phase Sequence Error */ /* * Punt to platform specific layer. */ (void) isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along an blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. 
*/ isp_prt(isp, ISP_LOGWARN, atior, lun, aep->at_iid); break; default: isp_prt(isp, ISP_LOGERR, "Unknown ATIO status 0x%x from initiator %d for lun %d", aep->at_status, aep->at_iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_atio2(struct ispsoftc *isp, at2_entry_t *aep) { int lun; if (isp->isp_maxluns > 16) { lun = aep->at_scclun; } else { lun = aep->at_lun; } /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ switch(aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for disabled lun %d", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for lun %d- command count overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and no * not increment it. Therefore we should never get * this status here. */ isp_prt(isp, ISP_LOGERR, atiocope, lun); break; case AT_CDB: /* Got a CDB */ /* * Punt to platform specific layer. */ (void) isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along an blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. 
*/ isp_prt(isp, ISP_LOGERR, atior, lun, aep->at_iid); break; default: isp_prt(isp, ISP_LOGERR, "Unknown ATIO2 status 0x%x from initiator %d for lun %d", aep->at_status, aep->at_iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_ctio(struct ispsoftc *isp, ct_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs(isp, ct->ct_syshandle); if (xs == NULL) pl = ISP_LOGALL; } else { xs = NULL; } switch(ct->ct_status & ~QLTM_SVALID) { case CT_OK: /* * There are generally 3 possibilities as to why we'd get * this condition: * We disconnected after receiving a CDB. * We sent or received data. * We sent status & command complete. */ if (ct->ct_flags & CT_SENDSTATUS) { break; } else if ((ct->ct_flags & CT_DATAMASK) == CT_NO_DATA) { /* * Nothing to do in this case. */ isp_prt(isp, pl, "CTIO- iid %d disconnected OK", ct->ct_iid); return; } break; case CT_BDR_MSG: /* * Bus Device Reset message received or the SCSI Bus has * been Reset; the firmware has gone to Bus Free. * * The firmware generates an async mailbox interupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ fmsg = "Bus Device Reset"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) fmsg = "Bus Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) fmsg = "ABORT TASK sent by Initiator"; isp_prt(isp, ISP_LOGWARN, "CTIO destroyed by %s", fmsg); break; case CT_INVAL: /* * CTIO rejected by the firmware due to disabled lun. * "Cannot Happen". */ isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for disabled lun %d", ct->ct_lun); break; case CT_NOPATH: /* * CTIO rejected by the firmware due "no path for the * nondisconnecting nexus specified". 
This means that * we tried to access the bus while a non-disconnecting * command is in process. */ isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for bad nexus %d/%d/%d", ct->ct_iid, ct->ct_tgt, ct->ct_lun); break; case CT_RSELTMO: fmsg = "Reselection"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) fmsg = "Command"; isp_prt(isp, ISP_LOGERR, "Firmware timed out on %s", fmsg); break; case CT_PANIC: if (fmsg == NULL) fmsg = "Unrecoverable Error"; /*FALLTHROUGH*/ case CT_ERR: if (fmsg == NULL) fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT_PHASE_ERROR: if (fmsg == NULL) fmsg = "Phase Sequence Error"; /*FALLTHROUGH*/ case CT_TERMINATED: if (fmsg == NULL) fmsg = "terminated by TERMINATE TRANSFER"; /*FALLTHROUGH*/ case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; - isp_prt(isp, ISP_LOGERR, "CTIO returned by f/w- %s", fmsg); -#if 0 - if (status & SENSEVALID) { - bcopy((caddr_t) (cep + CTIO_SENSE_OFFSET), - (caddr_t) &cdp->cd_sensedata, - sizeof(scsi_sense_t)); - cdp->cd_flags |= CDF_SENSEVALID; - } -#endif break; default: isp_prt(isp, ISP_LOGERR, "Unknown CTIO status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. */ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT_SENDSTATUS) == 0) { isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { - /* - * Final CTIO completed. Release DMA resources and - * notify platform dependent layers. - */ - if (ct->ct_flags & CT_DATAMASK) { + /* + * Final CTIO completed. Release DMA resources and + * notify platform dependent layers. 
+ */ + if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } isp_prt(isp, pl, "final CTIO complete"); /* * The platform layer will destroy the handle if appropriate. */ (void) isp_async(isp, ISPASYNC_TARGET_ACTION, ct); } } static void isp_handle_ctio2(struct ispsoftc *isp, ct2_entry_t *ct) { XS_T *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs(isp, ct->ct_syshandle); if (xs == NULL) pl = ISP_LOGALL; } else { xs = NULL; } switch(ct->ct_status & ~QLTM_SVALID) { + case CT_BUS_ERROR: + isp_prt(isp, ISP_LOGERR, "PCI DMA Bus Error"); + /* FALL Through */ + case CT_DATA_OVER: + case CT_DATA_UNDER: case CT_OK: /* * There are generally 2 possibilities as to why we'd get * this condition: * We sent or received data. * We sent status & command complete. */ break; case CT_BDR_MSG: /* - * Bus Device Reset message received or the SCSI Bus has - * been Reset; the firmware has gone to Bus Free. + * Target Reset function received. * * The firmware generates an async mailbox interupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ - fmsg = "Bus Device Reset"; + fmsg = "TARGET RESET Task Management Function Received"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) - fmsg = "Bus Reset"; + fmsg = "LIP Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) - fmsg = "ABORT TASK sent by Initiator"; + fmsg = "ABORT Task Management Function Received"; isp_prt(isp, ISP_LOGERR, "CTIO2 destroyed by %s", fmsg); break; case CT_INVAL: /* * CTIO rejected by the firmware - invalid data direction. 
*/ isp_prt(isp, ISP_LOGERR, "CTIO2 had wrong data directiond"); break; - case CT_NOPATH: - /* - * CTIO rejected by the firmware due "no path for the - * nondisconnecting nexus specified". This means that - * we tried to access the bus while a non-disconnecting - * command is in process. - */ - isp_prt(isp, ISP_LOGERR, - "Firmware rejected CTIO2 for bad nexus %d->%d", - ct->ct_iid, ct->ct_lun); - break; - case CT_RSELTMO: - fmsg = "Reselection"; + fmsg = "failure to reconnect to initiator"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) - fmsg = "Command"; + fmsg = "command"; isp_prt(isp, ISP_LOGERR, "Firmware timed out on %s", fmsg); break; case CT_ERR: fmsg = "Completed with Error"; /*FALLTHROUGH*/ - case CT_PHASE_ERROR: /* Bus phase sequence error */ - if (fmsg == NULL) - fmsg = "Phase Sequence Error"; - /*FALLTHROUGH*/ - case CT_TERMINATED: - if (fmsg == NULL) - fmsg = "terminated by TERMINATE TRANSFER"; - /*FALLTHROUGH*/ case CT_LOGOUT: if (fmsg == NULL) fmsg = "Port Logout"; /*FALLTHROUGH*/ case CT_PORTNOTAVAIL: if (fmsg == NULL) fmsg = "Port not available"; + case CT_PORTCHANGED: + if (fmsg == NULL) + fmsg = "Port Changed"; case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; - isp_prt(isp, ISP_LOGERR, "CTIO returned by f/w- %s", fmsg); -#if 0 - if (status & SENSEVALID) { - bcopy((caddr_t) (cep + CTIO_SENSE_OFFSET), - (caddr_t) &cdp->cd_sensedata, - sizeof(scsi_sense_t)); - cdp->cd_flags |= CDF_SENSEVALID; - } -#endif break; case CT_INVRXID: /* * CTIO rejected by the firmware because an invalid RX_ID. * Just print a message. */ isp_prt(isp, ISP_LOGERR, "CTIO2 completed with Invalid RX_ID 0x%x", ct->ct_rxid); break; default: isp_prt(isp, ISP_LOGERR, "Unknown CTIO2 status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
*/ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT_SENDSTATUS) == 0) { isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { + if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { + ISP_DMAFREE(isp, xs, ct->ct_syshandle); + } if (ct->ct_flags & CT_SENDSTATUS) { /* * Sent status and command complete. * * We're now really done with this command, so we * punt to the platform dependent layers because * only there can we do the appropriate command * complete thread synchronization. */ isp_prt(isp, pl, "status CTIO complete"); } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ isp_prt(isp, pl, "data CTIO complete"); - ISP_DMAFREE(isp, xs, ct->ct_syshandle); } (void) isp_async(isp, ISPASYNC_TARGET_ACTION, ct); /* * The platform layer will destroy the handle if appropriate. */ } } #endif diff --git a/sys/dev/isp/isp_target.h b/sys/dev/isp/isp_target.h index fde87ce67ad5..c94bda8593ff 100644 --- a/sys/dev/isp/isp_target.h +++ b/sys/dev/isp/isp_target.h @@ -1,716 +1,720 @@ /* $FreeBSD$ */ /* * Qlogic Target Mode Structure and Flag Definitions * * Copyright (c) 1997, 1998 * Patrick Stirling * pms@psconsult.com * All rights reserved. * * Additional Copyright (c) 1999, 2000, 2001 * Matthew Jacob * mjacob@feral.com * All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _ISP_TARGET_H #define _ISP_TARGET_H /* * Defines for all entry types */ #define QLTM_SVALID 0x80 #define QLTM_SENSELEN 18 /* * Structure for Enable Lun and Modify Lun queue entries */ typedef struct { isphdr_t le_header; u_int32_t le_reserved; u_int8_t le_lun; u_int8_t le_rsvd; u_int8_t le_ops; /* Modify LUN only */ u_int8_t le_tgt; /* Not for FC */ u_int32_t le_flags; /* Not for FC */ u_int8_t le_status; u_int8_t le_reserved2; u_int8_t le_cmd_count; u_int8_t le_in_count; u_int8_t le_cdb6len; /* Not for FC */ u_int8_t le_cdb7len; /* Not for FC */ u_int16_t le_timeout; u_int16_t le_reserved3[20]; } lun_entry_t; /* * le_flags values */ #define LUN_TQAE 0x00000002 /* bit1 Tagged Queue Action Enable */ #define LUN_DSSM 0x01000000 /* bit24 Disable Sending SDP Message */ #define LUN_DISAD 0x02000000 /* bit25 Disable autodisconnect */ #define LUN_DM 0x40000000 /* bit30 Disconnects Mandatory */ /* * le_ops values */ #define LUN_CCINCR 0x01 /* increment command count */ #define LUN_CCDECR 0x02 /* decrement command count */ #define LUN_ININCR 0x40 /* increment immed. notify count */ #define LUN_INDECR 0x80 /* decrement immed. 
notify count */ /* * le_status values */ #define LUN_OK 0x01 /* we be rockin' */ #define LUN_ERR 0x04 /* request completed with error */ #define LUN_INVAL 0x06 /* invalid request */ #define LUN_NOCAP 0x16 /* can't provide requested capability */ #define LUN_ENABLED 0x3E /* LUN already enabled */ /* * Immediate Notify Entry structure */ #define IN_MSGLEN 8 /* 8 bytes */ #define IN_RSVDLEN 8 /* 8 words */ typedef struct { isphdr_t in_header; u_int32_t in_reserved; u_int8_t in_lun; /* lun */ u_int8_t in_iid; /* initiator */ u_int8_t in_reserved2; u_int8_t in_tgt; /* target */ u_int32_t in_flags; u_int8_t in_status; u_int8_t in_rsvd2; u_int8_t in_tag_val; /* tag value */ u_int8_t in_tag_type; /* tag type */ u_int16_t in_seqid; /* sequence id */ u_int8_t in_msg[IN_MSGLEN]; /* SCSI message bytes */ u_int16_t in_reserved3[IN_RSVDLEN]; u_int8_t in_sense[QLTM_SENSELEN];/* suggested sense data */ } in_entry_t; typedef struct { isphdr_t in_header; u_int32_t in_reserved; u_int8_t in_lun; /* lun */ u_int8_t in_iid; /* initiator */ u_int16_t in_scclun; u_int32_t in_reserved2; u_int16_t in_status; u_int16_t in_task_flags; u_int16_t in_seqid; /* sequence id */ } in_fcentry_t; /* * Values for the in_status field */ #define IN_REJECT 0x0D /* Message Reject message received */ #define IN_RESET 0x0E /* Bus Reset occurred */ #define IN_NO_RCAP 0x16 /* requested capability not available */ #define IN_IDE_RECEIVED 0x33 /* Initiator Detected Error msg received */ #define IN_RSRC_UNAVAIL 0x34 /* resource unavailable */ #define IN_MSG_RECEIVED 0x36 /* SCSI message received */ #define IN_ABORT_TASK 0x20 /* task named in RX_ID is being aborted (FC) */ #define IN_PORT_LOGOUT 0x29 /* port has logged out (FC) */ #define IN_PORT_CHANGED 0x2A /* port changed */ #define IN_GLOBAL_LOGO 0x2E /* all ports logged out */ #define IN_NO_NEXUS 0x3B /* Nexus not established */ /* * Values for the in_task_flags field- should only get one at a time! 
*/ #define TASK_FLAGS_ABORT_TASK (1<<9) #define TASK_FLAGS_CLEAR_TASK_SET (1<<10) #define TASK_FLAGS_TARGET_RESET (1<<13) #define TASK_FLAGS_CLEAR_ACA (1<<14) #define TASK_FLAGS_TERMINATE_TASK (1<<15) #ifndef MSG_ABORT_TAG #define MSG_ABORT_TAG 0x06 #endif #ifndef MSG_CLEAR_QUEUE #define MSG_CLEAR_QUEUE 0x0e #endif #ifndef MSG_BUS_DEV_RESET #define MSG_BUS_DEV_RESET 0x0b #endif #ifndef MSG_REL_RECOVERY #define MSG_REL_RECOVERY 0x10 #endif #ifndef MSG_TERM_IO_PROC #define MSG_TERM_IO_PROC 0x11 #endif /* * Notify Acknowledge Entry structure */ #define NA_RSVDLEN 22 typedef struct { isphdr_t na_header; u_int32_t na_reserved; u_int8_t na_lun; /* lun */ u_int8_t na_iid; /* initiator */ u_int8_t na_reserved2; u_int8_t na_tgt; /* target */ u_int32_t na_flags; u_int8_t na_status; u_int8_t na_event; u_int16_t na_seqid; /* sequence id */ u_int16_t na_reserved3[NA_RSVDLEN]; } na_entry_t; /* * Value for the na_event field */ #define NA_RST_CLRD 0x80 /* Clear an async event notification */ #define NA_OK 0x01 /* Notify Acknowledge Succeeded */ #define NA_INVALID 0x06 /* Invalid Notify Acknowledge */ #define NA2_RSVDLEN 21 typedef struct { isphdr_t na_header; u_int32_t na_reserved; u_int8_t na_lun; /* lun */ u_int8_t na_iid; /* initiator */ u_int16_t na_scclun; u_int16_t na_flags; u_int16_t na_reserved2; u_int16_t na_status; u_int16_t na_task_flags; u_int16_t na_seqid; /* sequence id */ u_int16_t na_reserved3[NA2_RSVDLEN]; } na_fcentry_t; #define NAFC_RCOUNT 0x80 /* increment resource count */ #define NAFC_RST_CLRD 0x20 /* Clear LIP Reset */ /* * Accept Target I/O Entry structure */ #define ATIO_CDBLEN 26 typedef struct { isphdr_t at_header; u_int16_t at_reserved; u_int16_t at_handle; u_int8_t at_lun; /* lun */ u_int8_t at_iid; /* initiator */ u_int8_t at_cdblen; /* cdb length */ u_int8_t at_tgt; /* target */ u_int32_t at_flags; u_int8_t at_status; /* firmware status */ u_int8_t at_scsi_status; /* scsi status */ u_int8_t at_tag_val; /* tag value */ u_int8_t at_tag_type; /* tag 
type */ u_int8_t at_cdb[ATIO_CDBLEN]; /* received CDB */ u_int8_t at_sense[QLTM_SENSELEN];/* suggested sense data */ } at_entry_t; /* * at_flags values */ #define AT_NODISC 0x00008000 /* disconnect disabled */ #define AT_TQAE 0x00000002 /* Tagged Queue Action enabled */ /* * at_status values */ #define AT_PATH_INVALID 0x07 /* ATIO sent to firmware for disabled lun */ #define AT_RESET 0x0E /* SCSI Bus Reset Occurred */ #define AT_PHASE_ERROR 0x14 /* Bus phase sequence error */ #define AT_NOCAP 0x16 /* Requested capability not available */ #define AT_BDR_MSG 0x17 /* Bus Device Reset msg received */ #define AT_CDB 0x3D /* CDB received */ /* * Macros to create and fetch and test concatenated handle and tag value macros */ #define AT_MAKE_TAGID(tid, aep) \ tid = ((aep)->at_handle << 16); \ if ((aep)->at_flags & AT_TQAE) \ (tid) |= ((aep)->at_tag_val + 1) #define CT_MAKE_TAGID(tid, ct) \ tid = ((ct)->ct_fwhandle << 16); \ if ((ct)->ct_flags & CT_TQAE) \ (tid) |= ((ct)->ct_tag_val + 1) #define AT_HAS_TAG(val) ((val) & 0xffff) #define AT_GET_TAG(val) AT_HAS_TAG(val) - 1 #define AT_GET_HANDLE(val) ((val) >> 16) /* * Accept Target I/O Entry structure, Type 2 */ #define ATIO2_CDBLEN 16 typedef struct { isphdr_t at_header; u_int32_t at_reserved; u_int8_t at_lun; /* lun or reserved */ u_int8_t at_iid; /* initiator */ u_int16_t at_rxid; /* response ID */ u_int16_t at_flags; u_int16_t at_status; /* firmware status */ u_int8_t at_reserved1; u_int8_t at_taskcodes; u_int8_t at_taskflags; u_int8_t at_execodes; u_int8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ u_int32_t at_datalen; /* allocated data len */ u_int16_t at_scclun; /* SCC Lun or reserved */ u_int16_t at_reserved2[10]; u_int16_t at_oxid; } at2_entry_t; #define ATIO2_WWPN_OFFSET 0x2A #define ATIO2_OXID_OFFSET 0x3E #define ATIO2_TC_ATTR_MASK 0x7 #define ATIO2_TC_ATTR_SIMPLEQ 0 #define ATIO2_TC_ATTR_HEADOFQ 1 #define ATIO2_TC_ATTR_ORDERED 2 #define ATIO2_TC_ATTR_ACAQ 4 #define ATIO2_TC_ATTR_UNTAGGED 5 /* * Continue Target I/O 
Entry structure * Request from driver. The response from the * ISP firmware is the same except that the last 18 * bytes are overwritten by suggested sense data if * the 'autosense valid' bit is set in the status byte. */ typedef struct { isphdr_t ct_header; u_int16_t ct_reserved; #define ct_syshandle ct_reserved /* we use this */ u_int16_t ct_fwhandle; /* required by f/w */ u_int8_t ct_lun; /* lun */ u_int8_t ct_iid; /* initiator id */ u_int8_t ct_reserved2; u_int8_t ct_tgt; /* our target id */ u_int32_t ct_flags; u_int8_t ct_status; /* isp status */ u_int8_t ct_scsi_status; /* scsi status */ u_int8_t ct_tag_val; /* tag value */ u_int8_t ct_tag_type; /* tag type */ u_int32_t ct_xfrlen; /* transfer length */ u_int32_t ct_resid; /* residual length */ u_int16_t ct_timeout; u_int16_t ct_seg_count; ispds_t ct_dataseg[ISP_RQDSEG]; } ct_entry_t; /* * For some of the dual port SCSI adapters, port (bus #) is reported * in the MSbit of ct_iid. Bit fields are a bit too awkward here. * * Note that this does not apply to FC adapters at all which can and * do report IIDs between 129 && 255 (these represent devices that have * logged in across a SCSI fabric). 
*/ #define GET_IID_VAL(x) (x & 0x3f) #define GET_BUS_VAL(x) ((x >> 7) & 0x1) #define SET_IID_VAL(y, x) (y | (x & 0x3f)) #define SET_BUS_VAL(y, x) (y | ((x & 0x1) << 7)) /* * ct_flags values */ #define CT_TQAE 0x00000002 /* bit 1, Tagged Queue Action enable */ #define CT_DATA_IN 0x00000040 /* bits 6&7, Data direction */ #define CT_DATA_OUT 0x00000080 /* bits 6&7, Data direction */ #define CT_NO_DATA 0x000000C0 /* bits 6&7, Data direction */ #define CT_CCINCR 0x00000100 /* bit 8, autoincrement atio count */ #define CT_DATAMASK 0x000000C0 /* bits 6&7, Data direction */ #define CT_INISYNCWIDE 0x00004000 /* bit 14, Do Sync/Wide Negotiation */ #define CT_NODISC 0x00008000 /* bit 15, Disconnects disabled */ #define CT_DSDP 0x01000000 /* bit 24, Disable Save Data Pointers */ #define CT_SENDRDP 0x04000000 /* bit 26, Send Restore Pointers msg */ #define CT_SENDSTATUS 0x80000000 /* bit 31, Send SCSI status byte */ /* * ct_status values * - set by the firmware when it returns the CTIO */ #define CT_OK 0x01 /* completed without error */ #define CT_ABORTED 0x02 /* aborted by host */ #define CT_ERR 0x04 /* see sense data for error */ #define CT_INVAL 0x06 /* request for disabled lun */ #define CT_NOPATH 0x07 /* invalid ITL nexus */ #define CT_INVRXID 0x08 /* (FC only) Invalid RX_ID */ +#define CT_DATA_OVER 0x09 /* (FC only) Data Overrun */ #define CT_RSELTMO 0x0A /* reselection timeout after 2 tries */ #define CT_TIMEOUT 0x0B /* timed out */ #define CT_RESET 0x0E /* SCSI Bus Reset occurred */ #define CT_PARITY 0x0F /* Uncorrectable Parity Error */ +#define CT_BUS_ERROR 0x10 /* (FC Only) DMA PCI Error */ #define CT_PANIC 0x13 /* Unrecoverable Error */ #define CT_PHASE_ERROR 0x14 /* Bus phase sequence error */ #define CT_BDR_MSG 0x17 /* Bus Device Reset msg received */ +#define CT_DATA_UNDER 0x15 /* (FC only) Data Underrun */ #define CT_TERMINATED 0x19 /* due to Terminate Transfer mbox cmd */ #define CT_PORTNOTAVAIL 0x28 /* port not available */ #define CT_LOGOUT 0x29 /* port 
logout */ #define CT_PORTCHANGED 0x2A /* port changed */ #define CT_IDE 0x33 /* Initiator Detected Error */ #define CT_NOACK 0x35 /* Outstanding Immed. Notify. entry */ /* * When the firmware returns a CTIO entry, it may overwrite the last * part of the structure with sense data. This starts at offset 0x2E * into the entry, which is in the middle of ct_dataseg[1]. Rather * than define a new struct for this, I'm just using the sense data * offset. */ #define CTIO_SENSE_OFFSET 0x2E /* * Entry length in u_longs. All entries are the same size so * any one will do as the numerator. */ #define UINT32_ENTRY_SIZE (sizeof(at_entry_t)/sizeof(u_int32_t)) /* * QLA2100 CTIO (type 2) entry */ #define MAXRESPLEN 26 typedef struct { isphdr_t ct_header; u_int16_t ct_reserved; u_int16_t ct_fwhandle; /* just to match CTIO */ u_int8_t ct_lun; /* lun */ u_int8_t ct_iid; /* initiator id */ u_int16_t ct_rxid; /* response ID */ u_int16_t ct_flags; u_int16_t ct_status; /* isp status */ u_int16_t ct_timeout; u_int16_t ct_seg_count; u_int32_t ct_reloff; /* relative offset */ int32_t ct_resid; /* residual length */ union { /* * The three different modes that the target driver * can set the CTIO2 up as. * * The first is for sending FCP_DATA_IUs as well as * (optionally) sending a terminal SCSI status FCP_RSP_IU. * * The second is for sending SCSI sense data in an FCP_RSP_IU. * Note that no FCP_DATA_IUs will be sent. * * The third is for sending FCP_RSP_IUs as built specifically * in system memory as located by the isp_dataseg. */ struct { u_int32_t _reserved; u_int16_t _reserved2; u_int16_t ct_scsi_status; u_int32_t ct_xfrlen; ispds_t ct_dataseg[ISP_RQDSEG_T2]; } m0; struct { u_int16_t _reserved; u_int16_t _reserved2; u_int16_t ct_senselen; u_int16_t ct_scsi_status; u_int16_t ct_resplen; u_int8_t ct_resp[MAXRESPLEN]; } m1; struct { u_int32_t _reserved; u_int16_t _reserved2; u_int16_t _reserved3; u_int32_t ct_datalen; ispds_t ct_fcp_rsp_iudata; } m2; /* * CTIO2 returned from F/W... 
*/ struct { u_int32_t _reserved[4]; u_int16_t ct_scsi_status; u_int8_t ct_sense[QLTM_SENSELEN]; } fw; } rsp; } ct2_entry_t; /* * ct_flags values for CTIO2 */ #define CT2_FLAG_MMASK 0x0003 #define CT2_FLAG_MODE0 0x0000 #define CT2_FLAG_MODE1 0x0001 #define CT2_FLAG_MODE2 0x0002 #define CT2_DATA_IN CT_DATA_IN #define CT2_DATA_OUT CT_DATA_OUT #define CT2_NO_DATA CT_NO_DATA #define CT2_DATAMASK CT_DATAMASK #define CT2_CCINCR 0x0100 #define CT2_FASTPOST 0x0200 #define CT2_SENDSTATUS 0x8000 /* * ct_status values are (mostly) the same as that for ct_entry. */ /* * ct_scsi_status values- the low 8 bits are the normal SCSI status * we know and love. The upper 8 bits are validity markers for FCP_RSP_IU * fields. */ #define CT2_RSPLEN_VALID 0x0100 #define CT2_SNSLEN_VALID 0x0200 #define CT2_DATA_OVER 0x0400 #define CT2_DATA_UNDER 0x0800 /* * Macros for packing/unpacking the above structures */ #ifdef __sparc__ #define ISP_SBUS_SWOZZLE(isp, src, dst, taga, tagb) \ if (isp->isp_bustype == ISP_BT_SBUS) { \ u_int8_t tmp = src -> taga; \ dst -> taga = dst -> tagb; \ src -> tagb = tmp; \ } else { \ dst -> taga = src -> taga; \ dst -> tagb = src -> taga; \ } #else #define ISP_SBUS_SWOZZLE(isp, src, dst, taga, tagb) \ dst -> taga = src -> taga; \ dst -> tagb = src -> taga #endif #define MCIDF(d, s) if ((void *) d != (void *)s) MEMCPY(d, s, QENTRY_LEN) /* This is really only for SBus cards on a sparc */ #ifdef __sparc__ #define ISP_SWIZ_ATIO(isp, vdst, vsrc) \ { \ at_entry_t *src = (at_entry_t *) vsrc; \ at_entry_t *dst = (at_entry_t *) vdst; \ dst->at_header = src->at_header; \ dst->at_reserved = src->at_reserved; \ dst->at_handle = src->at_handle; \ ISP_SBUS_SWOZZLE(isp, src, dst, at_lun, at_iid); \ ISP_SBUS_SWOZZLE(isp, src, dst, at_cdblen, at_tgt); \ dst->at_flags = src->at_flags; \ ISP_SBUS_SWOZZLE(isp, src, dst, at_status, at_scsi_status); \ ISP_SBUS_SWOZZLE(isp, src, dst, at_tag_val, at_tag_type); \ MEMCPY(dst->at_cdb, src->at_cdb, ATIO_CDBLEN); \ MEMCPY(dst->at_sense, 
src->at_sense, QLTM_SENSELEN); \ } #define ISP_SWIZ_ATIO2(isp, vdst, vsrc) \ { \ at2_entry_t *src = (at2_entry_t *) vsrc; \ at2_entry_t *dst = (at2_entry_t *) vdst; \ dst->at_reserved = src->at_reserved; \ ISP_SBUS_SWOZZLE(isp, src, dst, at_lun, at_iid); \ dst->at_rxid = src->at_rxid; \ dst->at_flags = src->at_flags; \ dst->at_status = src->at_status; \ ISP_SBUS_SWOZZLE(isp, src, dst, at_reserved1, at_taskcodes); \ ISP_SBUS_SWOZZLE(isp, src, dst, at_taskflags, at_execodes); \ MEMCPY(dst->at_cdb, src->at_cdb, ATIO2_CDBLEN); \ dst->at_datalen = src->at_datalen; \ dst->at_scclun = src->at_scclun; \ MEMCPY(dst->at_reserved2, src->at_reserved2, sizeof dst->at_reserved2);\ dst->at_oxid = src->at_oxid; \ } #define ISP_SWIZ_CTIO(isp, vdst, vsrc) \ { \ ct_entry_t *src = (ct_entry_t *) vsrc; \ ct_entry_t *dst = (ct_entry_t *) vdst; \ dst->ct_header = src->ct_header; \ dst->ct_syshandle = src->ct_syshandle; \ dst->ct_fwhandle = src->ct_fwhandle; \ dst->ct_fwhandle = src->ct_fwhandle; \ ISP_SBUS_SWOZZLE(isp, src, dst, ct_lun, ct_iid); \ ISP_SBUS_SWOZZLE(isp, src, dst, ct_reserved2, ct_tgt); \ dst->ct_flags = src->ct_flags; \ ISP_SBUS_SWOZZLE(isp, src, dst, ct_status, ct_scsi_status); \ ISP_SBUS_SWOZZLE(isp, src, dst, ct_tag_val, ct_tag_type); \ dst->ct_xfrlen = src->ct_xfrlen; \ dst->ct_resid = src->ct_resid; \ dst->ct_timeout = src->ct_timeout; \ dst->ct_seg_count = src->ct_seg_count; \ MEMCPY(dst->ct_dataseg, src->ct_dataseg, sizeof (dst->ct_dataseg)); \ } #define ISP_SWIZ_CTIO2(isp, vdst, vsrc) \ { \ ct2_entry_t *src = (ct2_entry_t *) vsrc; \ ct2_entry_t *dst = (ct2_entry_t *) vdst; \ dst->ct_header = src->ct_header; \ dst->ct_syshandle = src->ct_syshandle; \ dst->ct_fwhandle = src->ct_fwhandle; \ dst->ct_fwhandle = src->ct_fwhandle; \ ISP_SBUS_SWOZZLE(isp, src, dst, ct_lun, ct_iid); \ dst->ct_rxid = src->ct_rxid; \ dst->ct_flags = src->ct_flags; \ dst->ct_status = src->ct_status; \ dst->ct_timeout = src->ct_timeout; \ dst->ct_seg_count = src->ct_seg_count; \ dst->ct_reloff 
= src->ct_reloff; \ dst->ct_resid = src->ct_resid; \ dst->rsp = src->rsp; \ } #define ISP_SWIZ_ENABLE_LUN(isp, vdst, vsrc) \ { \ lun_entry_t *src = (lun_entry_t *)vsrc; \ lun_entry_t *dst = (lun_entry_t *)vdst; \ dst->le_header = src->le_header; \ dst->le_reserved2 = src->le_reserved2; \ ISP_SBUS_SWOZZLE(isp, src, dst, le_lun, le_rsvd); \ ISP_SBUS_SWOZZLE(isp, src, dst, le_ops, le_tgt); \ dst->le_flags = src->le_flags; \ ISP_SBUS_SWOZZLE(isp, src, dst, le_status, le_reserved2); \ ISP_SBUS_SWOZZLE(isp, src, dst, le_cmd_count, le_in_count); \ ISP_SBUS_SWOZZLE(isp, src, dst, le_cdb6len, le_cdb7len); \ dst->le_timeout = src->le_timeout; \ dst->le_reserved = src->le_reserved; \ } #define ISP_SWIZ_NOTIFY(isp, vdst, vsrc) \ { \ in_entry_type *src = (in_entry_t *)vsrc; \ in_entry_type *dst = (in_entry_t *)vdst; \ dst->in_header = src->in_header; \ dst->in_reserved2 = src->in_reserved2; \ ISP_SBUS_SWOZZLE(isp, src, dst, in_lun, in_iid); \ ISP_SBUS_SWOZZLE(isp, src, dst, in_reserved2, in_tgt); \ dst->in_flags = src->in_flags; \ ISP_SBUS_SWOZZLE(isp, src, dst, in_status, in_rsvd2); \ ISP_SBUS_SWOZZLE(isp, src, dst, in_tag_val, in_tag_type); \ dst->in_seqid = src->in_seqid; \ MEMCPY(dst->in_msg, src->in_msg, IN_MSGLEN); \ MEMCPY(dst->in_reserved, src->in_reserved, IN_RESERVED); \ MEMCPY(dst->in_sense, src->in_sense, QLTM_SENSELEN); \ } #define ISP_SWIZ_NOTIFY_FC(isp, vdst, vsrc) \ { \ in_fcentry_type *src = (in_fcentry_t *)vsrc; \ in_fcentry_type *dst = (in_fcentry_t *)vdst; \ dst->in_header = src->in_header; \ dst->in_reserved2 = src->in_reserved2; \ ISP_SBUS_SWOZZLE(isp, src, dst, in_lun, in_iid); \ dst->in_scclun = src->in_scclun; \ dst->in_reserved2 = src->in_reserved2; \ dst->in_status = src->in_status; \ dst->in_task_flags = src->in_task_flags; \ dst->in_seqid = src->in_seqid; \ } #define ISP_SWIZ_NOT_ACK(isp, vdst, vsrc) \ { \ na_entry_t *src = (na_entry_t *)vsrc; \ na_entry_t *dst = (na_entry_t *)vdst; \ dst->na_header = src->na_header; \ dst->na_reserved = 
src->na_reserved; \ ISP_SBUS_SWOZZLE(isp, src, dst, na_lun, na_iid); \ dst->na_reserved2 = src->na_reserved2; \ ISP_SBUS_SWOZZLE(isp, src, dst, na_reserved, na_tgt); \ dst->na_flags = src->na_flags; \ ISP_SBUS_SWOZZLE(isp, src, dst, na_status, na_event); \ dst->na_seqid = src->na_seqid; \ MEMCPY(dst->na_reserved3, src->na_reserved3, NA_RSVDLEN); \ } #define ISP_SWIZ_NOT_ACK_FC(isp, vdst, vsrc) \ { \ na_fcentry_t *src = (na_fcentry_t *)vsrc; \ na_fcentry_t *dst = (na_fcentry_t *)vdst; \ dst->na_header = src->na_header; \ dst->na_reserved = src->na_reserved; \ ISP_SBUS_SWOZZLE(isp, src, dst, na_lun, na_iid); \ dst->na_scclun = src->na_scclun; \ dst->na_flags = src->na_flags; \ dst->na_reserved2 = src->na_reserved2; \ dst->na_status = src->na_status; \ dst->na_task_flags = src->na_task_flags; \ dst->na_seqid = src->na_seqid; \ MEMCPY(dst->na_reserved3, src->na_reserved3, NA2_RSVDLEN); \ } #else #define ISP_SWIZ_ATIO(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_ATIO2(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_CTIO(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_CTIO2(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_ENABLE_LUN(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_ATIO2(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_CTIO2(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_NOTIFY(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_NOTIFY_FC(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_NOT_ACK(isp, d, s) MCIDF(d, s) #define ISP_SWIZ_NOT_ACK_FC(isp, d, s) MCIDF(d, s) #endif /* * Debug macros */ #define ISP_TDQE(isp, msg, idx, arg) \ if (isp->isp_dblev & ISP_LOGTDEBUG2) isp_print_qentry(isp, msg, idx, arg) /* * The functions below are target mode functions that * are generally internal to the Qlogic driver. */ /* * This function handles new response queue entry appropriate for target mode. */ int isp_target_notify(struct ispsoftc *, void *, u_int16_t *); /* * Enable/Disable/Modify a logical unit. 
+ * (softc, cmd, bus, tgt, lun, cmd_cnt, inotify_cnt, opaque) */ -#define DFLT_CMD_CNT 32 /* XX */ -#define DFLT_INOTIFY (4) -int isp_lun_cmd(struct ispsoftc *, int, int, int, int, u_int32_t); +#define DFLT_CMND_CNT 32 +#define DFLT_INOT_CNT 4 +int isp_lun_cmd(struct ispsoftc *, int, int, int, int, int, int, u_int32_t); /* * General request queue 'put' routine for target mode entries. */ int isp_target_put_entry(struct ispsoftc *isp, void *); /* * General routine to put back an ATIO entry- * used for replenishing f/w resource counts. * The argument is a pointer to a source ATIO * or ATIO2. */ int isp_target_put_atio(struct ispsoftc *, void *); /* * General routine to send a final CTIO for a command- used mostly for * local responses. */ int isp_endcmd(struct ispsoftc *, void *, u_int32_t, u_int16_t); #define ECMD_SVALID 0x100 /* * Handle an asynchronous event */ void isp_target_async(struct ispsoftc *, int, int); #endif /* _ISP_TARGET_H */ diff --git a/sys/dev/isp/isp_tpublic.h b/sys/dev/isp/isp_tpublic.h index 03f832dd8b5a..4a03c9930266 100644 --- a/sys/dev/isp/isp_tpublic.h +++ b/sys/dev/isp/isp_tpublic.h @@ -1,333 +1,334 @@ /* $FreeBSD$ */ /* * Qlogic ISP Host Adapter Public Target Interface Structures && Routines *--------------------------------------- * Copyright (c) 2000 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Matthew Jacob * Feral Software * mjacob@feral.com */ /* * Required software target mode message and event handling structures. * * The message and event structures are used by the MI layer * to propagate messages and events upstream. */ #ifndef IN_MSGLEN #define IN_MSGLEN 8 #endif typedef struct { void * nt_hba; /* HBA tag */ u_int64_t nt_iid; /* inititator id */ u_int64_t nt_tgt; /* target id */ u_int64_t nt_lun; /* logical unit */ u_int8_t nt_bus; /* bus */ u_int8_t nt_tagtype; /* tag type */ u_int16_t nt_tagval; /* tag value */ u_int8_t nt_msg[IN_MSGLEN]; /* message content */ } tmd_msg_t; typedef struct { void * ev_hba; /* HBA tag */ u_int16_t ev_bus; /* bus */ u_int16_t ev_event; /* type of async event */ } tmd_event_t; /* * Suggested Software Target Mode Command Handling structure. * * A note about terminology: * * MD stands for "Machine Dependent". * * This driver is structured in three layers: Outer MD, core, and inner MD. * The latter also is bus dependent (i.e., is cognizant of PCI bus issues * as well as platform issues). 
* * * "Outer Layer" means "Other Module" * * Some additional module that actually implements SCSI target command * policy is the recipient of incoming commands and the source of the * disposition for them. * * The command structure below is one suggested possible MD command structure, * but since the handling of thbis is entirely in the MD layer, there is * no explicit or implicit requirement that it be used. * * The cd_private tag should be used by the MD layer to keep a free list * of these structures. Code outside of this driver can then use this * as an to identify it's own unit structures. That is, when not on the MD * layer's freelist, the MD layer should shove into it the identifier * that the outer layer has for it- passed in on an initial QIN_HBA_REG * call (see below). * * The cd_hba tag is a tag that uniquely identifies the HBA this target * mode command is coming from. The outer layer has to pass this back * unchanged to avoid chaos. * * The cd_iid, cd_tgt, cd_lun and cd_bus tags are used to identify the * id of the initiator who sent us a command, the target claim to be, the * lun on the target we claim to be, and the bus instance (for multiple * bus host adapters) that this applies to (consider it an extra Port * parameter). The iid, tgt and lun values are deliberately chosen to be * fat so that, for example, World Wide Names can be used instead of * the units that the Qlogic firmware uses (in the case where the MD * layer maintains a port database, for example). * * The cd_tagtype field specifies what kind of command tag has been * sent with the command. The cd_tagval is the tag's value (low 16 * bits). It also contains (in the upper 16 bits) any command handle. * * * N.B.: when the MD layer sends this command to outside software * the outside software likely *MUST* return the same cd_tagval that * was in place because this value is likely what the Qlogic f/w uses * to identify a command. 
* * The cd_cdb contains storage for the passed in command descriptor block. * This is the maximum size we can get out of the Qlogic f/w. There's no * passed in length because whoever decodes the command to act upon it * will know what the appropriate length is. * * The tag cd_lflags are the flags set by the MD driver when it gets * command incoming or when it needs to inform any outside entities * that the last requested action failed. * * The tag cd_hflags should be set by any outside software to indicate * the validity of sense and status fields (defined below) and to indicate * the direction data is expected to move. It is an error to have both * CDFH_DATA_IN and CDFH_DATA_OUT set. * * If the CDFH_STSVALID flag is set, the command should be completed (after * sending any data and/or status). If CDFH_SNSVALID is set and the MD layer * can also handle sending the associated sense data (either back with an * FCP RESPONSE IU for Fibre Channel or otherwise automatically handling a * REQUEST SENSE from the initator for this target/lun), the MD layer will * set the CDFL_SENTSENSE flag on successful transmission of the sense data. * It is an error for the CDFH_SNSVALID bit to be set and CDFH_STSVALID not * to be set. It is an error for the CDFH_SNSVALID be set and the associated * SCSI status (cd_scsi_status) not be set to CHECK CONDITON. * * The tag cd_data points to a data segment to either be filled or * read from depending on the direction of data movement. The tag * is undefined if no data direction is set. The MD layer and outer * layers must agree on the meaning of cd_data. * * The tag cd_totlen is the total data amount expected to be moved * over the life of the command. It *may* be set by the MD layer, possibly * from the datalen field of an FCP CMND IU unit. If it shows up in the outer * layers set to zero and the CDB indicates data should be moved, the outer * layer should set it to the amount expected to be moved. 
* * The tag cd_resid should be the total residual of data not transferred. * The outer layers need to set this at the begining of command processing * to equal cd_totlen. As data is successfully moved, this value is decreased. * At the end of a command, any nonzero residual indicates the number of bytes * requested but not moved. XXXXXXXXXXXXXXXXXXXXXXX TOO VAGUE!!! * * The tag cd_xfrlen is the length of the currently active data transfer. * This allows several interations between any outside software and the * MD layer to move data. * * The reason that total length and total residual have to be tracked * is that fibre channel FCP DATA IU units have to have a relative * offset field. * * N.B.: there is no necessary 1-to-1 correspondence between any one * data transfer segment and the number of CTIOs that will be generated * satisfy the current data transfer segment. It's not also possible to * predict how big a transfer can be before it will be 'too big'. Be * reasonable- a 64KB transfer is 'reasonable'. A 1MB transfer may not * be. A 32MB transfer is unreasonable. The problem here has to do with * how CTIOs can be used to map passed data pointers. In systems which * have page based scatter-gather requirements, each PAGESIZEd chunk will * consume one data segment descriptor- you get 3 or 4 of them per CTIO. * The size of the REQUEST QUEUE you drop a CTIO onto is finite (typically * it's 256, but on some systems it's even smaller, and note you have to * sure this queue with the initiator side of this driver). * * The tags cd_sense and cd_scsi_status are pretty obvious. * * The tag cd_error is to communicate between the MD layer and outer software * the current error conditions. * * The tag cd_reserved pads out the structure to 128 bytes. The first * half of the pad area is reserved to the MD layer, and the second half * may be used by outer layers, for scratch purposes. 
*/ #ifndef _LP64 #if defined(__alpha__) || defined(__sparcv9cpu) || defined(__sparc_v9__) #define _LP64 #endif #endif #ifndef _TMD_PAD_LEN #ifdef _LP64 #define _TMD_PAD_LEN 12 #else #define _TMD_PAD_LEN 24 #endif #endif #ifndef ATIO_CDBLEN #define ATIO_CDBLEN 26 #endif #ifndef QLTM_SENSELEN #define QLTM_SENSELEN 18 #endif typedef struct tmd_cmd { void * cd_private; /* layer private data */ void * cd_hba; /* HBA tag */ void * cd_data; /* 'pointer' to data */ u_int64_t cd_iid; /* initiator ID */ u_int64_t cd_tgt; /* target id */ u_int64_t cd_lun; /* logical unit */ u_int8_t cd_bus; /* bus */ u_int8_t cd_tagtype; /* tag type */ u_int32_t cd_tagval; /* tag value */ u_int8_t cd_cdb[ATIO_CDBLEN]; /* Command */ u_int8_t cd_lflags; /* flags lower level sets */ u_int8_t cd_hflags; /* flags higher level sets */ u_int32_t cd_totlen; /* total data requirement */ u_int32_t cd_resid; /* total data residual */ u_int32_t cd_xfrlen; /* current data requirement */ int32_t cd_error; /* current error */ u_int8_t cd_sense[QLTM_SENSELEN]; u_int16_t cd_scsi_status; /* closing SCSI status */ u_int8_t cd_reserved[_TMD_PAD_LEN]; } tmd_cmd_t; -#define CDFL_BUSY 0x01 /* this command is not on a free list */ +#define CDFL_SNSVALID 0x01 /* sense data (from f/w) valid */ #define CDFL_NODISC 0x02 /* disconnects disabled */ #define CDFL_SENTSENSE 0x04 /* last action sent sense data */ #define CDFL_SENTSTATUS 0x08 /* last action sent status */ #define CDFL_ERROR 0x10 /* last action ended in error */ +#define CDFL_BUSY 0x40 /* this command is not on a free list */ #define CDFL_PRIVATE_0 0x80 /* private layer flags */ #define CDFH_SNSVALID 0x01 /* sense data valid */ #define CDFH_STSVALID 0x02 /* status valid */ #define CDFH_NODATA 0x00 /* no data transfer expected */ #define CDFH_DATA_IN 0x04 /* target (us) -> initiator (them) */ #define CDFH_DATA_OUT 0x08 /* initiator (them) -> target (us) */ #define CDFH_DATA_MASK 0x0C /* mask to cover data direction */ #define CDFH_PRIVATE_0 0x80 /* private layer 
flags */ /* * Action codes set by the Qlogic MD target driver for * the external layer to figure out what to do with. */ typedef enum { QOUT_HBA_REG=0, /* the argument is a pointer to a hba_register_t */ QOUT_TMD_START, /* the argument is a pointer to a tmd_cmd_t */ QOUT_TMD_DONE, /* the argument is a pointer to a tmd_cmd_t */ QOUT_TEVENT, /* the argument is a pointer to a tmd_event_t */ QOUT_TMSG, /* the argument is a pointer to a tmd_msg_t */ QOUT_HBA_UNREG /* the argument is a pointer to a hba_register_t */ } tact_e; /* * Action codes set by the external layer for the * MD Qlogic driver to figure out what to do with. */ typedef enum { QIN_HBA_REG=6, /* the argument is a pointer to a hba_register_t */ QIN_ENABLE, /* the argument is a pointer to a tmd_cmd_t */ QIN_DISABLE, /* the argument is a pointer to a tmd_cmd_t */ QIN_TMD_CONT, /* the argument is a pointer to a tmd_cmd_t */ QIN_TMD_FIN, /* the argument is a pointer to a done tmd_cmd_t */ QIN_HBA_UNREG /* the argument is a pointer to a hba_register_t */ } qact_e; /* * A word about the START/CONT/DONE/FIN dance: * * When the HBA is enabled for receiving commands, one may show up * without notice. When that happens, the Qlogic target mode driver * gets a tmd_cmd_t, fills it with the info that just arrived, and * calls the outer layer with a QOUT_TMD_START code and pointer to * the tmd_cmd_t. * * The outer layer decodes the command, fetches data, prepares stuff, * whatever, and starts by passing back the pointer with a QIN_TMD_CONT * code which causes the Qlogic target mode driver to generate CTIOs to * satisfy whatever action needs to be taken. When those CTIOs complete, * the Qlogic target driver sends the pointer to the cmd_tmd_t back with * a QOUT_TMD_DONE code. This repeats for as long as necessary. * * The outer layer signals it wants to end the command by settings within * the tmd_cmd_t itself. 
When the final QIN_TMD_CONT is reported completed, * the outer layer frees the tmd_cmd_t by sending the pointer to it * back with a QIN_TMD_FIN code. * * The graph looks like: * * QOUT_TMD_START -> [ QIN_TMD_CONT -> QOUT_TMD_DONE ] * -> QIN_TMD_FIN. * */ /* * A word about ENABLE/DISABLE: the argument is a pointer to an tmd_cmd_t * with cd_hba, cd_bus, cd_tgt and cd_lun filled out. If an error occurs * in either enabling or disabling the described lun, cd_lflags is set * with CDFL_ERROR. * * Logical unit zero must be the first enabled and the last disabled. */ /* * Target handler functions. * The MD target handler function (the outer layer calls this) * should be be prototyped like: * * void target_action(qact_e, void *arg) * * The outer layer target handler function (the MD layer calls this) * should be be prototyped like: * * void system_action(tact_e, void *arg) */ /* * This structure is used to register to other software modules the * binding of an HBA identifier, driver name and instance and the * lun width capapbilities of this target driver. It's up to each * platform to figure out how it wants to do this, but a typical * sequence would be for the MD layer to find some external module's * entry point and start by sending a QOUT_HBA_REG with info filled * in, and the external module to call back with a QIN_HBA_REG that * passes back the corresponding information. */ typedef struct { void * r_identity; char r_name[8]; int r_inst; int r_lunwidth; int r_buswidth; void (*r_action)(int, void *); } hba_register_t; diff --git a/sys/dev/isp/ispvar.h b/sys/dev/isp/ispvar.h index fa30e616816c..35c778a54913 100644 --- a/sys/dev/isp/ispvar.h +++ b/sys/dev/isp/ispvar.h @@ -1,786 +1,792 @@ /* $FreeBSD$ */ /* * Soft Definitions for for Qlogic ISP SCSI adapters. * * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _ISPVAR_H #define _ISPVAR_H #if defined(__NetBSD__) || defined(__OpenBSD__) #include #ifdef ISP_TARGET_MODE #include #include #endif #endif #ifdef __FreeBSD__ #include #ifdef ISP_TARGET_MODE #include #include #endif #endif #ifdef __linux__ #include "ispmbox.h" #ifdef ISP_TARGET_MODE #include "isp_target.h" #include "isp_tpublic.h" #endif #endif #define ISP_CORE_VERSION_MAJOR 2 #define ISP_CORE_VERSION_MINOR 1 /* * Vector for bus specific code to provide specific services. 
*/ struct ispsoftc; struct ispmdvec { u_int16_t (*dv_rd_reg) (struct ispsoftc *, int); void (*dv_wr_reg) (struct ispsoftc *, int, u_int16_t); int (*dv_mbxdma) (struct ispsoftc *); int (*dv_dmaset) (struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); void (*dv_dmaclr) (struct ispsoftc *, XS_T *, u_int16_t); void (*dv_reset0) (struct ispsoftc *); void (*dv_reset1) (struct ispsoftc *); void (*dv_dregs) (struct ispsoftc *, const char *); const u_int16_t *dv_ispfw; /* ptr to f/w */ u_int16_t dv_conf1; u_int16_t dv_clock; /* clock frequency */ }; /* * Overall parameters */ #define MAX_TARGETS 16 #define MAX_FC_TARG 256 #define ISP_MAX_TARGETS(isp) (IS_FC(isp)? MAX_FC_TARG : MAX_TARGETS) #define ISP_MAX_LUNS(isp) (isp)->isp_maxluns /* * Macros to access ISP registers through bus specific layers- * mostly wrappers to vector through the mdvec structure. */ #define ISP_READ(isp, reg) \ (*(isp)->isp_mdvec->dv_rd_reg)((isp), (reg)) #define ISP_WRITE(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), (val)) #define ISP_MBOXDMASETUP(isp) \ (*(isp)->isp_mdvec->dv_mbxdma)((isp)) #define ISP_DMASETUP(isp, xs, req, iptrp, optr) \ (*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req), (iptrp), (optr)) #define ISP_DMAFREE(isp, xs, hndl) \ if ((isp)->isp_mdvec->dv_dmaclr) \ (*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl)) #define ISP_RESET0(isp) \ if ((isp)->isp_mdvec->dv_reset0) (*(isp)->isp_mdvec->dv_reset0)((isp)) #define ISP_RESET1(isp) \ if ((isp)->isp_mdvec->dv_reset1) (*(isp)->isp_mdvec->dv_reset1)((isp)) #define ISP_DUMPREGS(isp, m) \ if ((isp)->isp_mdvec->dv_dregs) (*(isp)->isp_mdvec->dv_dregs)((isp),(m)) #define ISP_SETBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) | (val)) #define ISP_CLRBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) & ~(val)) /* * The MEMORYBARRIER macro is defined per platform (to provide synchronization * on Request and Response Queues, Scratch DMA 
areas, and Registers) * * Defined Memory Barrier Synchronization Types */ #define SYNC_REQUEST 0 /* request queue synchronization */ #define SYNC_RESULT 1 /* result queue synchronization */ #define SYNC_SFORDEV 2 /* scratch, sync for ISP */ #define SYNC_SFORCPU 3 /* scratch, sync for CPU */ #define SYNC_REG 4 /* for registers */ /* * Request/Response Queue defines and macros. * The maximum is defined per platform (and can be based on board type). */ /* This is the size of a queue entry (request and response) */ #define QENTRY_LEN 64 /* Both request and result queue length must be a power of two */ #define RQUEST_QUEUE_LEN(x) MAXISPREQUEST(x) #define RESULT_QUEUE_LEN(x) \ (((MAXISPREQUEST(x) >> 2) < 64)? 64 : MAXISPREQUEST(x) >> 2) #define ISP_QUEUE_ENTRY(q, idx) ((q) + ((idx) * QENTRY_LEN)) #define ISP_QUEUE_SIZE(n) ((n) * QENTRY_LEN) #define ISP_NXT_QENTRY(idx, qlen) (((idx) + 1) & ((qlen)-1)) #define ISP_QFREE(in, out, qlen) \ ((in == out)? (qlen - 1) : ((in > out)? \ ((qlen - 1) - (in - out)) : (out - in - 1))) #define ISP_QAVAIL(isp) \ ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp)) #define ISP_ADD_REQUEST(isp, iptr) \ MEMORYBARRIER(isp, SYNC_REQUEST, iptr, QENTRY_LEN); \ ISP_WRITE(isp, INMAILBOX4, iptr); \ isp->isp_reqidx = iptr /* * SCSI Specific Host Adapter Parameters- per bus, per target */ typedef struct { u_int isp_gotdparms : 1, isp_req_ack_active_neg : 1, isp_data_line_active_neg: 1, isp_cmd_dma_burst_enable: 1, isp_data_dma_burst_enabl: 1, isp_fifo_threshold : 3, isp_ultramode : 1, isp_diffmode : 1, isp_lvdmode : 1, isp_fast_mttr : 1, /* fast sram */ isp_initiator_id : 4, isp_async_data_setup : 4; u_int16_t isp_selection_timeout; u_int16_t isp_max_queue_depth; u_int8_t isp_tag_aging; u_int8_t isp_bus_reset_delay; u_int8_t isp_retry_count; u_int8_t isp_retry_delay; struct { u_int dev_enable : 1, /* ignored */ : 1, dev_update : 1, dev_refresh : 1, exc_throttle : 8, cur_offset : 4, sync_offset : 4; u_int8_t cur_period; /* current sync 
period */ u_int8_t sync_period; /* goal sync period */ u_int16_t dev_flags; /* goal device flags */ u_int16_t cur_dflags; /* current device flags */ } isp_devparam[MAX_TARGETS]; } sdparam; /* * Device Flags */ #define DPARM_DISC 0x8000 #define DPARM_PARITY 0x4000 #define DPARM_WIDE 0x2000 #define DPARM_SYNC 0x1000 #define DPARM_TQING 0x0800 #define DPARM_ARQ 0x0400 #define DPARM_QFRZ 0x0200 #define DPARM_RENEG 0x0100 #define DPARM_NARROW 0x0080 #define DPARM_ASYNC 0x0040 #define DPARM_PPR 0x0020 #define DPARM_DEFAULT (0xFF00 & ~DPARM_QFRZ) #define DPARM_SAFE_DFLT (DPARM_DEFAULT & ~(DPARM_WIDE|DPARM_SYNC|DPARM_TQING)) /* technically, not really correct, as they need to be rated based upon clock */ #define ISP_80M_SYNCPARMS 0x0c09 #define ISP_40M_SYNCPARMS 0x0c0a #define ISP_20M_SYNCPARMS 0x0c0c #define ISP_20M_SYNCPARMS_1040 0x080c #define ISP_10M_SYNCPARMS 0x0c19 #define ISP_08M_SYNCPARMS 0x0c25 #define ISP_05M_SYNCPARMS 0x0c32 #define ISP_04M_SYNCPARMS 0x0c41 /* * Fibre Channel Specifics */ #define FL_PORT_ID 0x7e /* FL_Port Special ID */ #define FC_PORT_ID 0x7f /* Fabric Controller Special ID */ #define FC_SNS_ID 0x80 /* SNS Server Special ID */ typedef struct { u_int32_t isp_fwoptions : 16, : 3, isp_iid_set : 1, loop_seen_once : 1, isp_loopstate : 3, /* Current Loop State */ isp_fwstate : 3, /* ISP F/W state */ isp_gotdparms : 1, isp_topo : 3, isp_onfabric : 1; u_int8_t isp_iid; /* 'initiator' id */ u_int8_t isp_loopid; /* hard loop id */ u_int8_t isp_alpa; /* ALPA */ u_int32_t isp_portid; volatile u_int16_t isp_lipseq; /* LIP sequence # */ u_int16_t isp_xxxxxx; u_int8_t isp_execthrottle; u_int8_t isp_retry_delay; u_int8_t isp_retry_count; u_int8_t isp_reserved; u_int16_t isp_maxalloc; u_int16_t isp_maxfrmlen; u_int64_t isp_nodewwn; u_int64_t isp_portwwn; /* * Port Data Base. This is indexed by 'target', which is invariate. * However, elements within can move around due to loop changes, * so the actual loop ID passed to the F/W is in this structure. 
* The first time the loop is seen up, loopid will match the index * (except for fabric nodes which are above mapped above FC_SNS_ID * and are completely virtual), but subsequent LIPs can cause things * to move around. */ struct lportdb { u_int loopid : 8, : 2, was_fabric_dev : 1, fabric_dev : 1, loggedin : 1, roles : 2, valid : 1; u_int32_t portid; u_int64_t node_wwn; u_int64_t port_wwn; } portdb[MAX_FC_TARG], tport[FC_PORT_ID]; /* * Scratch DMA mapped in area to fetch Port Database stuff, etc. */ caddr_t isp_scratch; u_int32_t isp_scdma; } fcparam; #define FW_CONFIG_WAIT 0 #define FW_WAIT_AL_PA 1 #define FW_WAIT_LOGIN 2 #define FW_READY 3 #define FW_LOSS_OF_SYNC 4 #define FW_ERROR 5 #define FW_REINIT 6 #define FW_NON_PART 7 #define LOOP_NIL 0 #define LOOP_LIP_RCVD 1 #define LOOP_PDB_RCVD 2 #define LOOP_SCANNING_FABRIC 3 #define LOOP_FSCAN_DONE 4 #define LOOP_SCANNING_LOOP 5 #define LOOP_LSCAN_DONE 4 #define LOOP_SYNCING_PDB 6 #define LOOP_READY 7 #define TOPO_NL_PORT 0 #define TOPO_FL_PORT 1 #define TOPO_N_PORT 2 #define TOPO_F_PORT 3 #define TOPO_PTP_STUB 4 /* * Soft Structure per host adapter */ typedef struct ispsoftc { /* * Platform (OS) specific data */ struct isposinfo isp_osinfo; /* * Pointer to bus specific functions and data */ struct ispmdvec * isp_mdvec; /* * (Mostly) nonvolatile state. Board specific parameters * may contain some volatile state (e.g., current loop state). */ void * isp_param; /* type specific */ u_int16_t isp_fwrev[3]; /* Loaded F/W revision */ u_int16_t isp_romfw_rev[3]; /* PROM F/W revision */ u_int16_t isp_maxcmds; /* max possible I/O cmds */ u_int8_t isp_type; /* HBA Chip Type */ u_int8_t isp_revision; /* HBA Chip H/W Revision */ u_int32_t isp_maxluns; /* maximum luns supported */ u_int32_t isp_clock : 8, /* input clock */ : 6, isp_role : 2, : 1, isp_touched : 1, /* board ever seen? 
*/ isp_bustype : 1, /* SBus or PCI */ isp_loaded_fw : 1, /* loaded firmware */ isp_dblev : 12; /* debug log mask */ u_int32_t isp_confopts; /* config options */ /* * Instrumentation */ u_int64_t isp_intcnt; /* total int count */ u_int64_t isp_intbogus; /* spurious int count */ /* * Volatile state */ volatile u_int32_t isp_mboxbsy : 8, /* mailbox command active */ : 1, isp_state : 3, isp_sendmarker : 2, /* send a marker entry */ isp_update : 2, /* update parameters */ isp_nactive : 16; /* how many commands active */ volatile u_int16_t isp_reqodx; /* index of last ISP pickup */ volatile u_int16_t isp_reqidx; /* index of next request */ volatile u_int16_t isp_residx; /* index of next result */ volatile u_int16_t isp_lasthdls; /* last handle seed */ volatile u_int16_t isp_mboxtmp[MAX_MAILBOX]; volatile u_int16_t isp_lastmbxcmd; /* last mbox command sent */ /* * Active commands are stored here, indexed by handle functions. */ XS_T **isp_xflist; /* * request/result queue pointers and dma handles for them. 
*/ caddr_t isp_rquest; caddr_t isp_result; u_int32_t isp_rquest_dma; u_int32_t isp_result_dma; } ispsoftc_t; #define SDPARAM(isp) ((sdparam *) (isp)->isp_param) #define FCPARAM(isp) ((fcparam *) (isp)->isp_param) /* * ISP Driver Run States */ #define ISP_NILSTATE 0 #define ISP_RESETSTATE 1 #define ISP_INITSTATE 2 #define ISP_RUNSTATE 3 /* * ISP Configuration Options */ #define ISP_CFG_NORELOAD 0x80 /* don't download f/w */ #define ISP_CFG_NONVRAM 0x40 /* ignore NVRAM */ #define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */ #define ISP_CFG_OWNWWN 0x02 /* override NVRAM wwn */ #define ISP_CFG_PORT_PREF 0x0C /* Mask for Port Prefs (2200 only) */ #define ISP_CFG_LPORT 0x00 /* prefer {N/F}L-Port connection */ #define ISP_CFG_NPORT 0x04 /* prefer {N/F}-Port connection */ #define ISP_CFG_NPORT_ONLY 0x08 /* insist on {N/F}-Port connection */ #define ISP_CFG_LPORT_ONLY 0x0C /* insist on {N/F}L-Port connection */ /* * Prior to calling isp_reset for the first time, the outer layer * should set isp_role to one of NONE, INITIATOR, TARGET, BOTH. * * If you set ISP_ROLE_NONE, the cards will be reset, new firmware loaded, * NVRAM read, and defaults set, but any further initialization (e.g. * INITIALIZE CONTROL BLOCK commands for 2X00 cards) won't be done. * * If INITIATOR MODE isn't set, attempts to run commands will be stopped * at isp_start and completed with the moral equivalent of SELECTION TIMEOUT. * * If TARGET MODE is set, it doesn't mean that the rest of target mode support * needs to be enabled, or will even work. What happens with the 2X00 cards * here is that if you have enabled it with TARGET MODE as part of the ICB * options, but you haven't given the f/w any ram resources for ATIOs or * Immediate Notifies, the f/w just handles what it can and you never see * anything. 
Basically, it sends a single byte of data (the first byte, * which you can set as part of the INITIALIZE CONTROL BLOCK command) for * INQUIRY, and sends back QUEUE FULL status for any other command. * */ #define ISP_ROLE_NONE 0x0 #define ISP_ROLE_INITIATOR 0x1 #define ISP_ROLE_TARGET 0x2 #define ISP_ROLE_BOTH (ISP_ROLE_TARGET|ISP_ROLE_INITIATOR) #define ISP_ROLE_EITHER ISP_ROLE_BOTH #ifndef ISP_DEFAULT_ROLES #define ISP_DEFAULT_ROLES ISP_ROLE_INITIATOR #endif /* * Firmware related defines */ #define ISP_CODE_ORG 0x1000 /* default f/w code start */ #define ISP_FW_REV(maj, min, mic) ((maj << 24) | (min << 16) | mic) #define ISP_FW_REVX(xp) ((xp[0]<<24) | (xp[1] << 16) | xp[2]) /* * Bus (implementation) types */ #define ISP_BT_PCI 0 /* PCI Implementations */ #define ISP_BT_SBUS 1 /* SBus Implementations */ /* * Chip Types */ #define ISP_HA_SCSI 0xf #define ISP_HA_SCSI_UNKNOWN 0x1 #define ISP_HA_SCSI_1020 0x2 #define ISP_HA_SCSI_1020A 0x3 #define ISP_HA_SCSI_1040 0x4 #define ISP_HA_SCSI_1040A 0x5 #define ISP_HA_SCSI_1040B 0x6 #define ISP_HA_SCSI_1040C 0x7 #define ISP_HA_SCSI_1240 0x8 #define ISP_HA_SCSI_1080 0x9 #define ISP_HA_SCSI_1280 0xa #define ISP_HA_SCSI_12160 0xb #define ISP_HA_FC 0xf0 #define ISP_HA_FC_2100 0x10 #define ISP_HA_FC_2200 0x20 +#define ISP_HA_FC_2300 0x30 #define IS_SCSI(isp) (isp->isp_type & ISP_HA_SCSI) #define IS_1240(isp) (isp->isp_type == ISP_HA_SCSI_1240) #define IS_1080(isp) (isp->isp_type == ISP_HA_SCSI_1080) #define IS_1280(isp) (isp->isp_type == ISP_HA_SCSI_1280) #define IS_12160(isp) (isp->isp_type == ISP_HA_SCSI_12160) #define IS_12X0(isp) (IS_1240(isp) || IS_1280(isp)) #define IS_DUALBUS(isp) (IS_12X0(isp) || IS_12160(isp)) #define IS_ULTRA2(isp) (IS_1080(isp) || IS_1280(isp) || IS_12160(isp)) #define IS_ULTRA3(isp) (IS_12160(isp)) -#define IS_FC(isp) (isp->isp_type & ISP_HA_FC) -#define IS_2100(isp) (isp->isp_type == ISP_HA_FC_2100) -#define IS_2200(isp) (isp->isp_type == ISP_HA_FC_2200) +#define IS_FC(isp) ((isp)->isp_type & 
ISP_HA_FC) +#define IS_2100(isp) ((isp)->isp_type == ISP_HA_FC_2100) +#define IS_2200(isp) ((isp)->isp_type == ISP_HA_FC_2200) +#define IS_2300(isp) ((isp)->isp_type == ISP_HA_FC_2300) + +/* 2300 Support isn't ready yet */ +#define ISP_DISABLE_2300_SUPPORT 1 /* * DMA cookie macros */ #define DMA_MSW(x) (((x) >> 16) & 0xffff) #define DMA_LSW(x) (((x) & 0xffff)) /* * Core System Function Prototypes */ /* * Reset Hardware. Totally. Assumes that you'll follow this with * a call to isp_init. */ void isp_reset(struct ispsoftc *); /* * Initialize Hardware to known state */ void isp_init(struct ispsoftc *); /* * Reset the ISP and call completion for any orphaned commands. */ void isp_reinit(struct ispsoftc *); /* * Interrupt Service Routine */ int isp_intr(void *); /* * Command Entry Point- Platform Dependent layers call into this */ int isp_start(XS_T *); /* these values are what isp_start returns */ #define CMD_COMPLETE 101 /* command completed */ #define CMD_EAGAIN 102 /* busy- maybe retry later */ #define CMD_QUEUED 103 /* command has been queued for execution */ #define CMD_RQLATER 104 /* requeue this command later */ /* * Command Completion Point- Core layers call out from this with completed cmds */ void isp_done(XS_T *); /* * Platform Dependent to External to Internal Control Function * * Assumes locks are held on entry. You should note that with many of * these commands and locks may be released while this is occurring. * * A few notes about some of these functions: * * ISPCTL_FCLINK_TEST tests to make sure we have good fibre channel link. * The argument is a pointer to an integer which is the time, in microseconds, * we should wait to see whether we have good link. This test, if successful, * lets us know our connection topology and our Loop ID/AL_PA and so on. * You can't get anywhere without this. * * ISPCTL_SCAN_FABRIC queries the name server (if we're on a fabric) for * all entities using the FC Generic Services subcommand GET ALL NEXT. 
* For each found entity, an ISPASYNC_FABRICDEV event is generated (see * below). * * ISPCTL_SCAN_LOOP does a local loop scan. This is only done if the connection * topology is NL or FL port (private or public loop). Since the Qlogic f/w * 'automatically' manages local loop connections, this function essentially * notes the arrival, departure, and possible shuffling around of local loop * entities. Thus for each arrival and departure this generates an isp_async * event of ISPASYNC_PROMENADE (see below). * * ISPCTL_PDB_SYNC is somewhat misnamed. It actually is the final step, in * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_FABRIC, and ISPCTL_SCAN_LOOP. * The main purpose of ISPCTL_PDB_SYNC is to complete management of logging * and logging out of fabric devices (if one is on a fabric) and then marking * the 'loop state' as being ready to now be used for sending commands to * devices. Originally fabric name server and local loop scanning were * part of this function. It's now been seperated to allow for finer control. */ typedef enum { ISPCTL_RESET_BUS, /* Reset Bus */ ISPCTL_RESET_DEV, /* Reset Device */ ISPCTL_ABORT_CMD, /* Abort Command */ ISPCTL_UPDATE_PARAMS, /* Update Operating Parameters (SCSI) */ ISPCTL_FCLINK_TEST, /* Test FC Link Status */ ISPCTL_SCAN_FABRIC, /* (Re)scan Fabric Name Server */ ISPCTL_SCAN_LOOP, /* (Re)scan Local Loop */ ISPCTL_PDB_SYNC, /* Synchronize Port Database */ ISPCTL_SEND_LIP, /* Send a LIP */ ISPCTL_GET_POSMAP, /* Get FC-AL position map */ ISPCTL_RUN_MBOXCMD, /* run a mailbox command */ ISPCTL_TOGGLE_TMODE /* toggle target mode */ } ispctl_t; int isp_control(struct ispsoftc *, ispctl_t, void *); /* * Platform Dependent to Internal to External Control Function * (each platform must provide such a function) * * Assumes locks are held. 
* * A few notes about some of these functions: * * ISPASYNC_CHANGE_NOTIFY notifies the outer layer that a change has * occurred that invalidates the list of fabric devices known and/or * the list of known loop devices. The argument passed is a pointer * whose values are defined below (local loop change, name server * change, other). 'Other' may simply be a LIP, or a change in * connection topology. * * ISPASYNC_FABRIC_DEV announces the next element in a list of * fabric device names we're getting out of the name server. The * argument points to a GET ALL NEXT response structure. The list * is known to terminate with an entry that refers to ourselves. * One of the main purposes of this function is to allow outer * layers, which are OS dependent, to set policy as to which fabric * devices might actually be logged into (and made visible) later * at ISPCTL_PDB_SYNC time. Since there's a finite number of fabric * devices that we can log into (256 less 3 'reserved' for F-port * topologies), and fabrics can grow up to 8 million or so entries * (24 bits of Port Address, less a wad of reserved spaces), clearly * we had better let the OS determine login policy. * * ISPASYNC_PROMENADE has an argument that is a pointer to an integer which * is an index into the portdb in the softc ('target'). Whether that entrie's * valid tag is set or not says whether something has arrived or departed. * The name refers to a favorite pastime of many city dwellers- watching * people come and go, talking of Michaelangelo, and so on.. * * ISPASYNC_UNHANDLED_RESPONSE gives outer layers a chance to parse a * response queue entry not otherwise handled. The outer layer should * return non-zero if it handled it. The 'arg' points to a (possibly only * partially) massaged response queue entry (see the platform's * ISP_UNSWIZZLE_RESPONSE macro). 
*/ typedef enum { ISPASYNC_NEW_TGT_PARAMS, /* New Target Parameters Negotiated */ ISPASYNC_BUS_RESET, /* Bus Was Reset */ ISPASYNC_LOOP_DOWN, /* FC Loop Down */ ISPASYNC_LOOP_UP, /* FC Loop Up */ + ISPASYNC_LIP, /* LIP Received */ + ISPASYNC_LOOP_RESET, /* Loop Reset Received */ ISPASYNC_CHANGE_NOTIFY, /* FC Change Notification */ ISPASYNC_FABRIC_DEV, /* FC Fabric Device Arrival */ ISPASYNC_PROMENADE, /* FC Objects coming && going */ ISPASYNC_TARGET_MESSAGE, /* target message */ ISPASYNC_TARGET_EVENT, /* target asynchronous event */ ISPASYNC_TARGET_ACTION, /* other target command action */ ISPASYNC_CONF_CHANGE, /* Platform Configuration Change */ ISPASYNC_UNHANDLED_RESPONSE /* Unhandled Response Entry */ } ispasync_t; int isp_async(struct ispsoftc *, ispasync_t, void *); #define ISPASYNC_CHANGE_PDB ((void *) 0) #define ISPASYNC_CHANGE_SNS ((void *) 1) #define ISPASYNC_CHANGE_OTHER ((void *) 2) /* * Platform Dependent Error and Debug Printout */ #ifdef __GNUC__ void isp_prt(struct ispsoftc *, int level, const char *, ...) __attribute__((__format__(__printf__,3,4))); #else void isp_prt(struct ispsoftc *, int level, const char *, ...); #endif #define ISP_LOGALL 0x0 /* log always */ #define ISP_LOGCONFIG 0x1 /* log configuration messages */ #define ISP_LOGINFO 0x2 /* log informational messages */ #define ISP_LOGWARN 0x4 /* log warning messages */ #define ISP_LOGERR 0x8 /* log error messages */ #define ISP_LOGDEBUG0 0x10 /* log simple debug messages */ #define ISP_LOGDEBUG1 0x20 /* log intermediate debug messages */ #define ISP_LOGDEBUG2 0x40 /* log most debug messages */ #define ISP_LOGDEBUG3 0x100 /* log high frequency debug messages */ #define ISP_LOGTDEBUG0 0x200 /* log simple debug messages (target mode) */ #define ISP_LOGTDEBUG1 0x400 /* log intermediate debug messages (target) */ #define ISP_LOGTDEBUG2 0x800 /* log all debug messages (target) */ /* * Each Platform provides it's own isposinfo substructure of the ispsoftc * defined above. 
* * Each platform must also provide the following macros/defines: * * * INLINE - platform specific define for 'inline' functions * * ISP2100_SCRLEN - length for the Fibre Channel scratch DMA area * * MEMZERO(dst, src) platform zeroing function * MEMCPY(dst, src, count) platform copying function * SNPRINTF(buf, bufsize, fmt, ...) snprintf * STRNCAT(dstbuf, size, srcbuf) strncat * USEC_DELAY(usecs) microsecond spindelay function * USEC_SLEEP(isp, usecs) microsecond sleep function * * NANOTIME_T nanosecond time type * * GET_NANOTIME(NANOTIME_T *) get current nanotime. * * GET_NANOSEC(NANOTIME_T *) get u_int64_t from NANOTIME_T * * NANOTIME_SUB(NANOTIME_T *, NANOTIME_T *) * subtract two NANOTIME_T values * * * MAXISPREQUEST(struct ispsoftc *) maximum request queue size * for this particular board type * * MEMORYBARRIER(struct ispsoftc *, barrier_type, offset, size) * * Function/Macro the provides memory synchronization on * various objects so that the ISP's and the system's view * of the same object is consistent. 
* * MBOX_ACQUIRE(struct ispsoftc *) acquire lock on mailbox regs * MBOX_WAIT_COMPLETE(struct ispsoftc *) wait for mailbox cmd to be done * MBOX_NOTIFY_COMPLETE(struct ispsoftc *) notification of mbox cmd donee * MBOX_RELEASE(struct ispsoftc *) release lock on mailbox regs * * * SCSI_GOOD SCSI 'Good' Status * SCSI_CHECK SCSI 'Check Condition' Status * SCSI_BUSY SCSI 'Busy' Status * SCSI_QFULL SCSI 'Queue Full' Status * * XS_T Platform SCSI transaction type (i.e., command for HBA) * XS_ISP(xs) gets an instance out of an XS_T * XS_CHANNEL(xs) gets the channel (bus # for DUALBUS cards) "" * XS_TGT(xs) gets the target "" * XS_LUN(xs) gets the lun "" * XS_CDBP(xs) gets a pointer to the scsi CDB "" * XS_CDBLEN(xs) gets the CDB's length "" * XS_XFRLEN(xs) gets the associated data transfer length "" * XS_TIME(xs) gets the time (in milliseconds) for this command * XS_RESID(xs) gets the current residual count * XS_STSP(xs) gets a pointer to the SCSI status byte "" * XS_SNSP(xs) gets a pointer to the associate sense data * XS_SNSLEN(xs) gets the length of sense data storage * XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key * XS_TAG_P(xs) predicate of whether this command should be tagged * XS_TAG_TYPE(xs) which type of tag to use * XS_SETERR(xs) set error state * * HBA_NOERROR command has no erros * HBA_BOTCH hba botched something * HBA_CMDTIMEOUT command timed out * HBA_SELTIMEOUT selection timed out (also port logouts for FC) * HBA_TGTBSY target returned a BUSY status * HBA_BUSRESET bus reset destroyed command * HBA_ABORTED command was aborted (by request) * HBA_DATAOVR a data overrun was detected * HBA_ARQFAIL Automatic Request Sense failed * * XS_ERR(xs) return current error state * XS_NOERR(xs) there is no error currently set * XS_INITERR(xs) initialize error state * * XS_SAVE_SENSE(xs, sp) save sense data * * XS_SET_STATE_STAT(isp, sp, xs) platform dependent interpreter of * response queue entry status bits * * * DEFAULT_IID(struct ispsoftc *) 
Default SCSI initiator ID * DEFAULT_LOOPID(struct ispsoftc *) Default FC Loop ID * DEFAULT_NODEWWN(struct ispsoftc *) Default Node WWN * DEFAULT_PORTWWN(struct ispsoftc *) Default Port WWN * These establish reasonable defaults for each platform. * These must be available independent of card NVRAM and are * to be used should NVRAM not be readable. * * ISP_NODEWWN(struct ispsoftc *) FC Node WWN to use * ISP_PORTWWN(struct ispsoftc *) FC Port WWN to use * * These are to be used after NVRAM is read. The tags * in fcparam.isp_{node,port}wwn reflect the values * read from NVRAM (possibly corrected for card botches). * Each platform can take that information and override * it or ignore and return the Node and Port WWNs to be * used when sending the Qlogic f/w the Initialization Control * Block. * * (XXX these do endian specific transformations- in transition XXX) * ISP_SWIZZLE_ICB * ISP_UNSWIZZLE_AND_COPY_PDBP * ISP_SWIZZLE_CONTINUATION * ISP_SWIZZLE_REQUEST * ISP_UNSWIZZLE_RESPONSE * ISP_SWIZZLE_SNS_REQ * ISP_UNSWIZZLE_SNS_RSP * ISP_SWIZZLE_NVRAM_WORD - * - * */ + #endif /* _ISPVAR_H */