Index: stable/11/sys/cam/scsi/scsi_low.c
===================================================================
--- stable/11/sys/cam/scsi/scsi_low.c	(revision 314380)
+++ stable/11/sys/cam/scsi/scsi_low.c	(revision 314381)
@@ -1,4209 +1,4200 @@
/* $NecBSD: scsi_low.c,v 1.24.10.8 2001/06/26 07:39:44 honda Exp $ */
/* $NetBSD$ */

#include
__FBSDID("$FreeBSD$");

#define	SCSI_LOW_STATICS
#define	SCSI_LOW_DEBUG
#define	SCSI_LOW_NEGOTIATE_BEFORE_SENSE
#define	SCSI_LOW_START_UP_CHECK

/* #define	SCSI_LOW_INFO_DETAIL */

/* #define	SCSI_LOW_QCLEAR_AFTER_CA */
/* #define	SCSI_LOW_FLAGS_QUIRKS_OK */
#define	SCSI_LOW_FLAGS_QUIRKS_OK

/*-
 * [NetBSD for NEC PC-98 series]
 *  Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001
 *	NetBSD/pc98 porting staff. All rights reserved.
 *  Copyright (c) 1995, 1996, 1997, 1998, 1999, 2000, 2001
 *	Naofumi HONDA. All rights reserved.
 *
 * [Ported for FreeBSD CAM]
 *  Copyright (c) 2000, 2001
 *	MITSUNAGA Noriaki, NOKUBI Hirotaka and TAKAHASHI Yoshihiro.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * When our host is reselected,
 * the nexus establishment process is a little complicated.
 * The normal steps are as follows:
 * 1) Our host selected by target => target nexus (slp->sl_Tnexus)
 * 2) Identify msgin => lun nexus (slp->sl_Lnexus)
 * 3) Qtag msg => ccb nexus (slp->sl_Qnexus)
 */

#include "opt_ddb.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**************************************************************
 * Constants
 **************************************************************/
#define	SCSI_LOW_POLL_HZ	1000

/* functions return values */
#define	SCSI_LOW_START_NO_QTAG	0
#define	SCSI_LOW_START_QTAG	1

#define	SCSI_LOW_DONE_COMPLETE	0
#define	SCSI_LOW_DONE_RETRY	1

/* internal disk flags */
#define	SCSI_LOW_DISK_DISC	0x00000001
#define	SCSI_LOW_DISK_QTAG	0x00000002
#define	SCSI_LOW_DISK_LINK	0x00000004
#define	SCSI_LOW_DISK_PARITY	0x00000008
#define	SCSI_LOW_DISK_SYNC	0x00010000
#define	SCSI_LOW_DISK_WIDE_16	0x00020000
#define	SCSI_LOW_DISK_WIDE_32	0x00040000
#define	SCSI_LOW_DISK_WIDE	(SCSI_LOW_DISK_WIDE_16 | SCSI_LOW_DISK_WIDE_32)
#define	SCSI_LOW_DISK_LFLAGS	0x0000ffff
#define	SCSI_LOW_DISK_TFLAGS	0xffff0000

static MALLOC_DEFINE(M_SCSILOW, "SCSI low", "SCSI low buffers");

/**************************************************************
 * Declarations
 **************************************************************/
/* static */ void scsi_low_info(struct scsi_low_softc *, struct targ_info *, u_char *);
static void scsi_low_engage(void *);
static struct slccb *scsi_low_establish_ccb(struct targ_info *, struct lun_info *, scsi_low_tag_t);
static int scsi_low_done(struct scsi_low_softc *, struct slccb *);
static int scsi_low_setup_done(struct scsi_low_softc *, struct slccb *);
static void scsi_low_bus_release(struct scsi_low_softc *, struct targ_info *);
static void scsi_low_twiddle_wait(void);
static struct lun_info *scsi_low_alloc_li(struct targ_info *, int, int);
static struct targ_info *scsi_low_alloc_ti(struct scsi_low_softc *, int);
static void scsi_low_calcf_lun(struct lun_info *);
static void scsi_low_calcf_target(struct targ_info *);
static void scsi_low_calcf_show(struct lun_info *);
static void scsi_low_reset_nexus(struct scsi_low_softc *, int);
static void scsi_low_reset_nexus_target(struct scsi_low_softc *, struct targ_info *, int);
static void scsi_low_reset_nexus_lun(struct scsi_low_softc *, struct lun_info *, int);
static int scsi_low_init(struct scsi_low_softc *, u_int);
static void scsi_low_start(struct scsi_low_softc *);
static void scsi_low_free_ti(struct scsi_low_softc *);

static int scsi_low_alloc_qtag(struct slccb *);
static int scsi_low_dealloc_qtag(struct slccb *);
static int scsi_low_enqueue(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *, u_int, u_int);
static int scsi_low_message_enqueue(struct scsi_low_softc *, struct targ_info *, struct lun_info *, u_int);
static void scsi_low_unit_ready_cmd(struct slccb *);
static void scsi_low_timeout(void *);
static int scsi_low_timeout_check(struct scsi_low_softc *);
#ifdef	SCSI_LOW_START_UP_CHECK
static int scsi_low_start_up(struct scsi_low_softc *);
#endif	/* SCSI_LOW_START_UP_CHECK */
static int scsi_low_abort_ccb(struct scsi_low_softc *, struct slccb *);
static struct slccb *scsi_low_revoke_ccb(struct scsi_low_softc *, struct slccb *, int);

int scsi_low_version_major = 2;
int scsi_low_version_minor = 17;

static struct scsi_low_softc_tab sl_tab = LIST_HEAD_INITIALIZER(sl_tab);
static struct mtx sl_tab_lock;
MTX_SYSINIT(sl_tab_lock, &sl_tab_lock, "scsi low table",
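/*
 * [Illustrative sketch, not part of this change] The three-step nexus
 * establishment described above lands in the three nexus pointers of
 * the softc; the helper name below is hypothetical, the fields are the
 * ones used throughout this file:
 *
 *	static void
 *	nexus_establish_example(struct scsi_low_softc *slp,
 *	    struct targ_info *ti, struct lun_info *li, struct slccb *cb)
 *	{
 *		slp->sl_Tnexus = ti;	(1) selected/reselected target
 *		slp->sl_Lnexus = li;	(2) after the IDENTIFY msgin
 *		slp->sl_Qnexus = cb;	(3) after the queue tag msgin
 *	}
 */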
MTX_DEF); /************************************************************** * Debug, Run test and Statics **************************************************************/ #ifdef SCSI_LOW_INFO_DETAIL #define SCSI_LOW_INFO(slp, ti, s) scsi_low_info((slp), (ti), (s)) #else /* !SCSI_LOW_INFO_DETAIL */ #define SCSI_LOW_INFO(slp, ti, s) device_printf((slp)->sl_dev, "%s\n", (s)) #endif /* !SCSI_LOW_INFO_DETAIL */ #ifdef SCSI_LOW_STATICS static struct scsi_low_statics { int nexus_win; int nexus_fail; int nexus_disconnected; int nexus_reselected; int nexus_conflict; } scsi_low_statics; #endif /* SCSI_LOW_STATICS */ #ifdef SCSI_LOW_DEBUG #define SCSI_LOW_DEBUG_DONE 0x00001 #define SCSI_LOW_DEBUG_DISC 0x00002 #define SCSI_LOW_DEBUG_SENSE 0x00004 #define SCSI_LOW_DEBUG_CALCF 0x00008 #define SCSI_LOW_DEBUG_ACTION 0x10000 int scsi_low_debug = 0; #define SCSI_LOW_MAX_ATTEN_CHECK 32 #define SCSI_LOW_ATTEN_CHECK 0x0001 #define SCSI_LOW_CMDLNK_CHECK 0x0002 #define SCSI_LOW_ABORT_CHECK 0x0004 #define SCSI_LOW_NEXUS_CHECK 0x0008 int scsi_low_test = 0; int scsi_low_test_id = 0; static void scsi_low_test_abort(struct scsi_low_softc *, struct targ_info *, struct lun_info *); static void scsi_low_test_cmdlnk(struct scsi_low_softc *, struct slccb *); static void scsi_low_test_atten(struct scsi_low_softc *, struct targ_info *, u_int); #define SCSI_LOW_DEBUG_TEST_GO(fl, id) \ ((scsi_low_test & (fl)) != 0 && (scsi_low_test_id & (1 << (id))) == 0) #define SCSI_LOW_DEBUG_GO(fl, id) \ ((scsi_low_debug & (fl)) != 0 && (scsi_low_test_id & (1 << (id))) == 0) #endif /* SCSI_LOW_DEBUG */ /************************************************************** * CCB **************************************************************/ GENERIC_CCB_STATIC_ALLOC(scsi_low, slccb) GENERIC_CCB(scsi_low, slccb, ccb_chain) /************************************************************** * Inline functions **************************************************************/ #define SCSI_LOW_INLINE static __inline SCSI_LOW_INLINE void scsi_low_activate_qtag(struct slccb *); SCSI_LOW_INLINE void scsi_low_deactivate_qtag(struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_assert(struct slccb *, u_int); SCSI_LOW_INLINE void scsi_low_ccb_message_exec(struct scsi_low_softc *, struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_retry(struct slccb *); SCSI_LOW_INLINE void scsi_low_ccb_message_clear(struct slccb *); SCSI_LOW_INLINE void scsi_low_init_msgsys(struct scsi_low_softc *, struct targ_info *); SCSI_LOW_INLINE void scsi_low_activate_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; if (cb->ccb_tag != SCSI_LOW_UNKTAG) return; li->li_nqio ++; cb->ccb_tag = cb->ccb_otag; } SCSI_LOW_INLINE void scsi_low_deactivate_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; if (cb->ccb_tag == SCSI_LOW_UNKTAG) return; li->li_nqio --; cb->ccb_tag = SCSI_LOW_UNKTAG; } SCSI_LOW_INLINE void scsi_low_ccb_message_exec(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { scsi_low_assert_msg(slp, cb->ti, cb->ccb_msgoutflag, 0); cb->ccb_msgoutflag = 0; } SCSI_LOW_INLINE void scsi_low_ccb_message_assert(cb, msg) struct slccb *cb; u_int msg; { cb->ccb_msgoutflag = cb->ccb_omsgoutflag = msg; } SCSI_LOW_INLINE void scsi_low_ccb_message_retry(cb) struct slccb *cb; { cb->ccb_msgoutflag = cb->ccb_omsgoutflag; } SCSI_LOW_INLINE void scsi_low_ccb_message_clear(cb) struct slccb *cb; { cb->ccb_msgoutflag = 0; } SCSI_LOW_INLINE void scsi_low_init_msgsys(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { ti->ti_msginptr = 0; ti->ti_emsgflags = 
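/*
 * [Illustrative, not part of this change] The inline ccb message
 * helpers above implement a small save/replay protocol:
 * scsi_low_ccb_message_assert() records the message in both
 * ccb_msgoutflag and ccb_omsgoutflag, scsi_low_ccb_message_exec()
 * pushes the pending flag into the target's message system and clears
 * the working copy, and scsi_low_ccb_message_retry() re-arms it from
 * the saved copy, e.g.:
 *
 *	scsi_low_ccb_message_assert(cb, SCSI_LOW_MSG_ABORT);
 *	scsi_low_ccb_message_exec(slp, cb);	sends, clears msgoutflag
 *	scsi_low_ccb_message_retry(cb);		restores from omsgoutflag
 */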
ti->ti_msgflags = ti->ti_omsgflags = 0;
	SCSI_LOW_DEASSERT_ATN(slp);
	SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_NULL);
}

/*=============================================================
 * START OF OS switch (All OS-dependent functions should be here)
 =============================================================*/

/* common OS-dependent utilities */
#define	SCSI_LOW_CMD_RESIDUAL_CHK	0x0001
#define	SCSI_LOW_CMD_ORDERED_QTAG	0x0002
#define	SCSI_LOW_CMD_ABORT_WARNING	0x0004

static u_int8_t scsi_low_cmd_flags[256] = {
/*	0	1	2	3	4	5	6	7	8	9	a	b	c	d	e	f */
/*0*/	0,	0,	0,	0,	0,	0,	0,	0,	5,	0,	5,	0,	0,	0,	0,	0,
/*1*/	0,	0,	0,	0,	0,	0,	2,	2,	0,	0,	0,	0,	0,	0,	0,	0,
/*2*/	0,	0,	0,	0,	0,	0,	0,	0,	5,	0,	5,	0,	0,	0,	5,	5,
/*3*/	0,	0,	0,	0,	0,	0,	0,	0,	0,	0,	0,	0,	0,	0,	5,	5,
};

struct scsi_low_error_code {
	int error_bits;
	int error_code;
};

static struct slccb *scsi_low_find_ccb(struct scsi_low_softc *, u_int, u_int, void *);
static int scsi_low_translate_error_code(struct slccb *, struct scsi_low_error_code *);

static struct slccb *
scsi_low_find_ccb(slp, target, lun, osdep)
	struct scsi_low_softc *slp;
	u_int target, lun;
	void *osdep;
{
	struct targ_info *ti;
	struct lun_info *li;
	struct slccb *cb;

	ti = slp->sl_ti[target];
	li = scsi_low_alloc_li(ti, lun, 0);
	if (li == NULL)
		return NULL;

	if ((cb = slp->sl_Qnexus) != NULL && cb->osdep == osdep)
		return cb;

	TAILQ_FOREACH(cb, &slp->sl_start, ccb_chain) {
		if (cb->osdep == osdep)
			return cb;
	}

	TAILQ_FOREACH(cb, &li->li_discq, ccb_chain) {
		if (cb->osdep == osdep)
			return cb;
	}
	return NULL;
}

static int
scsi_low_translate_error_code(cb, tp)
	struct slccb *cb;
	struct scsi_low_error_code *tp;
{

	if (cb->ccb_error == 0)
		return tp->error_code;

	for (tp ++; (cb->ccb_error & tp->error_bits) == 0; tp ++)
		;
	return tp->error_code;
}

/**************************************************************
 * SCSI INTERFACE (CAM)
 **************************************************************/
#define	SCSI_LOW_MALLOC(size)		malloc((size), M_SCSILOW, M_NOWAIT)
#define	SCSI_LOW_FREE(pt)		free((pt), M_SCSILOW)
#define	SCSI_LOW_ALLOC_CCB(flags)	scsi_low_get_ccb()

static void scsi_low_poll_cam(struct cam_sim *);
void scsi_low_scsi_action_cam(struct cam_sim *, union ccb *);
static int scsi_low_attach_cam(struct scsi_low_softc *);
static int scsi_low_detach_cam(struct scsi_low_softc *);
static int scsi_low_ccb_setup_cam(struct scsi_low_softc *, struct slccb *);
static int scsi_low_done_cam(struct scsi_low_softc *, struct slccb *);

struct scsi_low_error_code scsi_low_error_code_cam[] = {
	{0,			CAM_REQ_CMP},
	{SENSEIO,		CAM_AUTOSNS_VALID | CAM_REQ_CMP_ERR},
	{SENSEERR,		CAM_AUTOSENSE_FAIL},
	{UACAERR,		CAM_SCSI_STATUS_ERROR},
	{BUSYERR | STATERR,	CAM_SCSI_STATUS_ERROR},
	{SELTIMEOUTIO,		CAM_SEL_TIMEOUT},
	{TIMEOUTIO,		CAM_CMD_TIMEOUT},
	{PDMAERR,		CAM_DATA_RUN_ERR},
	{PARITYERR,		CAM_UNCOR_PARITY},
	{UBFERR,		CAM_UNEXP_BUSFREE},
	{ABORTIO,		CAM_REQ_ABORTED},
	{-1,			CAM_UNREC_HBA_ERROR}
};

#define	SIM2SLP(sim)	((struct scsi_low_softc *) cam_sim_softc((sim)))

/* XXX:
 * Please check the polling hz; currently we assume scsi_low_poll() is
 * called every 1 ms.
 */
#define	SCSI_LOW_CAM_POLL_HZ	1000	/* OK ?
*/ static void scsi_low_poll_cam(sim) struct cam_sim *sim; { struct scsi_low_softc *slp = SIM2SLP(sim); SCSI_LOW_ASSERT_LOCKED(slp); (*slp->sl_funcs->scsi_low_poll) (slp); if (slp->sl_poll_count ++ >= SCSI_LOW_CAM_POLL_HZ / SCSI_LOW_TIMEOUT_HZ) { slp->sl_poll_count = 0; scsi_low_timeout_check(slp); } } void scsi_low_scsi_action_cam(sim, ccb) struct cam_sim *sim; union ccb *ccb; { struct scsi_low_softc *slp = SIM2SLP(sim); struct targ_info *ti; struct lun_info *li; struct slccb *cb; u_int lun, flags, msg, target; int rv; SCSI_LOW_ASSERT_LOCKED(slp); target = (u_int) (ccb->ccb_h.target_id); lun = (u_int) ccb->ccb_h.target_lun; #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_ACTION, target) != 0) { device_printf(slp->sl_dev, "cam_action: func code 0x%x target: %d, lun: %d\n", ccb->ccb_h.func_code, target, lun); } #endif /* SCSI_LOW_DEBUG */ switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD) { device_printf(slp->sl_dev, "invalid target/lun\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ if (((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL)) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } ti = slp->sl_ti[target]; cb->osdep = ccb; cb->bp = NULL; if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) flags = CCB_AUTOSENSE | CCB_SCSIIO; else flags = CCB_SCSIIO; li = scsi_low_alloc_li(ti, lun, 1); if (ti->ti_setup_msg != 0) { scsi_low_message_enqueue(slp, ti, li, CCB_AUTOSENSE); } scsi_low_enqueue(slp, ti, li, cb, flags, 0); #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ABORT_CHECK, target) != 0) { scsi_low_test_abort(slp, ti, li); } #endif /* SCSI_LOW_DEBUG */ break; - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ - /* XXX Implement */ - ccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(ccb); - break; - case XPT_ABORT: /* Abort the specified CCB */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD || lun == CAM_LUN_WILDCARD) { device_printf(slp->sl_dev, "invalid target/lun\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ cb = scsi_low_find_ccb(slp, target, lun, ccb->cab.abort_ccb); rv = scsi_low_abort_ccb(slp, cb); if (rv == 0) ccb->ccb_h.status = CAM_REQ_CMP; else ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; u_int val; #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ cts = &ccb->cts; ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((spi->valid & (CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET)) != 0) { if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { val = spi->bus_width; if (val < ti->ti_width) ti->ti_width = val; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { val = spi->sync_period; if (val == 0 || val > ti->ti_maxsynch.period) ti->ti_maxsynch.period = val; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { val = spi->sync_offset; if (val < 
ti->ti_maxsynch.offset) ti->ti_maxsynch.offset = val; } ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_QUIRKS_VALID; scsi_low_calcf_target(ti); } if ((spi->valid & CTS_SPI_FLAGS_DISC_ENB) != 0 || (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) { li = scsi_low_alloc_li(ti, lun, 1); if (spi->valid & CTS_SPI_FLAGS_DISC_ENB) { li->li_quirks |= SCSI_LOW_DISK_DISC; } else { li->li_quirks &= ~SCSI_LOW_DISK_DISC; } if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { li->li_quirks |= SCSI_LOW_DISK_QTAG; } else { li->li_quirks &= ~SCSI_LOW_DISK_QTAG; } li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_QUIRKS_VALID; scsi_low_calcf_target(ti); scsi_low_calcf_lun(li); if ((slp->sl_show_result & SHOW_CALCF_RES) != 0) scsi_low_calcf_show(li); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; u_int diskflags; cts = &ccb->cts; #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; li = scsi_low_alloc_li(ti, lun, 1); if (li != NULL && cts->type == CTS_TYPE_CURRENT_SETTINGS) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; #ifdef SCSI_LOW_DIAGNOSTIC if (li->li_flags_valid != SCSI_LOW_LUN_FLAGS_ALL_VALID) { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; device_printf(slp->sl_dev, "invalid GET_TRANS_CURRENT_SETTINGS call\n"); goto settings_out; } #endif /* SCSI_LOW_DIAGNOSTIC */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; diskflags = li->li_diskflags & li->li_cfgflags; if (diskflags & SCSI_LOW_DISK_DISC) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if (diskflags & SCSI_LOW_DISK_QTAG) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; spi->sync_period = ti->ti_maxsynch.period; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->sync_offset = ti->ti_maxsynch.offset; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = ti->ti_width; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; } else ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; settings_out: xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* not yet HN2 */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ #ifdef SCSI_LOW_DIAGNOSTIC if (target == CAM_TARGET_WILDCARD) { device_printf(slp->sl_dev, "invalid target\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } #endif /* SCSI_LOW_DIAGNOSTIC */ msg = SCSI_LOW_MSG_RESET; if (((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL)) { ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } ti = slp->sl_ti[target]; if (lun == CAM_LUN_WILDCARD) lun = 0; cb->osdep = ccb; cb->bp = NULL; if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) flags = CCB_AUTOSENSE | CCB_NORETRY | CCB_URGENT; else flags = CCB_NORETRY | CCB_URGENT; li = scsi_low_alloc_li(ti, lun, 1); 
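/*
 * [Illustrative sketch, not part of this change] The
 * XPT_SET_TRAN_SETTINGS handling above only ever narrows the
 * negotiation limits: bus width and sync offset can only go down, and
 * the sync period (a time value, so larger means slower) can only go
 * up; condensed, with MIN() used for clarity:
 *
 *	ti->ti_width = MIN(ti->ti_width, spi->bus_width);
 *	if (spi->sync_period == 0 ||
 *	    spi->sync_period > ti->ti_maxsynch.period)
 *		ti->ti_maxsynch.period = spi->sync_period;
 *	ti->ti_maxsynch.offset =
 *	    MIN(ti->ti_maxsynch.offset, spi->sync_offset);
 */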
scsi_low_enqueue(slp, ti, li, cb, flags, msg); break; case XPT_PATH_INQ: { /* Path routing inquiry */ struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = scsi_low_version_major; cpi->hba_inquiry = PI_TAG_ABLE | PI_LINKED_CDB; ti = slp->sl_ti[slp->sl_hostid]; /* host id */ if (ti->ti_width > SCSI_LOW_BUS_WIDTH_8) cpi->hba_inquiry |= PI_WIDE_16; if (ti->ti_width > SCSI_LOW_BUS_WIDTH_16) cpi->hba_inquiry |= PI_WIDE_32; if (ti->ti_maxsynch.offset > 0) cpi->hba_inquiry |= PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = slp->sl_ntargs - 1; cpi->max_lun = slp->sl_nluns - 1; cpi->initiator_id = slp->sl_hostid; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "SCSI_LOW", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: printf("scsi_low: non support func_code = %d ", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static int scsi_low_attach_cam(slp) struct scsi_low_softc *slp; { struct cam_devq *devq; int tagged_openings; devq = cam_simq_alloc(SCSI_LOW_NCCB); if (devq == NULL) return (ENOMEM); /* * ask the adapter what subunits are present */ tagged_openings = min(slp->sl_openings, SCSI_LOW_MAXNEXUS); slp->sl_sim = cam_sim_alloc(scsi_low_scsi_action_cam, scsi_low_poll_cam, device_get_name(slp->sl_dev), slp, device_get_unit(slp->sl_dev), &slp->sl_lock, slp->sl_openings, tagged_openings, devq); if (slp->sl_sim == NULL) { cam_simq_free(devq); return ENODEV; } SCSI_LOW_LOCK(slp); if (xpt_bus_register(slp->sl_sim, slp->sl_dev, 0) != CAM_SUCCESS) { cam_sim_free(slp->sl_sim, TRUE); SCSI_LOW_UNLOCK(slp); return ENODEV; } if (xpt_create_path(&slp->sl_path, /*periph*/NULL, cam_sim_path(slp->sl_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(slp->sl_sim)); cam_sim_free(slp->sl_sim, /*free_simq*/TRUE); SCSI_LOW_UNLOCK(slp); return ENODEV; } slp->sl_show_result = SHOW_CALCF_RES; /* OK ? 
*/ SCSI_LOW_UNLOCK(slp); return 0; } static int scsi_low_detach_cam(slp) struct scsi_low_softc *slp; { xpt_async(AC_LOST_DEVICE, slp->sl_path, NULL); xpt_free_path(slp->sl_path); xpt_bus_deregister(cam_sim_path(slp->sl_sim)); cam_sim_free(slp->sl_sim, /* free_devq */ TRUE); return 0; } static int scsi_low_ccb_setup_cam(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { union ccb *ccb = (union ccb *) cb->osdep; if ((cb->ccb_flags & CCB_SCSIIO) != 0) { cb->ccb_scp.scp_cmd = ccb->csio.cdb_io.cdb_bytes; cb->ccb_scp.scp_cmdlen = (int) ccb->csio.cdb_len; cb->ccb_scp.scp_data = ccb->csio.data_ptr; cb->ccb_scp.scp_datalen = (int) ccb->csio.dxfer_len; if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) cb->ccb_scp.scp_direction = SCSI_LOW_WRITE; else /* if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) */ cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = ccb->ccb_h.timeout / 1000; } else { scsi_low_unit_ready_cmd(cb); } return SCSI_LOW_START_QTAG; } static int scsi_low_done_cam(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { union ccb *ccb; ccb = (union ccb *) cb->osdep; if (cb->ccb_error == 0) { ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.resid = 0; } else { if (cb->ccb_rcnt >= slp->sl_max_retry) cb->ccb_error |= ABORTIO; if ((cb->ccb_flags & CCB_NORETRY) == 0 && (cb->ccb_error & ABORTIO) == 0) return EJUSTRETURN; if ((cb->ccb_error & SENSEIO) != 0) { memcpy(&ccb->csio.sense_data, &cb->ccb_sense, sizeof(ccb->csio.sense_data)); } ccb->ccb_h.status = scsi_low_translate_error_code(cb, &scsi_low_error_code_cam[0]); #ifdef SCSI_LOW_DIAGNOSTIC if ((cb->ccb_flags & CCB_SILENT) == 0 && cb->ccb_scp.scp_cmdlen > 0 && (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] & SCSI_LOW_CMD_ABORT_WARNING) != 0) { device_printf(slp->sl_dev, "WARNING: scsi_low IO abort\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DIAGNOSTIC */ } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == 0) ccb->ccb_h.status |= CAM_REQ_CMP_ERR; if (cb->ccb_scp.scp_status == ST_UNKNOWN) ccb->csio.scsi_status = 0; /* XXX */ else ccb->csio.scsi_status = cb->ccb_scp.scp_status; if ((cb->ccb_flags & CCB_NOSDONE) == 0) xpt_done(ccb); return 0; } /************************************************************** * scsi low deactivate and activate **************************************************************/ int scsi_low_is_busy(slp) struct scsi_low_softc *slp; { if (slp->sl_nio > 0) return EBUSY; return 0; } int scsi_low_deactivate(slp) struct scsi_low_softc *slp; { slp->sl_flags |= HW_INACTIVE; callout_stop(&slp->sl_timeout_timer); callout_stop(&slp->sl_engage_timer); return 0; } int scsi_low_activate(slp) struct scsi_low_softc *slp; { int error; slp->sl_flags &= ~HW_INACTIVE; if ((error = scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, NULL)) != 0) { slp->sl_flags |= HW_INACTIVE; return error; } slp->sl_timeout_count = 0; callout_reset(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ, scsi_low_timeout, slp); return 0; } /************************************************************** * scsi low log **************************************************************/ #ifdef SCSI_LOW_DIAGNOSTIC static void scsi_low_msg_log_init(struct scsi_low_msg_log *); static void scsi_low_msg_log_write(struct scsi_low_msg_log *, u_int8_t *, int); static void scsi_low_msg_log_show(struct scsi_low_msg_log *, char *, int); static void scsi_low_msg_log_init(slmlp) struct scsi_low_msg_log *slmlp; { slmlp->slml_ptr = 0; } static void scsi_low_msg_log_write(slmlp, datap, len) struct scsi_low_msg_log *slmlp; u_int8_t *datap; int len; { int ptr, ind; if 
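/*
 * [Illustrative, not part of this change] scsi_low_done_cam() above
 * maps the driver's error bits to a CAM status by handing
 * scsi_low_error_code_cam[] to scsi_low_translate_error_code(), which
 * returns the first entry whose error_bits intersect ccb_error; the
 * {-1, CAM_UNREC_HBA_ERROR} terminator guarantees a match. E.g. for a
 * timed-out ccb:
 *
 *	cb->ccb_error |= TIMEOUTIO;
 *	ccb->ccb_h.status = scsi_low_translate_error_code(cb,
 *	    &scsi_low_error_code_cam[0]);
 *	... the scan stops at {TIMEOUTIO, CAM_CMD_TIMEOUT}
 */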
(slmlp->slml_ptr >= SCSI_LOW_MSG_LOG_DATALEN) return; ptr = slmlp->slml_ptr ++; for (ind = 0; ind < sizeof(slmlp->slml_msg[0]) && ind < len; ind ++) slmlp->slml_msg[ptr].msg[ind] = datap[ind]; for ( ; ind < sizeof(slmlp->slml_msg[0]); ind ++) slmlp->slml_msg[ptr].msg[ind] = 0; } static void scsi_low_msg_log_show(slmlp, s, len) struct scsi_low_msg_log *slmlp; char *s; int len; { int ptr, ind; printf("%s: (%d) ", s, slmlp->slml_ptr); for (ptr = 0; ptr < slmlp->slml_ptr; ptr ++) { for (ind = 0; ind < len && ind < sizeof(slmlp->slml_msg[0]); ind ++) { printf("[%x]", (u_int) slmlp->slml_msg[ptr].msg[ind]); } printf(">"); } printf("\n"); } #endif /* SCSI_LOW_DIAGNOSTIC */ /************************************************************** * power control **************************************************************/ static void scsi_low_engage(arg) void *arg; { struct scsi_low_softc *slp = arg; SCSI_LOW_ASSERT_LOCKED(slp); switch (slp->sl_rstep) { case 0: slp->sl_rstep ++; (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_ENGAGE); callout_reset(&slp->sl_engage_timer, hz / 1000, scsi_low_engage, slp); break; case 1: slp->sl_rstep ++; slp->sl_flags &= ~HW_RESUME; scsi_low_start(slp); break; case 2: break; } } static int scsi_low_init(slp, flags) struct scsi_low_softc *slp; u_int flags; { int rv = 0; slp->sl_flags |= HW_INITIALIZING; /* clear power control timeout */ if ((slp->sl_flags & HW_POWERCTRL) != 0) { callout_stop(&slp->sl_engage_timer); slp->sl_flags &= ~(HW_POWDOWN | HW_RESUME); slp->sl_active = 1; slp->sl_powc = SCSI_LOW_POWDOWN_TC; } /* reset current nexus */ scsi_low_reset_nexus(slp, flags); if ((slp->sl_flags & HW_INACTIVE) != 0) { rv = EBUSY; goto out; } if (flags != SCSI_LOW_RESTART_SOFT) { rv = ((*slp->sl_funcs->scsi_low_init) (slp, flags)); } out: slp->sl_flags &= ~HW_INITIALIZING; return rv; } /************************************************************** * allocate lun_info **************************************************************/ static struct lun_info * scsi_low_alloc_li(ti, lun, alloc) struct targ_info *ti; int lun; int alloc; { struct scsi_low_softc *slp = ti->ti_sc; struct lun_info *li; li = LIST_FIRST(&ti->ti_litab); if (li != NULL) { if (li->li_lun == lun) return li; while ((li = LIST_NEXT(li, lun_chain)) != NULL) { if (li->li_lun == lun) { LIST_REMOVE(li, lun_chain); LIST_INSERT_HEAD(&ti->ti_litab, li, lun_chain); return li; } } } if (alloc == 0) return li; li = SCSI_LOW_MALLOC(ti->ti_lunsize); if (li == NULL) panic("no lun info mem"); bzero(li, ti->ti_lunsize); li->li_lun = lun; li->li_ti = ti; li->li_cfgflags = SCSI_LOW_SYNC | SCSI_LOW_LINK | SCSI_LOW_DISC | SCSI_LOW_QTAG; li->li_quirks = li->li_diskflags = SCSI_LOW_DISK_LFLAGS; li->li_flags_valid = SCSI_LOW_LUN_FLAGS_USER_VALID; #ifdef SCSI_LOW_FLAGS_QUIRKS_OK li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_QUIRKS_VALID; #endif /* SCSI_LOW_FLAGS_QUIRKS_OK */ li->li_qtagbits = (u_int) -1; TAILQ_INIT(&li->li_discq); LIST_INSERT_HEAD(&ti->ti_litab, li, lun_chain); /* host specific structure initialization per lun */ if (slp->sl_funcs->scsi_low_lun_init != NULL) (*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_ALLOC); scsi_low_calcf_lun(li); return li; } /************************************************************** * allocate targ_info **************************************************************/ static struct targ_info * scsi_low_alloc_ti(slp, targ) struct scsi_low_softc *slp; int targ; { struct targ_info *ti; if (TAILQ_FIRST(&slp->sl_titab) == NULL) TAILQ_INIT(&slp->sl_titab); ti = 
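/*
 * [Illustrative, not part of this change] scsi_low_alloc_li() above
 * keeps ti_litab in most-recently-used order: a hit that is not
 * already at the head is unlinked and reinserted in front, so the
 * common "same lun again" lookup stays O(1):
 *
 *	LIST_REMOVE(li, lun_chain);
 *	LIST_INSERT_HEAD(&ti->ti_litab, li, lun_chain);
 */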
SCSI_LOW_MALLOC(slp->sl_targsize); if (ti == NULL) panic("%s short of memory", device_get_nameunit(slp->sl_dev)); bzero(ti, slp->sl_targsize); ti->ti_id = targ; ti->ti_sc = slp; slp->sl_ti[targ] = ti; TAILQ_INSERT_TAIL(&slp->sl_titab, ti, ti_chain); LIST_INIT(&ti->ti_litab); ti->ti_quirks = ti->ti_diskflags = SCSI_LOW_DISK_TFLAGS; ti->ti_owidth = SCSI_LOW_BUS_WIDTH_8; ti->ti_flags_valid = SCSI_LOW_TARG_FLAGS_USER_VALID; #ifdef SCSI_LOW_FLAGS_QUIRKS_OK ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_QUIRKS_VALID; #endif /* SCSI_LOW_FLAGS_QUIRKS_OK */ if (slp->sl_funcs->scsi_low_targ_init != NULL) { (*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_ALLOC); } scsi_low_calcf_target(ti); return ti; } static void scsi_low_free_ti(slp) struct scsi_low_softc *slp; { struct targ_info *ti, *tib; struct lun_info *li, *nli; for (ti = TAILQ_FIRST(&slp->sl_titab); ti; ti = tib) { for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = nli) { if (slp->sl_funcs->scsi_low_lun_init != NULL) { (*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_DEALLOC); } nli = LIST_NEXT(li, lun_chain); SCSI_LOW_FREE(li); } if (slp->sl_funcs->scsi_low_targ_init != NULL) { (*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_DEALLOC); } tib = TAILQ_NEXT(ti, ti_chain); SCSI_LOW_FREE(ti); } } /************************************************************** * timeout **************************************************************/ void scsi_low_bus_idle(slp) struct scsi_low_softc *slp; { slp->sl_retry_sel = 0; if (slp->sl_Tnexus == NULL) scsi_low_start(slp); } static void scsi_low_timeout(arg) void *arg; { struct scsi_low_softc *slp = arg; SCSI_LOW_ASSERT_LOCKED(slp); (void) scsi_low_timeout_check(slp); callout_schedule(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ); } static int scsi_low_timeout_check(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *cb = NULL; /* XXX */ /* selection restart */ if (slp->sl_retry_sel != 0) { slp->sl_retry_sel = 0; if (slp->sl_Tnexus != NULL) goto step1; cb = TAILQ_FIRST(&slp->sl_start); if (cb == NULL) goto step1; if (cb->ccb_selrcnt >= SCSI_LOW_MAX_SELECTION_RETRY) { cb->ccb_flags |= CCB_NORETRY; cb->ccb_error |= SELTIMEOUTIO; if (scsi_low_revoke_ccb(slp, cb, 1) != NULL) panic("%s: ccb not finished", device_get_nameunit(slp->sl_dev)); } if (slp->sl_Tnexus == NULL) scsi_low_start(slp); } /* call hardware timeout */ step1: if (slp->sl_funcs->scsi_low_timeout != NULL) { (*slp->sl_funcs->scsi_low_timeout) (slp); } if (slp->sl_timeout_count ++ < SCSI_LOW_TIMEOUT_CHECK_INTERVAL * SCSI_LOW_TIMEOUT_HZ) return 0; slp->sl_timeout_count = 0; if (slp->sl_nio > 0) { if ((cb = slp->sl_Qnexus) != NULL) { cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } else if (slp->sl_disc == 0) { if ((cb = TAILQ_FIRST(&slp->sl_start)) == NULL) return 0; cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } else for (ti = TAILQ_FIRST(&slp->sl_titab); ti != NULL; ti = TAILQ_NEXT(ti, ti_chain)) { if (ti->ti_disc == 0) continue; for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { for (cb = TAILQ_FIRST(&li->li_discq); cb != NULL; cb = TAILQ_NEXT(cb, ccb_chain)) { cb->ccb_tc -= SCSI_LOW_TIMEOUT_CHECK_INTERVAL; if (cb->ccb_tc < 0) goto bus_reset; } } } } else if ((slp->sl_flags & HW_POWERCTRL) != 0) { if ((slp->sl_flags & (HW_POWDOWN | HW_RESUME)) != 0) return 0; if (slp->sl_active != 0) { slp->sl_powc = SCSI_LOW_POWDOWN_TC; slp->sl_active = 0; return 0; } slp->sl_powc 
--; if (slp->sl_powc < 0) { slp->sl_powc = SCSI_LOW_POWDOWN_TC; slp->sl_flags |= HW_POWDOWN; (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_POWDOWN); } } return 0; bus_reset: cb->ccb_error |= TIMEOUTIO; device_printf(slp->sl_dev, "slccb (0x%lx) timeout!\n", (u_long) cb); scsi_low_info(slp, NULL, "scsi bus hangup. try to recover."); scsi_low_init(slp, SCSI_LOW_RESTART_HARD); scsi_low_start(slp); return ERESTART; } static int scsi_low_abort_ccb(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { struct targ_info *ti; struct lun_info *li; u_int msg; if (cb == NULL) return EINVAL; if ((cb->ccb_omsgoutflag & (SCSI_LOW_MSG_ABORT | SCSI_LOW_MSG_ABORT_QTAG)) != 0) return EBUSY; ti = cb->ti; li = cb->li; if (cb->ccb_tag == SCSI_LOW_UNKTAG) msg = SCSI_LOW_MSG_ABORT; else msg = SCSI_LOW_MSG_ABORT_QTAG; cb->ccb_error |= ABORTIO; cb->ccb_flags |= CCB_NORETRY; scsi_low_ccb_message_assert(cb, msg); if (cb == slp->sl_Qnexus) { scsi_low_assert_msg(slp, ti, msg, 1); } else if ((cb->ccb_flags & CCB_DISCQ) != 0) { if (scsi_low_revoke_ccb(slp, cb, 0) == NULL) panic("%s: revoked ccb done", device_get_nameunit(slp->sl_dev)); cb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); if (slp->sl_Tnexus == NULL) scsi_low_start(slp); } else { if (scsi_low_revoke_ccb(slp, cb, 1) != NULL) panic("%s: revoked ccb retried", device_get_nameunit(slp->sl_dev)); } return 0; } /************************************************************** * Generic SCSI INTERFACE **************************************************************/ int scsi_low_attach(slp, openings, ntargs, nluns, targsize, lunsize) struct scsi_low_softc *slp; int openings, ntargs, nluns, targsize, lunsize; { struct targ_info *ti; struct lun_info *li; int i, nccb, rv; if (ntargs > SCSI_LOW_NTARGETS) { printf("scsi_low: %d targets are too large\n", ntargs); printf("change kernel options SCSI_LOW_NTARGETS"); return EINVAL; } if (openings <= 0) slp->sl_openings = (SCSI_LOW_NCCB / ntargs); else slp->sl_openings = openings; slp->sl_ntargs = ntargs; slp->sl_nluns = nluns; slp->sl_max_retry = SCSI_LOW_MAX_RETRY; if (lunsize < sizeof(struct lun_info)) lunsize = sizeof(struct lun_info); if (targsize < sizeof(struct targ_info)) targsize = sizeof(struct targ_info); slp->sl_targsize = targsize; for (i = 0; i < ntargs; i ++) { ti = scsi_low_alloc_ti(slp, i); ti->ti_lunsize = lunsize; li = scsi_low_alloc_li(ti, 0, 1); } /* initialize queue */ nccb = openings * ntargs; if (nccb >= SCSI_LOW_NCCB || nccb <= 0) nccb = SCSI_LOW_NCCB; scsi_low_init_ccbque(nccb); TAILQ_INIT(&slp->sl_start); /* call os depend attach */ rv = scsi_low_attach_cam(slp); if (rv != 0) { device_printf(slp->sl_dev, "scsi_low_attach: osdep attach failed\n"); return (rv); } /* check hardware */ DELAY(1000); /* wait for 1ms */ SCSI_LOW_LOCK(slp); if (scsi_low_init(slp, SCSI_LOW_RESTART_HARD) != 0) { device_printf(slp->sl_dev, "scsi_low_attach: initialization failed\n"); SCSI_LOW_UNLOCK(slp); return EINVAL; } /* start watch dog */ slp->sl_timeout_count = 0; callout_reset(&slp->sl_timeout_timer, hz / SCSI_LOW_TIMEOUT_HZ, scsi_low_timeout, slp); mtx_lock(&sl_tab_lock); LIST_INSERT_HEAD(&sl_tab, slp, sl_chain); mtx_unlock(&sl_tab_lock); /* fake call */ scsi_low_abort_ccb(slp, scsi_low_find_ccb(slp, 0, 0, NULL)); #ifdef SCSI_LOW_START_UP_CHECK /* probing devices */ scsi_low_start_up(slp); #endif /* SCSI_LOW_START_UP_CHECK */ SCSI_LOW_UNLOCK(slp); return 0; } int scsi_low_detach(slp) struct scsi_low_softc *slp; { int rv; SCSI_LOW_LOCK(slp); if (scsi_low_is_busy(slp) != 0) { 
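/*
 * [Illustrative arithmetic, not part of this change]
 * scsi_low_attach() above sizes its ccb pool from the caller's
 * figures: with openings = 8 and ntargs = 8 it preallocates
 * nccb = 8 * 8 = 64 ccbs; a product outside (0, SCSI_LOW_NCCB) falls
 * back to SCSI_LOW_NCCB, and openings <= 0 defaults the per-target
 * depth to SCSI_LOW_NCCB / ntargs.
 */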
SCSI_LOW_UNLOCK(slp); return EBUSY; } scsi_low_deactivate(slp); rv = scsi_low_detach_cam(slp); if (rv != 0) { SCSI_LOW_UNLOCK(slp); return EBUSY; } scsi_low_free_ti(slp); SCSI_LOW_UNLOCK(slp); callout_drain(&slp->sl_timeout_timer); callout_drain(&slp->sl_engage_timer); mtx_lock(&sl_tab_lock); LIST_REMOVE(slp, sl_chain); mtx_unlock(&sl_tab_lock); return 0; } /************************************************************** * Generic enqueue **************************************************************/ static int scsi_low_enqueue(slp, ti, li, cb, flags, msg) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb *cb; u_int flags, msg; { cb->ti = ti; cb->li = li; scsi_low_ccb_message_assert(cb, msg); cb->ccb_otag = cb->ccb_tag = SCSI_LOW_UNKTAG; scsi_low_alloc_qtag(cb); cb->ccb_flags = flags | CCB_STARTQ; cb->ccb_tc = cb->ccb_tcmax = SCSI_LOW_MIN_TOUT; cb->ccb_error |= PENDINGIO; if ((flags & CCB_URGENT) != 0) { TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); } else { TAILQ_INSERT_TAIL(&slp->sl_start, cb, ccb_chain); } slp->sl_nio ++; if (slp->sl_Tnexus == NULL) scsi_low_start(slp); return 0; } static int scsi_low_message_enqueue(slp, ti, li, flags) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; u_int flags; { struct slccb *cb; u_int tmsgflags; tmsgflags = ti->ti_setup_msg; ti->ti_setup_msg = 0; flags |= CCB_NORETRY; if ((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL) return ENOMEM; cb->osdep = NULL; cb->bp = NULL; scsi_low_enqueue(slp, ti, li, cb, flags, tmsgflags); return 0; } /************************************************************** * Generic Start & Done **************************************************************/ #define SLSC_MODE_SENSE_SHORT 0x1a static u_int8_t ss_cmd[6] = {START_STOP, 0, 0, 0, SSS_START, 0}; static u_int8_t sms_cmd[6] = {SLSC_MODE_SENSE_SHORT, 0x08, 0x0a, 0, sizeof(struct scsi_low_mode_sense_data), 0}; static u_int8_t inq_cmd[6] = {INQUIRY, 0, 0, 0, sizeof(struct scsi_low_inq_data), 0}; static u_int8_t unit_ready_cmd[6]; static int scsi_low_setup_start(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *); static int scsi_low_sense_abort_start(struct scsi_low_softc *, struct targ_info *, struct lun_info *, struct slccb *); static int scsi_low_resume(struct scsi_low_softc *); static void scsi_low_unit_ready_cmd(cb) struct slccb *cb; { cb->ccb_scp.scp_cmd = unit_ready_cmd; cb->ccb_scp.scp_cmdlen = sizeof(unit_ready_cmd); cb->ccb_scp.scp_datalen = 0; cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; } static int scsi_low_sense_abort_start(slp, ti, li, cb) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb *cb; { cb->ccb_scp.scp_cmdlen = 6; bzero(cb->ccb_scsi_cmd, cb->ccb_scp.scp_cmdlen); cb->ccb_scsi_cmd[0] = REQUEST_SENSE; cb->ccb_scsi_cmd[4] = sizeof(cb->ccb_sense); cb->ccb_scp.scp_cmd = cb->ccb_scsi_cmd; cb->ccb_scp.scp_data = (u_int8_t *) &cb->ccb_sense; cb->ccb_scp.scp_datalen = sizeof(cb->ccb_sense); cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; scsi_low_ccb_message_clear(cb); if ((cb->ccb_flags & CCB_CLEARQ) != 0) { scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); } else { bzero(&cb->ccb_sense, sizeof(cb->ccb_sense)); #ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE scsi_low_assert_msg(slp, ti, ti->ti_setup_msg_done, 0); #endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */ } return SCSI_LOW_START_NO_QTAG; } static int scsi_low_setup_start(slp, ti, li, cb) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; struct slccb 
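/*
 * [Illustrative, not part of this change] scsi_low_enqueue() above
 * gives CCB_URGENT requests (e.g. the bus-device-reset ccb built for
 * XPT_RESET_DEV) head-of-queue treatment:
 *
 *	if ((flags & CCB_URGENT) != 0)
 *		TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain);
 *	else
 *		TAILQ_INSERT_TAIL(&slp->sl_start, cb, ccb_chain);
 */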
*cb; { switch(li->li_state) { case SCSI_LOW_LUN_SLEEP: scsi_low_unit_ready_cmd(cb); break; case SCSI_LOW_LUN_START: cb->ccb_scp.scp_cmd = ss_cmd; cb->ccb_scp.scp_cmdlen = sizeof(ss_cmd); cb->ccb_scp.scp_datalen = 0; cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 30; break; case SCSI_LOW_LUN_INQ: cb->ccb_scp.scp_cmd = inq_cmd; cb->ccb_scp.scp_cmdlen = sizeof(inq_cmd); cb->ccb_scp.scp_data = (u_int8_t *)&li->li_inq; cb->ccb_scp.scp_datalen = sizeof(li->li_inq); cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; break; case SCSI_LOW_LUN_MODEQ: cb->ccb_scp.scp_cmd = sms_cmd; cb->ccb_scp.scp_cmdlen = sizeof(sms_cmd); cb->ccb_scp.scp_data = (u_int8_t *)&li->li_sms; cb->ccb_scp.scp_datalen = sizeof(li->li_sms); cb->ccb_scp.scp_direction = SCSI_LOW_READ; cb->ccb_tcmax = 15; return SCSI_LOW_START_QTAG; default: panic("%s: no setup phase", device_get_nameunit(slp->sl_dev)); } return SCSI_LOW_START_NO_QTAG; } static int scsi_low_resume(slp) struct scsi_low_softc *slp; { if (slp->sl_flags & HW_RESUME) return EJUSTRETURN; slp->sl_flags &= ~HW_POWDOWN; if (slp->sl_funcs->scsi_low_power != NULL) { slp->sl_flags |= HW_RESUME; slp->sl_rstep = 0; (*slp->sl_funcs->scsi_low_power) (slp, SCSI_LOW_ENGAGE); callout_reset(&slp->sl_engage_timer, hz / 1000, scsi_low_engage, slp); return EJUSTRETURN; } return 0; } static void scsi_low_start(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *cb; int rv; /* check hardware exists or under initializations ? */ if ((slp->sl_flags & (HW_INACTIVE | HW_INITIALIZING)) != 0) return; /* check hardware power up ? */ if ((slp->sl_flags & HW_POWERCTRL) != 0) { slp->sl_active ++; if (slp->sl_flags & (HW_POWDOWN | HW_RESUME)) { if (scsi_low_resume(slp) == EJUSTRETURN) return; } } /* setup nexus */ #ifdef SCSI_LOW_DIAGNOSTIC if (slp->sl_Tnexus || slp->sl_Lnexus || slp->sl_Qnexus) { scsi_low_info(slp, NULL, "NEXUS INCOSISTENT"); panic("%s: inconsistent", device_get_nameunit(slp->sl_dev)); } #endif /* SCSI_LOW_DIAGNOSTIC */ for (cb = TAILQ_FIRST(&slp->sl_start); cb != NULL; cb = TAILQ_NEXT(cb, ccb_chain)) { li = cb->li; if (li->li_disc == 0) { goto scsi_low_cmd_start; } else if (li->li_nqio > 0) { if (li->li_nqio < li->li_maxnqio || (cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0) goto scsi_low_cmd_start; } } return; scsi_low_cmd_start: cb->ccb_flags &= ~CCB_STARTQ; TAILQ_REMOVE(&slp->sl_start, cb, ccb_chain); ti = cb->ti; /* clear all error flag bits (for restart) */ cb->ccb_error = 0; cb->ccb_datalen = -1; cb->ccb_scp.scp_status = ST_UNKNOWN; /* setup nexus pointer */ slp->sl_Qnexus = cb; slp->sl_Lnexus = li; slp->sl_Tnexus = ti; /* initialize msgsys */ scsi_low_init_msgsys(slp, ti); /* exec cmd */ if ((cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0) { /* CA state or forced abort */ rv = scsi_low_sense_abort_start(slp, ti, li, cb); } else if (li->li_state >= SCSI_LOW_LUN_OK) { cb->ccb_flags &= ~CCB_INTERNAL; rv = scsi_low_ccb_setup_cam(slp, cb); if (cb->ccb_msgoutflag != 0) { scsi_low_ccb_message_exec(slp, cb); } } else { cb->ccb_flags |= CCB_INTERNAL; rv = scsi_low_setup_start(slp, ti, li, cb); } /* allocate qtag */ #define SCSI_LOW_QTAG_OK (SCSI_LOW_QTAG | SCSI_LOW_DISC) if (rv == SCSI_LOW_START_QTAG && (li->li_flags & SCSI_LOW_QTAG_OK) == SCSI_LOW_QTAG_OK && li->li_maxnqio > 0) { u_int qmsg; scsi_low_activate_qtag(cb); if ((scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] & SCSI_LOW_CMD_ORDERED_QTAG) != 0) qmsg = SCSI_LOW_MSG_ORDERED_QTAG; else if ((cb->ccb_flags & CCB_URGENT) != 0) qmsg = SCSI_LOW_MSG_HEAD_QTAG; else 
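/*
 * [Explanatory note, not part of this change] The tag message chosen
 * here follows the command table and the ccb flags:
 * SCSI_LOW_CMD_ORDERED_QTAG commands get MSG_ORDERED_QTAG, CCB_URGENT
 * ccbs get MSG_HEAD_QTAG, and everything else falls through to
 * MSG_SIMPLE_QTAG below.
 */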
qmsg = SCSI_LOW_MSG_SIMPLE_QTAG; scsi_low_assert_msg(slp, ti, qmsg, 0); } /* timeout */ if (cb->ccb_tcmax < SCSI_LOW_MIN_TOUT) cb->ccb_tcmax = SCSI_LOW_MIN_TOUT; cb->ccb_tc = cb->ccb_tcmax; /* setup saved scsi data pointer */ cb->ccb_sscp = cb->ccb_scp; /* setup current scsi pointer */ slp->sl_scp = cb->ccb_sscp; slp->sl_error = cb->ccb_error; /* assert always an identify msg */ scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_IDENTIFY, 0); /* debug section */ #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_init(&ti->ti_log_msgin); scsi_low_msg_log_init(&ti->ti_log_msgout); #endif /* SCSI_LOW_DIAGNOSTIC */ /* selection start */ slp->sl_selid = cb; rv = ((*slp->sl_funcs->scsi_low_start_bus) (slp, cb)); if (rv == SCSI_LOW_START_OK) { #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_win ++; #endif /* SCSI_LOW_STATICS */ return; } scsi_low_arbit_fail(slp, cb); #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_fail ++; #endif /* SCSI_LOW_STATICS */ } void scsi_low_arbit_fail(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { struct targ_info *ti = cb->ti; scsi_low_deactivate_qtag(cb); scsi_low_ccb_message_retry(cb); cb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); scsi_low_bus_release(slp, ti); cb->ccb_selrcnt ++; if (slp->sl_disc == 0) { #ifdef SCSI_LOW_DIAGNOSTIC device_printf(slp->sl_dev, "try selection again\n"); #endif /* SCSI_LOW_DIAGNOSTIC */ slp->sl_retry_sel = 1; } } static void scsi_low_bus_release(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { if (ti->ti_disc > 0) { SCSI_LOW_SETUP_PHASE(ti, PH_DISC); } else { SCSI_LOW_SETUP_PHASE(ti, PH_NULL); } /* clear all nexus pointer */ slp->sl_Qnexus = NULL; slp->sl_Lnexus = NULL; slp->sl_Tnexus = NULL; /* clear selection assert */ slp->sl_selid = NULL; /* clear nexus data */ slp->sl_scp.scp_direction = SCSI_LOW_RWUNK; /* clear phase change counter */ slp->sl_ph_count = 0; } static int scsi_low_setup_done(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { struct targ_info *ti; struct lun_info *li; ti = cb->ti; li = cb->li; if (cb->ccb_rcnt >= slp->sl_max_retry) { cb->ccb_error |= ABORTIO; return SCSI_LOW_DONE_COMPLETE; } /* XXX: special huck for selection timeout */ if (li->li_state == SCSI_LOW_LUN_SLEEP && (cb->ccb_error & SELTIMEOUTIO) != 0) { cb->ccb_error |= ABORTIO; return SCSI_LOW_DONE_COMPLETE; } switch(li->li_state) { case SCSI_LOW_LUN_INQ: if (cb->ccb_error != 0) { li->li_diskflags &= ~(SCSI_LOW_DISK_LINK | SCSI_LOW_DISK_QTAG); if (li->li_lun > 0) goto resume; ti->ti_diskflags &= ~(SCSI_LOW_DISK_SYNC | SCSI_LOW_DISK_WIDE); } else if ((li->li_inq.sd_version & 7) >= 2 || (li->li_inq.sd_len >= 4)) { if ((li->li_inq.sd_support & 0x2) == 0) li->li_diskflags &= ~SCSI_LOW_DISK_QTAG; if ((li->li_inq.sd_support & 0x8) == 0) li->li_diskflags &= ~SCSI_LOW_DISK_LINK; if (li->li_lun > 0) goto resume; if ((li->li_inq.sd_support & 0x10) == 0) ti->ti_diskflags &= ~SCSI_LOW_DISK_SYNC; if ((li->li_inq.sd_support & 0x20) == 0) ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE_16; if ((li->li_inq.sd_support & 0x40) == 0) ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE_32; } else { li->li_diskflags &= ~(SCSI_LOW_DISK_QTAG | SCSI_LOW_DISK_LINK); if (li->li_lun > 0) goto resume; ti->ti_diskflags &= ~SCSI_LOW_DISK_WIDE; } ti->ti_flags_valid |= SCSI_LOW_TARG_FLAGS_DISK_VALID; resume: scsi_low_calcf_target(ti); scsi_low_calcf_lun(li); break; case SCSI_LOW_LUN_MODEQ: if (cb->ccb_error != 0) { if (cb->ccb_error & SENSEIO) { #ifdef SCSI_LOW_DEBUG if (scsi_low_debug & SCSI_LOW_DEBUG_SENSE) { int error_code, sense_key, asc, ascq; 
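/*
 * [Illustrative, not part of this change] The INQUIRY decoding in
 * scsi_low_setup_done() above drops every capability the device does
 * not advertise; per the bit tests on li_inq.sd_support: 0x02 tagged
 * queuing, 0x08 linked commands, 0x10 sync transfer, 0x20 wide-16,
 * 0x40 wide-32, e.g.:
 *
 *	if ((li->li_inq.sd_support & 0x10) == 0)
 *		ti->ti_diskflags &= ~SCSI_LOW_DISK_SYNC;
 */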
scsi_extract_sense(&cb->ccb_sense, &error_code, &sense_key, &asc, &ascq); printf("SENSE: [%x][%x][%x][%x]\n", error_code, sense_key, asc, ascq); } #endif /* SCSI_LOW_DEBUG */ } else { li->li_diskflags &= ~SCSI_LOW_DISK_QTAG; } } else if ((li->li_sms.sms_cmp.cmp_page & 0x3f) == 0x0a) { if (li->li_sms.sms_cmp.cmp_qc & 0x02) li->li_qflags |= SCSI_LOW_QFLAG_CA_QCLEAR; else li->li_qflags &= ~SCSI_LOW_QFLAG_CA_QCLEAR; if ((li->li_sms.sms_cmp.cmp_qc & 0x01) != 0) li->li_diskflags &= ~SCSI_LOW_DISK_QTAG; } li->li_flags_valid |= SCSI_LOW_LUN_FLAGS_DISK_VALID; scsi_low_calcf_lun(li); break; default: break; } li->li_state ++; if (li->li_state == SCSI_LOW_LUN_OK) { scsi_low_calcf_target(ti); scsi_low_calcf_lun(li); if (li->li_flags_valid == SCSI_LOW_LUN_FLAGS_ALL_VALID && (slp->sl_show_result & SHOW_CALCF_RES) != 0) { scsi_low_calcf_show(li); } } cb->ccb_rcnt --; return SCSI_LOW_DONE_RETRY; } static int scsi_low_done(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { int rv; if (cb->ccb_error == 0) { if ((cb->ccb_flags & (CCB_SENSE | CCB_CLEARQ)) != 0) { #ifdef SCSI_LOW_QCLEAR_AFTER_CA /* XXX: * SCSI-2 draft suggests * page 0x0a QErr bit determins if * the target aborts or continues * the queueing io's after CA state resolved. * However many targets seem not to support * the page 0x0a. Thus we should manually clear the * queuing io's after CA state. */ if ((cb->ccb_flags & CCB_CLEARQ) == 0) { cb->ccb_rcnt --; cb->ccb_flags |= CCB_CLEARQ; goto retry; } #endif /* SCSI_LOW_QCLEAR_AFTER_CA */ if ((cb->ccb_flags & CCB_SENSE) != 0) cb->ccb_error |= (SENSEIO | ABORTIO); cb->ccb_flags &= ~(CCB_SENSE | CCB_CLEARQ); } else switch (cb->ccb_sscp.scp_status) { case ST_GOOD: case ST_MET: case ST_INTERGOOD: case ST_INTERMET: if (cb->ccb_datalen == 0 || cb->ccb_scp.scp_datalen == 0) break; if (cb->ccb_scp.scp_cmdlen > 0 && (scsi_low_cmd_flags[cb->ccb_scp.scp_cmd[0]] & SCSI_LOW_CMD_RESIDUAL_CHK) == 0) break; cb->ccb_error |= PDMAERR; break; case ST_BUSY: case ST_QUEFULL: cb->ccb_error |= (BUSYERR | STATERR); break; case ST_CONFLICT: cb->ccb_error |= (STATERR | ABORTIO); break; case ST_CHKCOND: case ST_CMDTERM: if (cb->ccb_flags & (CCB_AUTOSENSE | CCB_INTERNAL)) { cb->ccb_rcnt --; cb->ccb_flags |= CCB_SENSE; goto retry; } cb->ccb_error |= (UACAERR | STATERR | ABORTIO); break; case ST_UNKNOWN: default: cb->ccb_error |= FATALIO; break; } } else { if (cb->ccb_flags & CCB_SENSE) { cb->ccb_error |= (SENSEERR | ABORTIO); } cb->ccb_flags &= ~(CCB_CLEARQ | CCB_SENSE); } /* internal ccb */ if ((cb->ccb_flags & CCB_INTERNAL) != 0) { if (scsi_low_setup_done(slp, cb) == SCSI_LOW_DONE_RETRY) goto retry; } /* check a ccb msgout flag */ if (cb->ccb_omsgoutflag != 0) { #define SCSI_LOW_MSG_ABORT_OK (SCSI_LOW_MSG_ABORT | \ SCSI_LOW_MSG_ABORT_QTAG | \ SCSI_LOW_MSG_CLEAR_QTAG | \ SCSI_LOW_MSG_TERMIO) if ((cb->ccb_omsgoutflag & SCSI_LOW_MSG_ABORT_OK) != 0) { cb->ccb_error |= ABORTIO; } } /* call OS depend done */ if (cb->osdep != NULL) { rv = scsi_low_done_cam(slp, cb); if (rv == EJUSTRETURN) goto retry; } else if (cb->ccb_error != 0) { if (cb->ccb_rcnt >= slp->sl_max_retry) cb->ccb_error |= ABORTIO; if ((cb->ccb_flags & CCB_NORETRY) == 0 && (cb->ccb_error & ABORTIO) == 0) goto retry; } /* free our target */ #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DONE, cb->ti->ti_id) != 0) { printf(">> SCSI_LOW_DONE_COMPLETE ===============\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DEBUG */ scsi_low_deactivate_qtag(cb); scsi_low_dealloc_qtag(cb); scsi_low_free_ccb(cb); slp->sl_nio --; return SCSI_LOW_DONE_COMPLETE; 
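/*
 * [Illustrative, not part of this change] The check-condition path
 * above turns a failed ccb into its own REQUEST SENSE retry: the
 * retry count is credited back and CCB_SENSE is set, so the next
 * scsi_low_start() pass runs scsi_low_sense_abort_start() for this
 * ccb instead of the original command:
 *
 *	case ST_CHKCOND:
 *	case ST_CMDTERM:
 *		if (cb->ccb_flags & (CCB_AUTOSENSE | CCB_INTERNAL)) {
 *			cb->ccb_rcnt --;
 *			cb->ccb_flags |= CCB_SENSE;
 *			goto retry;
 *		}
 */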
retry: #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DONE, cb->ti->ti_id) != 0) { printf("** SCSI_LOW_DONE_RETRY ===============\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DEBUG */ cb->ccb_rcnt ++; scsi_low_deactivate_qtag(cb); scsi_low_ccb_message_retry(cb); return SCSI_LOW_DONE_RETRY; } /************************************************************** * Reset **************************************************************/ static void scsi_low_reset_nexus_target(slp, ti, fdone) struct scsi_low_softc *slp; struct targ_info *ti; int fdone; { struct lun_info *li; for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { scsi_low_reset_nexus_lun(slp, li, fdone); li->li_state = SCSI_LOW_LUN_SLEEP; li->li_maxnqio = 0; } ti->ti_disc = 0; ti->ti_setup_msg = 0; ti->ti_setup_msg_done = 0; ti->ti_osynch.offset = ti->ti_osynch.period = 0; ti->ti_owidth = SCSI_LOW_BUS_WIDTH_8; ti->ti_diskflags = SCSI_LOW_DISK_TFLAGS; ti->ti_flags_valid &= ~SCSI_LOW_TARG_FLAGS_DISK_VALID; if (slp->sl_funcs->scsi_low_targ_init != NULL) { ((*slp->sl_funcs->scsi_low_targ_init) (slp, ti, SCSI_LOW_INFO_REVOKE)); } scsi_low_calcf_target(ti); for (li = LIST_FIRST(&ti->ti_litab); li != NULL; li = LIST_NEXT(li, lun_chain)) { li->li_flags = 0; li->li_diskflags = SCSI_LOW_DISK_LFLAGS; li->li_flags_valid &= ~SCSI_LOW_LUN_FLAGS_DISK_VALID; if (slp->sl_funcs->scsi_low_lun_init != NULL) { ((*slp->sl_funcs->scsi_low_lun_init) (slp, ti, li, SCSI_LOW_INFO_REVOKE)); } scsi_low_calcf_lun(li); } } static void scsi_low_reset_nexus(slp, fdone) struct scsi_low_softc *slp; int fdone; { struct targ_info *ti; struct slccb *cb, *topcb; if ((cb = slp->sl_Qnexus) != NULL) { topcb = scsi_low_revoke_ccb(slp, cb, fdone); } else { topcb = NULL; } for (ti = TAILQ_FIRST(&slp->sl_titab); ti != NULL; ti = TAILQ_NEXT(ti, ti_chain)) { scsi_low_reset_nexus_target(slp, ti, fdone); scsi_low_bus_release(slp, ti); scsi_low_init_msgsys(slp, ti); } if (topcb != NULL) { topcb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, topcb, ccb_chain); } slp->sl_disc = 0; slp->sl_retry_sel = 0; slp->sl_flags &= ~HW_PDMASTART; } /* misc */ static int tw_pos; static char tw_chars[] = "|/-\\"; #define TWIDDLEWAIT 10000 static void scsi_low_twiddle_wait(void) { cnputc('\b'); cnputc(tw_chars[tw_pos++]); tw_pos %= (sizeof(tw_chars) - 1); DELAY(TWIDDLEWAIT); } void scsi_low_bus_reset(slp) struct scsi_low_softc *slp; { int i; (*slp->sl_funcs->scsi_low_bus_reset) (slp); device_printf(slp->sl_dev, "try to reset scsi bus "); for (i = 0; i <= SCSI2_RESET_DELAY / TWIDDLEWAIT ; i++) scsi_low_twiddle_wait(); cnputc('\b'); printf("\n"); } int scsi_low_restart(slp, flags, s) struct scsi_low_softc *slp; int flags; u_char *s; { int error; if (s != NULL) device_printf(slp->sl_dev, "scsi bus restart. 
reason: %s\n", s); if ((error = scsi_low_init(slp, flags)) != 0) return error; scsi_low_start(slp); return 0; } /************************************************************** * disconnect and reselect **************************************************************/ #define MSGCMD_LUN(msg) (msg & 0x07) static struct slccb * scsi_low_establish_ccb(ti, li, tag) struct targ_info *ti; struct lun_info *li; scsi_low_tag_t tag; { struct scsi_low_softc *slp = ti->ti_sc; struct slccb *cb; if (li == NULL) return NULL; cb = TAILQ_FIRST(&li->li_discq); for ( ; cb != NULL; cb = TAILQ_NEXT(cb, ccb_chain)) if (cb->ccb_tag == tag) goto found; return cb; /* * establish our ccb nexus */ found: #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id) != 0) { device_printf(slp->sl_dev, "nexus(0x%lx) abort check start\n", (u_long) cb); cb->ccb_flags |= (CCB_NORETRY | CCB_SILENT); scsi_low_revoke_ccb(slp, cb, 1); return NULL; } if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id) != 0) { if (cb->ccb_omsgoutflag == 0) scsi_low_ccb_message_assert(cb, SCSI_LOW_MSG_NOOP); } #endif /* SCSI_LOW_DEBUG */ TAILQ_REMOVE(&li->li_discq, cb, ccb_chain); cb->ccb_flags &= ~CCB_DISCQ; slp->sl_Qnexus = cb; slp->sl_scp = cb->ccb_sscp; slp->sl_error |= cb->ccb_error; slp->sl_disc --; ti->ti_disc --; li->li_disc --; /* inform "ccb nexus established" to the host driver */ (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp); /* check msg */ if (cb->ccb_msgoutflag != 0) { scsi_low_ccb_message_exec(slp, cb); } return cb; } struct targ_info * scsi_low_reselected(slp, targ) struct scsi_low_softc *slp; u_int targ; { struct targ_info *ti; struct slccb *cb; u_char *s; /* * Check select vs reselected collision. */ if ((cb = slp->sl_selid) != NULL) { scsi_low_arbit_fail(slp, cb); #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_conflict ++; #endif /* SCSI_LOW_STATICS */ } /* * Check if no current active nexus. */ if (slp->sl_Tnexus != NULL) { s = "host busy"; goto world_restart; } /* * Check a valid target id asserted ? */ if (targ >= slp->sl_ntargs || targ == slp->sl_hostid) { s = "scsi id illegal"; goto world_restart; } /* * Check the target scsi status. */ ti = slp->sl_ti[targ]; if (ti->ti_phase != PH_DISC && ti->ti_phase != PH_NULL) { s = "phase mismatch"; goto world_restart; } /* * Setup init msgsys */ slp->sl_error = 0; scsi_low_init_msgsys(slp, ti); /* * Establish our target nexus */ SCSI_LOW_SETUP_PHASE(ti, PH_RESEL); slp->sl_Tnexus = ti; #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_reselected ++; #endif /* SCSI_LOW_STATICS */ return ti; world_restart: device_printf(slp->sl_dev, "reselect(%x:unknown) %s\n", targ, s); scsi_low_restart(slp, SCSI_LOW_RESTART_HARD, "reselect: scsi world confused"); return NULL; } /************************************************************** * cmd out pointer setup **************************************************************/ int scsi_low_cmd(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct slccb *cb = slp->sl_Qnexus; slp->sl_ph_count ++; if (cb == NULL) { /* * no ccb, abort! 
*/ slp->sl_scp.scp_cmd = (u_int8_t *) &unit_ready_cmd; slp->sl_scp.scp_cmdlen = sizeof(unit_ready_cmd); slp->sl_scp.scp_datalen = 0; slp->sl_scp.scp_direction = SCSI_LOW_READ; slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "CMDOUT: ccb nexus not found"); return EINVAL; } else { #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_CMDLNK_CHECK, ti->ti_id)) { scsi_low_test_cmdlnk(slp, cb); } #endif /* SCSI_LOW_DEBUG */ } return 0; } /************************************************************** * data out pointer setup **************************************************************/ int scsi_low_data(slp, ti, bp, direction) struct scsi_low_softc *slp; struct targ_info *ti; struct buf **bp; int direction; { struct slccb *cb = slp->sl_Qnexus; if (cb != NULL && direction == cb->ccb_sscp.scp_direction) { *bp = cb->bp; return 0; } slp->sl_error |= (FATALIO | PDMAERR); slp->sl_scp.scp_datalen = 0; slp->sl_scp.scp_direction = direction; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); if (ti->ti_ophase != ti->ti_phase) { char *s; if (cb == NULL) s = "DATA PHASE: ccb nexus not found"; else s = "DATA PHASE: xfer direction mismatch"; SCSI_LOW_INFO(slp, ti, s); } *bp = NULL; return EINVAL; } /************************************************************** * MSG_SYS **************************************************************/ #define MSGINPTR_CLR(ti) {(ti)->ti_msginptr = 0; (ti)->ti_msginlen = 0;} #define MSGIN_PERIOD(ti) ((ti)->ti_msgin[3]) #define MSGIN_OFFSET(ti) ((ti)->ti_msgin[4]) #define MSGIN_WIDTHP(ti) ((ti)->ti_msgin[3]) #define MSGIN_DATA_LAST 0x30 static int scsi_low_errfunc_synch(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_wide(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_identify(struct scsi_low_softc *, u_int); static int scsi_low_errfunc_qtag(struct scsi_low_softc *, u_int); static int scsi_low_msgfunc_synch(struct scsi_low_softc *); static int scsi_low_msgfunc_wide(struct scsi_low_softc *); static int scsi_low_msgfunc_identify(struct scsi_low_softc *); static int scsi_low_msgfunc_abort(struct scsi_low_softc *); static int scsi_low_msgfunc_qabort(struct scsi_low_softc *); static int scsi_low_msgfunc_qtag(struct scsi_low_softc *); static int scsi_low_msgfunc_reset(struct scsi_low_softc *); struct scsi_low_msgout_data { u_int md_flags; u_int8_t md_msg; int (*md_msgfunc)(struct scsi_low_softc *); int (*md_errfunc)(struct scsi_low_softc *, u_int); #define MSG_RELEASE_ATN 0x0001 u_int md_condition; }; struct scsi_low_msgout_data scsi_low_msgout_data[] = { /* 0 */ {SCSI_LOW_MSG_RESET, MSG_RESET, scsi_low_msgfunc_reset, NULL, MSG_RELEASE_ATN}, /* 1 */ {SCSI_LOW_MSG_REJECT, MSG_REJECT, NULL, NULL, MSG_RELEASE_ATN}, /* 2 */ {SCSI_LOW_MSG_PARITY, MSG_PARITY, NULL, NULL, MSG_RELEASE_ATN}, /* 3 */ {SCSI_LOW_MSG_ERROR, MSG_I_ERROR, NULL, NULL, MSG_RELEASE_ATN}, /* 4 */ {SCSI_LOW_MSG_IDENTIFY, MSG_IDENTIFY, scsi_low_msgfunc_identify, scsi_low_errfunc_identify, 0}, /* 5 */ {SCSI_LOW_MSG_ABORT, MSG_ABORT, scsi_low_msgfunc_abort, NULL, MSG_RELEASE_ATN}, /* 6 */ {SCSI_LOW_MSG_TERMIO, MSG_TERM_IO, NULL, NULL, MSG_RELEASE_ATN}, /* 7 */ {SCSI_LOW_MSG_SIMPLE_QTAG, MSG_SIMPLE_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 8 */ {SCSI_LOW_MSG_ORDERED_QTAG, MSG_ORDERED_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 9 */{SCSI_LOW_MSG_HEAD_QTAG, MSG_HEAD_QTAG, scsi_low_msgfunc_qtag, scsi_low_errfunc_qtag, 0}, /* 10 */ {SCSI_LOW_MSG_ABORT_QTAG, MSG_ABORT_QTAG, scsi_low_msgfunc_qabort, NULL, 
MSG_RELEASE_ATN}, /* 11 */ {SCSI_LOW_MSG_CLEAR_QTAG, MSG_CLEAR_QTAG, scsi_low_msgfunc_abort, NULL, MSG_RELEASE_ATN}, /* 12 */{SCSI_LOW_MSG_WIDE, MSG_EXTEND, scsi_low_msgfunc_wide, scsi_low_errfunc_wide, MSG_RELEASE_ATN}, /* 13 */{SCSI_LOW_MSG_SYNCH, MSG_EXTEND, scsi_low_msgfunc_synch, scsi_low_errfunc_synch, MSG_RELEASE_ATN}, /* 14 */{SCSI_LOW_MSG_NOOP, MSG_NOOP, NULL, NULL, MSG_RELEASE_ATN}, /* 15 */{SCSI_LOW_MSG_ALL, 0}, }; static int scsi_low_msginfunc_ext(struct scsi_low_softc *); static int scsi_low_synch(struct scsi_low_softc *); static int scsi_low_wide(struct scsi_low_softc *); static int scsi_low_msginfunc_msg_reject(struct scsi_low_softc *); static int scsi_low_msginfunc_rejop(struct scsi_low_softc *); static int scsi_low_msginfunc_rp(struct scsi_low_softc *); static int scsi_low_msginfunc_sdp(struct scsi_low_softc *); static int scsi_low_msginfunc_disc(struct scsi_low_softc *); static int scsi_low_msginfunc_cc(struct scsi_low_softc *); static int scsi_low_msginfunc_lcc(struct scsi_low_softc *); static int scsi_low_msginfunc_parity(struct scsi_low_softc *); static int scsi_low_msginfunc_noop(struct scsi_low_softc *); static int scsi_low_msginfunc_simple_qtag(struct scsi_low_softc *); static int scsi_low_msginfunc_i_wide_residue(struct scsi_low_softc *); struct scsi_low_msgin_data { u_int md_len; int (*md_msgfunc)(struct scsi_low_softc *); }; struct scsi_low_msgin_data scsi_low_msgin_data[] = { /* 0 */ {1, scsi_low_msginfunc_cc}, /* 1 */ {2, scsi_low_msginfunc_ext}, /* 2 */ {1, scsi_low_msginfunc_sdp}, /* 3 */ {1, scsi_low_msginfunc_rp}, /* 4 */ {1, scsi_low_msginfunc_disc}, /* 5 */ {1, scsi_low_msginfunc_rejop}, /* 6 */ {1, scsi_low_msginfunc_rejop}, /* 7 */ {1, scsi_low_msginfunc_msg_reject}, /* 8 */ {1, scsi_low_msginfunc_noop}, /* 9 */ {1, scsi_low_msginfunc_parity}, /* a */ {1, scsi_low_msginfunc_lcc}, /* b */ {1, scsi_low_msginfunc_lcc}, /* c */ {1, scsi_low_msginfunc_rejop}, /* d */ {2, scsi_low_msginfunc_rejop}, /* e */ {1, scsi_low_msginfunc_rejop}, /* f */ {1, scsi_low_msginfunc_rejop}, /* 0x10 */ {1, scsi_low_msginfunc_rejop}, /* 0x11 */ {1, scsi_low_msginfunc_rejop}, /* 0x12 */ {1, scsi_low_msginfunc_rejop}, /* 0x13 */ {1, scsi_low_msginfunc_rejop}, /* 0x14 */ {1, scsi_low_msginfunc_rejop}, /* 0x15 */ {1, scsi_low_msginfunc_rejop}, /* 0x16 */ {1, scsi_low_msginfunc_rejop}, /* 0x17 */ {1, scsi_low_msginfunc_rejop}, /* 0x18 */ {1, scsi_low_msginfunc_rejop}, /* 0x19 */ {1, scsi_low_msginfunc_rejop}, /* 0x1a */ {1, scsi_low_msginfunc_rejop}, /* 0x1b */ {1, scsi_low_msginfunc_rejop}, /* 0x1c */ {1, scsi_low_msginfunc_rejop}, /* 0x1d */ {1, scsi_low_msginfunc_rejop}, /* 0x1e */ {1, scsi_low_msginfunc_rejop}, /* 0x1f */ {1, scsi_low_msginfunc_rejop}, /* 0x20 */ {2, scsi_low_msginfunc_simple_qtag}, /* 0x21 */ {2, scsi_low_msginfunc_rejop}, /* 0x22 */ {2, scsi_low_msginfunc_rejop}, /* 0x23 */ {2, scsi_low_msginfunc_i_wide_residue}, /* 0x24 */ {2, scsi_low_msginfunc_rejop}, /* 0x25 */ {2, scsi_low_msginfunc_rejop}, /* 0x26 */ {2, scsi_low_msginfunc_rejop}, /* 0x27 */ {2, scsi_low_msginfunc_rejop}, /* 0x28 */ {2, scsi_low_msginfunc_rejop}, /* 0x29 */ {2, scsi_low_msginfunc_rejop}, /* 0x2a */ {2, scsi_low_msginfunc_rejop}, /* 0x2b */ {2, scsi_low_msginfunc_rejop}, /* 0x2c */ {2, scsi_low_msginfunc_rejop}, /* 0x2d */ {2, scsi_low_msginfunc_rejop}, /* 0x2e */ {2, scsi_low_msginfunc_rejop}, /* 0x2f */ {2, scsi_low_msginfunc_rejop}, /* 0x30 */ {1, scsi_low_msginfunc_rejop} /* default rej op */ }; /************************************************************** * msgout 
**************************************************************/ static int scsi_low_msgfunc_synch(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; int ptr = ti->ti_msgoutlen; ti->ti_msgoutstr[ptr + 1] = MSG_EXTEND_SYNCHLEN; ti->ti_msgoutstr[ptr + 2] = MSG_EXTEND_SYNCHCODE; ti->ti_msgoutstr[ptr + 3] = ti->ti_maxsynch.period; ti->ti_msgoutstr[ptr + 4] = ti->ti_maxsynch.offset; return MSG_EXTEND_SYNCHLEN + 2; } static int scsi_low_msgfunc_wide(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; int ptr = ti->ti_msgoutlen; ti->ti_msgoutstr[ptr + 1] = MSG_EXTEND_WIDELEN; ti->ti_msgoutstr[ptr + 2] = MSG_EXTEND_WIDECODE; ti->ti_msgoutstr[ptr + 3] = ti->ti_width; return MSG_EXTEND_WIDELEN + 2; } static int scsi_low_msgfunc_identify(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; struct lun_info *li = slp->sl_Lnexus; struct slccb *cb = slp->sl_Qnexus; int ptr = ti->ti_msgoutlen; u_int8_t msg; msg = MSG_IDENTIFY; if (cb == NULL) { slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "MSGOUT: nexus unknown"); } else { if (scsi_low_is_disconnect_ok(cb) != 0) msg |= (MSG_IDENTIFY_DISCPRIV | li->li_lun); else msg |= li->li_lun; if (ti->ti_phase == PH_MSGOUT) { (*slp->sl_funcs->scsi_low_establish_lun_nexus) (slp); if (cb->ccb_tag == SCSI_LOW_UNKTAG) { (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp); } } } ti->ti_msgoutstr[ptr + 0] = msg; return 1; } static int scsi_low_msgfunc_abort(slp) struct scsi_low_softc *slp; { SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_ABORT); return 1; } static int scsi_low_msgfunc_qabort(slp) struct scsi_low_softc *slp; { SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_TERM); return 1; } static int scsi_low_msgfunc_reset(slp) struct scsi_low_softc *slp; { SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_RESET); return 1; } static int scsi_low_msgfunc_qtag(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; struct slccb *cb = slp->sl_Qnexus; int ptr = ti->ti_msgoutlen; if (cb == NULL || cb->ccb_tag == SCSI_LOW_UNKTAG) { ti->ti_msgoutstr[ptr + 0] = MSG_NOOP; return 1; } else { ti->ti_msgoutstr[ptr + 1] = (u_int8_t) cb->ccb_tag; if (ti->ti_phase == PH_MSGOUT) { (*slp->sl_funcs->scsi_low_establish_ccb_nexus) (slp); } } return 2; } /* * The following functions are called when targets give unexpected * responses in msgin (after msgout).
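 * Each handler backs the failed negotiation off to a safe fallback:
 * no disconnection, async transfer, 8-bit width, or untagged queuing.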
*/ static int scsi_low_errfunc_identify(slp, msgflags) struct scsi_low_softc *slp; u_int msgflags; { if (slp->sl_Lnexus != NULL) { slp->sl_Lnexus->li_cfgflags &= ~SCSI_LOW_DISC; scsi_low_calcf_lun(slp->sl_Lnexus); } return 0; } static int scsi_low_errfunc_synch(slp, msgflags) struct scsi_low_softc *slp; u_int msgflags; { struct targ_info *ti = slp->sl_Tnexus; MSGIN_PERIOD(ti) = 0; MSGIN_OFFSET(ti) = 0; scsi_low_synch(slp); return 0; } static int scsi_low_errfunc_wide(slp, msgflags) struct scsi_low_softc *slp; u_int msgflags; { struct targ_info *ti = slp->sl_Tnexus; MSGIN_WIDTHP(ti) = 0; scsi_low_wide(slp); return 0; } static int scsi_low_errfunc_qtag(slp, msgflags) struct scsi_low_softc *slp; u_int msgflags; { if ((msgflags & SCSI_LOW_MSG_REJECT) != 0) { if (slp->sl_Qnexus != NULL) { scsi_low_deactivate_qtag(slp->sl_Qnexus); } if (slp->sl_Lnexus != NULL) { slp->sl_Lnexus->li_cfgflags &= ~SCSI_LOW_QTAG; scsi_low_calcf_lun(slp->sl_Lnexus); } device_printf(slp->sl_dev, "scsi_low: qtag msg rejected\n"); } return 0; } int scsi_low_msgout(slp, ti, fl) struct scsi_low_softc *slp; struct targ_info *ti; u_int fl; { struct scsi_low_msgout_data *mdp; int len = 0; #ifdef SCSI_LOW_DIAGNOSTIC if (ti != slp->sl_Tnexus) { scsi_low_print(slp, NULL); panic("scsi_low_msgout: Target nexus inconsistent"); } #endif /* SCSI_LOW_DIAGNOSTIC */ slp->sl_ph_count ++; if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES) { device_printf(slp->sl_dev, "too many phase changes\n"); slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); } /* STEP I. * The scsi phase has changed. * Msgs asserted previously have been accepted by our target or * processed by scsi_low_msgin, * so clear all saved information. */ if ((fl & SCSI_LOW_MSGOUT_INIT) != 0) { ti->ti_omsgflags = 0; ti->ti_emsgflags = 0; } else if (slp->sl_atten == 0) { /* STEP II. * We did not assert attention, yet our target still requires * msgs. Resend the previous msgs. */ ti->ti_msgflags |= ti->ti_omsgflags; ti->ti_omsgflags = 0; #ifdef SCSI_LOW_DIAGNOSTIC device_printf(slp->sl_dev, "scsi_low_msgout: retry msgout\n"); #endif /* SCSI_LOW_DIAGNOSTIC */ } /* STEP III. * We have no msgs to send; send MSG_NOOP instead. */ if (scsi_low_is_msgout_continue(ti, 0) == 0) scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_NOOP, 0); /* STEP IV.
* Process all msgs */ ti->ti_msgoutlen = 0; slp->sl_clear_atten = 0; mdp = &scsi_low_msgout_data[0]; for ( ; mdp->md_flags != SCSI_LOW_MSG_ALL; mdp ++) { if ((ti->ti_msgflags & mdp->md_flags) != 0) { ti->ti_omsgflags |= mdp->md_flags; ti->ti_msgflags &= ~mdp->md_flags; ti->ti_emsgflags = mdp->md_flags; ti->ti_msgoutstr[ti->ti_msgoutlen] = mdp->md_msg; if (mdp->md_msgfunc != NULL) len = (*mdp->md_msgfunc) (slp); else len = 1; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_write(&ti->ti_log_msgout, &ti->ti_msgoutstr[ti->ti_msgoutlen], len); #endif /* SCSI_LOW_DIAGNOSTIC */ ti->ti_msgoutlen += len; if ((mdp->md_condition & MSG_RELEASE_ATN) != 0) { slp->sl_clear_atten = 1; break; } if ((fl & SCSI_LOW_MSGOUT_UNIFY) == 0 || ti->ti_msgflags == 0) break; if (ti->ti_msgoutlen >= SCSI_LOW_MAX_MSGLEN - 5) break; } } if (scsi_low_is_msgout_continue(ti, 0) == 0) slp->sl_clear_atten = 1; return ti->ti_msgoutlen; } /************************************************************** * msgin **************************************************************/ static int scsi_low_msginfunc_noop(slp) struct scsi_low_softc *slp; { return 0; } static int scsi_low_msginfunc_rejop(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; u_int8_t msg = ti->ti_msgin[0]; device_printf(slp->sl_dev, "MSGIN: msg 0x%x rejected\n", (u_int) msg); scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_cc(slp) struct scsi_low_softc *slp; { struct lun_info *li; SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_CMDC); /* validate status */ if (slp->sl_Qnexus == NULL) return ENOENT; slp->sl_Qnexus->ccb_sscp.scp_status = slp->sl_scp.scp_status; li = slp->sl_Lnexus; switch (slp->sl_scp.scp_status) { case ST_GOOD: li->li_maxnqio = li->li_maxnexus; break; case ST_CHKCOND: li->li_maxnqio = 0; if (li->li_qflags & SCSI_LOW_QFLAG_CA_QCLEAR) scsi_low_reset_nexus_lun(slp, li, 0); break; case ST_BUSY: li->li_maxnqio = 0; break; case ST_QUEFULL: if (li->li_maxnexus >= li->li_nqio) li->li_maxnexus = li->li_nqio - 1; li->li_maxnqio = li->li_maxnexus; break; case ST_INTERGOOD: case ST_INTERMET: slp->sl_error |= MSGERR; break; default: break; } return 0; } static int scsi_low_msginfunc_lcc(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *ncb, *cb; ti = slp->sl_Tnexus; li = slp->sl_Lnexus; if ((cb = slp->sl_Qnexus) == NULL) goto bad; cb->ccb_sscp.scp_status = slp->sl_scp.scp_status; switch (slp->sl_scp.scp_status) { case ST_INTERGOOD: case ST_INTERMET: li->li_maxnqio = li->li_maxnexus; break; default: slp->sl_error |= MSGERR; break; } if ((li->li_flags & SCSI_LOW_LINK) == 0) goto bad; cb->ccb_error |= slp->sl_error; if (cb->ccb_error != 0) goto bad; for (ncb = TAILQ_FIRST(&slp->sl_start); ncb != NULL; ncb = TAILQ_NEXT(ncb, ccb_chain)) { if (ncb->li == li) goto cmd_link_start; } bad: SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_LCTERM); scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return EIO; cmd_link_start: ncb->ccb_flags &= ~CCB_STARTQ; TAILQ_REMOVE(&slp->sl_start, ncb, ccb_chain); scsi_low_dealloc_qtag(ncb); ncb->ccb_tag = cb->ccb_tag; ncb->ccb_otag = cb->ccb_otag; cb->ccb_tag = SCSI_LOW_UNKTAG; cb->ccb_otag = SCSI_LOW_UNKTAG; if (scsi_low_done(slp, cb) == SCSI_LOW_DONE_RETRY) panic("%s: linked ccb retried", device_get_nameunit(slp->sl_dev)); slp->sl_Qnexus = ncb; slp->sl_ph_count = 0; ncb->ccb_error = 0; ncb->ccb_datalen = -1; ncb->ccb_scp.scp_status = ST_UNKNOWN; ncb->ccb_flags &= ~CCB_INTERNAL; scsi_low_init_msgsys(slp, ti); scsi_low_ccb_setup_cam(slp, ncb); if 
(ncb->ccb_tcmax < SCSI_LOW_MIN_TOUT) ncb->ccb_tcmax = SCSI_LOW_MIN_TOUT; ncb->ccb_tc = ncb->ccb_tcmax; /* setup saved scsi data pointer */ ncb->ccb_sscp = ncb->ccb_scp; slp->sl_scp = ncb->ccb_sscp; slp->sl_error = ncb->ccb_error; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_init(&ti->ti_log_msgin); scsi_low_msg_log_init(&ti->ti_log_msgout); #endif /* SCSI_LOW_DIAGNOSTIC */ return EJUSTRETURN; } static int scsi_low_msginfunc_disc(slp) struct scsi_low_softc *slp; { SCSI_LOW_SETUP_MSGPHASE(slp, MSGPH_DISC); return 0; } static int scsi_low_msginfunc_sdp(slp) struct scsi_low_softc *slp; { struct slccb *cb = slp->sl_Qnexus; if (cb != NULL) { cb->ccb_sscp.scp_datalen = slp->sl_scp.scp_datalen; cb->ccb_sscp.scp_data = slp->sl_scp.scp_data; } else scsi_low_assert_msg(slp, slp->sl_Tnexus, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_rp(slp) struct scsi_low_softc *slp; { if (slp->sl_Qnexus != NULL) slp->sl_scp = slp->sl_Qnexus->ccb_sscp; else scsi_low_assert_msg(slp, slp->sl_Tnexus, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_synch(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; u_int period = 0, offset = 0, speed; u_char *s; int error; if ((MSGIN_PERIOD(ti) >= ti->ti_maxsynch.period && MSGIN_OFFSET(ti) <= ti->ti_maxsynch.offset) || MSGIN_OFFSET(ti) == 0) { if ((offset = MSGIN_OFFSET(ti)) != 0) period = MSGIN_PERIOD(ti); s = offset ? "synchronous" : "async"; } else { /* XXX: * Target seems to be brain damaged. * Force async transfer. */ ti->ti_maxsynch.period = 0; ti->ti_maxsynch.offset = 0; device_printf(slp->sl_dev, "target brain damaged. async transfer\n"); return EINVAL; } ti->ti_maxsynch.period = period; ti->ti_maxsynch.offset = offset; error = (*slp->sl_funcs->scsi_low_msg) (slp, ti, SCSI_LOW_MSG_SYNCH); if (error != 0) { /* XXX: * Current period and offset are not acceptable * for our adapter. * The adapter changes max synch and max offset. */ device_printf(slp->sl_dev, "synch neg failed. retry synch msg neg ...\n"); return error; } ti->ti_osynch = ti->ti_maxsynch; if (offset > 0) { ti->ti_setup_msg_done |= SCSI_LOW_MSG_SYNCH; } /* inform data */ if ((slp->sl_show_result & SHOW_SYNCH_NEG) != 0) { #ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE struct slccb *cb = slp->sl_Qnexus; if (cb != NULL && (cb->ccb_flags & CCB_SENSE) != 0) return 0; #endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */ device_printf(slp->sl_dev, "(%d:*): <%s> offset %d period %dns ", ti->ti_id, s, offset, period * 4); if (period != 0) { speed = 1000 * 10 / (period * 4); printf("%d.%d M/s", speed / 10, speed % 10); } printf("\n"); } return 0; } static int scsi_low_wide(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; int error; ti->ti_width = MSGIN_WIDTHP(ti); error = (*slp->sl_funcs->scsi_low_msg) (slp, ti, SCSI_LOW_MSG_WIDE); if (error != 0) { /* XXX: * Current width is not acceptable for our adapter. * The adapter changes max width. */ device_printf(slp->sl_dev, "wide neg failed. 
retry wide msg neg ...\n"); return error; } ti->ti_owidth = ti->ti_width; if (ti->ti_width > SCSI_LOW_BUS_WIDTH_8) { ti->ti_setup_msg_done |= (SCSI_LOW_MSG_SYNCH | SCSI_LOW_MSG_WIDE); } /* inform data */ if ((slp->sl_show_result & SHOW_WIDE_NEG) != 0) { #ifdef SCSI_LOW_NEGOTIATE_BEFORE_SENSE struct slccb *cb = slp->sl_Qnexus; if (cb != NULL && (cb->ccb_flags & CCB_SENSE) != 0) return 0; #endif /* SCSI_LOW_NEGOTIATE_BEFORE_SENSE */ device_printf(slp->sl_dev, "(%d:*): transfer width %d bits\n", ti->ti_id, 1 << (3 + ti->ti_width)); } return 0; } static int scsi_low_msginfunc_simple_qtag(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; scsi_low_tag_t etag = (scsi_low_tag_t) ti->ti_msgin[1]; if (slp->sl_Qnexus != NULL) { if (slp->sl_Qnexus->ccb_tag != etag) { slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "MSGIN: qtag mismatch"); } } else if (scsi_low_establish_ccb(ti, slp->sl_Lnexus, etag) == NULL) { #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id)) return 0; #endif /* SCSI_LOW_DEBUG */ slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT_QTAG, 0); SCSI_LOW_INFO(slp, ti, "MSGIN: taged ccb not found"); } return 0; } static int scsi_low_msginfunc_i_wide_residue(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; struct slccb *cb = slp->sl_Qnexus; int res = (int) ti->ti_msgin[1]; if (cb == NULL || res <= 0 || (ti->ti_width == SCSI_LOW_BUS_WIDTH_16 && res > 1) || (ti->ti_width == SCSI_LOW_BUS_WIDTH_32 && res > 3)) return EINVAL; if (slp->sl_scp.scp_datalen + res > cb->ccb_scp.scp_datalen) return EINVAL; slp->sl_scp.scp_datalen += res; slp->sl_scp.scp_data -= res; scsi_low_data_finish(slp); return 0; } static int scsi_low_msginfunc_ext(slp) struct scsi_low_softc *slp; { struct slccb *cb = slp->sl_Qnexus; struct lun_info *li = slp->sl_Lnexus; struct targ_info *ti = slp->sl_Tnexus; int count, retry; u_int32_t *ptr; if (ti->ti_msginptr == 2) { ti->ti_msginlen = ti->ti_msgin[1] + 2; return 0; } switch (MKMSG_EXTEND(ti->ti_msgin[1], ti->ti_msgin[2])) { case MKMSG_EXTEND(MSG_EXTEND_MDPLEN, MSG_EXTEND_MDPCODE): if (cb == NULL) break; ptr = (u_int32_t *)(&ti->ti_msgin[3]); count = (int) htonl((long) (*ptr)); if(slp->sl_scp.scp_datalen - count < 0 || slp->sl_scp.scp_datalen - count > cb->ccb_scp.scp_datalen) break; slp->sl_scp.scp_datalen -= count; slp->sl_scp.scp_data += count; return 0; case MKMSG_EXTEND(MSG_EXTEND_SYNCHLEN, MSG_EXTEND_SYNCHCODE): if (li == NULL) break; retry = scsi_low_synch(slp); if (retry != 0 || (ti->ti_emsgflags & SCSI_LOW_MSG_SYNCH) == 0) scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_SYNCH, 0); #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id)) { scsi_low_test_atten(slp, ti, SCSI_LOW_MSG_SYNCH); } #endif /* SCSI_LOW_DEBUG */ return 0; case MKMSG_EXTEND(MSG_EXTEND_WIDELEN, MSG_EXTEND_WIDECODE): if (li == NULL) break; retry = scsi_low_wide(slp); if (retry != 0 || (ti->ti_emsgflags & SCSI_LOW_MSG_WIDE) == 0) scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_WIDE, 0); return 0; default: break; } scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return EINVAL; } static int scsi_low_msginfunc_parity(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; /* only I -> T, invalid! 
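 * MESSAGE PARITY ERROR only flows from initiator to target, so
 * reject it when a target sends it to us.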
*/ scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); return 0; } static int scsi_low_msginfunc_msg_reject(slp) struct scsi_low_softc *slp; { struct targ_info *ti = slp->sl_Tnexus; struct scsi_low_msgout_data *mdp; u_int msgflags; if (ti->ti_emsgflags != 0) { device_printf(slp->sl_dev, "msg flags [0x%x] rejected\n", ti->ti_emsgflags); msgflags = SCSI_LOW_MSG_REJECT; mdp = &scsi_low_msgout_data[0]; for ( ; mdp->md_flags != SCSI_LOW_MSG_ALL; mdp ++) { if ((ti->ti_emsgflags & mdp->md_flags) != 0) { ti->ti_emsgflags &= ~mdp->md_flags; if (mdp->md_errfunc != NULL) (*mdp->md_errfunc) (slp, msgflags); break; } } return 0; } else { SCSI_LOW_INFO(slp, ti, "MSGIN: rejected msg not found"); slp->sl_error |= MSGERR; } return EINVAL; } int scsi_low_msgin(slp, ti, c) struct scsi_low_softc *slp; struct targ_info *ti; u_int c; { struct scsi_low_msgin_data *sdp; struct lun_info *li; u_int8_t msg; #ifdef SCSI_LOW_DIAGNOSTIC if (ti != slp->sl_Tnexus) { scsi_low_print(slp, NULL); panic("scsi_low_msgin: Target nexus inconsistent"); } #endif /* SCSI_LOW_DIAGNOSTIC */ /* * The phase has changed; clear the pointer. */ if (ti->ti_ophase != ti->ti_phase) { MSGINPTR_CLR(ti); ti->ti_msgin_parity_error = 0; slp->sl_ph_count ++; if (slp->sl_ph_count > SCSI_LOW_MAX_PHCHANGES) { device_printf(slp->sl_dev, "too many phase changes\n"); slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); } } /* * Store the current message byte into the buffer and * wait for the current msg to complete. */ ti->ti_msgin[ti->ti_msginptr ++] = (u_int8_t) c; if (ti->ti_msginptr >= SCSI_LOW_MAX_MSGLEN) { ti->ti_msginptr = SCSI_LOW_MAX_MSGLEN - 1; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_REJECT, 0); } /* * Check parity errors. */ if ((c & SCSI_LOW_DATA_PE) != 0) { ti->ti_msgin_parity_error ++; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_PARITY, 0); goto out; } if (ti->ti_msgin_parity_error != 0) goto out; /* * Calculate the message length. */ msg = ti->ti_msgin[0]; if (msg < MSGIN_DATA_LAST) sdp = &scsi_low_msgin_data[msg]; else sdp = &scsi_low_msgin_data[MSGIN_DATA_LAST]; if (ti->ti_msginlen == 0) { ti->ti_msginlen = sdp->md_len; } /* * Check completion. */ if (ti->ti_msginptr < ti->ti_msginlen) return EJUSTRETURN; /* * Process the msg. */ if ((msg & MSG_IDENTIFY) == 0) { if (((*sdp->md_msgfunc) (slp)) == EJUSTRETURN) return EJUSTRETURN; } else { li = slp->sl_Lnexus; if (li == NULL) { li = scsi_low_alloc_li(ti, MSGCMD_LUN(msg), 0); if (li == NULL) goto badlun; slp->sl_Lnexus = li; (*slp->sl_funcs->scsi_low_establish_lun_nexus) (slp); } else { if (MSGCMD_LUN(msg) != li->li_lun) goto badlun; } if (slp->sl_Qnexus == NULL && li->li_nqio == 0) { if (!scsi_low_establish_ccb(ti, li, SCSI_LOW_UNKTAG)) { #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_NEXUS_CHECK, ti->ti_id) != 0) { goto out; } #endif /* SCSI_LOW_DEBUG */ goto badlun; } } } goto out; /* * Msg process completed, reset msgin pointer and assert ATN if desired.
*/ badlun: slp->sl_error |= FATALIO; scsi_low_assert_msg(slp, ti, SCSI_LOW_MSG_ABORT, 0); SCSI_LOW_INFO(slp, ti, "MSGIN: identify wrong"); out: if (ti->ti_msginptr < ti->ti_msginlen) return EJUSTRETURN; #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_write(&ti->ti_log_msgin, &ti->ti_msgin[0], ti->ti_msginlen); #endif /* SCSI_LOW_DIAGNOSTIC */ MSGINPTR_CLR(ti); return 0; } /********************************************************** * disconnect **********************************************************/ int scsi_low_disconnected(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct slccb *cb = slp->sl_Qnexus; /* check phase completion */ switch (slp->sl_msgphase) { case MSGPH_RESET: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); scsi_low_reset_nexus_target(slp, slp->sl_Tnexus, 0); goto io_resume; case MSGPH_ABORT: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); scsi_low_reset_nexus_lun(slp, slp->sl_Lnexus, 0); goto io_resume; case MSGPH_TERM: scsi_low_statusin(slp, slp->sl_Tnexus, ST_GOOD); scsi_low_msginfunc_cc(slp); goto io_resume; case MSGPH_DISC: if (cb != NULL) { struct lun_info *li; li = cb->li; TAILQ_INSERT_TAIL(&li->li_discq, cb, ccb_chain); cb->ccb_flags |= CCB_DISCQ; cb->ccb_error |= slp->sl_error; li->li_disc ++; ti->ti_disc ++; slp->sl_disc ++; } #ifdef SCSI_LOW_STATICS scsi_low_statics.nexus_disconnected ++; #endif /* SCSI_LOW_STATICS */ #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_DISC, ti->ti_id) != 0) { printf("## SCSI_LOW_DISCONNECTED ===============\n"); scsi_low_print(slp, NULL); } #endif /* SCSI_LOW_DEBUG */ break; case MSGPH_NULL: slp->sl_error |= FATALIO; if (ti->ti_phase == PH_SELSTART) slp->sl_error |= SELTIMEOUTIO; else slp->sl_error |= UBFERR; /* fall through */ case MSGPH_LCTERM: case MSGPH_CMDC: io_resume: if (cb == NULL) break; #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_TEST_GO(SCSI_LOW_ATTEN_CHECK, ti->ti_id)) { if (cb->ccb_omsgoutflag == SCSI_LOW_MSG_NOOP && (cb->ccb_msgoutflag != 0 || (ti->ti_msgflags & SCSI_LOW_MSG_NOOP))) { scsi_low_info(slp, ti, "ATTEN CHECK FAILED"); } } #endif /* SCSI_LOW_DEBUG */ cb->ccb_error |= slp->sl_error; if (scsi_low_done(slp, cb) == SCSI_LOW_DONE_RETRY) { cb->ccb_flags |= CCB_STARTQ; TAILQ_INSERT_HEAD(&slp->sl_start, cb, ccb_chain); } break; } scsi_low_bus_release(slp, ti); scsi_low_start(slp); return 1; } /********************************************************** * TAG operations **********************************************************/ static int scsi_low_alloc_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; scsi_low_tag_t etag; if (cb->ccb_otag != SCSI_LOW_UNKTAG) return 0; #ifndef SCSI_LOW_ALT_QTAG_ALLOCATE etag = ffs(li->li_qtagbits); if (etag == 0) return ENOSPC; li->li_qtagbits &= ~(1 << (etag - 1)); cb->ccb_otag = etag; return 0; #else /* SCSI_LOW_ALT_QTAG_ALLOCATE */ for (etag = li->li_qd ; li->li_qd < SCSI_LOW_MAXNEXUS; li->li_qd ++) if (li->li_qtagarray[li->li_qd] == 0) goto found; for (li->li_qd = 0; li->li_qd < etag; li->li_qd ++) if (li->li_qtagarray[li->li_qd] == 0) goto found; return ENOSPC; found: li->li_qtagarray[li->li_qd] ++; cb->ccb_otag = (li->li_qd ++); return 0; #endif /* SCSI_LOW_ALT_QTAG_ALLOCATE */ } static int scsi_low_dealloc_qtag(cb) struct slccb *cb; { struct lun_info *li = cb->li; scsi_low_tag_t etag; if (cb->ccb_otag == SCSI_LOW_UNKTAG) return 0; #ifndef SCSI_LOW_ALT_QTAG_ALLOCATE etag = cb->ccb_otag - 1; #ifdef SCSI_LOW_DIAGNOSTIC if (etag >= sizeof(li->li_qtagbits) * NBBY) panic("scsi_low_dealloc_tag: 
illegal tag"); #endif /* SCSI_LOW_DIAGNOSTIC */ li->li_qtagbits |= (1 << etag); #else /* SCSI_LOW_ALT_QTAG_ALLOCATE */ etag = cb->ccb_otag; #ifdef SCSI_LOW_DIAGNOSTIC if (etag >= SCSI_LOW_MAXNEXUS) panic("scsi_low_dealloc_tag: illegal tag"); #endif /* SCSI_LOW_DIAGNOSTIC */ li->li_qtagarray[etag] --; #endif /* SCSI_LOW_ALT_QTAG_ALLOCATE */ cb->ccb_otag = SCSI_LOW_UNKTAG; return 0; } static struct slccb * scsi_low_revoke_ccb(slp, cb, fdone) struct scsi_low_softc *slp; struct slccb *cb; int fdone; { struct targ_info *ti = cb->ti; struct lun_info *li = cb->li; #ifdef SCSI_LOW_DIAGNOSTIC if ((cb->ccb_flags & (CCB_STARTQ | CCB_DISCQ)) == (CCB_STARTQ | CCB_DISCQ)) { panic("%s: ccb in both queue", device_get_nameunit(slp->sl_dev)); } #endif /* SCSI_LOW_DIAGNOSTIC */ if ((cb->ccb_flags & CCB_STARTQ) != 0) { TAILQ_REMOVE(&slp->sl_start, cb, ccb_chain); } if ((cb->ccb_flags & CCB_DISCQ) != 0) { TAILQ_REMOVE(&li->li_discq, cb, ccb_chain); li->li_disc --; ti->ti_disc --; slp->sl_disc --; } cb->ccb_flags &= ~(CCB_STARTQ | CCB_DISCQ | CCB_SENSE | CCB_CLEARQ | CCB_INTERNAL); if (fdone != 0 && (cb->ccb_rcnt ++ >= slp->sl_max_retry || (cb->ccb_flags & CCB_NORETRY) != 0)) { cb->ccb_error |= FATALIO; cb->ccb_flags &= ~CCB_AUTOSENSE; if (scsi_low_done(slp, cb) != SCSI_LOW_DONE_COMPLETE) panic("%s: done ccb retried", device_get_nameunit(slp->sl_dev)); return NULL; } else { cb->ccb_error |= PENDINGIO; scsi_low_deactivate_qtag(cb); scsi_low_ccb_message_retry(cb); cb->ccb_tc = cb->ccb_tcmax = SCSI_LOW_MIN_TOUT; return cb; } } static void scsi_low_reset_nexus_lun(slp, li, fdone) struct scsi_low_softc *slp; struct lun_info *li; int fdone; { struct slccb *cb, *ncb, *ecb; if (li == NULL) return; ecb = NULL; for (cb = TAILQ_FIRST(&li->li_discq); cb != NULL; cb = ncb) { ncb = TAILQ_NEXT(cb, ccb_chain); cb = scsi_low_revoke_ccb(slp, cb, fdone); if (cb != NULL) { /* * presumely keep ordering of io */ cb->ccb_flags |= CCB_STARTQ; if (ecb == NULL) { TAILQ_INSERT_HEAD(&slp->sl_start,\ cb, ccb_chain); } else { TAILQ_INSERT_AFTER(&slp->sl_start,\ ecb, cb, ccb_chain); } ecb = cb; } } } /************************************************************** * Qurik setup **************************************************************/ static void scsi_low_calcf_lun(li) struct lun_info *li; { struct targ_info *ti = li->li_ti; struct scsi_low_softc *slp = ti->ti_sc; u_int cfgflags, diskflags; if (li->li_flags_valid == SCSI_LOW_LUN_FLAGS_ALL_VALID) cfgflags = li->li_cfgflags; else cfgflags = 0; diskflags = li->li_diskflags & li->li_quirks; /* disconnect */ li->li_flags &= ~SCSI_LOW_DISC; if ((slp->sl_cfgflags & CFG_NODISC) == 0 && (diskflags & SCSI_LOW_DISK_DISC) != 0 && (cfgflags & SCSI_LOW_DISC) != 0) li->li_flags |= SCSI_LOW_DISC; /* parity */ li->li_flags |= SCSI_LOW_NOPARITY; if ((slp->sl_cfgflags & CFG_NOPARITY) == 0 && (diskflags & SCSI_LOW_DISK_PARITY) != 0 && (cfgflags & SCSI_LOW_NOPARITY) == 0) li->li_flags &= ~SCSI_LOW_NOPARITY; /* qtag */ if ((slp->sl_cfgflags & CFG_NOQTAG) == 0 && (cfgflags & SCSI_LOW_QTAG) != 0 && (diskflags & SCSI_LOW_DISK_QTAG) != 0) { li->li_flags |= SCSI_LOW_QTAG; li->li_maxnexus = SCSI_LOW_MAXNEXUS; li->li_maxnqio = li->li_maxnexus; } else { li->li_flags &= ~SCSI_LOW_QTAG; li->li_maxnexus = 0; li->li_maxnqio = li->li_maxnexus; } /* cmd link */ li->li_flags &= ~SCSI_LOW_LINK; if ((cfgflags & SCSI_LOW_LINK) != 0 && (diskflags & SCSI_LOW_DISK_LINK) != 0) li->li_flags |= SCSI_LOW_LINK; /* compatible flags */ li->li_flags &= ~SCSI_LOW_SYNC; if (ti->ti_maxsynch.offset > 0) li->li_flags |= SCSI_LOW_SYNC; #ifdef 
SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_CALCF, ti->ti_id) != 0) { scsi_low_calcf_show(li); } #endif /* SCSI_LOW_DEBUG */ } static void scsi_low_calcf_target(ti) struct targ_info *ti; { struct scsi_low_softc *slp = ti->ti_sc; u_int offset, period, diskflags; diskflags = ti->ti_diskflags & ti->ti_quirks; /* synch */ if ((slp->sl_cfgflags & CFG_ASYNC) == 0 && (diskflags & SCSI_LOW_DISK_SYNC) != 0) { offset = ti->ti_maxsynch.offset; period = ti->ti_maxsynch.period; if (offset == 0 || period == 0) offset = period = 0; } else { offset = period = 0; } ti->ti_maxsynch.offset = offset; ti->ti_maxsynch.period = period; /* wide */ if ((diskflags & SCSI_LOW_DISK_WIDE_32) == 0 && ti->ti_width > SCSI_LOW_BUS_WIDTH_16) ti->ti_width = SCSI_LOW_BUS_WIDTH_16; if ((diskflags & SCSI_LOW_DISK_WIDE_16) == 0 && ti->ti_width > SCSI_LOW_BUS_WIDTH_8) ti->ti_width = SCSI_LOW_BUS_WIDTH_8; if (ti->ti_flags_valid == SCSI_LOW_TARG_FLAGS_ALL_VALID) { if (ti->ti_maxsynch.offset != ti->ti_osynch.offset || ti->ti_maxsynch.period != ti->ti_osynch.period) ti->ti_setup_msg |= SCSI_LOW_MSG_SYNCH; if (ti->ti_width != ti->ti_owidth) ti->ti_setup_msg |= (SCSI_LOW_MSG_WIDE | SCSI_LOW_MSG_SYNCH); ti->ti_osynch = ti->ti_maxsynch; ti->ti_owidth = ti->ti_width; } #ifdef SCSI_LOW_DEBUG if (SCSI_LOW_DEBUG_GO(SCSI_LOW_DEBUG_CALCF, ti->ti_id) != 0) { device_printf(slp->sl_dev, "(%d:*): max period(%dns) offset(%d) width(%d)\n", ti->ti_id, ti->ti_maxsynch.period * 4, ti->ti_maxsynch.offset, ti->ti_width); } #endif /* SCSI_LOW_DEBUG */ } static void scsi_low_calcf_show(li) struct lun_info *li; { struct targ_info *ti = li->li_ti; struct scsi_low_softc *slp = ti->ti_sc; device_printf(slp->sl_dev, "(%d:%d): period(%d ns) offset(%d) width(%d) flags 0x%b\n", ti->ti_id, li->li_lun, ti->ti_maxsynch.period * 4, ti->ti_maxsynch.offset, ti->ti_width, li->li_flags, SCSI_LOW_BITS); } #ifdef SCSI_LOW_START_UP_CHECK /************************************************************** * scsi world start up **************************************************************/ static int scsi_low_poll(struct scsi_low_softc *, struct slccb *); static int scsi_low_start_up(slp) struct scsi_low_softc *slp; { struct targ_info *ti; struct lun_info *li; struct slccb *cb; int target, lun; device_printf(slp->sl_dev, "scsi_low: probing all devices ....\n"); for (target = 0; target < slp->sl_ntargs; target ++) { if (target == slp->sl_hostid) { if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { device_printf(slp->sl_dev, "scsi_low: target %d (host card)\n", target); } continue; } if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { device_printf(slp->sl_dev, "scsi_low: target %d lun ", target); } ti = slp->sl_ti[target]; for (lun = 0; lun < slp->sl_nluns; lun ++) { if ((cb = SCSI_LOW_ALLOC_CCB(1)) == NULL) break; cb->osdep = NULL; cb->bp = NULL; li = scsi_low_alloc_li(ti, lun, 1); scsi_low_enqueue(slp, ti, li, cb, CCB_AUTOSENSE | CCB_POLLED, 0); scsi_low_poll(slp, cb); if (li->li_state != SCSI_LOW_LUN_OK) break; if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { printf("%d ", lun); } } if ((slp->sl_show_result & SHOW_PROBE_RES) != 0) { printf("\n"); } } return 0; } static int scsi_low_poll(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { int tcount; tcount = 0; while (slp->sl_nio > 0) { DELAY((1000 * 1000) / SCSI_LOW_POLL_HZ); (*slp->sl_funcs->scsi_low_poll) (slp); if (tcount ++ < SCSI_LOW_POLL_HZ / SCSI_LOW_TIMEOUT_HZ) continue; tcount = 0; scsi_low_timeout_check(slp); } return 0; } #endif /* SCSI_LOW_START_UP_CHECK */ 
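/*
 * An illustrative sketch, kept under #if 0 so it is never compiled:
 * a minimal standalone model of the ffs(3)-based tag bitmap that
 * scsi_low_alloc_qtag()/scsi_low_dealloc_qtag() use in the non-ALT
 * case above.  Bit N set in the mask means tag N + 1 is free; the
 * driver returns ENOSPC when no bit is set.  The names below are
 * hypothetical and not part of the driver.
 */
#if 0
static u_int example_qtagbits = ~0;		/* all tags free */

static int
example_alloc_tag(void)
{
	int tag = ffs(example_qtagbits);	/* lowest set bit + 1, 0 if none */

	if (tag == 0)
		return (0);			/* no free tag (ENOSPC case) */
	example_qtagbits &= ~(1 << (tag - 1));	/* mark the tag busy */
	return (tag);
}

static void
example_free_tag(int tag)
{
	example_qtagbits |= (1 << (tag - 1));	/* mark the tag free again */
}
#endif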
/********************************************************** * DEBUG SECTION **********************************************************/ #ifdef SCSI_LOW_DEBUG static void scsi_low_test_abort(slp, ti, li) struct scsi_low_softc *slp; struct targ_info *ti; struct lun_info *li; { struct slccb *acb; if (li->li_disc > 1) { acb = TAILQ_FIRST(&li->li_discq); if (scsi_low_abort_ccb(slp, acb) == 0) { device_printf(slp->sl_dev, "aborting ccb(0x%lx) start\n", (u_long) acb); } } } static void scsi_low_test_atten(slp, ti, msg) struct scsi_low_softc *slp; struct targ_info *ti; u_int msg; { if (slp->sl_ph_count < SCSI_LOW_MAX_ATTEN_CHECK) scsi_low_assert_msg(slp, ti, msg, 0); else device_printf(slp->sl_dev, "atten check OK\n"); } static void scsi_low_test_cmdlnk(slp, cb) struct scsi_low_softc *slp; struct slccb *cb; { #define SCSI_LOW_CMDLNK_NOK (CCB_INTERNAL | CCB_SENSE | CCB_CLEARQ) if ((cb->ccb_flags & SCSI_LOW_CMDLNK_NOK) != 0) return; memcpy(cb->ccb_scsi_cmd, slp->sl_scp.scp_cmd, slp->sl_scp.scp_cmdlen); cb->ccb_scsi_cmd[slp->sl_scp.scp_cmdlen - 1] |= 1; slp->sl_scp.scp_cmd = cb->ccb_scsi_cmd; } #endif /* SCSI_LOW_DEBUG */ /* static */ void scsi_low_info(slp, ti, s) struct scsi_low_softc *slp; struct targ_info *ti; u_char *s; { if (slp == NULL) slp = LIST_FIRST(&sl_tab); if (s == NULL) s = "no message"; printf(">>>>> SCSI_LOW_INFO(0x%lx): %s\n", (u_long) slp->sl_Tnexus, s); if (ti == NULL) { TAILQ_FOREACH(ti, &slp->sl_titab, ti_chain) { scsi_low_print(slp, ti); } } else { scsi_low_print(slp, ti); } } static u_char *phase[] = { "FREE", "ARBSTART", "SELSTART", "SELECTED", "CMDOUT", "DATA", "MSGIN", "MSGOUT", "STATIN", "DISC", "RESEL" }; void scsi_low_print(slp, ti) struct scsi_low_softc *slp; struct targ_info *ti; { struct lun_info *li; struct slccb *cb; struct sc_p *sp; if (ti == NULL || ti == slp->sl_Tnexus) { ti = slp->sl_Tnexus; li = slp->sl_Lnexus; cb = slp->sl_Qnexus; } else { li = LIST_FIRST(&ti->ti_litab); cb = TAILQ_FIRST(&li->li_discq); } sp = &slp->sl_scp; device_printf(slp->sl_dev, "=== NEXUS T(0x%lx) L(0x%lx) Q(0x%lx) NIO(%d) ===\n", (u_long) ti, (u_long) li, (u_long) cb, slp->sl_nio); /* target stat */ if (ti != NULL) { u_int flags = 0, maxnqio = 0, nqio = 0; int lun = CAM_LUN_WILDCARD; if (li != NULL) { lun = li->li_lun; flags = li->li_flags; maxnqio = li->li_maxnqio; nqio = li->li_nqio; } device_printf(slp->sl_dev, "(%d:%d) ph<%s> => ph<%s> DISC(%d) QIO(%d:%d)\n", ti->ti_id, lun, phase[(int) ti->ti_ophase], phase[(int) ti->ti_phase], ti->ti_disc, nqio, maxnqio); if (cb != NULL) { printf("CCB: cmd[0] 0x%x clen 0x%x dlen 0x%x<0x%x stat 0x%x err %b\n", (u_int) cb->ccb_scp.scp_cmd[0], cb->ccb_scp.scp_cmdlen, cb->ccb_datalen, cb->ccb_scp.scp_datalen, (u_int) cb->ccb_sscp.scp_status, cb->ccb_error, SCSI_LOW_ERRORBITS); } printf("MSGIN: ptr(%x) [%x][%x][%x][%x][%x] attention: %d\n", (u_int) (ti->ti_msginptr), (u_int) (ti->ti_msgin[0]), (u_int) (ti->ti_msgin[1]), (u_int) (ti->ti_msgin[2]), (u_int) (ti->ti_msgin[3]), (u_int) (ti->ti_msgin[4]), slp->sl_atten); printf("MSGOUT: msgflags 0x%x [%x][%x][%x][%x][%x] msgoutlen %d C_FLAGS: %b\n", (u_int) ti->ti_msgflags, (u_int) (ti->ti_msgoutstr[0]), (u_int) (ti->ti_msgoutstr[1]), (u_int) (ti->ti_msgoutstr[2]), (u_int) (ti->ti_msgoutstr[3]), (u_int) (ti->ti_msgoutstr[4]), ti->ti_msgoutlen, flags, SCSI_LOW_BITS); #ifdef SCSI_LOW_DIAGNOSTIC scsi_low_msg_log_show(&ti->ti_log_msgin, "MIN LOG ", 2); scsi_low_msg_log_show(&ti->ti_log_msgout, "MOUT LOG", 2); #endif /* SCSI_LOW_DIAGNOSTIC */ } printf("SCB: daddr 0x%lx dlen 0x%x stat 0x%x err %b\n", (u_long) 
sp->scp_data, sp->scp_datalen, (u_int) sp->scp_status, slp->sl_error, SCSI_LOW_ERRORBITS); } Index: stable/11/sys/dev/advansys/advansys.c =================================================================== --- stable/11/sys/dev/advansys/advansys.c (revision 314380) +++ stable/11/sys/dev/advansys/advansys.c (revision 314381) @@ -1,1409 +1,1405 @@ /*- * Generic driver for the Advanced Systems Inc. SCSI controllers * Product specific probe and attach routines can be found in: * * i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852 * i386/eisa/adv_eisa.c ABP742, ABP752 * pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U, * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA, * ABP970, ABP970U * * Copyright (c) 1996-2000 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Ported from: * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-1997 Advanced System Products, Inc. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void adv_action(struct cam_sim *sim, union ccb *ccb); static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error); static void adv_intr_locked(struct adv_softc *adv); static void adv_poll(struct cam_sim *sim); static void adv_run_doneq(struct adv_softc *adv); static struct adv_ccb_info * adv_alloc_ccb_info(struct adv_softc *adv); static void adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo); static __inline struct adv_ccb_info * adv_get_ccb_info(struct adv_softc *adv); static __inline void adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo); static __inline void adv_set_state(struct adv_softc *adv, adv_state state); static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb); static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb); static __inline struct adv_ccb_info * adv_get_ccb_info(struct adv_softc *adv) { struct adv_ccb_info *cinfo; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); } else { cinfo = adv_alloc_ccb_info(adv); } return (cinfo); } static __inline void adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) { if (!dumping) mtx_assert(&adv->lock, MA_OWNED); cinfo->state = ACCB_FREE; SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links); } static __inline void adv_set_state(struct adv_softc *adv, adv_state state) { if (adv->state == 0) xpt_freeze_simq(adv->sim, /*count*/1); adv->state |= state; } static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb) { if (adv->state != 0) adv_clear_state_really(adv, ccb); } static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb) { if (!dumping) mtx_assert(&adv->lock, MA_OWNED); if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0) adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK); if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) { int openings; openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q; if (openings >= adv->openings_needed) { adv->state &= ~ADV_RESOURCE_SHORTAGE; adv->openings_needed = 0; } } if ((adv->state & ADV_IN_TIMEOUT) != 0) { struct adv_ccb_info *cinfo; cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) { struct ccb_hdr *ccb_h; /* * We now traverse our list of pending CCBs * and reinstate their timeouts. 
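 * (These callouts were stopped by adv_timeout() when recovery began.)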
*/ ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo = ccb_h->ccb_cinfo_ptr; callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, ccb_h, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } adv->state &= ~ADV_IN_TIMEOUT; device_printf(adv->dev, "No longer in timeout\n"); } } if (adv->state == 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } void adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t* physaddr; physaddr = (bus_addr_t*)arg; *physaddr = segs->ds_addr; } static void adv_action(struct cam_sim *sim, union ccb *ccb) { struct adv_softc *adv; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n")); adv = (struct adv_softc *)cam_sim_softc(sim); mtx_assert(&adv->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_hdr *ccb_h; struct ccb_scsiio *csio; struct adv_ccb_info *cinfo; int error; ccb_h = &ccb->ccb_h; csio = &ccb->csio; cinfo = adv_get_ccb_info(adv); if (cinfo == NULL) panic("XXX Handle CCB info error!!!"); ccb_h->ccb_cinfo_ptr = cinfo; cinfo->ccb = ccb; error = bus_dmamap_load_ccb(adv->buffer_dmat, cinfo->dmamap, ccb, adv_execute_ccb, csio, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller * queue until our mapping is returned. */ adv_set_state(adv, ADV_BUSDMA_BLOCK); } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ - case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; target_bit_vector targ_mask; struct adv_transinfo *tconf; u_int update_type; cts = &ccb->cts; targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); update_type = 0; /* * The user must specify which type of settings he wishes * to change. 
*/ if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; update_type |= ADV_TRANS_GOAL; } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].user; update_type |= ADV_TRANS_USER; } else { ccb->ccb_h.status = CAM_REQ_INVALID; break; } scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((update_type & ADV_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) adv->disc_enable |= targ_mask; else adv->disc_enable &= ~targ_mask; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->cmd_qng_enabled |= targ_mask; else adv->cmd_qng_enabled &= ~targ_mask; } } if ((update_type & ADV_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) adv->user_disc_enable |= targ_mask; else adv->user_disc_enable &= ~targ_mask; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->user_cmd_qng_enabled |= targ_mask; else adv->user_cmd_qng_enabled &= ~targ_mask; } } /* * If the user specifies either the sync rate, or offset, * but not both, the unspecified parameter defaults to its * current value in transfer negotiations. */ if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { /* * If the user provided a sync rate but no offset, * use the current offset. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = tconf->offset; /* * If the user provided an offset but no sync rate, * use the current sync rate. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = tconf->period; adv_period_offset_to_sdtr(adv, &spi->sync_period, &spi->sync_offset, cts->ccb_h.target_id); adv_set_syncrate(adv, /*struct cam_path */NULL, cts->ccb_h.target_id, spi->sync_period, spi->sync_offset, update_type); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; struct adv_transinfo *tconf; target_bit_vector target_mask; cts = &ccb->cts; target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; if ((adv->disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { tconf = &adv->tinfo[cts->ccb_h.target_id].user; if ((adv->user_disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->user_cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tconf->period; spi->sync_offset = tconf->offset; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case
XPT_CALC_GEOMETRY: { int extended; extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0; cam_calc_geometry(&ccb->ccg, extended); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { adv_stop_execution(adv); adv_reset_bus(adv, /*initiate_reset*/TRUE); adv_start_execution(adv); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = adv->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * Currently, the output of bus_dmamap_load suits our needs just * fine, but should it change, we'd need to do something here. */ #define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs) static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error) { struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; struct cam_sim *sim; struct adv_softc *adv; struct adv_ccb_info *cinfo; struct adv_scsi_q scsiq; struct adv_sg_head sghead; csio = (struct ccb_scsiio *)arg; ccb_h = &csio->ccb_h; sim = xpt_path_sim(ccb_h->path); adv = (struct adv_softc *)cam_sim_softc(sim); cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); /* * Setup our done routine to release the simq on * the next ccb that completes. */ if ((adv->state & ADV_BUSDMA_BLOCK) != 0) adv->state |= ADV_BUSDMA_BLOCK_CLEARED; if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { if ((ccb_h->flags & CAM_CDB_PHYS) == 0) { /* XXX Need phystovirt!!!! */ /* How about pmap_kenter???
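 * Either way the CDB pointer is currently passed through unchanged.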
*/ scsiq.cdbptr = csio->cdb_io.cdb_ptr; } else { scsiq.cdbptr = csio->cdb_io.cdb_ptr; } } else { scsiq.cdbptr = csio->cdb_io.cdb_bytes; } /* * Build up the request */ scsiq.q1.status = 0; scsiq.q1.q_no = 0; scsiq.q1.cntl = 0; scsiq.q1.sg_queue_cnt = 0; scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id); scsiq.q1.target_lun = ccb_h->target_lun; scsiq.q1.sense_len = csio->sense_len; scsiq.q1.extra_bytes = 0; scsiq.q2.ccb_index = cinfo - adv->ccb_infos; scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id, ccb_h->target_lun); scsiq.q2.flag = 0; scsiq.q2.cdb_len = csio->cdb_len; if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0) scsiq.q2.tag_code = csio->tag_action; else scsiq.q2.tag_code = 0; scsiq.q2.vm_id = 0; if (nsegments != 0) { bus_dmasync_op_t op; scsiq.q1.data_addr = dm_segs->ds_addr; scsiq.q1.data_cnt = dm_segs->ds_len; if (nsegments > 1) { scsiq.q1.cntl |= QC_SG_HEAD; sghead.entry_cnt = sghead.entry_to_copy = nsegments; sghead.res = 0; sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs); scsiq.sg_head = &sghead; } else { scsiq.sg_head = NULL; } if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); } else { scsiq.q1.data_addr = 0; scsiq.q1.data_cnt = 0; scsiq.sg_head = NULL; } /* * Last time we need to check if this SCB needs to * be aborted. */ if (ccb_h->status != CAM_REQ_INPROG) { if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) { /* Temporary resource shortage */ adv_set_state(adv, ADV_RESOURCE_SHORTAGE); if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); csio->ccb_h.status = CAM_REQUEUE_REQ; adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } cinfo->state |= ACCB_ACTIVE; ccb_h->status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le); /* Schedule our timeout */ callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, csio, 0); } static struct adv_ccb_info * adv_alloc_ccb_info(struct adv_softc *adv) { int error; struct adv_ccb_info *cinfo; cinfo = &adv->ccb_infos[adv->ccb_infos_allocated]; cinfo->state = ACCB_FREE; callout_init_mtx(&cinfo->timer, &adv->lock, 0); error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0, &cinfo->dmamap); if (error != 0) { device_printf(adv->dev, "Unable to allocate CCB info " "dmamap - error %d\n", error); return (NULL); } adv->ccb_infos_allocated++; return (cinfo); } static void adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) { callout_drain(&cinfo->timer); bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap); } void adv_timeout(void *arg) { union ccb *ccb; struct adv_softc *adv; struct adv_ccb_info *cinfo, *cinfo2; ccb = (union ccb *)arg; adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc; cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; mtx_assert(&adv->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("Timed out\n"); /* Have we been taken care of already?? 
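 * A completion racing with this timeout leaves cinfo NULL or ACCB_FREE.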
*/ if (cinfo == NULL || cinfo->state == ACCB_FREE) { return; } adv_stop_execution(adv); if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) { struct ccb_hdr *ccb_h; /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ adv_set_state(adv, ADV_IN_TIMEOUT); /* This CCB is the CCB representing our recovery actions */ cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED; ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo2 = ccb_h->ccb_cinfo_ptr; callout_stop(&cinfo2->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } /* XXX Should send a BDR */ /* Attempt an abort as our first tack */ xpt_print_path(ccb->ccb_h.path); printf("Attempting abort\n"); adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb, CAM_CMD_TIMEOUT, /*queued_only*/FALSE); callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb); } else { /* Our attempt to perform an abort failed, go for a reset */ xpt_print_path(ccb->ccb_h.path); printf("Resetting bus\n"); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; adv_reset_bus(adv, /*initiate_reset*/TRUE); } adv_start_execution(adv); } struct adv_softc * adv_alloc(device_t dev, struct resource *res, long offset) { struct adv_softc *adv = device_get_softc(dev); /* * Allocate a storage area for us */ LIST_INIT(&adv->pending_ccbs); SLIST_INIT(&adv->free_ccb_infos); adv->dev = dev; adv->res = res; adv->reg_off = offset; mtx_init(&adv->lock, "adv", NULL, MTX_DEF); return(adv); } void adv_free(struct adv_softc *adv) { switch (adv->init_level) { case 6: { struct adv_ccb_info *cinfo; while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); adv_destroy_ccb_info(adv, cinfo); } bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap); } case 5: bus_dmamem_free(adv->sense_dmat, adv->sense_buffers, adv->sense_dmamap); case 4: bus_dma_tag_destroy(adv->sense_dmat); case 3: bus_dma_tag_destroy(adv->buffer_dmat); case 2: bus_dma_tag_destroy(adv->parent_dmat); case 1: if (adv->ccb_infos != NULL) free(adv->ccb_infos, M_DEVBUF); case 0: mtx_destroy(&adv->lock); break; } } int adv_init(struct adv_softc *adv) { struct adv_eeprom_config eeprom_config; int checksum, i; int max_sync; u_int16_t config_lsw; u_int16_t config_msw; mtx_lock(&adv->lock); adv_lib_init(adv); /* * Stop script execution. */ adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE); adv_stop_execution(adv); if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to halt adapter. Initialization failed\n"); return (1); } ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to set program counter. Initialization failed\n"); return (1); } config_msw = ADV_INW(adv, ADV_CONFIG_MSW); config_lsw = ADV_INW(adv, ADV_CONFIG_LSW); if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) { config_msw &= ~ADV_CFG_MSW_CLR_MASK; /* * XXX The Linux code flags this as an error, * but what should we report to the user???
* It seems that clearing the config register * makes this error recoverable. */ ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); } /* Suck in the configuration from the EEProm */ checksum = adv_get_eeprom_config(adv, &eeprom_config); if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) { /* * XXX The Linux code sets a warning level for this * condition, yet nothing of meaning is printed to * the user. What does this mean??? */ if (adv->chip_version == 3) { if (eeprom_config.cfg_lsw != config_lsw) eeprom_config.cfg_lsw = config_lsw; if (eeprom_config.cfg_msw != config_msw) { eeprom_config.cfg_msw = config_msw; } } } if (checksum == eeprom_config.chksum) { /* Range/Sanity checking */ if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG; } if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG; } if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) { eeprom_config.max_tag_qng = eeprom_config.max_total_qng; } if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) { eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC; } adv->max_openings = eeprom_config.max_total_qng; adv->user_disc_enable = eeprom_config.disc_enable; adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng; adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config); adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID; EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id); adv->control = eeprom_config.cntl; for (i = 0; i <= ADV_MAX_TID; i++) { u_int8_t sync_data; if ((eeprom_config.init_sdtr & (0x1 << i)) == 0) sync_data = 0; else sync_data = eeprom_config.sdtr_data[i]; adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw = eeprom_config.cfg_lsw; eeprom_config.cfg_msw = config_msw; } else { u_int8_t sync_data; device_printf(adv->dev, "Warning EEPROM Checksum mismatch. 
" "Using default device parameters\n"); /* Set reasonable defaults since we can't read the EEPROM */ adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1; adv->max_openings = ADV_DEF_MAX_TOTAL_QNG; adv->disc_enable = TARGET_BIT_VECTOR_SET; adv->user_disc_enable = TARGET_BIT_VECTOR_SET; adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->scsi_id = 7; adv->control = 0xFFFF; if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) /* Default to no Ultra to support the 3030 */ adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA; sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4); for (i = 0; i <= ADV_MAX_TID; i++) { adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON; } config_msw &= ~ADV_CFG_MSW_CLR_MASK; config_lsw |= ADV_CFG_LSW_HOST_INT_ON; if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA) && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0) /* 25ns or 10MHz */ max_sync = 25; else /* Unlimited */ max_sync = 0; for (i = 0; i <= ADV_MAX_TID; i++) { if (adv->tinfo[i].user.period < max_sync) adv->tinfo[i].user.period = max_sync; } if (adv_test_external_lram(adv) == 0) { if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) { eeprom_config.max_total_qng = ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { eeprom_config.cfg_msw |= 0x0800; config_msw |= 0x0800; eeprom_config.max_total_qng = ADV_MAX_PCI_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG; } adv->max_openings = eeprom_config.max_total_qng; } ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw); #if 0 /* * Don't write the eeprom data back for now. * I'd rather not mess up the user's card. We also don't * fully sanitize the eeprom settings above for the write-back * to be 100% correct. */ if (adv_set_eeprom_config(adv, &eeprom_config) != 0) device_printf(adv->dev, "WARNING! Failure writing to EEPROM.\n"); #endif adv_set_chip_scsiid(adv, adv->scsi_id); if (adv_init_lram_and_mcode(adv)) { mtx_unlock(&adv->lock); return (1); } adv->disc_enable = adv->user_disc_enable; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); for (i = 0; i <= ADV_MAX_TID; i++) { /* * Start off in async mode. */ adv_set_syncrate(adv, /*struct cam_path */NULL, i, /*period*/0, /*offset*/0, ADV_TRANS_CUR); /* * Enable the use of tagged commands on all targets. * This allows the kernel driver to make up it's own mind * as it sees fit to tag queue instead of having the * firmware try and second guess the tag_code settins. */ adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i, adv->max_openings); } adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); device_printf(adv->dev, "AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n", (adv->type & ADV_ULTRA) && (max_sync == 0) ? "Ultra SCSI" : "SCSI", adv->scsi_id, adv->max_openings); mtx_unlock(&adv->lock); return (0); } void adv_intr(void *arg) { struct adv_softc *adv; adv = arg; mtx_lock(&adv->lock); adv_intr_locked(adv); mtx_unlock(&adv->lock); } void adv_intr_locked(struct adv_softc *adv) { u_int16_t chipstat; u_int16_t saved_ram_addr; u_int8_t ctrl_reg; u_int8_t saved_ctrl_reg; u_int8_t host_flag; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); chipstat = ADV_INW(adv, ADV_CHIP_STATUS); /* Is it for us? 
*/ if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0) return; ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL); saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | ADV_CC_SINGLE_STEP | ADV_CC_DIAG | ADV_CC_TEST)); if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) { device_printf(adv->dev, "Detected Bus Reset\n"); adv_reset_bus(adv, /*initiate_reset*/FALSE); return; } if ((chipstat & ADV_CSW_INT_PENDING) != 0) { saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR); host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag | ADV_HOST_FLAG_IN_ISR); adv_ack_interrupt(adv); if ((chipstat & ADV_CSW_HALTED) != 0 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) { adv_isr_chip_halted(adv); saved_ctrl_reg &= ~ADV_CC_HALT; } else { adv_run_doneq(adv); } ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr); #ifdef DIAGNOSTIC if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr) panic("adv_intr: Unable to set LRAM addr"); #endif adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); } ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg); } static void adv_run_doneq(struct adv_softc *adv) { struct adv_q_done_info scsiq; u_int doneq_head; u_int done_qno; doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF; done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head) + ADV_SCSIQ_B_FWD); while (done_qno != ADV_QLINK_END) { union ccb* ccb; struct adv_ccb_info *cinfo; u_int done_qaddr; u_int sg_queue_cnt; done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Pull status from this request */ sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq, adv->max_dma_count); /* Mark it as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, scsiq.q_status & ~(QS_READY|QS_ABORTED)); /* Process request based on retrieved info */ if ((scsiq.cntl & QC_SG_HEAD) != 0) { u_int i; /* * S/G based request. Free all of the queue * structures that contained S/G information. 
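             * As described in adv_attach() below, a request is a head
             * queue holding the command followed by sg_queue_cnt linked
             * queues carrying up to ADV_SG_LIST_PER_Q S/G entries each;
             * illustratively:
             *
             *   head (cmd) -> sg queue 1 -> ... -> sg queue n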
*/ for (i = 0; i < sg_queue_cnt; i++) { done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); #ifdef DIAGNOSTIC if (done_qno == ADV_QLINK_END) { panic("adv_qdone: Corrupted SG " "list encountered"); } #endif done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Mark SG queue as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, QS_FREE); } } else sg_queue_cnt = 0; #ifdef DIAGNOSTIC if (adv->cur_active < (sg_queue_cnt + 1)) panic("adv_qdone: Attempting to free more " "queues than are active"); #endif adv->cur_active -= sg_queue_cnt + 1; if ((scsiq.q_status != QS_DONE) && (scsiq.q_status & QS_ABORTED) == 0) panic("adv_qdone: completed scsiq with unknown status"); scsiq.remain_bytes += scsiq.extra_bytes; if ((scsiq.d3.done_stat == QD_WITH_ERROR) && (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) { if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) { scsiq.d3.done_stat = QD_NO_ERROR; scsiq.d3.host_stat = QHSTA_NO_ERROR; } } cinfo = &adv->ccb_infos[scsiq.d2.ccb_index]; ccb = cinfo->ccb; ccb->csio.resid = scsiq.remain_bytes; adv_done(adv, ccb, scsiq.d3.done_stat, scsiq.d3.host_stat, scsiq.d3.scsi_stat, scsiq.q_no); doneq_head = done_qno; done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); } adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head); } void adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat, u_int host_stat, u_int scsi_status, u_int q_no) { struct adv_ccb_info *cinfo; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; LIST_REMOVE(&ccb->ccb_h, sim_links.le); callout_stop(&cinfo->timer); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); } switch (done_stat) { case QD_NO_ERROR: if (host_stat == QHSTA_NO_ERROR) { ccb->ccb_h.status = CAM_REQ_CMP; break; } xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done without error, " "but host status non-zero(%x)\n", host_stat); /*FALLTHROUGH*/ case QD_WITH_ERROR: switch (host_stat) { case QHSTA_M_TARGET_STATUS_BUSY: case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY: /* * Assume that if we were a tagged transaction * the target reported queue full. Otherwise, * report busy. The firmware really should just * pass the original status back up to us even * if it thinks the target was in error for * returning this status as no other transactions * from this initiator are in effect, but this * ignores multi-initiator setups and there is * evidence that the firmware gets its per-device * transaction counts screwed up occasionally. 
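             * In short, the mapping implemented just below is:
             *
             *   tagged CCB and host_stat != QHSTA_M_TARGET_STATUS_BUSY
             *       -> SCSI_STATUS_QUEUE_FULL
             *   otherwise
             *       -> SCSI_STATUS_BUSY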
*/ ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && host_stat != QHSTA_M_TARGET_STATUS_BUSY) scsi_status = SCSI_STATUS_QUEUE_FULL; else scsi_status = SCSI_STATUS_BUSY; adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, /*ccb*/NULL, CAM_REQUEUE_REQ, /*queued_only*/TRUE); /*FALLTHROUGH*/ case QHSTA_M_NO_AUTO_REQ_SENSE: case QHSTA_NO_ERROR: ccb->csio.scsi_status = scsi_status; switch (scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: ccb->ccb_h.status |= CAM_AUTOSNS_VALID; /* Structure copy */ ccb->csio.sense_data = adv->sense_buffers[q_no - 1]; /* FALLTHROUGH */ case SCSI_STATUS_BUSY: case SCSI_STATUS_RESERV_CONFLICT: case SCSI_STATUS_QUEUE_FULL: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; case SCSI_STATUS_OK: ccb->ccb_h.status |= CAM_REQ_CMP; break; } break; case QHSTA_M_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case QHSTA_M_DATA_OVER_RUN: ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case QHSTA_M_UNEXPECTED_BUS_FREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case QHSTA_M_BAD_BUS_PHASE_SEQ: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_BAD_CMPL_STATUS_IN: /* No command complete after a status message */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT: case QHSTA_M_WTM_TIMEOUT: case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET: /* The SCSI bus hung in a phase */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; adv_reset_bus(adv, /*initiate_reset*/TRUE); break; case QHSTA_M_AUTO_REQ_SENSE_FAIL: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case QHSTA_D_QDONE_SG_LIST_CORRUPTED: case QHSTA_D_ASC_DVC_ERROR_CODE_SET: case QHSTA_D_HOST_ABORT_FAILED: case QHSTA_D_EXE_SCSI_Q_FAILED: case QHSTA_D_ASPI_NO_BUF_POOL: case QHSTA_M_BAD_TAG_CODE: case QHSTA_D_LRAM_CMP_ERROR: case QHSTA_M_MICRO_CODE_ERROR_HALT: default: panic("%s: Unhandled Host status error %x", device_get_nameunit(adv->dev), host_stat); /* NOTREACHED */ } break; case QD_ABORTED_BY_HOST: /* Don't clobber any, more explicit, error codes we've set */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) ccb->ccb_h.status = CAM_REQ_ABORTED; break; default: xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done with unknown status %x:%x\n", done_stat, host_stat); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } adv_clear_state(adv, ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } adv_free_ccb_info(adv, cinfo); /* * Null this out so that we catch driver bugs that cause a * ccb to be completed twice. */ ccb->ccb_h.ccb_cinfo_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } /* * Function to poll for command completion when * interrupts are disabled (crash dumps) */ static void adv_poll(struct cam_sim *sim) { adv_intr_locked(cam_sim_softc(sim)); } /* * Attach all the sub-devices we can find */ int adv_attach(adv) struct adv_softc *adv; { struct ccb_setasync csa; struct cam_devq *devq; int max_sg; /* * Allocate an array of ccb mapping structures. We put the * index of the ccb_info structure into the queue representing * a transaction and use it for mapping the queue to the * upper level SCSI transaction it represents. 
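     * For example, adv_run_doneq() above recovers the transaction for
     * a completed queue with:
     *
     *   cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
     *   ccb = cinfo->ccb;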
     */
    adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
                            M_DEVBUF, M_NOWAIT);
    if (adv->ccb_infos == NULL)
        return (ENOMEM);

    adv->init_level++;

    /*
     * Create our DMA tags. These tags define the kinds of device
     * accessible memory allocations and memory mappings we will
     * need to perform during normal operation.
     *
     * Unless we need to further restrict the allocation, we rely
     * on the restrictions of the parent dmat, hence the common
     * use of MAXADDR and MAXSIZE.
     *
     * The ASC boards use chains of "queues" (the transactional
     * resources on the board) to represent long S/G lists.
     * The first queue represents the command and holds a
     * single address and data pair. The queues that follow
     * can each hold ADV_SG_LIST_PER_Q entries. Given the
     * total number of queues, we can express the largest
     * transaction we can map. We reserve a few queues for
     * error recovery. Take those into account as well.
     *
     * There is a way to take an interrupt to download the
     * next batch of S/G entries if there are more than 255
     * of them (the counter in the queue structure is a u_int8_t).
     * We don't use this feature, so limit the S/G list size
     * accordingly.
     */
    max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
    if (max_sg > 255)
        max_sg = 255;

    /* DMA tag for mapping buffers into device visible space. */
    if (bus_dma_tag_create(
            /* parent    */ adv->parent_dmat,
            /* alignment */ 1,
            /* boundary  */ 0,
            /* lowaddr   */ BUS_SPACE_MAXADDR,
            /* highaddr  */ BUS_SPACE_MAXADDR,
            /* filter    */ NULL,
            /* filterarg */ NULL,
            /* maxsize   */ ADV_MAXPHYS,
            /* nsegments */ max_sg,
            /* maxsegsz  */ BUS_SPACE_MAXSIZE_32BIT,
            /* flags     */ BUS_DMA_ALLOCNOW,
            /* lockfunc  */ busdma_lock_mutex,
            /* lockarg   */ &adv->lock,
            &adv->buffer_dmat) != 0) {
        return (ENXIO);
    }
    adv->init_level++;

    /* DMA tag for our sense buffers */
    if (bus_dma_tag_create(
            /* parent    */ adv->parent_dmat,
            /* alignment */ 1,
            /* boundary  */ 0,
            /* lowaddr   */ BUS_SPACE_MAXADDR,
            /* highaddr  */ BUS_SPACE_MAXADDR,
            /* filter    */ NULL,
            /* filterarg */ NULL,
            /* maxsize   */ sizeof(struct scsi_sense_data) *
                            adv->max_openings,
            /* nsegments */ 1,
            /* maxsegsz  */ BUS_SPACE_MAXSIZE_32BIT,
            /* flags     */ 0,
            /* lockfunc  */ busdma_lock_mutex,
            /* lockarg   */ &adv->lock,
            &adv->sense_dmat) != 0) {
        return (ENXIO);
    }

    adv->init_level++;

    /* Allocation for our sense buffers */
    if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
                         BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
        return (ENOMEM);
    }

    adv->init_level++;

    /* And permanently map them */
    bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
                    adv->sense_buffers,
                    sizeof(struct scsi_sense_data)*adv->max_openings,
                    adv_map, &adv->sense_physbase, /*flags*/0);

    adv->init_level++;

    /*
     * Fire up the chip
     */
    if (adv_start_chip(adv) != 1) {
        device_printf(adv->dev,
            "Unable to start on-board processor. Aborting.\n");
        return (ENXIO);
    }

    /*
     * Create the device queue for our SIM.
     */
    devq = cam_simq_alloc(adv->max_openings);
    if (devq == NULL)
        return (ENOMEM);

    /*
     * Construct our SIM entry.
     */
    adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv,
        device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq);
    if (adv->sim == NULL)
        return (ENOMEM);

    /*
     * Register the bus.
     *
     * XXX Twin Channel EISA Cards???
*/ mtx_lock(&adv->lock); if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) { cam_sim_free(adv->sim, /*free devq*/TRUE); mtx_unlock(&adv->lock); return (ENXIO); } if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(adv->sim)); cam_sim_free(adv->sim, /*free devq*/TRUE); mtx_unlock(&adv->lock); return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = advasync; csa.callback_arg = adv; xpt_action((union ccb *)&csa); mtx_unlock(&adv->lock); return (0); } MODULE_DEPEND(adv, cam, 1, 1, 1); Index: stable/11/sys/dev/aha/aha.c =================================================================== --- stable/11/sys/dev/aha/aha.c (revision 314380) +++ stable/11/sys/dev/aha/aha.c (revision 314381) @@ -1,1824 +1,1820 @@ /* * Generic register and struct definitions for the Adaptech 154x/164x * SCSI host adapters. Product specific probe and attach routines can * be found in: * aha 1542A/1542B/1542C/1542CF/1542CP aha_isa.c * aha 1640 aha_mca.c */ /*- * Copyright (c) 1998 M. Warner Losh. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from bt.c written by: * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRVERB(x) do { if (bootverbose) device_printf x; } while (0) /* Macro to determine that a rev is potentially a new valid one * so that the driver doesn't keep breaking on new revs as it * did for the CF and CP. */ #define PROBABLY_NEW_BOARD(REV) (REV > 0x43 && REV < 0x56) /* MailBox Management functions */ static __inline void ahanextinbox(struct aha_softc *aha); static __inline void ahanextoutbox(struct aha_softc *aha); #define aha_name(aha) device_get_nameunit(aha->dev) static __inline void ahanextinbox(struct aha_softc *aha) { if (aha->cur_inbox == aha->last_inbox) aha->cur_inbox = aha->in_boxes; else aha->cur_inbox++; } static __inline void ahanextoutbox(struct aha_softc *aha) { if (aha->cur_outbox == aha->last_outbox) aha->cur_outbox = aha->out_boxes; else aha->cur_outbox++; } #define ahautoa24(u,s3) \ (s3)[0] = ((u) >> 16) & 0xff; \ (s3)[1] = ((u) >> 8) & 0xff; \ (s3)[2] = (u) & 0xff; #define aha_a24tou(s3) \ (((s3)[0] << 16) | ((s3)[1] << 8) | (s3)[2]) /* CCB Management functions */ static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb); static __inline struct aha_ccb* ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr); static __inline uint32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb) { return (aha->aha_ccb_physbase + (uint32_t)((caddr_t)accb - (caddr_t)aha->aha_ccb_array)); } static __inline struct aha_ccb * ahaccbptov(struct aha_softc *aha, uint32_t ccb_addr) { return (aha->aha_ccb_array + + ((struct aha_ccb*)(uintptr_t)ccb_addr - (struct aha_ccb*)(uintptr_t)aha->aha_ccb_physbase)); } static struct aha_ccb* ahagetccb(struct aha_softc *aha); static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb); static void ahaallocccbs(struct aha_softc *aha); static bus_dmamap_callback_t ahaexecuteccb; static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code); static void aha_intr_locked(struct aha_softc *aha); /* Host adapter command functions */ static int ahareset(struct aha_softc* aha, int hard_reset); /* Initialization functions */ static int ahainitmboxes(struct aha_softc *aha); static bus_dmamap_callback_t ahamapmboxes; static bus_dmamap_callback_t ahamapccbs; static bus_dmamap_callback_t ahamapsgs; /* Transfer Negotiation Functions */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_accb_ptr spriv_ptr0 #define ccb_aha_ptr spriv_ptr1 static void ahaaction(struct cam_sim *sim, union ccb *ccb); static void ahapoll(struct cam_sim *sim); /* Our timeout handler */ static void ahatimeout(void *arg); /* Exported functions */ void aha_alloc(struct aha_softc *aha) { SLIST_INIT(&aha->free_aha_ccbs); LIST_INIT(&aha->pending_ccbs); SLIST_INIT(&aha->sg_maps); aha->ccb_sg_opcode = INITIATOR_SG_CCB_WRESID; 
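    /*
     * Aside (illustrative, compiled out): the 24-bit address macros
     * defined above store big-endian 3-byte values; the constant here
     * is only an example.
     */
#if 0
    {
        uint8_t a3[3];

        ahautoa24(0x123456, a3);    /* a3 = { 0x12, 0x34, 0x56 } */
        KASSERT(aha_a24tou(a3) == 0x123456, ("a24 round trip"));
    }
#endif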
    aha->ccb_ccb_opcode = INITIATOR_CCB_WRESID;
    mtx_init(&aha->lock, "aha", NULL, MTX_DEF);
}

void
aha_free(struct aha_softc *aha)
{
    switch (aha->init_level) {
    default:
    case 8:
    {
        struct sg_map_node *sg_map;

        while ((sg_map = SLIST_FIRST(&aha->sg_maps))!= NULL) {
            SLIST_REMOVE_HEAD(&aha->sg_maps, links);
            bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap);
            bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr,
                sg_map->sg_dmamap);
            free(sg_map, M_DEVBUF);
        }
        bus_dma_tag_destroy(aha->sg_dmat);
    }
    case 7:
        bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap);
    case 6:
        bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array,
            aha->ccb_dmamap);
    case 5:
        bus_dma_tag_destroy(aha->ccb_dmat);
    case 4:
        bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap);
    case 3:
        bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes,
            aha->mailbox_dmamap);
    case 2:
        bus_dma_tag_destroy(aha->buffer_dmat);
    case 1:
        bus_dma_tag_destroy(aha->mailbox_dmat);
    case 0:
        break;
    }
    mtx_destroy(&aha->lock);
}

/*
 * Probe the adapter and verify that the card is an Adaptec.
 */
int
aha_probe(struct aha_softc* aha)
{
    u_int    status;
    u_int    intstat;
    int      error;
    board_id_data_t board_id;

    /*
     * See if the three I/O ports look reasonable.
     * Touch the minimal number of registers in the
     * failure case.
     */
    status = aha_inb(aha, STATUS_REG);
    if ((status == 0) ||
        (status & (DIAG_ACTIVE|CMD_REG_BUSY|STATUS_REG_RSVD)) != 0) {
        PRVERB((aha->dev, "status reg test failed %x\n", status));
        return (ENXIO);
    }

    intstat = aha_inb(aha, INTSTAT_REG);
    if ((intstat & INTSTAT_REG_RSVD) != 0) {
        PRVERB((aha->dev, "Failed Intstat Reg Test\n"));
        return (ENXIO);
    }

    /*
     * Looking good so far. Final test is to reset the
     * adapter and fetch the board ID and ensure we aren't
     * looking at a BusLogic.
     */
    if ((error = ahareset(aha, /*hard_reset*/TRUE)) != 0) {
        PRVERB((aha->dev, "Failed Reset\n"));
        return (ENXIO);
    }

    /*
     * Get the board ID. We use this to see if we're dealing with
     * a BusLogic card or an aha card (or clone).
     */
    error = aha_cmd(aha, AOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0,
        (uint8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT);
    if (error != 0) {
        PRVERB((aha->dev, "INQUIRE failed %x\n", error));
        return (ENXIO);
    }
    aha->fw_major = board_id.firmware_rev_major;
    aha->fw_minor = board_id.firmware_rev_minor;
    aha->boardid = board_id.board_type;

    /*
     * The BusLogic cards have an id of either 0x41 or 0x42. So
     * if those come up in the probe, we test the geometry register
     * of the board. Adaptec boards that are this old will not have
     * this register, and return 0xff, while BusLogic cards will return
     * something different.
     *
     * It appears that, for reasons unknown, for the aha-1542B cards
     * we need to wait a little bit before trying to read the
     * geometry register. I picked 10 ms since we have
     * reports that a for loop to 1000 did the trick, and this
     * errs on the side of conservatism. Besides, no one will
     * notice a 10 ms delay here, even the 1542B card users :-)
     *
     * Some compatible cards return 0 here. Some cards also
     * seem to return 0x7f.
     *
     * XXX I'm not sure how this will impact other cloned cards
     *
     * This really should be replaced with the esetup command, since
     * that appears to be more reliable. This becomes more and more
     * true over time as we discover more cards that don't read the
     * geometry register consistently.
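     *
     * Summarizing the observed readings (illustrative only):
     *
     *   0xff        old Adaptec boards (no geometry register)
     *   0x00, 0x7f  some compatible/clone boards
     *   other       likely a BusLogic; the probe is rejected below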
     */
    if (aha->boardid <= 0x42) {
        /* Wait 10 ms before reading */
        DELAY(10000);
        status = aha_inb(aha, GEOMETRY_REG);
        if (status != 0xff && status != 0x00 && status != 0x7f) {
            PRVERB((aha->dev, "Geometry Register test failed %#x\n",
                status));
            return (ENXIO);
        }
    }

    return (0);
}

/*
 * Pull the board's setup information and record it in our softc.
 */
int
aha_fetch_adapter_info(struct aha_softc *aha)
{
    setup_data_t setup_info;
    config_data_t config_data;
    uint8_t length_param;
    int error;
    struct aha_extbios extbios;

    switch (aha->boardid) {
    case BOARD_1540_16HEAD_BIOS:
        snprintf(aha->model, sizeof(aha->model), "1540 16 head BIOS");
        break;
    case BOARD_1540_64HEAD_BIOS:
        snprintf(aha->model, sizeof(aha->model), "1540 64 head BIOS");
        break;
    case BOARD_1542:
        snprintf(aha->model, sizeof(aha->model), "1540/1542 64 head BIOS");
        break;
    case BOARD_1640:
        snprintf(aha->model, sizeof(aha->model), "1640");
        break;
    case BOARD_1740:
        snprintf(aha->model, sizeof(aha->model), "1740A/1742A/1744");
        break;
    case BOARD_1542C:
        snprintf(aha->model, sizeof(aha->model), "1542C");
        break;
    case BOARD_1542CF:
        snprintf(aha->model, sizeof(aha->model), "1542CF");
        break;
    case BOARD_1542CP:
        snprintf(aha->model, sizeof(aha->model), "1542CP");
        break;
    default:
        snprintf(aha->model, sizeof(aha->model), "Unknown");
        break;
    }
    /*
     * If we are a new type of 1542 board (anything newer than a 1542C)
     * then disable the extended BIOS so that the
     * mailbox interface is unlocked.
     * This is also true for the 1542B Version 3.20, the first Adaptec
     * board that supports >1GB drives.
     * No need to check the extended BIOS flags as some of the
     * extensions that cause us problems are not flagged in that byte.
     */
    if (PROBABLY_NEW_BOARD(aha->boardid) ||
        (aha->boardid == 0x41 &&
        aha->fw_major == 0x31 && aha->fw_minor >= 0x34)) {
        error = aha_cmd(aha, AOP_RETURN_EXT_BIOS_INFO, NULL,
            /*paramlen*/0, (u_char *)&extbios, sizeof(extbios),
            DEFAULT_CMD_TIMEOUT);
        if (error != 0) {
            device_printf(aha->dev,
                "AOP_RETURN_EXT_BIOS_INFO - Failed.");
            return (error);
        }
        error = aha_cmd(aha, AOP_MBOX_IF_ENABLE, (uint8_t *)&extbios,
            /*paramlen*/2, NULL, 0, DEFAULT_CMD_TIMEOUT);
        if (error != 0) {
            device_printf(aha->dev, "AOP_MBOX_IF_ENABLE - Failed.");
            return (error);
        }
    }
    if (aha->boardid < 0x41)
        device_printf(aha->dev, "Warning: aha-1542A won't work.\n");

    aha->max_sg = 17;       /* Need >= 17 to do 64k I/O */
    aha->diff_bus = 0;
    aha->extended_lun = 0;
    aha->extended_trans = 0;
    aha->max_ccbs = 16;
    /* Determine Sync/Wide/Disc settings */

    length_param = sizeof(setup_info);
    error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &length_param,
        /*paramlen*/1, (uint8_t*)&setup_info, sizeof(setup_info),
        DEFAULT_CMD_TIMEOUT);
    if (error != 0) {
        device_printf(aha->dev,
            "aha_fetch_adapter_info - Failed Get Setup Info\n");
        return (error);
    }
    if (setup_info.initiate_sync != 0) {
        aha->sync_permitted = ALL_TARGETS;
    }
    aha->disc_permitted = ALL_TARGETS;

    /* We need as many mailboxes as we can have ccbs */
    aha->num_boxes = aha->max_ccbs;

    /* Determine our SCSI ID */
    error = aha_cmd(aha, AOP_INQUIRE_CONFIG, NULL, /*parmlen*/0,
        (uint8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT);
    if (error != 0) {
        device_printf(aha->dev,
            "aha_fetch_adapter_info - Failed Get Config\n");
        return (error);
    }
    aha->scsi_id = config_data.scsi_id;
    return (0);
}

/*
 * Start the board, ready for normal operation
 */
int
aha_init(struct aha_softc* aha)
{
    /* Announce the Adapter */
    device_printf(aha->dev, "AHA-%s FW Rev. 
%c.%c (ID=%x) ", aha->model, aha->fw_major, aha->fw_minor, aha->boardid); if (aha->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", aha->scsi_id, aha->max_ccbs); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ AHA_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &aha->lock, &aha->buffer_dmat) != 0) { goto error_exit; } aha->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->mailbox_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(aha->mailbox_dmat, (void **)&aha->out_boxes, BUS_DMA_NOWAIT, &aha->mailbox_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->mailbox_dmat, aha->mailbox_dmamap, aha->out_boxes, aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), ahamapmboxes, aha, /*flags*/0); aha->init_level++; aha->in_boxes = (aha_mbox_in_t *)&aha->out_boxes[aha->num_boxes]; ahainitmboxes(aha); /* DMA tag for our ccb structures */ if (bus_dma_tag_create( /* parent */ aha->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ aha->max_ccbs * sizeof(struct aha_ccb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_24BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &aha->ccb_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(aha->ccb_dmat, (void **)&aha->aha_ccb_array, BUS_DMA_NOWAIT, &aha->ccb_dmamap) != 0) goto error_exit; aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->ccb_dmat, aha->ccb_dmamap, aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb), ahamapccbs, aha, /*flags*/0); aha->init_level++; /* DMA tag for our S/G structures. 
We allocate in page-sized chunks */
    if (bus_dma_tag_create( /* parent    */ aha->parent_dmat,
                            /* alignment */ 1,
                            /* boundary  */ 0,
                            /* lowaddr   */ BUS_SPACE_MAXADDR,
                            /* highaddr  */ BUS_SPACE_MAXADDR,
                            /* filter    */ NULL,
                            /* filterarg */ NULL,
                            /* maxsize   */ PAGE_SIZE,
                            /* nsegments */ 1,
                            /* maxsegsz  */ BUS_SPACE_MAXSIZE_24BIT,
                            /* flags     */ 0,
                            /* lockfunc  */ NULL,
                            /* lockarg   */ NULL,
                            &aha->sg_dmat) != 0)
        goto error_exit;

    aha->init_level++;

    /* Perform initial CCB allocation */
    bzero(aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb));
    ahaallocccbs(aha);

    if (aha->num_ccbs == 0) {
        device_printf(aha->dev,
            "aha_init - Unable to allocate initial ccbs\n");
        goto error_exit;
    }

    /*
     * Note that we are done and return (to the probe).
     */
    return (0);

error_exit:
    return (ENXIO);
}

int
aha_attach(struct aha_softc *aha)
{
    int tagged_dev_openings;
    struct cam_devq *devq;

    /*
     * We don't do tagged queueing, since the aha cards don't
     * support it.
     */
    tagged_dev_openings = 0;

    /*
     * Create the device queue for our SIM.
     */
    devq = cam_simq_alloc(aha->max_ccbs - 1);
    if (devq == NULL)
        return (ENOMEM);

    /*
     * Construct our SIM entry
     */
    aha->sim = cam_sim_alloc(ahaaction, ahapoll, "aha", aha,
        device_get_unit(aha->dev), &aha->lock, 2, tagged_dev_openings,
        devq);
    if (aha->sim == NULL) {
        cam_simq_free(devq);
        return (ENOMEM);
    }
    mtx_lock(&aha->lock);
    if (xpt_bus_register(aha->sim, aha->dev, 0) != CAM_SUCCESS) {
        cam_sim_free(aha->sim, /*free_devq*/TRUE);
        mtx_unlock(&aha->lock);
        return (ENXIO);
    }
    if (xpt_create_path(&aha->path, /*periph*/NULL,
        cam_sim_path(aha->sim), CAM_TARGET_WILDCARD,
        CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(aha->sim));
        cam_sim_free(aha->sim, /*free_devq*/TRUE);
        mtx_unlock(&aha->lock);
        return (ENXIO);
    }
    mtx_unlock(&aha->lock);
    return (0);
}

static void
ahaallocccbs(struct aha_softc *aha)
{
    struct aha_ccb *next_ccb;
    struct sg_map_node *sg_map;
    bus_addr_t physaddr;
    aha_sg_t *segs;
    int newcount;
    int i;

    next_ccb = &aha->aha_ccb_array[aha->num_ccbs];

    sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

    if (sg_map == NULL)
        return;

    /* Allocate S/G space for the next batch of CCBS */
    if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr,
        BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
        free(sg_map, M_DEVBUF);
        return;
    }

    SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links);

    bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
        PAGE_SIZE, ahamapsgs, aha, /*flags*/0);

    segs = sg_map->sg_vaddr;
    physaddr = sg_map->sg_physaddr;

    newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t)));
    for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) {
        int error;

        next_ccb->sg_list = segs;
        next_ccb->sg_list_phys = physaddr;
        next_ccb->flags = ACCB_FREE;
        callout_init_mtx(&next_ccb->timer, &aha->lock, 0);
        error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0,
            &next_ccb->dmamap);
        if (error != 0)
            break;
        SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links);
        segs += AHA_NSEG;
        physaddr += (AHA_NSEG * sizeof(aha_sg_t));
        next_ccb++;
        aha->num_ccbs++;
    }

    /* Reserve a CCB for error recovery */
    if (aha->recovery_accb == NULL) {
        aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs);
        SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links);
    }
}

static __inline void
ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb)
{
    if (!dumping)
        mtx_assert(&aha->lock, MA_OWNED);
    if ((accb->flags & ACCB_ACTIVE) != 0)
        LIST_REMOVE(&accb->ccb->ccb_h, sim_links.le);
    if (aha->resource_shortage != 0
     && (accb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
        accb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        aha->resource_shortage = FALSE;
    }
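    /* Return the CCB to the free pool and retire it from the active count. */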
accb->flags = ACCB_FREE; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, accb, links); aha->active_ccbs--; } static struct aha_ccb* ahagetccb(struct aha_softc *aha) { struct aha_ccb* accb; if (!dumping) mtx_assert(&aha->lock, MA_OWNED); if ((accb = SLIST_FIRST(&aha->free_aha_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } else if (aha->num_ccbs < aha->max_ccbs) { ahaallocccbs(aha); accb = SLIST_FIRST(&aha->free_aha_ccbs); if (accb == NULL) device_printf(aha->dev, "Can't malloc ACCB\n"); else { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } } return (accb); } static void ahaaction(struct cam_sim *sim, union ccb *ccb) { struct aha_softc *aha; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahaaction\n")); aha = (struct aha_softc *)cam_sim_softc(sim); mtx_assert(&aha->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aha_ccb *accb; struct aha_hccb *hccb; /* * Get an accb to use. */ if ((accb = ahagetccb(aha)) == NULL) { aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &accb->hccb; /* * So we can find the ACCB when an abort is requested */ accb->ccb = ccb; ccb->ccb_h.ccb_accb_ptr = accb; ccb->ccb_h.ccb_aha_ptr = aha; /* * Put all the arguments for the xfer in the accb */ hccb->target = ccb->ccb_h.target_id; hccb->lun = ccb->ccb_h.target_lun; hccb->ahastat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; int error; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = aha->ccb_ccb_opcode; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) != 0; hccb->dataout = (ccb->ccb_h.flags & CAM_DIR_OUT) != 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* * If we have any data to send with this command, * map it into bus space. */ error = bus_dmamap_load_ccb( aha->buffer_dmat, accb->dmamap, ccb, ahaexecuteccb, accb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the * controller queue until our mapping is * returned. 
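                 * (bus_dmamap_load_ccb() returned EINPROGRESS, so
                 * ahaexecuteccb() runs later from the busdma callback;
                 * CAM_RELEASE_SIMQ thaws the queue when this CCB
                 * completes.)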
*/ xpt_freeze_simq(aha->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; ahaexecuteccb(accb, NULL, 0, 0); } break; } - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; u_int target_mask = 0x01 << ccb->ccb_h.target_id; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_USER_SETTINGS) { spi->flags = 0; if ((aha->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if ((aha->sync_permitted & target_mask) != 0) { if (aha->boardid >= BOARD_1542CF) spi->sync_period = 25; else spi->sync_period = 50; } else { spi->sync_period = 0; } if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; } else { ahafetchtransinfo(aha, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; uint32_t size_mb; uint32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (aha->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ ahareset(aha, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
     */
        cpi->hba_inquiry = PI_SDTR_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = 0;
        cpi->hba_eng_cnt = 0;
        cpi->max_target = 7;
        cpi->max_lun = 7;
        cpi->initiator_id = aha->scsi_id;
        cpi->bus_id = cam_sim_bus(sim);
        cpi->base_transfer_speed = 3300;
        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        cpi->transport = XPORT_SPI;
        cpi->transport_version = 2;
        cpi->protocol = PROTO_SCSI;
        cpi->protocol_version = SCSI_REV_2;
        cpi->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        break;
    }
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        break;
    }
}

static void
ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    struct aha_ccb *accb;
    union ccb *ccb;
    struct aha_softc *aha;
    uint32_t paddr;

    accb = (struct aha_ccb *)arg;
    ccb = accb->ccb;
    aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;

    if (error != 0) {
        if (error != EFBIG)
            device_printf(aha->dev,
                "Unexpected error 0x%x returned from "
                "bus_dmamap_load\n", error);
        if (ccb->ccb_h.status == CAM_REQ_INPROG) {
            xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
            ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
        }
        ahafreeccb(aha, accb);
        xpt_done(ccb);
        return;
    }

    if (nseg != 0) {
        aha_sg_t *sg;
        bus_dma_segment_t *end_seg;
        bus_dmasync_op_t op;

        end_seg = dm_segs + nseg;

        /* Copy the segments into our SG list */
        sg = accb->sg_list;
        while (dm_segs < end_seg) {
            ahautoa24(dm_segs->ds_len, sg->len);
            ahautoa24(dm_segs->ds_addr, sg->addr);
            sg++;
            dm_segs++;
        }

        if (nseg > 1) {
            accb->hccb.opcode = aha->ccb_sg_opcode;
            ahautoa24((sizeof(aha_sg_t) * nseg),
                accb->hccb.data_len);
            ahautoa24(accb->sg_list_phys, accb->hccb.data_addr);
        } else {
            bcopy(accb->sg_list->len, accb->hccb.data_len, 3);
            bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3);
        }

        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
            op = BUS_DMASYNC_PREREAD;
        else
            op = BUS_DMASYNC_PREWRITE;

        bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);

    } else {
        accb->hccb.opcode = INITIATOR_CCB;
        ahautoa24(0, accb->hccb.data_len);
        ahautoa24(0, accb->hccb.data_addr);
    }

    /*
     * Last chance to check whether this CCB needs to be aborted.
     */
    if (ccb->ccb_h.status != CAM_REQ_INPROG) {
        if (nseg != 0)
            bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
        ahafreeccb(aha, accb);
        xpt_done(ccb);
        return;
    }

    accb->flags = ACCB_ACTIVE;
    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le);

    callout_reset_sbt(&accb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
        ahatimeout, accb, 0);

    /* Tell the adapter about this command */
    if (aha->cur_outbox->action_code != AMBO_FREE) {
        /*
         * We should never encounter a busy mailbox.
         * If we do, warn the user, and treat it as
         * a resource shortage. If the controller is
         * hung, one of the pending transactions will
         * timeout causing us to start recovery operations.
         */
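        /*
         * Accordingly, the code below requeues the command with
         * CAM_REQUEUE_REQ and freezes the SIMQ until resources
         * free up.
         */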
*/ device_printf(aha->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!", aha->active_ccbs, aha->max_ccbs); callout_stop(&accb->timer); if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } paddr = ahaccbvtop(aha, accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } void aha_intr(void *arg) { struct aha_softc *aha; aha = arg; mtx_lock(&aha->lock); aha_intr_locked(aha); mtx_unlock(&aha->lock); } void aha_intr_locked(struct aha_softc *aha) { u_int intstat; uint32_t paddr; while (((intstat = aha_inb(aha, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { aha->latched_status = aha_inb(aha, STATUS_REG); aha->command_cmp = TRUE; } aha_outb(aha, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (aha->cur_inbox->comp_code != AMBI_FREE) { paddr = aha_a24tou(aha->cur_inbox->ccb_addr); ahadone(aha, ahaccbptov(aha, paddr), aha->cur_inbox->comp_code); aha->cur_inbox->comp_code = AMBI_FREE; ahanextinbox(aha); } } if ((intstat & SCSI_BUS_RESET) != 0) { ahareset(aha, /*hardreset*/FALSE); } } } static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = accb->ccb; csio = &accb->ccb->csio; if ((accb->flags & ACCB_ACTIVE) == 0) { device_printf(aha->dev, "ahadone - Attempt to free non-active ACCB %p\n", (void *)accb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); } if (accb == aha->recovery_accb) { /* * The recovery ACCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCBs action. * We also reinstate timeouts for all remaining, pending, * CCBs. 
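         * In outline, for each pending CCB below:
         *
         *   same target as the BDR -> complete it with
         *                             AHASTAT_HA_BDR
         *   any other target       -> restart its timeout callout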
         */
        struct cam_path *path;
        struct ccb_hdr *ccb_h;
        cam_status error;

        /* Notify all clients that a BDR occurred */
        error = xpt_create_path(&path, /*periph*/NULL,
            cam_sim_path(aha->sim), accb->hccb.target,
            CAM_LUN_WILDCARD);

        if (error == CAM_REQ_CMP) {
            xpt_async(AC_SENT_BDR, path, NULL);
            xpt_free_path(path);
        }

        ccb_h = LIST_FIRST(&aha->pending_ccbs);
        while (ccb_h != NULL) {
            struct aha_ccb *pending_accb;

            pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
            if (pending_accb->hccb.target == accb->hccb.target) {
                pending_accb->hccb.ahastat = AHASTAT_HA_BDR;
                ccb_h = LIST_NEXT(ccb_h, sim_links.le);
                ahadone(aha, pending_accb, AMBI_ERROR);
            } else {
                callout_reset_sbt(&pending_accb->timer,
                    SBT_1MS * ccb_h->timeout, 0, ahatimeout,
                    pending_accb, 0);
                ccb_h = LIST_NEXT(ccb_h, sim_links.le);
            }
        }
        device_printf(aha->dev, "No longer in timeout\n");
        return;
    }

    callout_stop(&accb->timer);

    switch (comp_code) {
    case AMBI_FREE:
        device_printf(aha->dev,
            "ahadone - CCB completed with free status!\n");
        break;
    case AMBI_NOT_FOUND:
        device_printf(aha->dev,
            "ahadone - CCB Abort failed to find CCB\n");
        break;
    case AMBI_ABORT:
    case AMBI_ERROR:
        /* An error occurred */
        if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
            csio->resid = 0;
        else
            csio->resid = aha_a24tou(accb->hccb.data_len);
        switch(accb->hccb.ahastat) {
        case AHASTAT_DATARUN_ERROR:
        {
            if (csio->resid <= 0) {
                csio->ccb_h.status = CAM_DATA_RUN_ERR;
                break;
            }
            /* FALLTHROUGH */
        }
        case AHASTAT_NOERROR:
            csio->scsi_status = accb->hccb.sdstat;
            csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
            switch(csio->scsi_status) {
            case SCSI_STATUS_CHECK_COND:
            case SCSI_STATUS_CMD_TERMINATED:
                csio->ccb_h.status |= CAM_AUTOSNS_VALID;
                /*
                 * The aha writes the sense data at different
                 * offsets based on the scsi cmd len
                 */
                bcopy((caddr_t) &accb->hccb.scsi_cdb +
                    accb->hccb.cmd_len,
                    (caddr_t) &csio->sense_data,
                    accb->hccb.sense_len);
                break;
            default:
                break;
            case SCSI_STATUS_OK:
                csio->ccb_h.status = CAM_REQ_CMP;
                break;
            }
            break;
        case AHASTAT_SELTIMEOUT:
            csio->ccb_h.status = CAM_SEL_TIMEOUT;
            break;
        case AHASTAT_UNEXPECTED_BUSFREE:
            csio->ccb_h.status = CAM_UNEXP_BUSFREE;
            break;
        case AHASTAT_INVALID_PHASE:
            csio->ccb_h.status = CAM_SEQUENCE_FAIL;
            break;
        case AHASTAT_INVALID_ACTION_CODE:
            panic("%s: Invalid Action code", aha_name(aha));
            break;
        case AHASTAT_INVALID_OPCODE:
            if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
                panic("%s: Invalid CCB Opcode %x hccb = %p",
                    aha_name(aha), accb->hccb.opcode,
                    &accb->hccb);
            device_printf(aha->dev,
                "AHA-1540A compensation failed\n");
            xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
            csio->ccb_h.status = CAM_REQUEUE_REQ;
            break;
        case AHASTAT_LINKED_CCB_LUN_MISMATCH:
            /* We don't even support linked commands... */
            panic("%s: Linked CCB Lun Mismatch", aha_name(aha));
            break;
        case AHASTAT_INVALID_CCB_OR_SG_PARAM:
            panic("%s: Invalid CCB or SG list", aha_name(aha));
            break;
        case AHASTAT_HA_SCSI_BUS_RESET:
            if ((csio->ccb_h.status & CAM_STATUS_MASK)
                != CAM_CMD_TIMEOUT)
                csio->ccb_h.status = CAM_SCSI_BUS_RESET;
            break;
        case AHASTAT_HA_BDR:
            if ((accb->flags & ACCB_DEVICE_RESET) == 0)
                csio->ccb_h.status = CAM_BDR_SENT;
            else
                csio->ccb_h.status = CAM_CMD_TIMEOUT;
            break;
        }
        if (csio->ccb_h.status != CAM_REQ_CMP) {
            xpt_freeze_devq(csio->ccb_h.path, /*count*/1);
            csio->ccb_h.status |= CAM_DEV_QFRZN;
        }
        if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
            ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        ahafreeccb(aha, accb);
        xpt_done(ccb);
        break;
    case AMBI_OK:
        /* All completed without incident */
        /* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */
        /* I don't think so since it works????
*/ ccb->ccb_h.status |= CAM_REQ_CMP; if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; } } static int ahareset(struct aha_softc* aha, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; uint8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; aha_outb(aha, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { PRVERB((aha->dev, "ahareset - Diagnostic Active failed to " "assert. status = %#x\n", status)); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: ahareset - Diagnostic Active failed to drop. " "status = 0x%x\n", aha_name(aha), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { device_printf(aha->dev, "ahareset - Host adapter failed to " "come ready. status = 0x%x\n", status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { device_printf(aha->dev, "ahareset - Adapter failed diag\n"); if ((status & DATAIN_REG_READY) != 0) device_printf(aha->dev, "ahareset - Host Adapter " "Error code = 0x%x\n", aha_inb(aha, DATAIN_REG)); return (ENXIO); } /* If we've attached to the XPT, tell it about the event */ if (aha->path != NULL) xpt_async(AC_BUS_RESET, aha->path, NULL); /* * Perform completion processing for all outstanding CCBs. */ while ((ccb_h = LIST_FIRST(&aha->pending_ccbs)) != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; pending_accb->hccb.ahastat = AHASTAT_HA_SCSI_BUS_RESET; ahadone(aha, pending_accb, AMBI_ERROR); } /* If we've allocated mailboxes, initialize them */ /* Must be done after we've aborted our queue, or aha_cmd fails */ if (aha->init_level > 4) ahainitmboxes(aha); return (0); } /* * Send a command to the adapter. */ int aha_cmd(struct aha_softc *aha, aha_op_t opcode, uint8_t *params, u_int param_len, uint8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; /* * All commands except for the "start mailbox" and the "enable * outgoing mailbox read interrupt" commands cannot be issued * while there are pending transactions. Freeze our SIMQ * and wait for all completions to occur if necessary. */ timeout = 10000; while (LIST_FIRST(&aha->pending_ccbs) != NULL && --timeout) { /* Fire the interrupt handler in case interrupts are blocked */ aha_intr(aha); DELAY(10); } if (timeout == 0) { device_printf(aha->dev, "aha_cmd: Timeout waiting for adapter idle\n"); return (ETIMEDOUT); } aha->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. 
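     * (100000 polls with DELAY(100) microseconds between them is
     * roughly 10 seconds.)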
     */
    timeout = 100000;
    while (--timeout) {
        status = aha_inb(aha, STATUS_REG);
        if ((status & HA_READY) != 0
         && (status & CMD_REG_BUSY) == 0)
            break;
        /*
         * Throw away any pending data which may be
         * left over from earlier commands that we
         * timed out on.
         */
        if ((status & DATAIN_REG_READY) != 0)
            (void)aha_inb(aha, DATAIN_REG);
        DELAY(100);
    }
    if (timeout == 0) {
        device_printf(aha->dev, "aha_cmd: Timeout waiting for adapter"
            " ready, status = 0x%x\n", status);
        return (ETIMEDOUT);
    }

    /*
     * Send the opcode followed by any necessary parameter bytes.
     */
    aha_outb(aha, COMMAND_REG, opcode);

    /*
     * Wait for up to 1 sec to get the parameter list sent
     */
    timeout = 10000;
    while (param_len && --timeout) {
        DELAY(100);
        status = aha_inb(aha, STATUS_REG);
        intstat = aha_inb(aha, INTSTAT_REG);

        if ((intstat & (INTR_PENDING|CMD_COMPLETE))
         == (INTR_PENDING|CMD_COMPLETE)) {
            saved_status = status;
            cmd_complete = 1;
            break;
        }

        if (aha->command_cmp != 0) {
            saved_status = aha->latched_status;
            cmd_complete = 1;
            break;
        }
        if ((status & DATAIN_REG_READY) != 0)
            break;
        if ((status & CMD_REG_BUSY) == 0) {
            aha_outb(aha, COMMAND_REG, *params++);
            param_len--;
            timeout = 10000;
        }
    }
    if (timeout == 0) {
        device_printf(aha->dev, "aha_cmd: Timeout sending parameters, "
            "status = 0x%x\n", status);
        error = ETIMEDOUT;
    }

    /*
     * For all other commands, we wait for any output data
     * and the final command completion interrupt.
     */
    while (cmd_complete == 0 && --cmd_timeout) {
        status = aha_inb(aha, STATUS_REG);
        intstat = aha_inb(aha, INTSTAT_REG);

        if (aha->command_cmp != 0) {
            cmd_complete = 1;
            saved_status = aha->latched_status;
        } else if ((intstat & (INTR_PENDING|CMD_COMPLETE))
                == (INTR_PENDING|CMD_COMPLETE)) {
            /*
             * Our poll (in case interrupts are blocked)
             * saw the CMD_COMPLETE interrupt.
             */
            cmd_complete = 1;
            saved_status = status;
        }

        if ((status & DATAIN_REG_READY) != 0) {
            uint8_t data;

            data = aha_inb(aha, DATAIN_REG);
            if (reply_len < reply_buf_size) {
                *reply_data++ = data;
            } else {
                device_printf(aha->dev,
                    "aha_cmd - Discarded reply data "
                    "byte for opcode 0x%x\n", opcode);
            }
            /*
             * Reset timeout to ensure at least a second
             * between response bytes.
             */
            cmd_timeout = MAX(cmd_timeout, 10000);
            reply_len++;
        }
        DELAY(100);
    }
    if (cmd_timeout == 0) {
        device_printf(aha->dev,
            "aha_cmd: Timeout: status = 0x%x, intstat = 0x%x, "
            "reply_len = %d\n", status, intstat, reply_len);
        return (ETIMEDOUT);
    }

    /*
     * Clear any pending interrupts. Block interrupts so our
     * interrupt handler is not re-entered.
     */
    aha_intr(aha);

    if (error != 0)
        return (error);

    /*
     * If the command was rejected by the controller, tell the caller.
     */
    if ((saved_status & CMD_INVALID) != 0) {
        PRVERB((aha->dev, "Invalid Command 0x%x\n", opcode));
        /*
         * Some early adapters may not recover properly from
         * an invalid command. If it appears that the controller
         * has wedged (i.e. status was not cleared by our interrupt
         * reset above), perform a soft reset.
         */
        DELAY(1000);
        status = aha_inb(aha, STATUS_REG);
        if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY|
            CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0
         || (status & (HA_READY|INIT_REQUIRED))
         != (HA_READY|INIT_REQUIRED))
            ahareset(aha, /*hard_reset*/FALSE);
        return (EINVAL);
    }

    if (param_len > 0) {
        /* The controller did not accept the full argument list */
        PRVERB((aha->dev, "Controller did not accept full argument "
            "list (%d > 0)\n", param_len));
        return (E2BIG);
    }

    if (reply_len != reply_buf_size) {
        /* Too much or too little data received */
        PRVERB((aha->dev, "data received mismatch (%d != %d)\n",
            reply_len, reply_buf_size));
        return (EMSGSIZE);
    }

    /* We were successful */
    return (0);
}

static int
ahainitmboxes(struct aha_softc *aha)
{
    int error;
    init_24b_mbox_params_t init_mbox;

    bzero(aha->in_boxes, sizeof(aha_mbox_in_t) * aha->num_boxes);
    bzero(aha->out_boxes, sizeof(aha_mbox_out_t) * aha->num_boxes);
    aha->cur_inbox = aha->in_boxes;
    aha->last_inbox = aha->in_boxes + aha->num_boxes - 1;
    aha->cur_outbox = aha->out_boxes;
    aha->last_outbox = aha->out_boxes + aha->num_boxes - 1;

    /* Tell the adapter about them */
    init_mbox.num_mboxes = aha->num_boxes;
    ahautoa24(aha->mailbox_physbase, init_mbox.base_addr);
    error = aha_cmd(aha, AOP_INITIALIZE_MBOX, (uint8_t *)&init_mbox,
        /*parmlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0,
        DEFAULT_CMD_TIMEOUT);

    if (error != 0)
        printf("ahainitmboxes: Initialization command failed\n");
    return (error);
}

/*
 * Update the XPT's idea of the negotiated transfer
 * parameters for a particular target.
 */
static void
ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings* cts)
{
    setup_data_t setup_info;
    u_int target;
    u_int targ_offset;
    u_int sync_period;
    int error;
    uint8_t param;
    targ_syncinfo_t sync_info;
    struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

    target = cts->ccb_h.target_id;
    targ_offset = (target & 0x7);

    /*
     * Inquire Setup Information. This command retrieves
     * the sync info for older models.
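     *
     * Worked example (field values hypothetical): on a pre-1542CF
     * board with sync_info.period == 2, the code below computes
     * 2000 + 500 * 2 = 3000, which scsi_calc_syncparam() then
     * converts to the standard SCSI sync rate parameter.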
/*
 * Update the XPT's idea of the negotiated transfer
 * parameters for a particular target.
 */
static void
ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings* cts)
{
	setup_data_t	setup_info;
	u_int		target;
	u_int		targ_offset;
	u_int		sync_period;
	int		error;
	uint8_t		param;
	targ_syncinfo_t	sync_info;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

	target = cts->ccb_h.target_id;
	targ_offset = (target & 0x7);

	/*
	 * Inquire Setup Information.  This command retrieves
	 * the sync info for older models.
	 */
	param = sizeof(setup_info);
	error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1,
	    (uint8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT);

	if (error != 0) {
		device_printf(aha->dev,
		    "ahafetchtransinfo - Inquire Setup Info Failed %d\n",
		    error);
		return;
	}

	sync_info = setup_info.syncinfo[targ_offset];

	if (sync_info.sync == 0)
		spi->sync_offset = 0;
	else
		spi->sync_offset = sync_info.offset;

	spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

	if (aha->boardid >= BOARD_1542CF)
		sync_period = 1000;
	else
		sync_period = 2000;
	sync_period += 500 * sync_info.period;

	/* Convert ns value to standard SCSI sync rate */
	if (spi->sync_offset != 0)
		spi->sync_period = scsi_calc_syncparam(sync_period);
	else
		spi->sync_period = 0;

	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH;
	xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts);
}

static void
ahamapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	aha->mailbox_physbase = segs->ds_addr;
}

static void
ahamapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	aha->aha_ccb_physbase = segs->ds_addr;
}

static void
ahamapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aha_softc* aha;

	aha = (struct aha_softc*)arg;
	SLIST_FIRST(&aha->sg_maps)->sg_physaddr = segs->ds_addr;
}

static void
ahapoll(struct cam_sim *sim)
{
	aha_intr_locked(cam_sim_softc(sim));
}

static void
ahatimeout(void *arg)
{
	struct aha_ccb	*accb;
	union  ccb	*ccb;
	struct aha_softc *aha;
	uint32_t	paddr;
	struct ccb_hdr	*ccb_h;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;
	mtx_assert(&aha->lock, MA_OWNED);
	xpt_print_path(ccb->ccb_h.path);
	printf("CCB %p - timed out\n", (void *)accb);

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("CCB %p - timed out CCB already completed\n",
		    (void *)accb);
		return;
	}

	/*
	 * In order to simplify the recovery process, we ask the XPT
	 * layer to halt the queue of new transactions and we traverse
	 * the list of pending CCBs and remove their timeouts.  This
	 * means that the driver attempts to clear only one error
	 * condition at a time.  In general, timeouts that occur
	 * close together are related anyway, so there is no benefit
	 * in attempting to handle errors in parallel.  Timeouts will
	 * be reinstated when the recovery process ends.
	 */
	if ((accb->flags & ACCB_DEVICE_RESET) == 0) {
		if ((accb->flags & ACCB_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(aha->sim, /*count*/1);
			accb->flags |= ACCB_RELEASE_SIMQ;
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			callout_stop(&pending_accb->timer);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}
	}

	if ((accb->flags & ACCB_DEVICE_RESET) != 0
	 || aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * Try a full host adapter/SCSI bus reset.
		 * We do this only if we have already attempted
		 * to clear the condition with a BDR, or we cannot
		 * attempt a BDR for lack of mailbox resources.
		 */
		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		ahareset(aha, /*hardreset*/TRUE);
		device_printf(aha->dev, "No longer in timeout\n");
	} else {
		/*
		 * Send a Bus Device Reset message:
		 * The target that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths),
		 * but we have no way of determining this from our
		 * timeout handler.
Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ accb->flags |= ACCB_DEVICE_RESET; callout_reset(&accb->timer, 2 * hz, ahatimeout, accb); aha->recovery_accb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ aha->recovery_accb->hccb.datain = TRUE; aha->recovery_accb->hccb.dataout = TRUE; aha->recovery_accb->hccb.ahastat = 0; aha->recovery_accb->hccb.sdstat = 0; aha->recovery_accb->hccb.target = ccb->ccb_h.target_id; /* Tell the adapter about this command */ paddr = ahaccbvtop(aha, aha->recovery_accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } } int aha_detach(struct aha_softc *aha) { mtx_lock(&aha->lock); xpt_async(AC_LOST_DEVICE, aha->path, NULL); xpt_free_path(aha->path); xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); mtx_unlock(&aha->lock); /* XXX: Drain all timers? */ return (0); } MODULE_DEPEND(aha, cam, 1, 1, 1); Index: stable/11/sys/dev/ahci/ahci.c =================================================================== --- stable/11/sys/dev/ahci/ahci.c (revision 314380) +++ stable/11/sys/dev/ahci/ahci.c (revision 314381) @@ -1,2744 +1,2740 @@ /*- * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" #include #include #include #include #include /* local prototypes */ static void ahci_intr(void *data); static void ahci_intr_one(void *data); static void ahci_intr_one_edge(void *data); static int ahci_ch_init(device_t dev); static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); static void ahci_ch_intr(void *arg); static void ahci_ch_intr_direct(void *arg); static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_execute_transaction(struct ahci_slot *slot); static void ahci_timeout(struct ahci_slot *slot); static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); static void ahci_dmainit(device_t dev); static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_dmafini(device_t dev); static void ahci_slotsalloc(device_t dev); static void ahci_slotsfree(device_t dev); static void ahci_reset(struct ahci_channel *ch); static void ahci_start(struct ahci_channel *ch, int fbs); static void ahci_stop(struct ahci_channel *ch); static void ahci_clo(struct ahci_channel *ch); static void ahci_start_fr(struct ahci_channel *ch); static void ahci_stop_fr(struct ahci_channel *ch); static int ahci_sata_connect(struct ahci_channel *ch); static int ahci_sata_phy_reset(struct ahci_channel *ch); static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); static void ahci_issue_recovery(struct ahci_channel *ch); static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); static void ahciaction(struct cam_sim *sim, union ccb *ccb); static void ahcipoll(struct cam_sim *sim); static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 int ahci_ctlr_setup(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Clear interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); /* Configure CCC */ if (ctlr->ccc) { ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); ATA_OUTL(ctlr->r_mem, AHCI_CCCC, (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | (4 << AHCI_CCCC_CC_SHIFT) | AHCI_CCCC_EN); ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; if (bootverbose) { device_printf(dev, "CCC with %dms/4cmd enabled on vector %d\n", ctlr->ccc, ctlr->cccv); } } /* Enable AHCI interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); return (0); } int ahci_ctlr_reset(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int timeout; /* Enable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); /* Reset AHCI controller */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); for (timeout = 1000; timeout > 0; timeout--) { DELAY(1000); if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) 
break; } if (timeout == 0) { device_printf(dev, "AHCI controller reset failure\n"); return (ENXIO); } /* Reenable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { /* * Restore capability field. * This is write to a read-only register to restore its state. * On fully standard-compliant hardware this is not needed and * this operation shall not take place. See ahci_pci.c for * platforms using this quirk. */ ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); } return (0); } int ahci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i, speed, unit; uint32_t u, version; device_t child; ctlr->dev = dev; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { ahci_free_mem(dev); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } /* Get the HW capabilities */ version = ATA_INL(ctlr->r_mem, AHCI_VS); ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); if (version >= 0x00010200) ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); if (ctlr->caps & AHCI_CAP_EMS) ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); if (ctlr->quirks & AHCI_Q_FORCE_PI) { /* * Enable ports. * The spec says that BIOS sets up bits corresponding to * available ports. On platforms where this information * is missing, the driver can define available ports on its own. */ int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; int nmask = (1 << nports) - 1; ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", nports, nmask); } ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ if ((ctlr->quirks & AHCI_Q_ALTSIG) && (ctlr->caps & AHCI_CAP_SPM) == 0) ctlr->quirks |= AHCI_Q_NOBSYRES; if (ctlr->quirks & AHCI_Q_1CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->ichannels &= 0x01; } if (ctlr->quirks & AHCI_Q_2CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 1; ctlr->ichannels &= 0x03; } if (ctlr->quirks & AHCI_Q_4CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 3; ctlr->ichannels &= 0x0f; } ctlr->channels = MAX(flsl(ctlr->ichannels), (ctlr->caps & AHCI_CAP_NPMASK) + 1); if (ctlr->quirks & AHCI_Q_NOPMP) ctlr->caps &= ~AHCI_CAP_SPM; if (ctlr->quirks & AHCI_Q_NONCQ) ctlr->caps &= ~AHCI_CAP_SNCQ; if ((ctlr->caps & AHCI_CAP_CCCS) == 0) ctlr->ccc = 0; ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); /* Create controller-wide DMA tag. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &ctlr->dma_tag)) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (ENXIO); } ahci_ctlr_setup(dev); /* Setup interrupts. 
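/*
 * [Editorial sketch] The capability banner below decodes AHCI_VS with
 * nibble arithmetic: each nibble is one decimal digit of the spec
 * revision when printed with %x/%02x.  A worked example with
 * illustrative names: for AHCI 1.30 the register reads 0x00010300,
 * so major computes to 0x1 ("1") and minor to 0x30 ("30").
 */
static void
ahci_vs_split(uint32_t version, u_int *major, u_int *minor)
{
	*major = ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f);
	*minor = ((version >> 4) & 0xf0) + (version & 0x0f);
}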
*/ if ((error = ahci_setup_interrupt(dev)) != 0) { bus_dma_tag_destroy(ctlr->dma_tag); ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } i = 0; for (u = ctlr->ichannels; u != 0; u >>= 1) i += (u & 1); ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); resource_int_value(device_get_name(dev), device_get_unit(dev), "direct", &ctlr->direct); /* Announce HW capabilities. */ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), ((version >> 4) & 0xf0) + (version & 0x0f), (ctlr->caps & AHCI_CAP_NPMASK) + 1, ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?"))), (ctlr->caps & AHCI_CAP_SPM) ? "supported" : "not supported", (ctlr->caps & AHCI_CAP_FBSS) ? " with FBS" : ""); if (ctlr->quirks != 0) { device_printf(dev, "quirks=0x%b\n", ctlr->quirks, AHCI_Q_BIT_STRING); } if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?")))); printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", (ctlr->caps & AHCI_CAP_NPMASK) + 1); } if (bootverbose && version >= 0x00010200) { device_printf(dev, "Caps2:%s%s%s%s%s%s\n", (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "ahcich", -1); if (child == NULL) { device_printf(dev, "failed to add channel device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)unit); if ((ctlr->ichannels & (1 << unit)) == 0) device_disable(child); } if (ctlr->caps & AHCI_CAP_EMS) { child = device_add_child(dev, "ahciem", -1); if (child == NULL) device_printf(dev, "failed to add enclosure device\n"); else device_set_ivars(child, (void *)(intptr_t)-1); } bus_generic_attach(dev); return (0); } int ahci_detach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ for (i = 0; i < ctlr->numirqs; i++) { if (ctlr->irqs[i].r_irq) { bus_teardown_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); } } bus_dma_tag_destroy(ctlr->dma_tag); /* Free memory. 
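/*
 * [Editorial sketch] ahci_attach() above leans on two bitmask idioms:
 * a shift-and-add population count over the PI (ports implemented)
 * mask, and a per-bit walk to add or disable channel devices.  Both
 * distilled below; pi_popcount/pi_foreach are illustrative names only.
 */
static int
pi_popcount(uint32_t mask)
{
	int n = 0;

	for (; mask != 0; mask >>= 1)	/* same loop shape as the driver */
		n += mask & 1;
	return (n);
}

static void
pi_foreach(uint32_t mask, void (*visit)(int port))
{
	int port;

	for (port = 0; port < 32; port++)
		if ((mask & (1u << port)) != 0)
			visit(port);	/* e.g. add an ahcich child */
}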
*/ rman_fini(&ctlr->sc_iomem); ahci_free_mem(dev); return (0); } void ahci_free_mem(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Release memory resources */ if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); if (ctlr->r_msix_table) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_tab_rid, ctlr->r_msix_table); if (ctlr->r_msix_pba) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_pba_rid, ctlr->r_msix_pba); ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; } int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Check for single MSI vector fallback. */ if (ctlr->numirqs > 1 && (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { device_printf(dev, "Falling back to one MSI\n"); ctlr->numirqs = 1; } /* Ensure we don't overrun irqs. */ if (ctlr->numirqs > AHCI_MAX_IRQS) { device_printf(dev, "Too many irqs %d > %d (clamping)\n", ctlr->numirqs, AHCI_MAX_IRQS); ctlr->numirqs = AHCI_MAX_IRQS; } /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; else if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; else if (ctlr->channels > ctlr->numirqs && i == ctlr->numirqs - 1) ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; else ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : ahci_intr_one), &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return (ENXIO); } if (ctlr->numirqs > 1) { bus_describe_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle, ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? "ch%d" : "%d", i); } } return (0); } /* * Common case interrupt handler. */ static void ahci_intr(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; u_int32_t is, ise = 0; void *arg; int unit; if (irq->mode == AHCI_IRQ_MODE_ALL) { unit = 0; if (ctlr->ccc) is = ctlr->ichannels; else is = ATA_INL(ctlr->r_mem, AHCI_IS); } else { /* AHCI_IRQ_MODE_AFTER */ unit = irq->r_irq_rid - 1; is = ATA_INL(ctlr->r_mem, AHCI_IS); is &= (0xffffffff << unit); } /* CCC interrupt is edge triggered. */ if (ctlr->ccc) ise = 1 << ctlr->cccv; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ise |= is; if (ise != 0) ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); for (; unit < ctlr->channels; unit++) { if ((is & (1 << unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* AHCI declares level triggered IS. */ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, is); ATA_RBL(ctlr->r_mem, AHCI_IS); } /* * Simplified interrupt handler for multivector MSI mode. 
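/*
 * [Editorial sketch] ahci_intr() above encodes an acknowledge-order
 * rule: edge-triggered interrupt status must be cleared before the
 * per-channel handlers run (a new edge arriving meanwhile would
 * otherwise be lost), while level-triggered status is cleared after,
 * once the cause has been serviced.  Skeleton form, with hypothetical
 * read_is/write_is/dispatch callbacks standing in for register access:
 */
static void
ack_and_dispatch(uint32_t (*read_is)(void), void (*write_is)(uint32_t),
    void (*dispatch)(uint32_t), int edge_triggered)
{
	uint32_t is = read_is();

	if (edge_triggered)
		write_is(is);		/* ack first: do not lose an edge */
	dispatch(is);			/* run per-channel handlers */
	if (!edge_triggered)
		write_is(is);		/* ack last: cause now serviced */
	(void)read_is();		/* read back to flush posted write */
}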
*/ static void ahci_intr_one(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); ATA_RBL(ctlr->r_mem, AHCI_IS); } static void ahci_intr_one_edge(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; /* Some controllers have edge triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); ATA_RBL(ctlr->r_mem, AHCI_IS); } struct resource * ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ahci_controller *ctlr = device_get_softc(dev); struct resource *res; rman_res_t st; int offset, size, unit; unit = (intptr_t)device_get_ivars(child); res = NULL; switch (type) { case SYS_RES_MEMORY: if (unit >= 0) { offset = AHCI_OFFSET + (unit << 7); size = 128; } else if (*rid == 0) { offset = AHCI_EM_CTL; size = 4; } else { offset = (ctlr->emloc & 0xffff0000) >> 14; size = (ctlr->emloc & 0x0000ffff) << 2; if (*rid != 1) { if (*rid == 2 && (ctlr->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) offset += size; else break; } } st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + size - 1, size, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 128, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irqs[0].r_irq; break; } return (res); } int ahci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return (ENOENT); return (0); } return (EINVAL); } int ahci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("ahci.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } int ahci_print_child(device_t dev, device_t child) { int retval, channel; retval = bus_print_child_header(dev, child); channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) retval += printf(" at channel %d", channel); retval += bus_print_child_footer(dev, child); return (retval); } int ahci_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { int channel; channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) snprintf(buf, buflen, "channel=%d", channel); return (0); } bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child) { struct ahci_controller *ctlr = 
device_get_softc(dev); return (ctlr->dma_tag); } static int ahci_ch_probe(device_t dev) { device_set_desc_copy(dev, "AHCI channel"); return (BUS_PROBE_DEFAULT); } static int ahci_ch_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ahci_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; u_int32_t version; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->caps = ctlr->caps; ch->caps2 = ctlr->caps2; ch->start = ctlr->ch_start; ch->quirks = ctlr->quirks; ch->vendorid = ctlr->vendorid; ch->deviceid = ctlr->deviceid; ch->subvendorid = ctlr->subvendorid; ch->subdeviceid = ctlr->subdeviceid; ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); STAILQ_INIT(&ch->doneq); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); /* JMicron external ports (0) sometimes limited */ if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) sata_rev = 1; if (ch->quirks & AHCI_Q_SATA2) sata_rev = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = ch->numslots; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN; } rid = 0; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); version = ATA_INL(ctlr->r_mem, AHCI_VS); if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) ch->chcaps |= AHCI_P_CMD_FBSCP; if (ch->caps2 & AHCI_CAP2_SDS) ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s\n", (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); } ahci_dmainit(dev); ahci_slotsalloc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, ch, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. */ devq = cam_simq_alloc(ch->numslots); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), (struct mtx *)&ch->mtx, (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? 
ch->numslots : 0, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, ahci_ch_pm, ch); } mtx_unlock(&ch->mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int ahci_ch_detach(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ahci_ch_deinit(dev); ahci_slotsfree(dev); ahci_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int ahci_ch_init(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); uint64_t work; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Setup work areas */ work = ch->dma.work_bus + AHCI_CL_OFFSET; ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); work = ch->dma.rfis_bus; ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); /* Activate the channel and power/spin up device */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); ahci_start_fr(ch); ahci_start(ch, 1); return (0); } static int ahci_ch_deinit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset command register. */ ahci_stop(ch); ahci_stop_fr(ch); ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); /* Allow everything, including partial and slumber modes. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); /* Request slumber mode transition and give some time to get there. */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); DELAY(100); /* Disable PHY. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } static int ahci_ch_suspend(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); /* Forget about reset. 
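/*
 * [Editorial sketch] ahci_ch_init() above programs each 64-bit work
 * area address as a low/high register pair (PxCLB/PxCLBU and
 * PxFB/PxFBU).  The split is just this, with write32 standing in for
 * ATA_OUTL(); on HBAs without AHCI_CAP_64BIT the high half is zero:
 */
static void
set_reg64(void (*write32)(int reg, uint32_t val), int lo_reg, int hi_reg,
    uint64_t bus_addr)
{
	write32(lo_reg, bus_addr & 0xffffffff);	/* low 32 bits */
	write32(hi_reg, bus_addr >> 32);	/* high 32 bits */
}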
*/ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); ahci_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_resume(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); ahci_reset(ch); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t ahcich_devclass; static device_method_t ahcich_methods[] = { DEVMETHOD(device_probe, ahci_ch_probe), DEVMETHOD(device_attach, ahci_ch_attach), DEVMETHOD(device_detach, ahci_ch_detach), DEVMETHOD(device_suspend, ahci_ch_suspend), DEVMETHOD(device_resume, ahci_ch_resume), DEVMETHOD_END }; static driver_t ahcich_driver = { "ahcich", ahcich_methods, sizeof(struct ahci_channel) }; DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); struct ahci_dc_cb_args { bus_addr_t maddr; int error; }; static void ahci_dmainit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_dc_cb_args dcba; size_t rfsize; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, BUS_DMA_ZERO, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* FIS receive area. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) rfsize = 4096; else rfsize = 256; if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rfsize, 1, rfsize, 0, NULL, NULL, &ch->dma.rfis_tag)) goto error; if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, &ch->dma.rfis_map)) goto error; if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); goto error; } ch->dma.rfis_bus = dcba.maddr; /* Data area. 
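/*
 * [Editorial sketch] The allocations above follow the busdma
 * load-callback pattern: bus_dmamap_load() reports the resulting
 * segment list to a callback, which stashes the bus address of the
 * single segment, much as ahci_dmasetupc_cb() does further down.
 * The struct and function names here are illustrative:
 */
struct onesg_result {
	bus_addr_t	maddr;		/* filled in by the callback */
	int		error;
};

static void
onesg_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct onesg_result *res = arg;

	if ((res->error = error) == 0)
		res->maddr = segs[0].ds_addr;	/* one contiguous area */
}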
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); ahci_dmafini(dev); } static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void ahci_dmafini(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.rfis_bus) { bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); ch->dma.rfis_bus = 0; ch->dma.rfis = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void ahci_slotsalloc(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; slot->ch = ch; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void ahci_slotsfree(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) { if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); union ccb *ccb; if (bootverbose) { if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) device_printf(ch->dev, "CONNECT requested\n"); else device_printf(ch->dev, "DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return (0); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return (0); } xpt_rescan(ccb); return (1); } return (0); } static void ahci_cpd_check_events(struct ahci_channel *ch) { u_int32_t status; union ccb *ccb; device_t dev; if (ch->pm_level == 0) return; status = ATA_INL(ch->r_mem, AHCI_P_CMD); if ((status & AHCI_P_CMD_CPD) == 0) return; if (bootverbose) { dev = ch->dev; if (status & AHCI_P_CMD_CPS) { device_printf(dev, "COLD CONNECT requested\n"); } else device_printf(dev, "COLD DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void ahci_notify_events(struct ahci_channel *ch, u_int32_t status) { struct cam_path *dpath; int i; if (ch->caps & 
AHCI_CAP_SSNTF) ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); if (bootverbose) device_printf(ch->dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void ahci_done(struct ahci_channel *ch, union ccb *ccb) { mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || ch->batch == 0) { xpt_done(ccb); return; } STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); } static void ahci_ch_intr(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t istatus; /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ahci_ch_intr_main(ch, istatus); mtx_unlock(&ch->mtx); } static void ahci_ch_intr_direct(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; struct ccb_hdr *ccb_h; uint32_t istatus; STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ch->batch = 1; ahci_ch_intr_main(ch, istatus); ch->batch = 0; /* * Prevent the possibility of issues caused by processing the queue * while unlocked below by moving the contents to a local queue. */ STAILQ_CONCAT(&tmp_doneq, &ch->doneq); mtx_unlock(&ch->mtx); while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); xpt_done_direct((union ccb *)ccb_h); } } static void ahci_ch_pm(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t work; if (ch->numrslots != 0) return; work = ATA_INL(ch->r_mem, AHCI_P_CMD); if (ch->pm_level == 4) work |= AHCI_P_CMD_PARTIAL; else work |= AHCI_P_CMD_SLUMBER; ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); } static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) { uint32_t cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; /* Clear interrupt statuses. */ ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. */ if (ch->numtslots != 0) cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); else cstatus = 0; if (ch->numrslots != ch->numtslots) cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); /* Read SNTF in one of possible ways. */ if ((istatus & AHCI_P_IX_SDB) && (ch->pm_present || ch->curr[0].atapi != 0)) { if (ch->caps & AHCI_CAP_SSNTF) sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); else if (ch->fbs_enabled) { u_int8_t *fis = ch->dma.rfis + 0x58; for (i = 0; i < 16; i++) { if (fis[1] & 0x80) { fis[1] &= 0x7f; sntf |= 1 << i; } fis += 256; } } else { u_int8_t *fis = ch->dma.rfis + 0x58; if (fis[1] & 0x80) sntf = (1 << (fis[1] & 0x0f)); } } /* Process PHY events */ if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { serr = ATA_INL(ch->r_mem, AHCI_P_SERR); if (serr) { ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); reset = ahci_phy_check_events(ch, serr); } } /* Process cold presence detection events */ if ((istatus & AHCI_P_IX_CPD) && !reset) ahci_cpd_check_events(ch); /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { if (ch->quirks & AHCI_Q_NOCCS) { /* * ASMedia chips sometimes report failed commands as * completed. Count all running commands as failed. */ cstatus |= ch->rslots; /* They also report wrong CCS, so try to guess one. */ ccs = powerof2(cstatus) ? 
ffs(cstatus) - 1 : -1; } else { ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; } //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); port = -1; if (ch->fbs_enabled) { uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); if (fbs & AHCI_P_FBS_SDE) { port = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; } else { for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } } } err = ch->rslots & cstatus; } else { ccs = 0; err = 0; port = -1; } /* Complete all successful commands. */ ok = ch->rslots & ~cstatus; for (i = 0; i < ch->numslots; i++) { if ((ok >> i) & 1) ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); } /* On error, complete the rest of commands with error statuses. */ if (err) { if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: reqests in loading state. */ if (((err >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (istatus & AHCI_P_IX_TFE) { if (port != -2) { /* Task File Error */ if (ch->numtslotspd[ ch->slot[i].ccb->ccb_h.target_id] == 0) { /* Untagged operation. */ if (i == ccs) et = AHCI_ERR_TFE; else et = AHCI_ERR_INNOCENT; } else { /* Tagged operation. */ et = AHCI_ERR_NCQ; } } else { et = AHCI_ERR_TFE; ch->fatalerr = 1; } } else if (istatus & AHCI_P_IX_IF) { if (ch->numtslots == 0 && i != ccs && port != -2) et = AHCI_ERR_INNOCENT; else et = AHCI_ERR_SATA; } else et = AHCI_ERR_INVALID; ahci_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); } /* Process NOTIFY events */ if (sntf) ahci_notify_events(ch, sntf); } /* Must be called with channel locked. */ static int ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) { int t = ccb->ccb_h.target_id; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0xffffffff >> (32 - ch->curr[t].tags))) == 0) return (1); /* If we have FBS */ if (ch->fbs_enabled) { /* Tagged command while untagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) return (1); } else { /* Tagged command while untagged are active. */ if (ch->numrslots != 0 && ch->numtslots == 0) return (1); /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } } else { /* If we have FBS */ if (ch->fbs_enabled) { /* Untagged command while tagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) return (1); } else { /* Untagged command while tagged are active. */ if (ch->numrslots != 0 && ch->numtslots != 0) return (1); } } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. 
*/ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) { struct ahci_slot *slot; int tag, tags; /* Choose empty slot. */ tags = ch->numslots; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; if (ch->lastslot + 1 < tags) tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); else tag = 0; if (tag == 0 || tag + ch->lastslot >= tags) tag = ffs(~ch->oslots) - 1; else tag += ch->lastslot; ch->lastslot = tag; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. */ ch->oslots |= (1 << tag); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << tag); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = AHCI_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, ahci_dmasetprd, slot, 0); } else { slot->dma.nsegs = 0; ahci_execute_transaction(slot); } } /* Locked by busdma engine. */ static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_dma_prd *prd; int i; if (error) { device_printf(ch->dev, "DMA load error\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); } slot->dma.nsegs = nsegs; bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); ahci_execute_transaction(slot); } /* Must be called with channel locked. */ static void ahci_execute_transaction(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_cmd_list *clp; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int fis_size, i, softreset; uint8_t *fis = ch->dma.rfis + 0x40; uint8_t val; /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } /* Setup the command list entry */ clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); clp->cmd_flags = htole16( (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | (ccb->ccb_h.func_code == XPT_SCSI_IO ? 
	    (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
	    (fis_size / sizeof(u_int32_t)) |
	    (port << 12));
	clp->prd_length = htole16(slot->dma.nsegs);
	/* Special handling for Soft Reset command. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
		if (ccb->ataio.cmd.control & ATA_A_RESET) {
			softreset = 1;
			/* Kick controller into sane state */
			ahci_stop(ch);
			ahci_clo(ch);
			ahci_start(ch, 0);
			clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
		} else {
			softreset = 2;
			/* Prepare FIS receive area for check. */
			for (i = 0; i < 20; i++)
				fis[i] = 0xff;
		}
	} else
		softreset = 0;
	clp->bytecount = 0;
	clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
	    (AHCI_CT_SIZE * slot->slot));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
	    BUS_DMASYNC_PREREAD);
	/* Set ACTIVE bit for NCQ commands. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
	}
	/* If FBS is enabled, set PMP port. */
	if (ch->fbs_enabled) {
		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
		    (port << AHCI_P_FBS_DEV_SHIFT));
	}
	/* Issue command to the controller. */
	slot->state = AHCI_SLOT_RUNNING;
	ch->rslots |= (1 << slot->slot);
	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
	/* Device reset commands don't interrupt.  Poll them. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
		int count, timeout = ccb->ccb_h.timeout * 100;
		enum ahci_err_type et = AHCI_ERR_NONE;

		for (count = 0; count < timeout; count++) {
			DELAY(10);
			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) &
			    (1 << slot->slot)))
				break;
			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
			    softreset != 1) {
#if 0
				device_printf(ch->dev,
				    "Poll error on slot %d, TFD: %04x\n",
				    slot->slot,
				    ATA_INL(ch->r_mem, AHCI_P_TFD));
#endif
				et = AHCI_ERR_TFE;
				break;
			}
			/* Workaround for ATI SB600/SB700 chipsets. */
			if (ccb->ccb_h.target_id == 15 &&
			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
				et = AHCI_ERR_TIMEOUT;
				break;
			}
		}

		/*
		 * Marvell HBAs with non-RAID firmware do not wait for
		 * readiness after soft reset, so we have to wait here.
		 * Marvell RAIDs do not have this problem, but instead
		 * sometimes forget to update FIS receive area, breaking
		 * this wait.
		 */
		if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
		    (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
		    softreset == 2 && et == AHCI_ERR_NONE) {
			for ( ; count < timeout; count++) {
				bus_dmamap_sync(ch->dma.rfis_tag,
				    ch->dma.rfis_map, BUS_DMASYNC_POSTREAD);
				val = fis[2];
				bus_dmamap_sync(ch->dma.rfis_tag,
				    ch->dma.rfis_map, BUS_DMASYNC_PREREAD);
				if ((val & ATA_S_BUSY) == 0)
					break;
				DELAY(10);
			}
		}
		if (timeout && (count >= timeout)) {
			device_printf(ch->dev,
			    "Poll timeout on slot %d port %d\n",
			    slot->slot, port);
			device_printf(ch->dev, "is %08x cs %08x ss %08x "
			    "rs %08x tfd %02x serr %08x cmd %08x\n",
			    ATA_INL(ch->r_mem, AHCI_P_IS),
			    ATA_INL(ch->r_mem, AHCI_P_CI),
			    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
			    ATA_INL(ch->r_mem, AHCI_P_TFD),
			    ATA_INL(ch->r_mem, AHCI_P_SERR),
			    ATA_INL(ch->r_mem, AHCI_P_CMD));
			et = AHCI_ERR_TIMEOUT;
		}
		/* Kick controller into sane state and enable FBS. */
		if (softreset == 2)
			ch->eslots |= (1 << slot->slot);
		ahci_end_transaction(slot, et);
		return;
	}
	/* Start command execution timeout */
	callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2,
	    0, (timeout_t*)ahci_timeout, slot, 0);
	return;
}

/* Must be called with channel locked.
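/*
 * [Editorial sketch] ahci_execute_transaction() above issues a slot in
 * a fixed order: for an NCQ (FPDMA) command the tag bit goes into
 * PxSACT first, and only then into PxCI; non-NCQ commands touch PxCI
 * alone.  Distilled, with out32 standing in for ATA_OUTL():
 */
static void
issue_slot(void (*out32)(int reg, uint32_t val), int sact_reg, int ci_reg,
    int slot, int is_ncq)
{
	if (is_ncq)
		out32(sact_reg, 1u << slot);	/* declare the NCQ tag */
	out32(ci_reg, 1u << slot);	/* then hand the slot to hardware */
}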
*/ static void ahci_process_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void ahci_rearm_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < AHCI_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, (timeout_t*)ahci_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void ahci_timeout(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; device_t dev = ch->dev; uint32_t sstatus; int ccs; int i; /* Check for stale timeout. */ if (slot->state < AHCI_SLOT_RUNNING) return; /* Check if slot was not being executed last time we checked. */ if (slot->state < AHCI_SLOT_EXECUTING) { /* Check if slot started executing. */ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || ch->fbs_enabled || ch->wrongccs) slot->state = AHCI_SLOT_EXECUTING; else if ((ch->rslots & (1 << ccs)) == 0) { ch->wrongccs = 1; slot->state = AHCI_SLOT_EXECUTING; } callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, (timeout_t*)ahci_timeout, slot, 0); return; } device_printf(dev, "Timeout on slot %d port %d\n", slot->slot, slot->ccb->ccb_h.target_id & 0x0f); device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " "serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); /* Handle frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ ch->fatalerr = 1; /* Handle command with timeout. */ ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } } else { /* With FBS we wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) ahci_process_timeout(ch); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } } /* Must be called with channel locked. 
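/*
 * [Editorial sketch] ahci_timeout() above implements a two-phase
 * watchdog: the callout fires at half the CCB timeout, the first firing
 * merely promotes a slot that the hardware reports as executing, and
 * only a later firing of an already-executing slot declares a real
 * timeout, so every command gets at least its full nominal budget.
 * The state machine, reduced, with illustrative names:
 */
enum wd_state { WD_RUNNING, WD_EXECUTING };

static int				/* returns 1 on a real timeout */
wd_tick(enum wd_state *state, int hw_executing)
{
	if (*state == WD_RUNNING) {
		if (hw_executing)
			*state = WD_EXECUTING;
		return (0);		/* re-arm for another half period */
	}
	return (1);			/* already executing: timed out */
}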
 */
static void
ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
{
	struct ahci_channel *ch = slot->ch;
	union ccb *ccb = slot->ccb;
	struct ahci_cmd_list *clp;
	int lastto;
	uint32_t sig;

	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	clp = (struct ahci_cmd_list *)
	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
	/*
	 * Read the result registers into the result structure.  They may
	 * be incorrect if several commands finished at the same time, so
	 * read them only when we are sure or have to.
	 */
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		struct ata_res *res = &ccb->ataio.res;

		if ((et == AHCI_ERR_TFE) ||
		    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
			u_int8_t *fis = ch->dma.rfis + 0x40;

			bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
			    BUS_DMASYNC_POSTREAD);
			if (ch->fbs_enabled) {
				fis += ccb->ccb_h.target_id * 256;
				res->status = fis[2];
				res->error = fis[3];
			} else {
				uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);

				res->status = tfd;
				res->error = tfd >> 8;
			}
			res->lba_low = fis[4];
			res->lba_mid = fis[5];
			res->lba_high = fis[6];
			res->device = fis[7];
			res->lba_low_exp = fis[8];
			res->lba_mid_exp = fis[9];
			res->lba_high_exp = fis[10];
			res->sector_count = fis[12];
			res->sector_count_exp = fis[13];
			/*
			 * Some weird controllers do not return signature in
			 * FIS receive area.  Read it from PxSIG register.
			 */
			if ((ch->quirks & AHCI_Q_ALTSIG) &&
			    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
			    (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
				sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
				res->lba_high = sig >> 24;
				res->lba_mid = sig >> 16;
				res->lba_low = sig >> 8;
				res->sector_count = sig;
			}
		} else
			bzero(res, sizeof(*res));
		if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
		    (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
			ccb->ataio.resid =
			    ccb->ataio.dxfer_len - le32toh(clp->bytecount);
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
			ccb->csio.resid =
			    ccb->csio.dxfer_len - le32toh(clp->bytecount);
		}
	}
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
		    (ccb->ccb_h.flags & CAM_DIR_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
	}
	if (et != AHCI_ERR_NONE)
		ch->eslots |= (1 << slot->slot);
	/* In case of error, freeze device for proper recovery. */
	if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	/* Set proper result status. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	switch (et) {
	case AHCI_ERR_NONE:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case AHCI_ERR_INVALID:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		break;
	case AHCI_ERR_INNOCENT:
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	case AHCI_ERR_TFE:
	case AHCI_ERR_NCQ:
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		} else {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		}
		break;
	case AHCI_ERR_SATA:
		ch->fatalerr = 1;
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_UNCOR_PARITY;
		break;
	case AHCI_ERR_TIMEOUT:
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		break;
	default:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	}
	/* Free slot. */
	ch->oslots &= ~(1 << slot->slot);
	ch->rslots &= ~(1 << slot->slot);
	ch->aslots &= ~(1 << slot->slot);
	slot->state = AHCI_SLOT_EMPTY;
	slot->ccb = NULL;
	/* Update channel stats. */
	ch->numrslots--;
	ch->numrslotspd[ccb->ccb_h.target_id]--;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ch->numtslots--;
		ch->numtslotspd[ccb->ccb_h.target_id]--;
	}
	/* Cancel timeout state if request completed normally. */
	if (et != AHCI_ERR_TIMEOUT) {
		lastto = (ch->toslots == (1 << slot->slot));
		ch->toslots &= ~(1 << slot->slot);
		if (lastto)
			xpt_release_simq(ch->sim, TRUE);
	}
	/*
	 * If it was the first request of a reset sequence and there is
	 * no error, proceed to the second request.
	 */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
	    (ccb->ataio.cmd.control & ATA_A_RESET) &&
	    et == AHCI_ERR_NONE) {
		ccb->ataio.cmd.control &= ~ATA_A_RESET;
		ahci_begin_transaction(ch, ccb);
		return;
	}
	/* If it was our READ LOG command - process it. */
	if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
		ahci_process_read_log(ch, ccb);
	/* If it was our REQUEST SENSE command - process it. */
	} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
		ahci_process_request_sense(ch, ccb);
	/* If it was NCQ or ATAPI command error, put result on hold. */
	} else if (et == AHCI_ERR_NCQ ||
	    ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
		ch->hold[slot->slot] = ccb;
		ch->numhslots++;
	} else
		ahci_done(ch, ccb);
	/* If we have no other active commands, ... */
	if (ch->rslots == 0) {
		/* if there was a fatal error - reset port. */
		if (ch->toslots != 0 || ch->fatalerr) {
			ahci_reset(ch);
		} else {
			/* if we have slots in error, we can reinit port. */
			if (ch->eslots != 0) {
				ahci_stop(ch);
				ahci_clo(ch);
				ahci_start(ch, 1);
			}
			/* if there are commands on hold, we can do READ LOG. */
			if (!ch->recoverycmd && ch->numhslots)
				ahci_issue_recovery(ch);
		}
	/* If all the rest of the commands are in timeout - give them a chance. */
	} else if ((ch->rslots & ~ch->toslots) == 0 &&
	    et != AHCI_ERR_TIMEOUT)
		ahci_rearm_timeout(ch);
	/* Unfreeze frozen command. */
	if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		ahci_begin_transaction(ch, fccb);
		xpt_release_simq(ch->sim, TRUE);
	}
	/* Start PM timer. */
	if (ch->numrslots == 0 && ch->pm_level > 3 &&
	    (ch->curr[ch->pm_present ?
15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8); } } static void ahci_issue_recovery(struct ahci_channel *ch) { union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(ch->dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } ahci_reset(ch); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(ch->dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery. 
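* Only one recovery command can be in flight at a time; ch->recoverycmd, * cleared again in the process_read_log/request_sense handlers, keeps * ahci_end_transaction() from starting another recovery cycle meanwhile.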
*/ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); ahci_begin_transaction(ch, ccb); } static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) { uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(ch->dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_AHCI); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) { int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_start(struct ahci_channel *ch, int fbs) { u_int32_t cmd; /* Run the channel start callback, if any. */ if (ch->start) ch->start(ch); /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); /* Clear any interrupts pending on this channel */ ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); /* Configure FIS-based switching if supported. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) { ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; ATA_OUTL(ch->r_mem, AHCI_P_FBS, ch->fbs_enabled ? AHCI_P_FBS_EN : 0); } /* Start operations on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd &= ~AHCI_P_CMD_PMA; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | (ch->pm_present ? AHCI_P_CMD_PMA : 0)); } static void ahci_stop(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all activity on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); /* Wait for activity stop. 
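* Poll PxCMD.CR: 50000 iterations of DELAY(10) give the command list DMA * engine about 500ms to go idle before we report a failure.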
*/ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); ch->eslots = 0; } static void ahci_clo(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Issue Command List Override if supported */ if (ch->caps & AHCI_CAP_SCLO) { cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd |= AHCI_P_CMD_CLO; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "executing CLO failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); } } static void ahci_stop_fr(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); /* Wait for FIS reception stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI FR engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); } static void ahci_start_fr(struct ahci_channel *ch) { u_int32_t cmd; /* Start FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); } static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0) { int timeout = 0; uint32_t val; while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & (ATA_S_BUSY | ATA_S_DRQ)) { if (timeout > t) { if (t != 0) { device_printf(ch->dev, "AHCI reset: device not ready after %dms " "(tfd = %08x)\n", MAX(t, 0) + t0, val); } return (EBUSY); } DELAY(1000); timeout++; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device ready after %dms\n", timeout + t0); return (0); } static void ahci_reset_to(void *arg) { struct ahci_channel *ch = arg; if (ch->resetting == 0) return; ch->resetting--; if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, (310 - ch->resetting) * 100) == 0) { ch->resetting = 0; ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { ahci_clo(ch); ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void ahci_reset(struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(ch->dev, "AHCI reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(ch); for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on the slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; /* XXX; Commands in loading state.
*/ ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->wrongccs = 0; ch->fatalerr = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset and reconnect PHY. */ if (!ahci_sata_phy_reset(ch)) { if (bootverbose) device_printf(ch->dev, "AHCI reset: device not found\n"); ch->devices = 0; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_PRC | AHCI_P_IX_PC)); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device found\n"); /* Wait for the busy status to clear. */ if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { if (dumping) ahci_clo(ch); else ch->resetting = 310; } ch->devices = 1; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_TFE | AHCI_P_IX_HBF | AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); else { ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); } } static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) { u_int8_t *fis = &ctp->cfis[0]; bzero(fis, 20); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->acmd, ccb->csio.cdb_len); bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] = tag << 3; } else { fis[12] = ccb->ataio.cmd.sector_count; } fis[13] = ccb->ataio.cmd.sector_count_exp; fis[15] = ATA_A_4BIT; } else { fis[15] = ccb->ataio.cmd.control; } if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } return (20); } static int ahci_sata_connect(struct ahci_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); return (1); } static int ahci_sata_phy_reset(struct ahci_channel *ch) { int sata_rev; uint32_t val; if (ch->listening) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val |= AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 0; } sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_RESET | val | ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); DELAY(1000); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); if (!ahci_sata_connect(ch)) { if (ch->caps & AHCI_CAP_SSS) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val &= ~AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 1; } else if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } return (1); } static int ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) { if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 
15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; ahci_done(ch, ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; ahci_done(ch, ccb); return (-1); } return (0); } static void ahciaction(struct cam_sim *sim, union ccb *ccb) { struct ahci_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ahci_channel *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ahci_check_ids(ch, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (ahci_check_collision(ch, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. */ xpt_freeze_simq(ch->sim, 1); return; } ahci_begin_transaction(ch, ccb); return; - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(ch->numslots, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; uint32_t status; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->caps2 & 
AHCI_CAP2_APST) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; } if ((ch->caps & AHCI_CAP_SNCQ) && (ch->quirks & AHCI_Q_NOAA) == 0) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ahci_reset(ch); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (ch->caps & AHCI_CAP_SNCQ) cpi->hba_inquiry |= PI_TAG_ABLE; if (ch->caps & AHCI_CAP_SPM) cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_ATA_EXT; cpi->hba_eng_cnt = 0; if (ch->caps & AHCI_CAP_SPM) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = MAXPHYS; /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (ch->quirks & AHCI_Q_MAXIO_64K) cpi->maxio = min(cpi->maxio, 128 * 512); cpi->hba_vendor = ch->vendorid; cpi->hba_device = ch->deviceid; cpi->hba_subvendor = ch->subvendorid; cpi->hba_subdevice = ch->subdeviceid; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ahci_done(ch, ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); uint32_t istatus; /* Read interrupt statuses and process if any. 
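* This is the SIM poll routine, used when interrupts cannot be relied on * (e.g. while the kernel is dumping); it also drives a pending channel * reset timeout by hand, since callouts may not fire in that context.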
*/ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus != 0) ahci_ch_intr_main(ch, istatus); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; ahci_reset_to(ch); } } MODULE_VERSION(ahci, 1); MODULE_DEPEND(ahci, cam, 1, 1, 1); Index: stable/11/sys/dev/arcmsr/arcmsr.c =================================================================== --- stable/11/sys/dev/arcmsr/arcmsr.c (revision 314380) +++ stable/11/sys/dev/arcmsr/arcmsr.c (revision 314381) @@ -1,4569 +1,4562 @@ /* ******************************************************************************** ** OS : FreeBSD ** FILE NAME : arcmsr.c ** BY : Erich Chen, Ching Huang ** Description: SCSI RAID Device Driver for ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) ** SATA/SAS RAID HOST Adapter ******************************************************************************** ******************************************************************************** ** ** Copyright (C) 2002 - 2012, Areca Technology Corporation All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************** ** History ** ** REV# DATE NAME DESCRIPTION ** 1.00.00.00 03/31/2004 Erich Chen First release ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support ** clean unused function ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling, ** firmware version check ** and firmware update notify for hardware bug fix ** handling if none zero high part physical address ** of srb resource ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy ** add iop message xfer ** with scsi pass-through command ** add new device id of sas raid adapters ** code fit for SPARC64 & PPC ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report ** and cause g_vfs_done() read write error ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x ** bus_dmamem_alloc() with BUS_DMA_ZERO ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed, ** prevent cam_periph_error removing all LUN devices of one Target id ** for any one LUN device failed ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparion at DV1 step" ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout ** 02/14/2011 Ching Huang Modified pktRequestCount ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter ** 1.20.00.26 12/14/2012 Ching Huang Added support ARC1214,1224,1264,1284 ** 1.20.00.27 05/06/2013 Ching Huang Fixed out standing cmd full on ARC-12x4 ** 1.20.00.28 09/13/2013 Ching Huang Removed recursive mutex in arcmsr_abort_dr_ccbs ** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883 ** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203 ****************************************************************************************** */ #include __FBSDID("$FreeBSD$"); #if 0 #define ARCMSR_DEBUG1 1 #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version >= 500005 #include #include #include #include #include #else #include #include #include #endif #if !defined(CAM_NEW_TRAN_CODE) && __FreeBSD_version >= 700025 #define 
CAM_NEW_TRAN_CODE 1 #endif #if __FreeBSD_version > 500000 #define arcmsr_callout_init(a) callout_init(a, /*mpsafe*/1); #else #define arcmsr_callout_init(a) callout_init(a); #endif #define ARCMSR_DRIVER_VERSION "arcmsr version 1.30.00.00 2015-11-30" #include /* ************************************************************************** ************************************************************************** */ static void arcmsr_free_srb(struct CommandControlBlock *srb); static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb); static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb); static int arcmsr_probe(device_t dev); static int arcmsr_attach(device_t dev); static int arcmsr_detach(device_t dev); static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg); static void arcmsr_iop_parking(struct AdapterControlBlock *acb); static int arcmsr_shutdown(device_t dev); static void arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb); static void arcmsr_free_resource(struct AdapterControlBlock *acb); static void arcmsr_bus_reset(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb); static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer); static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb); static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb); static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag); static void arcmsr_iop_reset(struct AdapterControlBlock *acb); static void arcmsr_report_sense_info(struct CommandControlBlock *srb); static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg); static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb); static int arcmsr_resume(device_t dev); static int arcmsr_suspend(device_t dev); static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb); static void arcmsr_polling_devmap(void *arg); static void arcmsr_srb_timeout(void *arg); static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb); #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb); #endif /* ************************************************************************** ************************************************************************** */ static void UDELAY(u_int32_t us) { DELAY(us); } /* ************************************************************************** ************************************************************************** */ static bus_dmamap_callback_t arcmsr_map_free_srb; static bus_dmamap_callback_t arcmsr_execute_srb; /* ************************************************************************** ************************************************************************** */ static d_open_t arcmsr_open; static d_close_t arcmsr_close; static d_ioctl_t arcmsr_ioctl; static device_method_t arcmsr_methods[]={ DEVMETHOD(device_probe, arcmsr_probe), DEVMETHOD(device_attach, arcmsr_attach), DEVMETHOD(device_detach, arcmsr_detach), DEVMETHOD(device_shutdown, arcmsr_shutdown), DEVMETHOD(device_suspend, 
arcmsr_suspend), DEVMETHOD(device_resume, arcmsr_resume), #if __FreeBSD_version >= 803000 DEVMETHOD_END #else { 0, 0 } #endif }; static driver_t arcmsr_driver={ "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock) }; static devclass_t arcmsr_devclass; DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0); MODULE_DEPEND(arcmsr, pci, 1, 1, 1); MODULE_DEPEND(arcmsr, cam, 1, 1, 1); #ifndef BUS_DMA_COHERENT #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */ #endif #if __FreeBSD_version >= 501000 static struct cdevsw arcmsr_cdevsw={ #if __FreeBSD_version >= 503000 .d_version = D_VERSION, #endif #if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) .d_flags = D_NEEDGIANT, #endif .d_open = arcmsr_open, /* open */ .d_close = arcmsr_close, /* close */ .d_ioctl = arcmsr_ioctl, /* ioctl */ .d_name = "arcmsr", /* name */ }; #else #define ARCMSR_CDEV_MAJOR 180 static struct cdevsw arcmsr_cdevsw = { arcmsr_open, /* open */ arcmsr_close, /* close */ noread, /* read */ nowrite, /* write */ arcmsr_ioctl, /* ioctl */ nopoll, /* poll */ nommap, /* mmap */ nostrategy, /* strategy */ "arcmsr", /* name */ ARCMSR_CDEV_MAJOR, /* major */ nodump, /* dump */ nopsize, /* psize */ 0 /* flags */ }; #endif /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_open(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_open(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_open(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (0); } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_close(dev_t dev, int flags, int fmt, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_close(dev_t dev, int flags, int fmt, struct thread *proc) #else static int arcmsr_close(struct cdev *dev, int flags, int fmt, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return 0; } /* ************************************************************************** ************************************************************************** */ #if __FreeBSD_version < 500005 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct proc *proc) #else #if __FreeBSD_version < 503000 static int arcmsr_ioctl(dev_t dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #else static int arcmsr_ioctl(struct cdev *dev, u_long ioctl_cmd, caddr_t arg, int flags, struct thread *proc) #endif #endif { #if __FreeBSD_version < 503000 struct AdapterControlBlock *acb = dev->si_drv1; #else int unit = dev2unit(dev); struct AdapterControlBlock *acb = devclass_get_softc(arcmsr_devclass, unit); #endif if(acb == NULL) { return ENXIO; } return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg)); } /* 
********************************************************************** ********************************************************************** */ static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb) { u_int32_t intmask_org = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* disable all outbound interrupt */ intmask_org = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, 0); /* disable all interrupt */ } break; case ACB_ADAPTER_TYPE_C: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE); } break; case ACB_ADAPTER_TYPE_D: { /* disable all outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); } break; } return (intmask_org); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org) { u_int32_t mask; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */ mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_C: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; } break; case ACB_ADAPTER_TYPE_D: { /* enable outbound Post Queue, outbound doorbell Interrupt */ mask = ARCMSR_HBDMU_ALL_INT_ENABLE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | mask); CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); acb->outbound_int_enable = mask; } break; } } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { 
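/* Poll the outbound interrupt status for the message-done bit: 100 polls of 10ms per pass and up to 20 passes, i.e. roughly 20 seconds before giving up. */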
for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; do { for(Index=0; Index < 100; Index++) { if(READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ********************************************************************** ********************************************************************** */ static u_int8_t arcmsr_hbd_wait_msgint_ready(struct AdapterControlBlock *acb) { u_int32_t Index; u_int8_t Retries = 0x00; do { for(Index=0; Index < 100; Index++) { if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR);/*clear interrupt*/ return TRUE; } UDELAY(10000); }/*max 1 seconds*/ }while(Retries++ < 20);/*max 20 sec*/ return (FALSE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hba_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE); do { if(arcmsr_hbb_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ 
************************************************************************ */ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb) { int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); do { if(arcmsr_hbc_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_hbd_cache(struct AdapterControlBlock *acb) { int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE); do { if(arcmsr_hbd_wait_msgint_ready(acb)) { break; } else { retry_count--; } }while(retry_count != 0); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_flush_hbd_cache(acb); } break; } } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_suspend(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); /* flush controller */ arcmsr_iop_parking(acb); /* disable all outbound interrupt */ arcmsr_disable_allintr(acb); return(0); } /* ******************************************************************************* ******************************************************************************* */ static int arcmsr_resume(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); arcmsr_iop_init(acb); return(0); } /* ********************************************************************************* ********************************************************************************* */ static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg) { struct AdapterControlBlock *acb; u_int8_t target_id, target_lun; struct cam_sim *sim; sim = (struct cam_sim *) cb_arg; acb =(struct AdapterControlBlock *) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: target_id = xpt_path_target_id(path); target_lun = xpt_path_lun_id(path); if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) { break; } // printf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun); break; default: break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_report_sense_info(struct CommandControlBlock *srb) { union ccb *pccb = srb->pccb; pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; if(pccb->csio.sense_len) { memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data)); memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData, get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data))); 
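/* The IOP deposits raw sense bytes in the SRB; after the bounded copy above, force a valid fixed-format response code into byte 0 below. */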
((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ pccb->ccb_h.status |= CAM_AUTOSNS_VALID; } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_hbd_allcmd(struct AdapterControlBlock *acb) { CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit); } } /* ********************************************************************* ********************************************************************* */ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_abort_hbb_allcmd(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_abort_hbc_allcmd(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_abort_hbd_allcmd(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag) { struct AdapterControlBlock *acb = srb->acb; union ccb *pccb = srb->pccb; if(srb->srb_flags & SRB_FLAG_TIMER_START) callout_stop(&srb->ccb_callout); if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_POSTREAD; } else { op = BUS_DMASYNC_POSTWRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } if(stand_flag == 1) { atomic_subtract_int(&acb->srboutstandingcount, 1); if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && ( acb->srboutstandingcount < (acb->maxOutstanding -10))) { acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN; pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } if(srb->srb_state != ARCMSR_SRB_TIMEOUT) arcmsr_free_srb(srb); 
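/* A timed-out SRB is left allocated here; it is freed only when the IOP eventually hands it back (see arcmsr_drain_donequeue). */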
acb->pktReturnCount++; xpt_done(pccb); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error) { int target, lun; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; if(error == FALSE) { if(acb->devstate[target][lun] == ARECA_RAID_GONE) { acb->devstate[target][lun] = ARECA_RAID_GOOD; } srb->pccb->ccb_h.status |= CAM_REQ_CMP; arcmsr_srb_complete(srb, 1); } else { switch(srb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { if(acb->devstate[target][lun] == ARECA_RAID_GOOD) { printf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun); } acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 1); } break; case SCSISTAT_CHECK_CONDITION: { acb->devstate[target][lun] = ARECA_RAID_GOOD; arcmsr_report_sense_info(srb); arcmsr_srb_complete(srb, 1); } break; default: printf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n", acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus); acb->devstate[target][lun] = ARECA_RAID_GONE; srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY; /* unknown error or CRC error, retry */ arcmsr_srb_complete(srb, 1); break; } } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error) { struct CommandControlBlock *srb; /* check whether the command completed without error */ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /* frame must be 32-byte aligned */ break; case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: default: srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); /* frame must be 32-byte aligned */ break; } if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_TIMEOUT) { arcmsr_free_srb(srb); printf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb); return; } printf("arcmsr%d: returned srb has already been completed\n" "srb='%p' srb_state=0x%x outstanding srb count=%d \n", acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount); return; } arcmsr_report_srb_state(acb, srb, error); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_srb_timeout(void *arg) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb; int target, lun; u_int8_t cmd; target = srb->pccb->ccb_h.target_id; lun = srb->pccb->ccb_h.target_lun; acb = srb->acb; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); if(srb->srb_state == ARCMSR_SRB_START) { cmd = scsiio_cdb_ptr(&srb->pccb->csio)[0]; srb->srb_state = ARCMSR_SRB_TIMEOUT; srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command timed out!\n",
acb->pci_unit, target, lun, cmd, srb); } ARCMSR_LOCK_RELEASE(&acb->isr_lock); #ifdef ARCMSR_DEBUG1 arcmsr_dump_data(acb); #endif } /* ********************************************************************** ********************************************************************** */ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i=0; u_int32_t flag_srb; u_int16_t error; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t outbound_intstatus; /*clear and abort all outbound posted Q*/ outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; /*clear all outbound posted Q*/ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if((flag_srb = phbbmu->done_qbuffer[i]) != 0) { phbbmu->done_qbuffer[i] = 0; error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } phbbmu->post_qbuffer[i] = 0; }/*drain reply FIFO*/ phbbmu->doneq_index = 0; phbbmu->postq_index = 0; } break; case ACB_ADAPTER_TYPE_C: { while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? 
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } } break; case ACB_ADAPTER_TYPE_D: { arcmsr_hbd_postqueue_isr(acb); } break; } } /* **************************************************************************** **************************************************************************** */ static void arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *srb; u_int32_t intmask_org; u_int32_t i=0; if(acb->srboutstandingcount>0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' aborted\n" , acb->pci_unit, srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); } } /* enable all outbound interrupt */ arcmsr_enable_allintr(acb, intmask_org); } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg) { struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb; u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u; u_int32_t address_lo, address_hi; union ccb *pccb = srb->pccb; struct ccb_scsiio *pcsio = &pccb->csio; u_int32_t arccdbsize = 0x30; memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB)); arcmsr_cdb->Bus = 0; arcmsr_cdb->TargetID = pccb->ccb_h.target_id; arcmsr_cdb->LUN = pccb->ccb_h.target_lun; arcmsr_cdb->Function = 1; arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len; bcopy(scsiio_cdb_ptr(pcsio), arcmsr_cdb->Cdb, pcsio->cdb_len); if(nseg != 0) { struct AdapterControlBlock *acb = srb->acb; bus_dmasync_op_t op; u_int32_t length, i, cdb_sgcount = 0; if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; srb->srb_flags |= SRB_FLAG_WRITE; } bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op); for(i=0; i < nseg; i++) { /* Get the physical address of the current data pointer */ length = arcmsr_htole32(dm_segs[i].ds_len); address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr)); address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr)); if(address_hi == 0) { struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; pdma_sg->address = address_lo; pdma_sg->length = length; psge += sizeof(struct SG32ENTRY); arccdbsize += sizeof(struct SG32ENTRY); } else { u_int32_t sg64s_size = 0, tmplength = length; while(1) { u_int64_t span4G, length0; struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge; span4G = (u_int64_t)address_lo + tmplength; pdma_sg->addresshigh = address_hi; pdma_sg->address = address_lo; if(span4G > 0x100000000) { /*see if cross 4G boundary*/ length0 = 0x100000000-address_lo; pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR; address_hi = address_hi+1; address_lo = 0; tmplength = tmplength - (u_int32_t)length0; sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); cdb_sgcount++; } else { pdma_sg->length = tmplength | IS_SG64_ADDR; 
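/* The remainder no longer crosses a 4G boundary: emit the final SG64 entry for this segment and stop splitting. */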
sg64s_size += sizeof(struct SG64ENTRY); psge += sizeof(struct SG64ENTRY); break; } } arccdbsize += sg64s_size; } cdb_sgcount++; } arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount; arcmsr_cdb->DataLength = pcsio->dxfer_len; if( arccdbsize > 256) { arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; } } else { arcmsr_cdb->DataLength = 0; } srb->arc_cdb_size = arccdbsize; arcmsr_cdb->msgPages = (arccdbsize/256) + ((arccdbsize % 256) ? 1 : 0); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb) { u_int32_t cdb_phyaddr_low = (u_int32_t) srb->cdb_phyaddr_low; struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD); atomic_add_int(&acb->srboutstandingcount, 1); srb->srb_state = ARCMSR_SRB_START; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low|ARCMSR_SRBPOST_FLAG_SGL_BSIZE); } else { CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_phyaddr_low); } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; int ending_index, index; index = phbbmu->postq_index; ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE); phbbmu->post_qbuffer[ending_index] = 0; if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { phbbmu->post_qbuffer[index] = cdb_phyaddr_low | ARCMSR_SRBPOST_FLAG_SGL_BSIZE; } else { phbbmu->post_qbuffer[index] = cdb_phyaddr_low; } index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->postq_index = index; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED); } break; case ACB_ADAPTER_TYPE_C: { u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32; arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size; ccb_post_stamp = (cdb_phyaddr_low | ((arc_cdb_size-1) >> 6) | 1); cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; if(cdb_phyaddr_hi32) { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } else { CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp); } } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int16_t index_stripped; u_int16_t postq_index; struct InBound_SRB *pinbound_srb; ARCMSR_LOCK_ACQUIRE(&acb->postDone_lock); postq_index = phbdmu->postq_index; pinbound_srb = (struct InBound_SRB *)&phbdmu->post_qbuffer[postq_index & 0xFF]; pinbound_srb->addressHigh = srb->cdb_phyaddr_high; pinbound_srb->addressLow = srb->cdb_phyaddr_low; pinbound_srb->length = srb->arc_cdb_size >> 2; arcmsr_cdb->Context = srb->cdb_phyaddr_low; if (postq_index & 0x4000) { index_stripped = postq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = postq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->postq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } CHIP_REG_WRITE32(HBD_MessageUnit, 0, inboundlist_write_pointer, postq_index); ARCMSR_LOCK_RELEASE(&acb->postDone_lock); } break; } } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer=NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_rbuffer; } break; } return(qbuffer); } /* ************************************************************************ ************************************************************************ */ static struct QBUFFER *arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb) { struct QBUFFER *qbuffer = NULL; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer; } break; case ACB_ADAPTER_TYPE_C: { struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu; qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; qbuffer = (struct QBUFFER *)&phbdmu->phbdmu->message_wbuffer; } break; } return(qbuffer); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* let IOP know data has been read */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_C: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_D: { /* let IOP know data has been read */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* ** push inbound doorbell tell iop, driver data write ok ** and wait reply on next hwinterrupt for next Qbuffer post */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, 
	    inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
			struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
			/*
			** push inbound doorbell tell iop, driver data write ok
			** and wait reply on next hwinterrupt for next Qbuffer post
			*/
			CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_IN_READY);
		}
		break;
	}
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
	if(!arcmsr_hba_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->pci_unit);
	}
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->pci_unit);
	}
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
	}
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbd_bgrb(struct AdapterControlBlock *acb)
{
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
	if(!arcmsr_hbd_wait_msgint_ready(acb)) {
		printf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
	}
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_stop_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_stop_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_stop_hbc_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_stop_hbd_bgrb(acb);
		break;
	}
}
/*
************************************************************************
************************************************************************ */ static void arcmsr_poll(struct cam_sim *psim) { struct AdapterControlBlock *acb; int mutex; acb = (struct AdapterControlBlock *)cam_sim_softc(psim); mutex = mtx_owned(&acb->isr_lock); if( mutex == 0 ) ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); if( mutex == 0 ) ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data_D(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; u_int8_t *buf1 = 0; u_int32_t *iop_data, *buf2 = 0; u_int32_t iop_len, data_len; iop_data = (u_int32_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; if ( iop_len > 0 ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return (0); data_len = iop_len; while(data_len >= 4) { *buf2++ = *iop_data++; data_len -= 4; } if(data_len) *buf2 = *iop_data; buf2 = (u_int32_t *)buf1; } while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *buf1; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; buf1++; iop_len--; } if(buf2) free( (u_int8_t *)buf2, M_DEVBUF); /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb, struct QBUFFER *prbuffer) { u_int8_t *pQbuffer; u_int8_t *iop_data; u_int32_t iop_len; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer)); } iop_data = (u_int8_t *)prbuffer->data; iop_len = (u_int32_t)prbuffer->data_len; while (iop_len > 0) { pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex]; *pQbuffer = *iop_data; acb->rqbuf_lastindex++; /* if last, index number set it to 0 */ acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; iop_data++; iop_len--; } /* let IOP know data has been read */ arcmsr_iop_message_read(acb); return (1); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) { struct QBUFFER *prbuffer; int my_empty_len; /*check this iop data if overflow my rqbuffer*/ ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); prbuffer = arcmsr_get_iop_rqbuffer(acb); my_empty_len = (acb->rqbuf_lastindex - acb->rqbuf_firstindex - 1) & (ARCMSR_MAX_QBUFFER-1); if(my_empty_len >= prbuffer->data_len) { if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } else { acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer_D(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *buf1 = 0; u_int32_t *iop_data, *buf2 = 0; u_int32_t allxfer_len = 0, data_len; if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { buf1 = malloc(128, M_DEVBUF, M_NOWAIT | M_ZERO); buf2 = (u_int32_t *)buf1; if( buf1 == NULL) return; 
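#if 0
/*
**************************************************************************
** Illustrative sketch (not part of the driver): the rqbuffer/wqbuffer
** pair above is a classic ring with a read index, a write index, and
** one slot kept empty so that "read == write" can only mean empty.
** When the capacity is a power of two (ARCMSR_MAX_QBUFFER is), the
** modulo arithmetic reduces to a mask.  A standalone model under those
** assumptions; RING_SIZE and the ring struct are hypothetical.
**************************************************************************
*/
#include <stdint.h>

#define RING_SIZE	4096U		/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

struct ring {
	uint8_t		buf[RING_SIZE];
	uint32_t	rd;		/* next byte to consume */
	uint32_t	wr;		/* next byte to fill */
};

static uint32_t
ring_used(const struct ring *r)
{
	return ((r->wr - r->rd) & RING_MASK);
}

static uint32_t
ring_free(const struct ring *r)
{
	/* one slot is sacrificed so rd == wr always means "empty" */
	return ((r->rd - r->wr - 1) & RING_MASK);
}

static int
ring_put(struct ring *r, uint8_t c)
{
	if (ring_free(r) == 0)
		return (0);	/* would overflow, caller must retry later */
	r->buf[r->wr] = c;
	r->wr = (r->wr + 1) & RING_MASK;
	return (1);
}
#endif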
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int32_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *buf1 = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; buf1++; allxfer_len++; } pwbuffer->data_len = allxfer_len; data_len = allxfer_len; buf1 = (u_int8_t *)buf2; while(data_len >= 4) { *iop_data++ = *buf2++; data_len -= 4; } if(data_len) *iop_data = *buf2; free( buf1, M_DEVBUF); arcmsr_iop_message_wrote(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb) { u_int8_t *pQbuffer; struct QBUFFER *pwbuffer; u_int8_t *iop_data; int32_t allxfer_len=0; if(acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { arcmsr_Write_data_2iop_wqbuffer_D(acb); return; } if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) { acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ); pwbuffer = arcmsr_get_iop_wqbuffer(acb); iop_data = (u_int8_t *)pwbuffer->data; while((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && (allxfer_len < 124)) { pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; *iop_data = *pQbuffer; acb->wqbuf_firstindex++; acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; iop_data++; allxfer_len++; } pwbuffer->data_len = allxfer_len; arcmsr_iop_message_wrote(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) { ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ; /* ***************************************************************** ** check if there are any mail packages from user space program ** in my post bag, now is the time to send them into Areca's firmware ***************************************************************** */ if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) { arcmsr_Write_data_2iop_wqbuffer(acb); } if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) { acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb) { /* if (ccb->ccb_h.status != CAM_REQ_CMP) printf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x," "failure status=%x\n", ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->ccb_h.status); else printf("arcmsr_rescanLun_cb: Rescan lun successfully!\n"); */ xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun) { struct cam_path *path; union ccb *ccb; if ((ccb = (union ccb *)xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&path, NULL, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } /* printf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */ bzero(ccb, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = arcmsr_rescanLun_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); } static void arcmsr_abort_dr_ccbs(struct 
AdapterControlBlock *acb, int target, int lun) { struct CommandControlBlock *srb; u_int32_t intmask_org; int i; /* disable all outbound interrupts */ intmask_org = arcmsr_disable_allintr(acb); for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if (srb->srb_state == ARCMSR_SRB_START) { if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); printf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb); } } } /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_dr_handle(struct AdapterControlBlock *acb) { u_int32_t devicemap; u_int32_t target, lun; u_int32_t deviceMapCurrent[4]={0}; u_int8_t *pDevMap; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_B: devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_C: devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; case ACB_ADAPTER_TYPE_D: devicemap = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); for (target = 0; target < 4; target++) { deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap); devicemap += 4; } break; } if(acb->acb_flags & ACB_F_BUS_HANG_ON) { acb->acb_flags &= ~ACB_F_BUS_HANG_ON; } /* ** adapter posted CONFIG message ** copy the new map, note if there are differences with the current map */ pDevMap = (u_int8_t *)&deviceMapCurrent[0]; for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { if (*pDevMap != acb->device_map[target]) { u_int8_t difference, bit_check; difference = *pDevMap ^ acb->device_map[target]; for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++) { bit_check = (1 << lun); /*check bit from 0....31*/ if(difference & bit_check) { if(acb->device_map[target] & bit_check) {/* unit departed */ printf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun); arcmsr_abort_dr_ccbs(acb, target, lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GONE; } else {/* unit arrived */ printf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun); arcmsr_rescan_lun(acb, target, lun); acb->devstate[target][lun] = ARECA_RAID_GOOD; } } } /* printf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */ acb->device_map[target] = *pDevMap; } pDevMap++; } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; 
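#if 0
/*
**************************************************************************
** Illustrative sketch (not part of the driver): arcmsr_dr_handle()
** above detects hot plug and unplug by XOR-ing the device-map byte the
** firmware just posted against the cached copy.  Each set bit in the
** XOR is one LUN whose presence flipped; the old map tells whether the
** unit departed (bit was set) or arrived (bit was clear).  Standalone
** model of that bit walk; MAX_LUN and the printf reports stand in for
** ARCMSR_MAX_TARGETLUN and the rescan/abort calls.
**************************************************************************
*/
#include <stdint.h>
#include <stdio.h>

#define MAX_LUN	8

static void
diff_device_map(int target, uint8_t oldmap, uint8_t newmap)
{
	uint8_t difference = oldmap ^ newmap;
	int lun;

	for (lun = 0; lun < MAX_LUN; lun++) {
		uint8_t bit = 1u << lun;

		if ((difference & bit) == 0)
			continue;	/* presence unchanged */
		if (oldmap & bit)
			printf("target %d lun %d departed\n", target, lun);
		else
			printf("target %d lun %d arrived\n", target, lun);
	}
}
#endif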
CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* clear interrupts */ WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_message_isr(struct AdapterControlBlock *acb) { u_int32_t outbound_message; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); outbound_message = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[0]); if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG) arcmsr_dr_handle( acb ); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! 
** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, doorbell_status); /* clear doorbell interrupt */ if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_doorbell_isr(struct AdapterControlBlock *acb) { u_int32_t doorbell_status; /* ******************************************************************* ** Maybe here we need to check wrqbuffer_lock is lock or not ** DOORBELL: din! don! ** check if there are any mail need to pack from firmware ******************************************************************* */ doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ while( doorbell_status & ARCMSR_HBDMU_F0_DOORBELL_CAUSE ) { if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } if(doorbell_status & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbd_message_isr(acb); /* messenger of "driver to iop commands" */ } doorbell_status = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_F0_DOORBELL_CAUSE; if(doorbell_status) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, doorbell_status); /* clear doorbell interrupt */ } } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) { /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? 
TRUE : FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; u_int32_t flag_srb; int index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); index = phbbmu->doneq_index; while((flag_srb = phbbmu->done_qbuffer[index]) != 0) { phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); } /*drain reply FIFO*/ } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb) { u_int32_t flag_srb,throttling = 0; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); do { flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); if (flag_srb == 0xFFFFFFFF) break; /* check if command done with no error*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; arcmsr_drain_donequeue(acb, flag_srb, error); throttling++; if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); throttling = 0; } } while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR); } /* ********************************************************************** ** ********************************************************************** */ static uint16_t arcmsr_get_doneq_index(struct HBD_MessageUnit0 *phbdmu) { uint16_t doneq_index, index_stripped; doneq_index = phbdmu->doneq_index; if (doneq_index & 0x4000) { index_stripped = doneq_index & 0xFF; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? (index_stripped | 0x4000) : index_stripped; } else { index_stripped = doneq_index; index_stripped += 1; index_stripped %= ARCMSR_MAX_HBD_POSTQUEUE; phbdmu->doneq_index = index_stripped ? 
index_stripped : (index_stripped | 0x4000); } return (phbdmu->doneq_index); } /* ************************************************************************** ************************************************************************** */ static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; u_int32_t outbound_write_pointer; u_int32_t addressLow; uint16_t doneq_index; u_int16_t error; /* ***************************************************************************** ** areca cdb command done ***************************************************************************** */ if((CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause) & ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT) == 0) return; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; while ((doneq_index & 0xFF) != (outbound_write_pointer & 0xFF)) { doneq_index = arcmsr_get_doneq_index(phbdmu); addressLow = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; error = (addressLow & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; arcmsr_drain_donequeue(acb, addressLow, error); /*Check if command done with no error */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; } CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_interrupt_cause, ARCMSR_HBDMU_OUTBOUND_LIST_INTERRUPT_CLEAR); CHIP_REG_READ32(HBD_MessageUnit, 0, outboundlist_interrupt_cause); /*Dummy ioread32 to force pci flush */ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_intStatus; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_intStatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; if(!outbound_intStatus) { /*it must be share irq*/ return; } CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus); /*clear interrupt*/ /* MU doorbell interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { arcmsr_hba_doorbell_isr(acb); } /* MU post queue interrupts*/ if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { arcmsr_hba_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; /* ********************************************* ** check outbound intstatus ********************************************* */ outbound_doorbell = READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & acb->outbound_int_enable; if(!outbound_doorbell) { /*it must be share irq*/ return; } WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell); WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); /* MU ioctl transfer doorbell interrupts*/ if(outbound_doorbell & 
ARCMSR_IOP2DRV_DATA_WRITE_OK) { arcmsr_iop2drv_data_wrote_handle(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { arcmsr_iop2drv_data_read_handle(acb); } /* MU post queue interrupts*/ if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { arcmsr_hbb_message_isr(acb); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR); if(!host_interrupt_status) { /*it must be share irq*/ return; } do { /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) { arcmsr_hbc_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { arcmsr_hbc_postqueue_isr(acb); } host_interrupt_status = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status); } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)); } /* ********************************************************************** ********************************************************************** */ static void arcmsr_handle_hbd_isr( struct AdapterControlBlock *acb) { u_int32_t host_interrupt_status; u_int32_t intmask_org; /* ********************************************* ** check outbound intstatus ********************************************* */ host_interrupt_status = CHIP_REG_READ32(HBD_MessageUnit, 0, host_int_status) & acb->outbound_int_enable; if(!(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_INT)) { /*it must be share irq*/ return; } /* disable outbound interrupt */ intmask_org = CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable) ; /* disable outbound message0 int */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, ARCMSR_HBDMU_ALL_INT_DISABLE); /* MU doorbell interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_DOORBELL_INT) { arcmsr_hbd_doorbell_isr(acb); } /* MU post queue interrupts*/ if(host_interrupt_status & ARCMSR_HBDMU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hbd_postqueue_isr(acb); } /* enable all outbound interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, pcief0_int_enable, intmask_org | ARCMSR_HBDMU_ALL_INT_ENABLE); // CHIP_REG_READ32(HBD_MessageUnit, 0, pcief0_int_enable); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_handle_hba_isr(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_handle_hbb_isr(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_handle_hbc_isr(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_handle_hbd_isr(acb); break; default: printf("arcmsr%d: interrupt service," " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_intr_handler(void *arg) { struct AdapterControlBlock *acb = (struct 
AdapterControlBlock *)arg; ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); arcmsr_interrupt(acb); ARCMSR_LOCK_RELEASE(&acb->isr_lock); } /* ****************************************************************************** ****************************************************************************** */ static void arcmsr_polling_devmap(void *arg) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); } break; case ACB_ADAPTER_TYPE_C: CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); break; case ACB_ADAPTER_TYPE_D: CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); break; } if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0) { callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */ } } /* ******************************************************************************* ** ******************************************************************************* */ static void arcmsr_iop_parking(struct AdapterControlBlock *acb) { u_int32_t intmask_org; if(acb != NULL) { /* stop adapter background rebuild */ if(acb->acb_flags & ACB_F_MSG_START_BGRB) { intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_enable_allintr(acb, intmask_org); } } } /* *********************************************************************** ** ************************************************************************ */ u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; u_int32_t retvalue = EINVAL; pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) arg; if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) { return retvalue; } ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); switch(ioctl_cmd) { case ARCMSR_MESSAGE_READ_RQBUFFER: { u_int8_t *pQbuffer; u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer; u_int32_t allxfer_len=0; while((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { /*copy READ QBUFFER to srb*/ pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; *ptmpQbuffer = *pQbuffer; acb->rqbuf_firstindex++; acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */ ptmpQbuffer++; allxfer_len++; } if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { struct QBUFFER *prbuffer; acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; prbuffer = arcmsr_get_iop_rqbuffer(acb); if(arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; /*check if data xfer length of this request will overflow my array qbuffer */ wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; 
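#if 0
/*
**************************************************************************
** Illustrative sketch (not part of the driver): the type D post and
** done queues above (arcmsr_post_srb, arcmsr_get_doneq_index) keep the
** ring position in the low byte and flip bit 0x4000 each time the
** position wraps to zero, so producer and consumer can distinguish a
** full ring from an empty one even when the low bytes match.  A
** standalone model of that encoding; the depth of 256 is an assumption
** mirroring ARCMSR_MAX_HBD_POSTQUEUE.
**************************************************************************
*/
#include <stdint.h>

#define QDEPTH		256u		/* assumed ring depth */
#define WRAP_TOGGLE	0x4000u		/* phase bit flipped per wrap */

static uint16_t
advance_q_index(uint16_t idx)
{
	uint16_t pos = (idx & 0xFF);
	uint16_t toggle = (idx & WRAP_TOGGLE);

	pos = (pos + 1) % QDEPTH;
	if (pos == 0)			/* wrapped: flip the phase bit */
		toggle ^= WRAP_TOGGLE;
	return (toggle | pos);
}
#endif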
			if(wqbuf_lastindex != wqbuf_firstindex) {
				arcmsr_Write_data_2iop_wqbuffer(acb);
				pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
			} else {
				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1) &
					(ARCMSR_MAX_QBUFFER - 1);
				if(my_empty_len >= user_len) {
					while(user_len > 0) {
						/* copy srb data to wqbuffer */
						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
						*pQbuffer = *ptmpuserbuffer;
						acb->wqbuf_lastindex++;
						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
						/* if at the last index, wrap back to 0 */
						ptmpuserbuffer++;
						user_len--;
					}
					/* post first Qbuffer */
					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
						acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
						arcmsr_Write_data_2iop_wqbuffer(acb);
					}
					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
				} else {
					pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_ERROR;
				}
			}
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
			u_int8_t *pQbuffer = acb->rqbuffer;

			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				arcmsr_iop_message_read(acb);
				/* signature, let IOP know data has been read */
			}
			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
			acb->rqbuf_firstindex = 0;
			acb->rqbuf_lastindex = 0;
			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
			u_int8_t *pQbuffer = acb->wqbuffer;

			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				arcmsr_iop_message_read(acb);
				/* signature, let IOP know data has been read */
			}
			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
			acb->wqbuf_firstindex = 0;
			acb->wqbuf_lastindex = 0;
			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
			u_int8_t *pQbuffer;

			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				arcmsr_iop_message_read(acb);
				/* signature, let IOP know data has been read */
			}
			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
					|ACB_F_MESSAGE_RQBUFFER_CLEARED
					|ACB_F_MESSAGE_WQBUFFER_READ);
			acb->rqbuf_firstindex = 0;
			acb->rqbuf_lastindex = 0;
			acb->wqbuf_firstindex = 0;
			acb->wqbuf_lastindex = 0;
			pQbuffer = acb->rqbuffer;
			memset(pQbuffer, 0, sizeof(struct QBUFFER));
			pQbuffer = acb->wqbuffer;
			memset(pQbuffer, 0, sizeof(struct QBUFFER));
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_SAY_HELLO: {
			u_int8_t *hello_string = "Hello! I am ARCMSR";
			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;

			/*
			** memcpy() returns its destination pointer, so it must not
			** be used as an error test; copy the greeting and report ok
			*/
			memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string));
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
			arcmsr_iop_parking(acb);
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
			arcmsr_flush_adapter_cache(acb);
			retvalue = ARCMSR_MESSAGE_SUCCESS;
		}
		break;
	}
	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	return (retvalue);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_free_srb(struct CommandControlBlock *srb)
{
	struct AdapterControlBlock *acb;

	acb = srb->acb;
	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
	srb->srb_state = ARCMSR_SRB_DONE;
	srb->srb_flags = 0;
	acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
	acb->workingsrb_doneindex++;
	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
}
/*
**************************************************************************
**************************************************************************
*/
struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *srb = NULL;
	u_int32_t workingsrb_startindex, workingsrb_doneindex;

	ARCMSR_LOCK_ACQUIRE(&acb->srb_lock);
	workingsrb_doneindex = acb->workingsrb_doneindex;
	workingsrb_startindex = acb->workingsrb_startindex;
	srb = acb->srbworkingQ[workingsrb_startindex];
	workingsrb_startindex++;
	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
	if(workingsrb_doneindex != workingsrb_startindex) {
		acb->workingsrb_startindex = workingsrb_startindex;
	} else {
		srb = NULL;
	}
	ARCMSR_LOCK_RELEASE(&acb->srb_lock);
	return(srb);
}
/*
**************************************************************************
**************************************************************************
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	uint8_t *ptr = scsiio_cdb_ptr(&pccb->csio);
	u_int32_t controlcode = (u_int32_t ) ptr[5] << 24 |
				(u_int32_t ) ptr[6] << 16 |
				(u_int32_t ) ptr[7] << 8  |
				(u_int32_t ) ptr[8];
				/* 4 bytes: Areca io control code */
	if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		buffer = pccb->csio.data_ptr;
		transfer_len = pccb->csio.dxfer_len;
	} else {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch(controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
			u_int8_t *pQbuffer;
			u_int8_t *ptmpQbuffer = pcmdmessagefld->messagedatabuffer;
			int32_t allxfer_len = 0;

			ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
				&& (allxfer_len < 1031)) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
				*ptmpQbuffer = *pQbuffer;
				acb->rqbuf_firstindex++;
				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
				ptmpQbuffer++;
				allxfer_len++;
			}
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				struct QBUFFER *prbuffer;

				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				prbuffer = arcmsr_get_iop_rqbuffer(acb);
				if(arcmsr_Read_iop_rqbuffer_data(acb,
prbuffer) == 0) acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; } pcmdmessagefld->cmdmessage.Length = allxfer_len; pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; retvalue = ARCMSR_MESSAGE_SUCCESS; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_WRITE_WQBUFFER: { int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; u_int8_t *pQbuffer; u_int8_t *ptmpuserbuffer = pcmdmessagefld->messagedatabuffer; user_len = pcmdmessagefld->cmdmessage.Length; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; if (wqbuf_lastindex != wqbuf_firstindex) { arcmsr_Write_data_2iop_wqbuffer(acb); /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } else { my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) &(ARCMSR_MAX_QBUFFER - 1); if (my_empty_len >= user_len) { while (user_len > 0) { pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; *pQbuffer = *ptmpuserbuffer; acb->wqbuf_lastindex++; acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; ptmpuserbuffer++; user_len--; } if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { acb->acb_flags &= ~ACB_F_MESSAGE_WQBUFFER_CLEARED; arcmsr_Write_data_2iop_wqbuffer(acb); } } else { /* has error report sensedata */ if(pccb->csio.sense_len) { ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */ ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; /* AdditionalSenseLength */ ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; /* AdditionalSenseCode */ } retvalue = ARCMSR_MESSAGE_FAIL; } } ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { u_int8_t *pQbuffer = acb->rqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { u_int8_t *pQbuffer = acb->wqbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->wqbuf_firstindex = 0; acb->wqbuf_lastindex = 0; memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { u_int8_t *pQbuffer; ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; arcmsr_iop_message_read(acb); } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ); acb->rqbuf_firstindex = 0; acb->rqbuf_lastindex = 0; acb->wqbuf_firstindex = 
0; acb->wqbuf_lastindex = 0; pQbuffer = acb->rqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pQbuffer = acb->wqbuffer; memset(pQbuffer, 0, sizeof (struct QBUFFER)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); } break; case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; } break; case ARCMSR_MESSAGE_SAY_GOODBYE: arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: arcmsr_flush_adapter_cache(acb); break; default: retvalue = ARCMSR_MESSAGE_FAIL; } message_out: return (retvalue); } /* ********************************************************************* ********************************************************************* */ static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct CommandControlBlock *srb = (struct CommandControlBlock *)arg; struct AdapterControlBlock *acb = (struct AdapterControlBlock *)srb->acb; union ccb *pccb; int target, lun; pccb = srb->pccb; target = pccb->ccb_h.target_id; lun = pccb->ccb_h.target_lun; acb->pktRequestCount++; if(error != 0) { if(error != EFBIG) { printf("arcmsr%d: unexpected error %x" " returned from 'bus_dmamap_load' \n" , acb->pci_unit, error); } if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; } arcmsr_srb_complete(srb, 0); return; } if(nseg > ARCMSR_MAX_SG_ENTRIES) { pccb->ccb_h.status |= CAM_REQ_TOO_BIG; arcmsr_srb_complete(srb, 0); return; } if(acb->acb_flags & ACB_F_BUS_RESET) { printf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; arcmsr_srb_complete(srb, 0); return; } if(acb->devstate[target][lun] == ARECA_RAID_GONE) { u_int8_t block_cmd, cmd; cmd = scsiio_cdb_ptr(&pccb->csio)[0]; block_cmd = cmd & 0x0f; if(block_cmd == 0x08 || block_cmd == 0x0a) { printf("arcmsr%d:block 'read/write' command " "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n" , acb->pci_unit, cmd, target, lun); pccb->ccb_h.status |= CAM_DEV_NOT_THERE; arcmsr_srb_complete(srb, 0); return; } } if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if(nseg != 0) { bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); } arcmsr_srb_complete(srb, 0); return; } if(acb->srboutstandingcount >= acb->maxOutstanding) { if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) == 0) { xpt_freeze_simq(acb->psim, 1); acb->acb_flags |= ACB_F_CAM_DEV_QFRZN; } pccb->ccb_h.status &= ~CAM_SIM_QUEUED; pccb->ccb_h.status |= CAM_REQUEUE_REQ; arcmsr_srb_complete(srb, 0); return; } pccb->ccb_h.status |= CAM_SIM_QUEUED; arcmsr_build_srb(srb, dm_segs, nseg); arcmsr_post_srb(acb, srb); if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) { arcmsr_callout_init(&srb->ccb_callout); callout_reset_sbt(&srb->ccb_callout, SBT_1MS * (pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)), 0, arcmsr_srb_timeout, srb, 0); srb->srb_flags |= SRB_FLAG_TIMER_START; } } /* ***************************************************************************************** ***************************************************************************************** */ static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb) { struct CommandControlBlock *srb; struct AdapterControlBlock *acb = 
	(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
	u_int32_t intmask_org;
	int i = 0;

	acb->num_aborts++;
	/*
	***************************************************************************
	** The upper layer acquired this lock just prior to calling us.
	** First determine if we currently own this command: start by
	** searching the outstanding SRB pool.  If the command is not
	** found at all, and the system wanted us to just abort it,
	** return success.
	***************************************************************************
	*/
	if(acb->srboutstandingcount != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_allintr(acb);
		for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
			srb = acb->psrb_pool[i];
			if(srb->srb_state == ARCMSR_SRB_START) {
				if(srb->pccb == abortccb) {
					srb->srb_state = ARCMSR_SRB_ABORTED;
					printf("arcmsr%d: scsi id=%d lun=%jx abort srb '%p'"
						" outstanding command \n"
						, acb->pci_unit, abortccb->ccb_h.target_id
						, (uintmax_t)abortccb->ccb_h.target_lun, srb);
					arcmsr_polling_srbdone(acb, srb);
					/* enable outbound Post Queue, outbound doorbell Interrupt */
					arcmsr_enable_allintr(acb, intmask_org);
					return (TRUE);
				}
			}
		}
		/* enable outbound Post Queue, outbound doorbell Interrupt */
		arcmsr_enable_allintr(acb, intmask_org);
	}
	return(FALSE);
}
/*
****************************************************************************
****************************************************************************
*/
static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
{
	int retry = 0;

	acb->num_resets++;
	acb->acb_flags |= ACB_F_BUS_RESET;
	while(acb->srboutstandingcount != 0 && retry < 400) {
		arcmsr_interrupt(acb);
		UDELAY(25000);
		retry++;
	}
	arcmsr_iop_reset(acb);
	acb->acb_flags &= ~ACB_F_BUS_RESET;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, union ccb *pccb)
{
	if (pccb->ccb_h.target_lun) {
		pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(pccb);
		return;
	}
	pccb->ccb_h.status |= CAM_REQ_CMP;
	switch (scsiio_cdb_ptr(&pccb->csio)[0]) {
	case INQUIRY: {
			unsigned char inqdata[36];
			char *buffer = pccb->csio.data_ptr;

			inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
			inqdata[1] = 0;			/* rem media bit & Dev Type Modifier */
			inqdata[2] = 0;			/* ISO, ECMA, & ANSI versions */
			inqdata[3] = 0;
			inqdata[4] = 31;		/* length of additional data */
			inqdata[5] = 0;
			inqdata[6] = 0;
			inqdata[7] = 0;
			strncpy(&inqdata[8], "Areca ", 8);	/* Vendor Identification */
			strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
			strncpy(&inqdata[32], "R001", 4);	/* Product Revision */
			memcpy(buffer, inqdata, sizeof(inqdata));
			xpt_done(pccb);
		}
		break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
			if (arcmsr_iop_message_xfer(acb, pccb)) {
				pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			}
			xpt_done(pccb);
		}
		break;
	default:
		xpt_done(pccb);
	}
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
{
	struct AdapterControlBlock *acb;

	acb = (struct AdapterControlBlock *) cam_sim_softc(psim);
	if(acb == NULL) {
		pccb->ccb_h.status |= CAM_REQ_INVALID;
		xpt_done(pccb);
		return;
	}
	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO: {
			struct CommandControlBlock *srb;
			int target = pccb->ccb_h.target_id;
			int error;

			if
(pccb->ccb_h.flags & CAM_CDB_PHYS) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); return; } if(target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, pccb); return; } if((srb = arcmsr_get_freesrb(acb)) == NULL) { pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(pccb); return; } pccb->ccb_h.arcmsr_ccbsrb_ptr = srb; pccb->ccb_h.arcmsr_ccbacb_ptr = acb; srb->pccb = pccb; error = bus_dmamap_load_ccb(acb->dm_segs_dmat , srb->dm_segs_dmamap , pccb , arcmsr_execute_srb, srb, /*flags*/0); if(error == EINPROGRESS) { xpt_freeze_simq(acb->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } - case XPT_TARGET_IO: { - /* target mode not yet support vendor specific commands. */ - pccb->ccb_h.status |= CAM_REQ_CMP; - xpt_done(pccb); - break; - } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ARCMSR_MAX_TARGETID; /* 0-16 */ cpi->max_lun = ARCMSR_MAX_TARGETLUN; /* 0-7 */ cpi->initiator_id = ARCMSR_SCSI_INITIATOR_ID; /* 255 */ cpi->bus_id = cam_sim_bus(psim); strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); #ifdef CAM_NEW_TRAN_CODE if(acb->adapter_bus_speed == ACB_BUS_SPEED_12G) cpi->base_transfer_speed = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cpi->base_transfer_speed = 600000; else cpi->base_transfer_speed = 300000; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol_version = SCSI_REV_SPC2; } else { cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol_version = SCSI_REV_2; } cpi->protocol = PROTO_SCSI; #endif cpi->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_ABORT: { union ccb *pabort_ccb; pabort_ccb = pccb->cab.abort_ccb; switch (pabort_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: - case XPT_IMMED_NOTIFY: case XPT_CONT_TARGET_IO: if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; xpt_done(pabort_ccb); pccb->ccb_h.status |= CAM_REQ_CMP; } else { xpt_print_path(pabort_ccb->ccb_h.path); printf("Not found\n"); pccb->ccb_h.status |= CAM_PATH_INVALID; } break; case XPT_SCSI_IO: pccb->ccb_h.status |= CAM_UA_ABORT; break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; break; } xpt_done(pccb); break; } case XPT_RESET_BUS: case XPT_RESET_DEV: { u_int32_t i; arcmsr_bus_reset(acb); for (i=0; i < 500; i++) { DELAY(1000); } pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_TERM_IO: { pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } cts = &pccb->cts; #ifdef CAM_NEW_TRAN_CODE { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings_sas *sas; scsi = &cts->proto_specific.scsi; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; scsi->valid = CTS_SCSI_VALID_TQ; cts->protocol = PROTO_SCSI; if((acb->vendor_device_id == PCIDevVenIDARC1880) || (acb->vendor_device_id == PCIDevVenIDARC1680) || (acb->vendor_device_id == PCIDevVenIDARC1214)) { cts->protocol_version = SCSI_REV_SPC2; 
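#if 0
/*
**************************************************************************
** Illustrative sketch (not part of the driver): XPT_PATH_INQ and
** XPT_GET_TRAN_SETTINGS above both translate the adapter link speed
** into the CAM rate unit of KB/s through an if/else ladder.  The same
** mapping could be captured once in a lookup table; the enum values
** below are stand-ins for the ACB_BUS_SPEED_* constants.
**************************************************************************
*/
#include <stdint.h>

enum bus_speed { BUS_SPEED_3G, BUS_SPEED_6G, BUS_SPEED_12G };

static uint32_t
speed_to_kbps(enum bus_speed speed)
{
	static const uint32_t tab[] = {
		[BUS_SPEED_3G]  = 300000,	/* 3Gb/s SAS */
		[BUS_SPEED_6G]  = 600000,	/* 6Gb/s SAS */
		[BUS_SPEED_12G] = 1200000,	/* 12Gb/s SAS */
	};

	return (tab[speed]);
}
#endif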
cts->transport_version = 0; cts->transport = XPORT_SAS; sas = &cts->xport_specific.sas; sas->valid = CTS_SAS_VALID_SPEED; if (acb->adapter_bus_speed == ACB_BUS_SPEED_12G) sas->bitrate = 1200000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G) sas->bitrate = 600000; else if(acb->adapter_bus_speed == ACB_BUS_SPEED_3G) sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport_version = 2; cts->transport = XPORT_SPI; spi = &cts->xport_specific.spi; spi->flags = CTS_SPI_FLAGS_DISC_ENB; if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) spi->sync_period = 1; else spi->sync_period = 2; spi->sync_offset = 32; spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; spi->valid = CTS_SPI_VALID_DISC | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; } } #else { cts->flags = (CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); if (acb->adapter_bus_speed == ACB_BUS_SPEED_6G) cts->sync_period = 1; else cts->sync_period = 2; cts->sync_offset = 32; cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } #endif pccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(pccb); break; } case XPT_SET_TRAN_SETTINGS: { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } case XPT_CALC_GEOMETRY: if(pccb->ccb_h.target_id == 16) { pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; xpt_done(pccb); break; } #if __FreeBSD_version >= 500000 cam_calc_geometry(&pccb->ccg, 1); #else { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &pccb->ccg; if (ccg->block_size == 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } if(((1024L * 1024L)/ccg->block_size) < 0) { pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size); if(size_mb > 1024 ) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; pccb->ccb_h.status |= CAM_REQ_CMP; } #endif xpt_done(pccb); break; default: pccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(pccb); break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; acb->acb_flags |= ACB_F_MSG_START_BGRB; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBC_MessageUnit,
0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_hbd_bgrb(struct AdapterControlBlock *acb) { acb->acb_flags |= ACB_F_MSG_START_BGRB; CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit); } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); break; case ACB_ADAPTER_TYPE_D: arcmsr_start_hbd_bgrb(acb); break; } } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) == 0xFFFFFFFF) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ?
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' " "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' " "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; int index; u_int16_t error; polling_ccb_retry: poll_count++; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { index = phbbmu->doneq_index; if((flag_srb = phbbmu->done_qbuffer[index]) == 0) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } goto polling_ccb_retry; } } phbbmu->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ phbbmu->doneq_index = index; /* check if command done with no error*/ srb = (struct CommandControlBlock *) (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; poll_srb_done = (srb == poll_srb) ?
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' " "poll command abort successfully \n" , acb->pci_unit , srb->pccb->ccb_h.target_id , (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' " "srboutstandingcount=%d \n" , acb->pci_unit , srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int16_t error; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ?
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ** ********************************************************************** */ static void arcmsr_polling_hbd_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { struct HBD_MessageUnit0 *phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; struct CommandControlBlock *srb; u_int32_t flag_srb, poll_srb_done=0, poll_count=0; u_int32_t outbound_write_pointer; u_int16_t error, doneq_index; polling_ccb_retry: poll_count++; bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); while(1) { outbound_write_pointer = phbdmu->done_qbuffer[0].addressLow; doneq_index = phbdmu->doneq_index; if ((outbound_write_pointer & 0xFF) == (doneq_index & 0xFF)) { if(poll_srb_done) { break;/*chip FIFO no ccb for completion already*/ } else { UDELAY(25000); if ((poll_count > 100) && (poll_srb != NULL)) { break; } if (acb->srboutstandingcount == 0) { break; } goto polling_ccb_retry; } } doneq_index = arcmsr_get_doneq_index(phbdmu); flag_srb = phbdmu->done_qbuffer[(doneq_index & 0xFF)+1].addressLow; /* check if command done with no error*/ srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/ error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE; CHIP_REG_WRITE32(HBD_MessageUnit, 0, outboundlist_read_pointer, doneq_index); if (poll_srb != NULL) poll_srb_done = (srb == poll_srb) ?
1:0; if((srb->acb != acb) || (srb->srb_state != ARCMSR_SRB_START)) { if(srb->srb_state == ARCMSR_SRB_ABORTED) { printf("arcmsr%d: scsi id=%d lun=%jx srb='%p' poll command abort successfully \n" , acb->pci_unit, srb->pccb->ccb_h.target_id, (uintmax_t)srb->pccb->ccb_h.target_lun, srb); srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); continue; } printf("arcmsr%d: polling got an illegal srb command done srb='%p' srboutstandingcount=%d \n" , acb->pci_unit, srb, acb->srboutstandingcount); continue; } arcmsr_report_srb_state(acb, srb, error); } /*drain reply FIFO*/ } /* ********************************************************************** ********************************************************************** */ static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_polling_hba_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_polling_hbb_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_polling_hbc_srbdone(acb, poll_srb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_polling_hbd_srbdone(acb, poll_srb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i=0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i=0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* **********************************************************************
********************************************************************** */ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: wait " "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBB_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBB_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/
acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_get_hbd_config(struct AdapterControlBlock *acb) { char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; char *acb_device_map = acb->device_map; size_t iop_firm_model = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ size_t iop_firm_version = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ size_t iop_device_map = offsetof(struct HBD_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); int i; if(CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE) CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, ARCMSR_HBDMU_IOP2DRV_MESSAGE_CMD_DONE_CLEAR); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); } i = 0; while(i < 8) { *acb_firm_model = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); /* 8 bytes firm_model, 15, 60-67*/ acb_firm_model++; i++; } i = 0; while(i < 16) { *acb_firm_version = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); /* 16 bytes firm_version, 17, 68-83*/ acb_firm_version++; i++; } i = 0; while(i < 16) { *acb_device_map = bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); acb_device_map++; i++; } printf("Areca RAID adapter%d: %s F/W version %s \n", acb->pci_unit, acb->firm_model, acb->firm_version); acb->firm_request_len = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ acb->firm_numbers_queue = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ acb->firm_sdram_size = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ acb->firm_ide_channels = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ acb->firm_cfg_version = CHIP_REG_READ32(HBD_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ if(acb->firm_numbers_queue > ARCMSR_MAX_HBD_POSTQUEUE) acb->maxOutstanding = ARCMSR_MAX_HBD_POSTQUEUE - 1; else acb->maxOutstanding = acb->firm_numbers_queue - 1; } /* 
********************************************************************** ********************************************************************** */ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_get_hba_config(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_get_hbb_config(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_get_hbc_config(acb); } break; case ACB_ADAPTER_TYPE_D: { arcmsr_get_hbd_config(acb); } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) { int timeout=0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milliseconds */ } } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; while ((READ_CHIP_REG32(0, phbbmu->iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milliseconds */ } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); } break; case ACB_ADAPTER_TYPE_C: { while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milliseconds */ } } break; case ACB_ADAPTER_TYPE_D: { while ((CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBDMU_MESSAGE_FIRMWARE_OK) == 0) { if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ { printf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit); return; } UDELAY(15000); /* wait 15 milliseconds */ } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) { u_int32_t outbound_doorbell; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { /* empty the doorbell Qbuffer if the doorbell rang */ outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { /* empty the doorbell Qbuffer if the doorbell rang */ outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
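/*
 * A sketch of the Type C handshake, as assumed from the register names
 * above (not from vendor documentation): the driver acknowledges the
 * IOP by echoing the outbound doorbell bits into outbound_doorbell_clear,
 * then raises DRV2IOP_DATA_READ_OK so the IOP knows its queue buffer was
 * consumed.  The two register reads that follow exist only to flush the
 * posted PCI writes before interrupts are re-enabled.
 */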
CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell_clear); /* Dummy read to force pci flush */ CHIP_REG_READ32(HBC_MessageUnit, 0, inbound_doorbell); /* Dummy read to force pci flush */ } break; case ACB_ADAPTER_TYPE_D: { /* empty the doorbell Qbuffer if the doorbell rang */ outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell); CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ); } break; } } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) { unsigned long srb_phyaddr; u_int32_t srb_phyaddr_hi32; u_int32_t srb_phyaddr_lo32; /* ******************************************************************** ** here we need to tell iop 331 our freesrb.HighPart ** if freesrb.HighPart is not zero ******************************************************************** */ srb_phyaddr = (unsigned long) acb->srb_phyaddr.phyaddr; srb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high; srb_phyaddr_lo32 = acb->srb_phyaddr.B.phyadd_low; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hba_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ case ACB_ADAPTER_TYPE_B: { u_int32_t post_queue_phyaddr; struct HBB_MessageUnit *phbbmu; phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->postq_index = 0; phbbmu->doneq_index = 0; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); return FALSE; } post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBB_MessageUnit, post_qbuffer); CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); return FALSE; } WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit); return FALSE; } } break; case ACB_ADAPTER_TYPE_C: { if(srb_phyaddr_hi32 != 0) { CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); if(!arcmsr_hbc_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } } break; case ACB_ADAPTER_TYPE_D: { u_int32_t post_queue_phyaddr, done_queue_phyaddr; struct HBD_MessageUnit0 *phbdmu; phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->postq_index = 0; phbdmu->doneq_index = 0x40FF; post_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, post_qbuffer); done_queue_phyaddr = srb_phyaddr_lo32 + ARCMSR_SRBS_POOL_SIZE + offsetof(struct HBD_MessageUnit0, done_qbuffer); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[3], done_queue_phyaddr); /* doneQ base */ CHIP_REG_WRITE32(HBD_MessageUnit, 0, msgcode_rwbuffer[4], 0x100); CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); if(!arcmsr_hbd_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); return FALSE; } } break; } return (TRUE); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu; WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_MESSAGE_ACTIVE_EOI_MODE); if(!arcmsr_hbb_wait_msgint_ready(acb)) { printf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); return; } } break; } } /* ********************************************************************** ********************************************************************** */ static void arcmsr_iop_init(struct AdapterControlBlock *acb) { u_int32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); arcmsr_get_firmware_spec(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue, outbound doorbell Interrupt */ arcmsr_enable_allintr(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } /* ********************************************************************** ********************************************************************** */ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct AdapterControlBlock *acb = arg; struct CommandControlBlock *srb_tmp; u_int32_t i; unsigned long srb_phyaddr = (unsigned long)segs->ds_addr; acb->srb_phyaddr.phyaddr = srb_phyaddr; srb_tmp = (struct CommandControlBlock *)acb->uncacheptr; for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { if(bus_dmamap_create(acb->dm_segs_dmat, /*flags*/0, &srb_tmp->dm_segs_dmamap) != 0) { 
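/*
 * bus_dmamap_create() failed for this SRB.  Flag the ACB so that
 * arcmsr_initialize() can detect the failure after the callback
 * returns (a bus_dmamap_load() callback cannot return an error
 * directly; see the ACB_F_MAPFREESRB_FAILD check further below)
 * and abort the attach cleanly.
 */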
acb->acb_flags |= ACB_F_MAPFREESRB_FAILD; printf("arcmsr%d:" " srb dmamap bus_dmamap_create error\n", acb->pci_unit); return; } if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D)) { srb_tmp->cdb_phyaddr_low = srb_phyaddr; srb_tmp->cdb_phyaddr_high = (u_int32_t)((srb_phyaddr >> 16) >> 16); } else srb_tmp->cdb_phyaddr_low = srb_phyaddr >> 5; srb_tmp->acb = acb; acb->srbworkingQ[i] = acb->psrb_pool[i] = srb_tmp; srb_phyaddr = srb_phyaddr + SRB_SIZE; srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE); } acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr; } /* ************************************************************************ ************************************************************************ */ static void arcmsr_free_resource(struct AdapterControlBlock *acb) { /* remove the control device */ if(acb->ioctl_dev != NULL) { destroy_dev(acb->ioctl_dev); } bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap); bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap); bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_init(struct AdapterControlBlock *acb) { ARCMSR_LOCK_INIT(&acb->isr_lock, "arcmsr isr lock"); ARCMSR_LOCK_INIT(&acb->srb_lock, "arcmsr srb lock"); ARCMSR_LOCK_INIT(&acb->postDone_lock, "arcmsr postQ lock"); ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr RW buffer lock"); } /* ************************************************************************ ************************************************************************ */ static void arcmsr_mutex_destroy(struct AdapterControlBlock *acb) { ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); ARCMSR_LOCK_DESTROY(&acb->postDone_lock); ARCMSR_LOCK_DESTROY(&acb->srb_lock); ARCMSR_LOCK_DESTROY(&acb->isr_lock); } /* ************************************************************************ ************************************************************************ */ static u_int32_t arcmsr_initialize(device_t dev) { struct AdapterControlBlock *acb = device_get_softc(dev); u_int16_t pci_command; int i, j,max_coherent_size; u_int32_t vendor_dev_id; vendor_dev_id = pci_get_devid(dev); acb->vendor_device_id = vendor_dev_id; acb->sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch (vendor_dev_id) { case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: { acb->adapter_type = ACB_ADAPTER_TYPE_C; if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883) acb->adapter_bus_speed = ACB_BUS_SPEED_12G; else acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; case PCIDevVenIDARC1214: { acb->adapter_type = ACB_ADAPTER_TYPE_D; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBD_MessageUnit0)); } break; case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1203: { acb->adapter_type = ACB_ADAPTER_TYPE_B; acb->adapter_bus_speed = ACB_BUS_SPEED_6G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE + (sizeof(struct HBB_MessageUnit)); } break; case PCIDevVenIDARC1110: case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: 
case PCIDevVenIDARC1170: case PCIDevVenIDARC1210: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: { acb->adapter_type = ACB_ADAPTER_TYPE_A; acb->adapter_bus_speed = ACB_BUS_SPEED_3G; max_coherent_size = ARCMSR_SRBS_POOL_SIZE; } break; default: { printf("arcmsr%d:" " unknown RAID adapter type \n", device_get_unit(dev)); return ENOMEM; } } #if __FreeBSD_version >= 700000 if(bus_dma_tag_create( /*PCI parent*/ bus_get_dma_tag(dev), #else if(bus_dma_tag_create( /*PCI parent*/ NULL, #endif /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/ BUS_SPACE_UNRESTRICTED, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->parent_dmat) != 0) { printf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, #ifdef PAE /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, #else /*lowaddr*/ BUS_SPACE_MAXADDR, #endif /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM, /*nsegments*/ ARCMSR_MAX_SG_ENTRIES, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &acb->isr_lock, #endif &acb->dm_segs_dmat) != 0) { bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENOMEM; } /* DMA tag for our srb structures....
Allocate the freesrb memory */ if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat, /*alignment*/ 0x20, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ max_coherent_size, /*nsegments*/ 1, /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT, /*flags*/ 0, #if __FreeBSD_version >= 501102 /*lockfunc*/ NULL, /*lockarg*/ NULL, #endif &acb->srb_dmat) != 0) { bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev)); return ENXIO; } /* Allocation for our srbs */ if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev)); return ENXIO; } /* And permanently map them */ if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) { bus_dma_tag_destroy(acb->srb_dmat); bus_dma_tag_destroy(acb->dm_segs_dmat); bus_dma_tag_destroy(acb->parent_dmat); printf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev)); return ENXIO; } pci_command = pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= PCIM_CMD_BUSMASTEREN; pci_command |= PCIM_CMD_PERRESPEN; pci_command |= PCIM_CMD_MWRICEN; /* Enable Busmaster */ pci_write_config(dev, PCIR_COMMAND, pci_command, 2); switch(acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_B: { struct HBB_MessageUnit *phbbmu; struct CommandControlBlock *freesrb; u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) }; vm_offset_t mem_base[]={0,0}; u_long size; if (vendor_dev_id == PCIDevVenIDARC1203) size = sizeof(struct HBB_DOORBELL_1203); else size = sizeof(struct HBB_DOORBELL); for(i=0; i < 2; i++) { if(i == 0) { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } else { acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i], RF_ACTIVE); } if(acb->sys_res_arcmsr[i] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i); return ENXIO; } mem_base[i] = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]); if(mem_base[i] == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i); return ENXIO; } 
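/*
 * Cache the bus tag/handle pair for this BAR.  Judging from the rid[]
 * table above (an assumption, not a documented contract), BAR0 maps the
 * Type B doorbell registers and BAR2 the message rwbuffer; both are
 * dereferenced through these cached pairs by the READ_/WRITE_CHIP_REG32()
 * accessors.
 */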
acb->btag[i] = rman_get_bustag(acb->sys_res_arcmsr[i]); acb->bhandle[i] = rman_get_bushandle(acb->sys_res_arcmsr[i]); } freesrb = (struct CommandControlBlock *)acb->uncacheptr; acb->pmu = (struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE); phbbmu = (struct HBB_MessageUnit *)acb->pmu; phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)mem_base[0]; phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)mem_base[1]; if (vendor_dev_id == PCIDevVenIDARC1203) { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL_1203, iop2drv_doorbell_mask); } else { phbbmu->drv2iop_doorbell = offsetof(struct HBB_DOORBELL, drv2iop_doorbell); phbbmu->drv2iop_doorbell_mask = offsetof(struct HBB_DOORBELL, drv2iop_doorbell_mask); phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell); phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask); } } break; case ACB_ADAPTER_TYPE_C: { u_int32_t rid0 = PCIR_BAR(1); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)mem_base0; } break; case ACB_ADAPTER_TYPE_D: { struct HBD_MessageUnit0 *phbdmu; u_int32_t rid0 = PCIR_BAR(0); vm_offset_t mem_base0; acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(acb->sys_res_arcmsr[0] == NULL) { arcmsr_free_resource(acb); printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev)); return ENOMEM; } if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev)); return ENXIO; } mem_base0 = (vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]); if(mem_base0 == 0) { arcmsr_free_resource(acb); printf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev)); return ENXIO; } acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]); acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]); acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE); phbdmu = (struct HBD_MessageUnit0 *)acb->pmu; phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0; } break; } if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) { arcmsr_free_resource(acb); printf("arcmsr%d: map free srb failure!\n", device_get_unit(dev)); return ENXIO; } acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; /* ******************************************************************** ** init raid volume state ******************************************************************** */ for(i=0; i 
< ARCMSR_MAX_TARGETID; i++) { for(j=0; j < ARCMSR_MAX_TARGETLUN; j++) { acb->devstate[i][j] = ARECA_RAID_GONE; } } arcmsr_iop_init(acb); return(0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_attach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); u_int32_t unit=device_get_unit(dev); struct ccb_setasync csa; struct cam_devq *devq; /* Device Queue to use for this SIM */ struct resource *irqres; int rid; if(acb == NULL) { printf("arcmsr%d: cannot allocate softc\n", unit); return (ENOMEM); } arcmsr_mutex_init(acb); acb->pci_dev = dev; acb->pci_unit = unit; if(arcmsr_initialize(dev)) { printf("arcmsr%d: initialize failure!\n", unit); arcmsr_mutex_destroy(acb); return ENXIO; } /* After setting up the adapter, map our interrupt */ rid = 0; irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { #else bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih)) { #endif arcmsr_free_resource(acb); arcmsr_mutex_destroy(acb); printf("arcmsr%d: unable to register interrupt handler!\n", unit); return ENXIO; } acb->irqres = irqres; /* * Now let the CAM generic SCSI layer find the SCSI devices on * the bus and start the queue running at the idle loop. * * Create the device queue for our SIM(s): * (MAX_START_JOB - 1) = max_sim_transactions */ devq = cam_simq_alloc(acb->maxOutstanding); if(devq == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_simq_alloc failure!\n", unit); return ENXIO; } #if __FreeBSD_version >= 700025 acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->isr_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #else acb->psim = cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq); #endif if(acb->psim == NULL) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_simq_free(devq); arcmsr_mutex_destroy(acb); printf("arcmsr%d: cam_sim_alloc failure!\n", unit); return ENXIO; } ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); #if __FreeBSD_version >= 700044 if(xpt_bus_register(acb->psim, dev, 0) != CAM_SUCCESS) { #else if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) { #endif arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); cam_sim_free(acb->psim, /*free_devq*/TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_bus_register failure!\n", unit); return ENXIO; } if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { arcmsr_free_resource(acb); bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, /* free_simq */ TRUE); arcmsr_mutex_destroy(acb); printf("arcmsr%d: xpt_create_path failure!\n", unit); return ENXIO; } /* **************************************************** */ xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = arcmsr_async; csa.callback_arg = acb->psim; xpt_action((union ccb *)&csa); ARCMSR_LOCK_RELEASE(&acb->isr_lock);
/* Create the control device. */ acb->ioctl_dev = make_dev(&arcmsr_cdevsw, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit); #if __FreeBSD_version < 503000 acb->ioctl_dev->si_drv1 = acb; #endif #if __FreeBSD_version > 500005 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit); #endif arcmsr_callout_init(&acb->devmap_callout); callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_probe(device_t dev) { u_int32_t id; u_int16_t sub_device_id; static char buf[256]; char x_type[]={"unknown"}; char *type; int raid6 = 1; if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) { return (ENXIO); } sub_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); switch(id = pci_get_devid(dev)) { case PCIDevVenIDARC1110: case PCIDevVenIDARC1200: case PCIDevVenIDARC1201: case PCIDevVenIDARC1210: raid6 = 0; /*FALLTHRU*/ case PCIDevVenIDARC1120: case PCIDevVenIDARC1130: case PCIDevVenIDARC1160: case PCIDevVenIDARC1170: case PCIDevVenIDARC1220: case PCIDevVenIDARC1230: case PCIDevVenIDARC1231: case PCIDevVenIDARC1260: case PCIDevVenIDARC1261: case PCIDevVenIDARC1270: case PCIDevVenIDARC1280: type = "SATA 3G"; break; case PCIDevVenIDARC1212: case PCIDevVenIDARC1222: case PCIDevVenIDARC1380: case PCIDevVenIDARC1381: case PCIDevVenIDARC1680: case PCIDevVenIDARC1681: type = "SAS 3G"; break; case PCIDevVenIDARC1880: case PCIDevVenIDARC1882: case PCIDevVenIDARC1213: case PCIDevVenIDARC1223: if (sub_device_id == ARECA_SUB_DEV_ID_1883) type = "SAS 12G"; else type = "SAS 6G"; break; case PCIDevVenIDARC1214: case PCIDevVenIDARC1203: type = "SATA 6G"; break; default: type = x_type; raid6 = 0; break; } if(type == x_type) return(ENXIO); sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n", type, raid6 ? 
"(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_shutdown(device_t dev) { u_int32_t i; u_int32_t intmask_org; struct CommandControlBlock *srb; struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); /* stop adapter background rebuild */ ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); /* disable all outbound interrupt */ intmask_org = arcmsr_disable_allintr(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); /* abort all outstanding command */ acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; if(acb->srboutstandingcount != 0) { /*clear and abort all outbound posted Q*/ arcmsr_done4abort_postqueue(acb); /* talk to iop 331 outstanding command aborted*/ arcmsr_abort_allcmd(acb); for(i=0; i < ARCMSR_MAX_FREESRB_NUM; i++) { srb = acb->psrb_pool[i]; if(srb->srb_state == ARCMSR_SRB_START) { srb->srb_state = ARCMSR_SRB_ABORTED; srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; arcmsr_srb_complete(srb, 1); } } } acb->srboutstandingcount = 0; acb->workingsrb_doneindex = 0; acb->workingsrb_startindex = 0; acb->pktRequestCount = 0; acb->pktReturnCount = 0; ARCMSR_LOCK_RELEASE(&acb->isr_lock); return (0); } /* ************************************************************************ ************************************************************************ */ static int arcmsr_detach(device_t dev) { struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); int i; callout_stop(&acb->devmap_callout); bus_teardown_intr(dev, acb->irqres, acb->ih); arcmsr_shutdown(dev); arcmsr_free_resource(acb); for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); } bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); ARCMSR_LOCK_ACQUIRE(&acb->isr_lock); xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); xpt_free_path(acb->ppath); xpt_bus_deregister(cam_sim_path(acb->psim)); cam_sim_free(acb->psim, TRUE); ARCMSR_LOCK_RELEASE(&acb->isr_lock); arcmsr_mutex_destroy(acb); return (0); } #ifdef ARCMSR_DEBUG1 static void arcmsr_dump_data(struct AdapterControlBlock *acb) { if((acb->pktRequestCount - acb->pktReturnCount) == 0) return; printf("Command Request Count =0x%x\n",acb->pktRequestCount); printf("Command Return Count =0x%x\n",acb->pktReturnCount); printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount)); printf("Queued Command Count =0x%x\n",acb->srboutstandingcount); } #endif Index: stable/11/sys/dev/ata/ata-all.c =================================================================== --- stable/11/sys/dev/ata/ata-all.c (revision 314380) +++ stable/11/sys/dev/ata/ata-all.c (revision 314381) @@ -1,1229 +1,1225 @@ /*- * Copyright (c) 1998 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* prototypes */ static void ataaction(struct cam_sim *sim, union ccb *ccb); static void atapoll(struct cam_sim *sim); static void ata_cam_begin_transaction(device_t dev, union ccb *ccb); static void ata_cam_end_transaction(device_t dev, struct ata_request *request); static void ata_cam_request_sense(device_t dev, struct ata_request *request); static int ata_check_ids(device_t dev, union ccb *ccb); static void ata_conn_event(void *context, int dummy); static void ata_interrupt_locked(void *data); static int ata_module_event_handler(module_t mod, int what, void *arg); static void ata_periodic_poll(void *data); static int ata_str2mode(const char *str); /* global vars */ MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; devclass_t ata_devclass; int ata_dma_check_80pin = 1; /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin, CTLFLAG_RWTUN, &ata_dma_check_80pin, 0, "Check for 80pin cable before setting ATA DMA mode"); FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver"); /* * newbus device interface related functions */ int ata_probe(device_t dev) { return (BUS_PROBE_LOW_PRIORITY); } int ata_attach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); int error, rid; struct cam_devq *devq; const char *res; char buf[64]; int i, mode; /* check that we have a virgin channel to attach */ if (ch->r_irq) return EEXIST; /* initialize the softc basics */ ch->dev = dev; ch->state = ATA_IDLE; bzero(&ch->state_mtx, sizeof(struct mtx)); mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); TASK_INIT(&ch->conntask, 0, ata_conn_event, dev); for (i = 0; i < 16; i++) { ch->user[i].revision = 0; snprintf(buf, sizeof(buf), "dev%d.sata_rev", i); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &mode) != 0 && resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &mode) != 0) mode = -1; if (mode >= 0) ch->user[i].revision = mode; ch->user[i].mode = 0; snprintf(buf, sizeof(buf), "dev%d.mode", i); if (resource_string_value(device_get_name(dev), device_get_unit(dev), buf, &res) == 0) mode = ata_str2mode(res); else if (resource_string_value(device_get_name(dev), device_get_unit(dev), 
"mode", &res) == 0) mode = ata_str2mode(res); else mode = -1; if (mode >= 0) ch->user[i].mode = mode; if (ch->flags & ATA_SATA) ch->user[i].bytecount = 8192; else ch->user[i].bytecount = MAXPHYS; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->flags & ATA_SATA) { if (ch->pm_level > 0) ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->pm_level > 1) ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ; } else { if (!(ch->flags & ATA_NO_48BIT_DMA)) ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48; } } callout_init(&ch->poll_callout, 1); /* allocate DMA resources if DMA HW present*/ if (ch->dma.alloc) ch->dma.alloc(dev); /* setup interrupt delivery */ rid = ATA_IRQ_RID; ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!ch->r_irq) { device_printf(dev, "unable to allocate interrupt\n"); return ENXIO; } if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ata_interrupt, ch, &ch->ih))) { bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); device_printf(dev, "unable to setup interrupt\n"); return error; } if (ch->flags & ATA_PERIODIC_POLL) callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); mtx_lock(&ch->state_mtx); /* Create the device queue for our SIM. */ devq = cam_simq_alloc(1); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch, device_get_unit(dev), &ch->state_mtx, 1, 0, devq); if (ch->sim == NULL) { device_printf(dev, "unable to allocate sim\n"); cam_simq_free(devq); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } mtx_unlock(&ch->state_mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); ch->sim = NULL; err1: bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); mtx_unlock(&ch->state_mtx); if (ch->flags & ATA_PERIODIC_POLL) callout_drain(&ch->poll_callout); return (error); } int ata_detach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); /* check that we have a valid channel to detach */ if (!ch->r_irq) return ENXIO; /* grap the channel lock so no new requests gets launched */ mtx_lock(&ch->state_mtx); ch->state |= ATA_STALL_QUEUE; mtx_unlock(&ch->state_mtx); if (ch->flags & ATA_PERIODIC_POLL) callout_drain(&ch->poll_callout); taskqueue_drain(taskqueue_thread, &ch->conntask); mtx_lock(&ch->state_mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); ch->sim = NULL; mtx_unlock(&ch->state_mtx); /* release resources */ bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ch->r_irq = NULL; /* free DMA resources if DMA HW present*/ if (ch->dma.free) ch->dma.free(dev); mtx_destroy(&ch->state_mtx); return 0; } static void ata_conn_event(void *context, int dummy) { device_t dev = (device_t)context; struct ata_channel *ch = device_get_softc(dev); union ccb *ccb; mtx_lock(&ch->state_mtx); if (ch->sim == NULL) { mtx_unlock(&ch->state_mtx); return; } ata_reinit(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, 
cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); mtx_unlock(&ch->state_mtx); } int ata_reinit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; xpt_freeze_simq(ch->sim, 1); if ((request = ch->running)) { ch->running = NULL; if (ch->state == ATA_ACTIVE) ch->state = ATA_IDLE; callout_stop(&request->callout); if (ch->dma.unload) ch->dma.unload(request); request->result = ERESTART; ata_cam_end_transaction(dev, request); } /* reset the controller HW, the channel and device(s) */ ATA_RESET(dev); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); return(0); } int ata_suspend(device_t dev) { struct ata_channel *ch; /* check for valid device */ if (!dev || !(ch = device_get_softc(dev))) return ENXIO; if (ch->flags & ATA_PERIODIC_POLL) callout_drain(&ch->poll_callout); mtx_lock(&ch->state_mtx); xpt_freeze_simq(ch->sim, 1); while (ch->state != ATA_IDLE) msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100); mtx_unlock(&ch->state_mtx); return(0); } int ata_resume(device_t dev) { struct ata_channel *ch; int error; /* check for valid device */ if (!dev || !(ch = device_get_softc(dev))) return ENXIO; mtx_lock(&ch->state_mtx); error = ata_reinit(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->state_mtx); if (ch->flags & ATA_PERIODIC_POLL) callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); return error; } void ata_interrupt(void *data) { struct ata_channel *ch = (struct ata_channel *)data; mtx_lock(&ch->state_mtx); ata_interrupt_locked(data); mtx_unlock(&ch->state_mtx); } static void ata_interrupt_locked(void *data) { struct ata_channel *ch = (struct ata_channel *)data; struct ata_request *request; /* ignore the interrupt if it's not for us */ if (ch->hw.status && !ch->hw.status(ch->dev)) return; /* do we have a running request */ if (!(request = ch->running)) return; ATA_DEBUG_RQ(request, "interrupt"); /* safety check for the right state */ if (ch->state == ATA_IDLE) { device_printf(request->dev, "interrupt on idle channel ignored\n"); return; } /* * we have the HW locks, so end the transaction for this request * if it finishes immediately; otherwise wait for the next interrupt */ if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; if (ch->state == ATA_ACTIVE) ch->state = ATA_IDLE; ata_cam_end_transaction(ch->dev, request); return; } } static void ata_periodic_poll(void *data) { struct ata_channel *ch = (struct ata_channel *)data; callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); ata_interrupt(ch); } void ata_print_cable(device_t dev, u_int8_t *who) { device_printf(dev, "DMA limited to UDMA33, %s found non-ATA66 cable\n", who); } /* * misc support functions */ void ata_default_registers(device_t dev) { struct ata_channel *ch = device_get_softc(dev); /* fill in the defaults from what's set up already */ ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res; ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset; ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res; ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset; ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res; ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset; ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res; ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset; } void ata_udelay(int interval) { /* for now just use DELAY, the timer/sleep subsystems are not there yet */ if (1 || interval < (1000000/hz) ||
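/*
 * The "1 ||" below deliberately short-circuits the test, so the
 * spinning DELAY() is always taken and the pause() branch stays
 * dead code until the timer/sleep subsystems can be relied upon here,
 * as the comment above says.
 */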
ata_delayed_attach) DELAY(interval); else pause("ataslp", interval/(1000000/hz)); } const char * ata_cmd2str(struct ata_request *request) { static char buffer[20]; if (request->flags & ATA_R_ATAPI) { switch (request->u.atapi.sense.key ? request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) { case 0x00: return ("TEST_UNIT_READY"); case 0x01: return ("REZERO"); case 0x03: return ("REQUEST_SENSE"); case 0x04: return ("FORMAT"); case 0x08: return ("READ"); case 0x0a: return ("WRITE"); case 0x10: return ("WEOF"); case 0x11: return ("SPACE"); case 0x12: return ("INQUIRY"); case 0x15: return ("MODE_SELECT"); case 0x19: return ("ERASE"); case 0x1a: return ("MODE_SENSE"); case 0x1b: return ("START_STOP"); case 0x1e: return ("PREVENT_ALLOW"); case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES"); case 0x25: return ("READ_CAPACITY"); case 0x28: return ("READ_BIG"); case 0x2a: return ("WRITE_BIG"); case 0x2b: return ("LOCATE"); case 0x34: return ("READ_POSITION"); case 0x35: return ("SYNCHRONIZE_CACHE"); case 0x3b: return ("WRITE_BUFFER"); case 0x3c: return ("READ_BUFFER"); case 0x42: return ("READ_SUBCHANNEL"); case 0x43: return ("READ_TOC"); case 0x45: return ("PLAY_10"); case 0x47: return ("PLAY_MSF"); case 0x48: return ("PLAY_TRACK"); case 0x4b: return ("PAUSE"); case 0x51: return ("READ_DISK_INFO"); case 0x52: return ("READ_TRACK_INFO"); case 0x53: return ("RESERVE_TRACK"); case 0x54: return ("SEND_OPC_INFO"); case 0x55: return ("MODE_SELECT_BIG"); case 0x58: return ("REPAIR_TRACK"); case 0x59: return ("READ_MASTER_CUE"); case 0x5a: return ("MODE_SENSE_BIG"); case 0x5b: return ("CLOSE_TRACK/SESSION"); case 0x5c: return ("READ_BUFFER_CAPACITY"); case 0x5d: return ("SEND_CUE_SHEET"); case 0x96: return ("SERVICE_ACTION_IN"); case 0xa1: return ("BLANK_CMD"); case 0xa3: return ("SEND_KEY"); case 0xa4: return ("REPORT_KEY"); case 0xa5: return ("PLAY_12"); case 0xa6: return ("LOAD_UNLOAD"); case 0xad: return ("READ_DVD_STRUCTURE"); case 0xb4: return ("PLAY_CD"); case 0xbb: return ("SET_SPEED"); case 0xbd: return ("MECH_STATUS"); case 0xbe: return ("READ_CD"); case 0xff: return ("POLL_DSC"); } } else { switch (request->u.ata.command) { case 0x00: switch (request->u.ata.feature) { case 0x00: return ("NOP FLUSHQUEUE"); case 0x01: return ("NOP AUTOPOLL"); } return ("NOP"); case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR"); case 0x06: switch (request->u.ata.feature) { case 0x01: return ("DSM TRIM"); } return "DSM"; case 0x08: return ("DEVICE_RESET"); case 0x20: return ("READ"); case 0x24: return ("READ48"); case 0x25: return ("READ_DMA48"); case 0x26: return ("READ_DMA_QUEUED48"); case 0x27: return ("READ_NATIVE_MAX_ADDRESS48"); case 0x29: return ("READ_MUL48"); case 0x2a: return ("READ_STREAM_DMA48"); case 0x2b: return ("READ_STREAM48"); case 0x2f: return ("READ_LOG_EXT"); case 0x30: return ("WRITE"); case 0x34: return ("WRITE48"); case 0x35: return ("WRITE_DMA48"); case 0x36: return ("WRITE_DMA_QUEUED48"); case 0x37: return ("SET_MAX_ADDRESS48"); case 0x39: return ("WRITE_MUL48"); case 0x3a: return ("WRITE_STREAM_DMA48"); case 0x3b: return ("WRITE_STREAM48"); case 0x3d: return ("WRITE_DMA_FUA48"); case 0x3e: return ("WRITE_DMA_QUEUED_FUA48"); case 0x3f: return ("WRITE_LOG_EXT"); case 0x40: return ("READ_VERIFY"); case 0x42: return ("READ_VERIFY48"); case 0x45: switch (request->u.ata.feature) { case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO"); case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED"); } return "WRITE_UNCORRECTABLE48"; case 0x51: return ("CONFIGURE_STREAM"); case 0x60: return ("READ_FPDMA_QUEUED"); 
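/*
 * 0x60/0x61 are the SATA NCQ commands (READ/WRITE FPDMA QUEUED).
 * As a sketch, a table-driven lookup could replace the simple cases
 * (hypothetical, not part of this driver):
 *
 *	static const struct { uint8_t cmd; const char *name; } names[] = {
 *		{ 0x60, "READ_FPDMA_QUEUED" },
 *		{ 0x61, "WRITE_FPDMA_QUEUED" },
 *	};
 *	for (i = 0; i < nitems(names); i++)
 *		if (names[i].cmd == request->u.ata.command)
 *			return (names[i].name);
 *
 * but a switch keeps the feature-qualified opcodes (0x00, 0x06, 0x45,
 * 0xb0, 0xef) straightforward to express.
 */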
case 0x61: return ("WRITE_FPDMA_QUEUED"); case 0x63: return ("NCQ_NON_DATA"); case 0x64: return ("SEND_FPDMA_QUEUED"); case 0x65: return ("RECEIVE_FPDMA_QUEUED"); case 0x67: if (request->u.ata.feature == 0xec) return ("SEP_ATTN IDENTIFY"); switch (request->u.ata.lba) { case 0x00: return ("SEP_ATTN READ BUFFER"); case 0x02: return ("SEP_ATTN RECEIVE DIAGNOSTIC RESULTS"); case 0x80: return ("SEP_ATTN WRITE BUFFER"); case 0x82: return ("SEP_ATTN SEND DIAGNOSTIC"); } return ("SEP_ATTN"); case 0x70: return ("SEEK"); case 0x87: return ("CFA_TRANSLATE_SECTOR"); case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC"); case 0x92: return ("DOWNLOAD_MICROCODE"); case 0xa0: return ("PACKET"); case 0xa1: return ("ATAPI_IDENTIFY"); case 0xa2: return ("SERVICE"); case 0xb0: switch(request->u.ata.feature) { case 0xd0: return ("SMART READ ATTR VALUES"); case 0xd1: return ("SMART READ ATTR THRESHOLDS"); case 0xd3: return ("SMART SAVE ATTR VALUES"); case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE"); case 0xd5: return ("SMART READ LOG DATA"); case 0xd8: return ("SMART ENABLE OPERATION"); case 0xd9: return ("SMART DISABLE OPERATION"); case 0xda: return ("SMART RETURN STATUS"); } return ("SMART"); case 0xb1: return ("DEVICE CONFIGURATION"); case 0xc0: return ("CFA_ERASE"); case 0xc4: return ("READ_MUL"); case 0xc5: return ("WRITE_MUL"); case 0xc6: return ("SET_MULTI"); case 0xc7: return ("READ_DMA_QUEUED"); case 0xc8: return ("READ_DMA"); case 0xca: return ("WRITE_DMA"); case 0xcc: return ("WRITE_DMA_QUEUED"); case 0xcd: return ("CFA_WRITE_MULTIPLE_WITHOUT_ERASE"); case 0xce: return ("WRITE_MUL_FUA48"); case 0xd1: return ("CHECK_MEDIA_CARD_TYPE"); case 0xda: return ("GET_MEDIA_STATUS"); case 0xde: return ("MEDIA_LOCK"); case 0xdf: return ("MEDIA_UNLOCK"); case 0xe0: return ("STANDBY_IMMEDIATE"); case 0xe1: return ("IDLE_IMMEDIATE"); case 0xe2: return ("STANDBY"); case 0xe3: return ("IDLE"); case 0xe4: return ("READ_BUFFER/PM"); case 0xe5: return ("CHECK_POWER_MODE"); case 0xe6: return ("SLEEP"); case 0xe7: return ("FLUSHCACHE"); case 0xe8: return ("WRITE_PM"); case 0xea: return ("FLUSHCACHE48"); case 0xec: return ("ATA_IDENTIFY"); case 0xed: return ("MEDIA_EJECT"); case 0xef: switch (request->u.ata.feature) { case 0x03: return ("SETFEATURES SET TRANSFER MODE"); case 0x02: return ("SETFEATURES ENABLE WCACHE"); case 0x82: return ("SETFEATURES DISABLE WCACHE"); case 0x06: return ("SETFEATURES ENABLE PUIS"); case 0x86: return ("SETFEATURES DISABLE PUIS"); case 0x07: return ("SETFEATURES SPIN-UP"); case 0x10: return ("SETFEATURES ENABLE SATA FEATURE"); case 0x90: return ("SETFEATURES DISABLE SATA FEATURE"); case 0xaa: return ("SETFEATURES ENABLE RCACHE"); case 0x55: return ("SETFEATURES DISABLE RCACHE"); case 0x5d: return ("SETFEATURES ENABLE RELIRQ"); case 0xdd: return ("SETFEATURES DISABLE RELIRQ"); case 0x5e: return ("SETFEATURES ENABLE SRVIRQ"); case 0xde: return ("SETFEATURES DISABLE SRVIRQ"); } return "SETFEATURES"; case 0xf1: return ("SECURITY_SET_PASSWORD"); case 0xf2: return ("SECURITY_UNLOCK"); case 0xf3: return ("SECURITY_ERASE_PREPARE"); case 0xf4: return ("SECURITY_ERASE_UNIT"); case 0xf5: return ("SECURITY_FREEZE_LOCK"); case 0xf6: return ("SECURITY_DISABLE_PASSWORD"); case 0xf8: return ("READ_NATIVE_MAX_ADDRESS"); case 0xf9: return ("SET_MAX_ADDRESS"); } } sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command); return (buffer); } const char * ata_mode2str(int mode) { switch (mode) { case -1: return "UNSUPPORTED"; case ATA_PIO0: return "PIO0"; case ATA_PIO1: return "PIO1"; case ATA_PIO2: return 
"PIO2"; case ATA_PIO3: return "PIO3"; case ATA_PIO4: return "PIO4"; case ATA_WDMA0: return "WDMA0"; case ATA_WDMA1: return "WDMA1"; case ATA_WDMA2: return "WDMA2"; case ATA_UDMA0: return "UDMA16"; case ATA_UDMA1: return "UDMA25"; case ATA_UDMA2: return "UDMA33"; case ATA_UDMA3: return "UDMA40"; case ATA_UDMA4: return "UDMA66"; case ATA_UDMA5: return "UDMA100"; case ATA_UDMA6: return "UDMA133"; case ATA_SA150: return "SATA150"; case ATA_SA300: return "SATA300"; case ATA_SA600: return "SATA600"; default: if (mode & ATA_DMA_MASK) return "BIOSDMA"; else return "BIOSPIO"; } } static int ata_str2mode(const char *str) { if (!strcasecmp(str, "PIO0")) return (ATA_PIO0); if (!strcasecmp(str, "PIO1")) return (ATA_PIO1); if (!strcasecmp(str, "PIO2")) return (ATA_PIO2); if (!strcasecmp(str, "PIO3")) return (ATA_PIO3); if (!strcasecmp(str, "PIO4")) return (ATA_PIO4); if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0); if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1); if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2); if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1); if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1); if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6); if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6); return (-1); } int ata_atapi(device_t dev, int target) { struct ata_channel *ch = device_get_softc(dev); return (ch->devices & (ATA_ATAPI_MASTER << target)); } void ata_timeout(struct ata_request *request) { struct ata_channel *ch; ch = device_get_softc(request->parent); //request->flags |= ATA_R_DEBUG; ATA_DEBUG_RQ(request, "timeout"); /* * If we have an ATA_ACTIVE request running, we flag the request * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly. * Also, NULL out the running request so we wont loose the race with * an eventual interrupt arriving late. 
*/ if (ch->state == ATA_ACTIVE) { request->flags |= ATA_R_TIMEOUT; if (ch->dma.unload) ch->dma.unload(request); ch->running = NULL; ch->state = ATA_IDLE; ata_cam_end_transaction(ch->dev, request); } mtx_unlock(&ch->state_mtx); } static void ata_cam_begin_transaction(device_t dev, union ccb *ccb) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; request = &ch->request; bzero(request, sizeof(*request)); /* setup request */ request->dev = NULL; request->parent = dev; request->unit = ccb->ccb_h.target_id; if (ccb->ccb_h.func_code == XPT_ATA_IO) { request->data = ccb->ataio.data_ptr; request->bytecount = ccb->ataio.dxfer_len; request->u.ata.command = ccb->ataio.cmd.command; request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) | (uint16_t)ccb->ataio.cmd.features; request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) | (uint16_t)ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { request->flags |= ATA_R_48BIT; request->u.ata.lba = ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) | ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) | ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24); } else { request->u.ata.lba = ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24); } request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) | ((uint64_t)ccb->ataio.cmd.lba_mid << 8) | (uint64_t)ccb->ataio.cmd.lba_low; if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT) request->flags |= ATA_R_NEEDRESULT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ccb->ataio.cmd.flags & CAM_ATAIO_DMA) request->flags |= ATA_R_DMA; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) request->flags |= ATA_R_READ; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) request->flags |= ATA_R_WRITE; if (ccb->ataio.cmd.command == ATA_READ_MUL || ccb->ataio.cmd.command == ATA_READ_MUL48 || ccb->ataio.cmd.command == ATA_WRITE_MUL || ccb->ataio.cmd.command == ATA_WRITE_MUL48) { request->transfersize = min(request->bytecount, ch->curr[ccb->ccb_h.target_id].bytecount); } else request->transfersize = min(request->bytecount, 512); } else { request->data = ccb->csio.data_ptr; request->bytecount = ccb->csio.dxfer_len; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
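/*
 * The CDB lives in a union: with CAM_CDB_POINTER set,
 * cdb_io.cdb_ptr points at an external buffer, otherwise the bytes
 * are stored inline in cdb_io.cdb_bytes, so the flag selects which
 * member is valid for the copy.
 */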
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, request->u.atapi.ccb, ccb->csio.cdb_len); request->flags |= ATA_R_ATAPI; if (ch->curr[ccb->ccb_h.target_id].atapi == 16) request->flags |= ATA_R_ATAPI16; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) request->flags |= ATA_R_DMA; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) request->flags |= ATA_R_READ; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) request->flags |= ATA_R_WRITE; request->transfersize = min(request->bytecount, ch->curr[ccb->ccb_h.target_id].bytecount); } request->retries = 0; request->timeout = (ccb->ccb_h.timeout + 999) / 1000; callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); request->ccb = ccb; request->flags |= ATA_R_DATA_IN_CCB; ch->running = request; ch->state = ATA_ACTIVE; if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; ch->state = ATA_IDLE; ata_cam_end_transaction(dev, request); return; } } static void ata_cam_request_sense(device_t dev, struct ata_request *request) { struct ata_channel *ch = device_get_softc(dev); union ccb *ccb = request->ccb; ch->requestsense = 1; bzero(request, sizeof(*request)); request->dev = NULL; request->parent = dev; request->unit = ccb->ccb_h.target_id; request->data = (void *)&ccb->csio.sense_data; request->bytecount = ccb->csio.sense_len; request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE; request->u.atapi.ccb[4] = ccb->csio.sense_len; request->flags |= ATA_R_ATAPI; if (ch->curr[ccb->ccb_h.target_id].atapi == 16) request->flags |= ATA_R_ATAPI16; if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) request->flags |= ATA_R_DMA; request->flags |= ATA_R_READ; request->transfersize = min(request->bytecount, ch->curr[ccb->ccb_h.target_id].bytecount); request->retries = 0; request->timeout = (ccb->ccb_h.timeout + 999) / 1000; callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); request->ccb = ccb; ch->running = request; ch->state = ATA_ACTIVE; if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { ch->running = NULL; ch->state = ATA_IDLE; ata_cam_end_transaction(dev, request); return; } } static void ata_cam_process_sense(device_t dev, struct ata_request *request) { struct ata_channel *ch = device_get_softc(dev); union ccb *ccb = request->ccb; int fatalerr = 0; ch->requestsense = 0; if (request->flags & ATA_R_TIMEOUT) fatalerr = 1; if ((request->flags & ATA_R_TIMEOUT) == 0 && (request->status & ATA_S_ERROR) == 0 && request->result == 0) { ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ccb); /* Do error recovery if needed. 
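 * Only an ATA_R_TIMEOUT on the REQUEST SENSE itself sets fatalerr
 * here; ata_reinit() then resets the channel and posts AC_BUS_RESET
 * to the XPT.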
*/ if (fatalerr) ata_reinit(dev); } static void ata_cam_end_transaction(device_t dev, struct ata_request *request) { struct ata_channel *ch = device_get_softc(dev); union ccb *ccb = request->ccb; int fatalerr = 0; if (ch->requestsense) { ata_cam_process_sense(dev, request); return; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (request->flags & ATA_R_TIMEOUT) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ; fatalerr = 1; } else if (request->status & ATA_S_ERROR) { if (ccb->ccb_h.func_code == XPT_ATA_IO) { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } else { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } } else if (request->result == ERESTART) ccb->ccb_h.status |= CAM_REQUEUE_REQ; else if (request->result != 0) ccb->ccb_h.status |= CAM_REQ_CMP_ERR; else ccb->ccb_h.status |= CAM_REQ_CMP; if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } if (ccb->ccb_h.func_code == XPT_ATA_IO && ((request->status & ATA_S_ERROR) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) { struct ata_res *res = &ccb->ataio.res; res->status = request->status; res->error = request->error; res->lba_low = request->u.ata.lba; res->lba_mid = request->u.ata.lba >> 8; res->lba_high = request->u.ata.lba >> 16; res->device = request->u.ata.lba >> 24; res->lba_low_exp = request->u.ata.lba >> 24; res->lba_mid_exp = request->u.ata.lba >> 32; res->lba_high_exp = request->u.ata.lba >> 40; res->sector_count = request->u.ata.count; res->sector_count_exp = request->u.ata.count >> 8; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if (ccb->ccb_h.func_code == XPT_ATA_IO) { ccb->ataio.resid = ccb->ataio.dxfer_len - request->donecount; } else { ccb->csio.resid = ccb->csio.dxfer_len - request->donecount; } } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) ata_cam_request_sense(dev, request); else xpt_done(ccb); /* Do error recovery if needed. */ if (fatalerr) ata_reinit(dev); } static int ata_check_ids(device_t dev, union ccb *ccb) { struct ata_channel *ch = device_get_softc(dev); if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } /* * It's a programming error to see AUXILIARY register requests. 
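 * This SIM does not implement the extended AUXILIARY register, so
 * consumers must not submit XPT_ATA_IO CCBs with ATA_FLAG_AUX set;
 * the KASSERT() below only fires on INVARIANTS kernels.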
*/ KASSERT(ccb->ccb_h.func_code != XPT_ATA_IO || ((ccb->ataio.ata_flags & ATA_FLAG_AUX) == 0), ("AUX register unsupported")); return (0); } static void ataaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct ata_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ata_channel *)cam_sim_softc(sim); dev = ch->dev; switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ata_check_ids(dev, ccb)) return; if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << ccb->ccb_h.target_id)) == 0) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } if (ch->running) device_printf(dev, "already running!\n"); if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { struct ata_res *res = &ccb->ataio.res; bzero(res, sizeof(*res)); if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) { res->lba_high = 0; res->lba_mid = 0; } else { res->lba_high = 0xeb; res->lba_mid = 0x14; } ccb->ccb_h.status = CAM_REQ_CMP; break; } ata_cam_begin_transaction(dev, ccb); return; - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ata_cam_device *d; if (ata_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (ch->flags & ATA_SATA) { if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) { if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { d->mode = ATA_SETMODE(ch->dev, ccb->ccb_h.target_id, cts->xport_specific.sata.mode); } else d->mode = cts->xport_specific.sata.mode; } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; } else { if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) { if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { d->mode = ATA_SETMODE(ch->dev, ccb->ccb_h.target_id, cts->xport_specific.ata.mode); } else d->mode = cts->xport_specific.ata.mode; } if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) d->bytecount = cts->xport_specific.ata.bytecount; if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI) d->atapi = cts->xport_specific.ata.atapi; if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS) d->caps = cts->xport_specific.ata.caps; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ata_cam_device *d; if (ata_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; if (ch->flags & ATA_SATA) { cts->transport = XPORT_SATA; cts->transport_version = 
XPORT_VERSION_UNSPECIFIED; cts->xport_specific.sata.valid = 0; cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { cts->xport_specific.sata.revision = ATA_GETREV(dev, ccb->ccb_h.target_id); if (cts->xport_specific.sata.revision != 0xff) { cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; } cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; } cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; } else { cts->transport = XPORT_ATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->xport_specific.ata.valid = 0; cts->xport_specific.ata.mode = d->mode; cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE; cts->xport_specific.ata.bytecount = d->bytecount; cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { cts->xport_specific.ata.caps = d->caps & CTS_ATA_CAPS_D; if (!(ch->flags & ATA_NO_48BIT_DMA)) cts->xport_specific.ata.caps |= CTS_ATA_CAPS_H_DMA48; cts->xport_specific.ata.caps &= ch->user[ccb->ccb_h.target_id].caps; } else cts->xport_specific.ata.caps = d->caps; cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS; cts->xport_specific.ata.atapi = d->atapi; cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI; } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ata_reinit(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; if (ch->flags & ATA_NO_SLAVE) cpi->max_target = 0; else cpi->max_target = 1; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); if (ch->flags & ATA_SATA) cpi->base_transfer_speed = 150000; else cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "ATA", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); if (ch->flags & ATA_SATA) cpi->transport = XPORT_SATA; else cpi->transport = XPORT_ATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = ch->dma.max_iosize ? 
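/*
 * maxio advertises the largest single I/O the SIM accepts; when the
 * DMA backend declares no limit of its own we fall back to DFLTPHYS
 * (64KB).
 */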
ch->dma.max_iosize : DFLTPHYS; if (device_get_devclass(device_get_parent(parent)) == devclass_find("pci")) { cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); } cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void atapoll(struct cam_sim *sim) { struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim); ata_interrupt_locked(ch); } /* * module handling */ static int ata_module_event_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: return 0; case MOD_UNLOAD: return 0; default: return EOPNOTSUPP; } } static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL }; DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(ata, 1); MODULE_DEPEND(ata, cam, 1, 1, 1); Index: stable/11/sys/dev/buslogic/bt.c =================================================================== --- stable/11/sys/dev/buslogic/bt.c (revision 314380) +++ stable/11/sys/dev/buslogic/bt.c (revision 314381) @@ -1,2393 +1,2389 @@ /*- * Generic driver for the BusLogic MultiMaster SCSI host adapters * Product specific probe and attach routines can be found in: * sys/dev/buslogic/bt_isa.c BT-54X, BT-445 cards * sys/dev/buslogic/bt_mca.c BT-64X, SDC3211B, SDC3211F * sys/dev/buslogic/bt_eisa.c BT-74X, BT-75x cards, SDC3222F * sys/dev/buslogic/bt_pci.c BT-946, BT-948, BT-956, BT-958 cards * * Copyright (c) 1998, 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Special thanks to Leonard N. Zubkoff for writing such a complete and * well documented Mylex/BusLogic MultiMaster driver for Linux. Support * in this driver for the wide range of MultiMaster controllers and * firmware revisions, with their otherwise undocumented quirks, would not * have been possible without his efforts.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* MailBox Management functions */ static __inline void btnextinbox(struct bt_softc *bt); static __inline void btnextoutbox(struct bt_softc *bt); static __inline void btnextinbox(struct bt_softc *bt) { if (bt->cur_inbox == bt->last_inbox) bt->cur_inbox = bt->in_boxes; else bt->cur_inbox++; } static __inline void btnextoutbox(struct bt_softc *bt) { if (bt->cur_outbox == bt->last_outbox) bt->cur_outbox = bt->out_boxes; else bt->cur_outbox++; } /* CCB Management functions */ static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct bt_ccb* btccbptov(struct bt_softc *bt, u_int32_t ccb_addr); static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct scsi_sense_data* btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb) { return (bt->bt_ccb_physbase + (u_int32_t)((caddr_t)bccb - (caddr_t)bt->bt_ccb_array)); } static __inline struct bt_ccb * btccbptov(struct bt_softc *bt, u_int32_t ccb_addr) { return (bt->bt_ccb_array + ((struct bt_ccb*)(uintptr_t)ccb_addr - (struct bt_ccb*)(uintptr_t)bt->bt_ccb_physbase)); } static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers_physbase + (index * sizeof(struct scsi_sense_data))); } static __inline struct scsi_sense_data * btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers + index); } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt); static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb); static void btallocccbs(struct bt_softc *bt); static bus_dmamap_callback_t btexecuteccb; static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code); static void bt_intr_locked(struct bt_softc *bt); /* Host adapter command functions */ static int btreset(struct bt_softc* bt, int hard_reset); /* Initialization functions */ static int btinitmboxes(struct bt_softc *bt); static bus_dmamap_callback_t btmapmboxes; static bus_dmamap_callback_t btmapccbs; static bus_dmamap_callback_t btmapsgs; /* Transfer Negotiation Functions */ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_bccb_ptr spriv_ptr0 #define ccb_bt_ptr spriv_ptr1 static void btaction(struct cam_sim *sim, union ccb *ccb); static void btpoll(struct cam_sim *sim); /* Our timeout handler */ static void bttimeout(void *arg); /* * XXX * Do our own re-probe protection until a configuration * manager can do it for us. This ensures that we don't * reprobe a card already found by the EISA or PCI probes. */ struct bt_isa_port bt_isa_ports[] = { { 0x130, 0, 4 }, { 0x134, 0, 5 }, { 0x230, 0, 2 }, { 0x234, 0, 3 }, { 0x330, 0, 0 }, { 0x334, 0, 1 } }; /* * I/O ports listed in the order enumerated by the * card for certain op codes.
*/ u_int16_t bt_board_ports[] = { 0x330, 0x334, 0x230, 0x234, 0x130, 0x134 }; /* Exported functions */ void bt_init_softc(device_t dev, struct resource *port, struct resource *irq, struct resource *drq) { struct bt_softc *bt = device_get_softc(dev); SLIST_INIT(&bt->free_bt_ccbs); LIST_INIT(&bt->pending_ccbs); SLIST_INIT(&bt->sg_maps); bt->dev = dev; bt->port = port; bt->irq = irq; bt->drq = drq; mtx_init(&bt->lock, "bt", NULL, MTX_DEF); } void bt_free_softc(device_t dev) { struct bt_softc *bt = device_get_softc(dev); switch (bt->init_level) { default: case 11: bus_dmamap_unload(bt->sense_dmat, bt->sense_dmamap); case 10: bus_dmamem_free(bt->sense_dmat, bt->sense_buffers, bt->sense_dmamap); case 9: bus_dma_tag_destroy(bt->sense_dmat); case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&bt->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&bt->sg_maps, links); bus_dmamap_unload(bt->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(bt->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(bt->sg_dmat); } case 7: bus_dmamap_unload(bt->ccb_dmat, bt->ccb_dmamap); /* FALLTHROUGH */ case 6: bus_dmamem_free(bt->ccb_dmat, bt->bt_ccb_array, bt->ccb_dmamap); /* FALLTHROUGH */ case 5: bus_dma_tag_destroy(bt->ccb_dmat); /* FALLTHROUGH */ case 4: bus_dmamap_unload(bt->mailbox_dmat, bt->mailbox_dmamap); /* FALLTHROUGH */ case 3: bus_dmamem_free(bt->mailbox_dmat, bt->in_boxes, bt->mailbox_dmamap); /* FALLTHROUGH */ case 2: bus_dma_tag_destroy(bt->buffer_dmat); /* FALLTHROUGH */ case 1: bus_dma_tag_destroy(bt->mailbox_dmat); /* FALLTHROUGH */ case 0: break; } mtx_destroy(&bt->lock); } int bt_port_probe(device_t dev, struct bt_probe_info *info) { struct bt_softc *bt = device_get_softc(dev); config_data_t config_data; int error; /* See if there is really a card present */ if (bt_probe(dev) || bt_fetch_adapter_info(dev)) return(1); /* * Determine our IRQ, and DMA settings and * export them to the configuration system. */ mtx_lock(&bt->lock); error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { printf("bt_port_probe: Could not determine IRQ or DMA " "settings for adapter.\n"); return (1); } if (bt->model[0] == '5') { /* DMA settings only make sense for ISA cards */ switch (config_data.dma_chan) { case DMA_CHAN_5: info->drq = 5; break; case DMA_CHAN_6: info->drq = 6; break; case DMA_CHAN_7: info->drq = 7; break; default: printf("bt_port_probe: Invalid DMA setting " "detected for adapter.\n"); return (1); } } else { /* VL/EISA/PCI DMA */ info->drq = -1; } switch (config_data.irq) { case IRQ_9: case IRQ_10: case IRQ_11: case IRQ_12: case IRQ_14: case IRQ_15: info->irq = ffs(config_data.irq) + 8; break; default: printf("bt_port_probe: Invalid IRQ setting %x" "detected for adapter.\n", config_data.irq); return (1); } return (0); } /* * Probe the adapter and verify that the card is a BusLogic. */ int bt_probe(device_t dev) { struct bt_softc *bt = device_get_softc(dev); esetup_info_data_t esetup_info; u_int status; u_int intstat; u_int geometry; int error; u_int8_t param; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. 
*/ status = bt_inb(bt, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY| STATUS_REG_RSVD|CMD_INVALID)) != 0) { if (bootverbose) device_printf(dev, "Failed Status Reg Test - %x\n", status); return (ENXIO); } intstat = bt_inb(bt, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { device_printf(dev, "Failed Intstat Reg Test\n"); return (ENXIO); } geometry = bt_inb(bt, GEOMETRY_REG); if (geometry == 0xFF) { if (bootverbose) device_printf(dev, "Failed Geometry Reg Test\n"); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and attempt to fetch the extended setup * information. This should filter out all 1542 cards. */ mtx_lock(&bt->lock); if ((error = btreset(bt, /*hard_reset*/TRUE)) != 0) { mtx_unlock(&bt->lock); if (bootverbose) device_printf(dev, "Failed Reset\n"); return (ENXIO); } param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, ¶m, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { return (ENXIO); } return (0); } /* * Pull the boards setup information and record it in our softc. */ int bt_fetch_adapter_info(device_t dev) { struct bt_softc *bt = device_get_softc(dev); board_id_data_t board_id; esetup_info_data_t esetup_info; config_data_t config_data; int error; u_int8_t length_param; /* First record the firmware version */ mtx_lock(&bt->lock); error = bt_cmd(bt, BOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (u_int8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get Board Info\n"); return (error); } bt->firmware_ver[0] = board_id.firmware_rev_major; bt->firmware_ver[1] = '.'; bt->firmware_ver[2] = board_id.firmware_rev_minor; bt->firmware_ver[3] = '\0'; /* * Depending on the firmware major and minor version, * we may be able to fetch additional minor version info. */ if (bt->firmware_ver[0] > '0') { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_3DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[3], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 3rd Digit\n"); return (error); } if (bt->firmware_ver[3] == ' ') bt->firmware_ver[3] = '\0'; bt->firmware_ver[4] = '\0'; } if (strcmp(bt->firmware_ver, "3.3") >= 0) { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_4DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[4], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 4th Digit\n"); return (error); } if (bt->firmware_ver[4] == ' ') bt->firmware_ver[4] = '\0'; bt->firmware_ver[5] = '\0'; } /* * Some boards do not handle the "recently documented" * Inquire Board Model Number command correctly or do not give * exact information. Use the Firmware and Extended Setup * information in these cases to come up with the right answer. 
* The major firmware revision number indicates: * * 5.xx BusLogic "W" Series Host Adapters: * BT-948/958/958D * 4.xx BusLogic "C" Series Host Adapters: * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF * 3.xx BusLogic "S" Series Host Adapters: * BT-747S/747D/757S/757D/445S/545S/542D * BT-542B/742A (revision H) * 2.xx BusLogic "A" Series Host Adapters: * BT-542B/742A (revision G and below) * 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter */ length_param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, &length_param, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); return (error); } bt->bios_addr = esetup_info.bios_addr << 12; bt->mailbox_addrlimit = BUS_SPACE_MAXADDR; if (esetup_info.bus_type == 'A' && bt->firmware_ver[0] == '2') { snprintf(bt->model, sizeof(bt->model), "542B"); } else if (esetup_info.bus_type == 'E' && bt->firmware_ver[0] == '2') { /* * The 742A seems to object if its mailboxes are * allocated above the 16MB mark. */ bt->mailbox_addrlimit = BUS_SPACE_MAXADDR_24BIT; snprintf(bt->model, sizeof(bt->model), "742A"); } else if (esetup_info.bus_type == 'E' && bt->firmware_ver[0] == '0') { /* AMI FastDisk EISA Series 441 0.x */ snprintf(bt->model, sizeof(bt->model), "747A"); } else { ha_model_data_t model_data; int i; length_param = sizeof(model_data); error = bt_cmd(bt, BOP_INQUIRE_MODEL, &length_param, 1, (u_int8_t*)&model_data, sizeof(model_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed Inquire " "Model Number\n"); return (error); } for (i = 0; i < sizeof(model_data.ascii_model); i++) { bt->model[i] = model_data.ascii_model[i]; if (bt->model[i] == ' ') break; } bt->model[i] = '\0'; } bt->level_trigger_ints = esetup_info.level_trigger_ints ? 1 : 0; /* SG element limits */ bt->max_sg = esetup_info.max_sg; /* Set feature flags */ bt->wide_bus = esetup_info.wide_bus; bt->diff_bus = esetup_info.diff_bus; bt->ultra_scsi = esetup_info.ultra_scsi; if ((bt->firmware_ver[0] == '5') || (bt->firmware_ver[0] == '4' && bt->wide_bus)) bt->extended_lun = TRUE; bt->strict_rr = (strcmp(bt->firmware_ver, "3.31") >= 0); bt->extended_trans = ((bt_inb(bt, GEOMETRY_REG) & EXTENDED_TRANSLATION) != 0); /* * Determine max CCB count and whether tagged queuing is * available based on controller type. Tagged queuing * only works on 'W' series adapters, 'C' series adapters * with firmware of rev 4.42 and higher, and 'S' series * adapters with firmware of rev 3.35 and higher. The * maximum CCB counts are as follows: * * 192 BT-948/958/958D * 100 BT-946C/956C/956CD/747C/757C/757CD/445C * 50 BT-545C/540CF * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A */ if (bt->firmware_ver[0] == '5') { bt->max_ccbs = 192; bt->tag_capable = TRUE; } else if (bt->firmware_ver[0] == '4') { if (bt->model[0] == '5') bt->max_ccbs = 50; else bt->max_ccbs = 100; bt->tag_capable = (strcmp(bt->firmware_ver, "4.22") >= 0); } else { bt->max_ccbs = 30; if (bt->firmware_ver[0] == '3' && (strcmp(bt->firmware_ver, "3.35") >= 0)) bt->tag_capable = TRUE; else bt->tag_capable = FALSE; } if (bt->tag_capable != FALSE) bt->tags_permitted = ALL_TARGETS; /* Determine Sync/Wide/Disc settings */ if (bt->firmware_ver[0] >= '4') { auto_scsi_data_t auto_scsi_data; fetch_lram_params_t fetch_lram_params; int error; /* * These settings are stored in the * AutoSCSI data in LRAM of 'W' and 'C' * adapters. 
*/ fetch_lram_params.offset = AUTO_SCSI_BYTE_OFFSET; fetch_lram_params.response_len = sizeof(auto_scsi_data); error = bt_cmd(bt, BOP_FETCH_LRAM, (u_int8_t*)&fetch_lram_params, sizeof(fetch_lram_params), (u_int8_t*)&auto_scsi_data, sizeof(auto_scsi_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Auto SCSI Info\n"); return (error); } bt->disc_permitted = auto_scsi_data.low_disc_permitted | (auto_scsi_data.high_disc_permitted << 8); bt->sync_permitted = auto_scsi_data.low_sync_permitted | (auto_scsi_data.high_sync_permitted << 8); bt->fast_permitted = auto_scsi_data.low_fast_permitted | (auto_scsi_data.high_fast_permitted << 8); bt->ultra_permitted = auto_scsi_data.low_ultra_permitted | (auto_scsi_data.high_ultra_permitted << 8); bt->wide_permitted = auto_scsi_data.low_wide_permitted | (auto_scsi_data.high_wide_permitted << 8); if (bt->ultra_scsi == FALSE) bt->ultra_permitted = 0; if (bt->wide_bus == FALSE) bt->wide_permitted = 0; } else { /* * 'S' and 'A' series have this information in the setup * information structure. */ setup_data_t setup_info; length_param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { mtx_unlock(&bt->lock); device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Setup Info\n"); return (error); } if (setup_info.initiate_sync != 0) { bt->sync_permitted = ALL_TARGETS; if (bt->model[0] == '7') { if (esetup_info.sync_neg10MB != 0) bt->fast_permitted = ALL_TARGETS; if (strcmp(bt->model, "757") == 0) bt->wide_permitted = ALL_TARGETS; } } bt->disc_permitted = ALL_TARGETS; } /* We need as many mailboxes as we can have ccbs */ bt->num_boxes = bt->max_ccbs; /* Determine our SCSI ID */ error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); mtx_unlock(&bt->lock); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get Config\n"); return (error); } bt->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int bt_init(device_t dev) { struct bt_softc *bt = device_get_softc(dev); /* Announce the Adapter */ device_printf(dev, "BT-%s FW Rev. %s ", bt->model, bt->firmware_ver); if (bt->ultra_scsi != 0) printf("Ultra "); if (bt->wide_bus != 0) printf("Wide "); else printf("Narrow "); if (bt->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", bt->scsi_id, bt->max_ccbs); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. 
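 * This is the only tag used for transient per-request payload
 * mappings; the mailbox, CCB and S/G tags created below back
 * long-lived allocations that stay permanently loaded.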
*/ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ DFLTPHYS, /* nsegments */ BT_NSEG, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &bt->lock, &bt->buffer_dmat) != 0) { goto error_exit; } bt->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ bt->mailbox_addrlimit, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->mailbox_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(bt->mailbox_dmat, (void **)&bt->out_boxes, BUS_DMA_NOWAIT, &bt->mailbox_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->mailbox_dmat, bt->mailbox_dmamap, bt->out_boxes, bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), btmapmboxes, bt, /*flags*/0); bt->init_level++; bt->in_boxes = (bt_mbox_in_t *)&bt->out_boxes[bt->num_boxes]; mtx_lock(&bt->lock); btinitmboxes(bt); mtx_unlock(&bt->lock); /* DMA tag for our ccb structures */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ bt->max_ccbs * sizeof(struct bt_ccb), /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->ccb_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(bt->ccb_dmat, (void **)&bt->bt_ccb_array, BUS_DMA_NOWAIT, &bt->ccb_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->ccb_dmat, bt->ccb_dmamap, bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb), btmapccbs, bt, /*flags*/0); bt->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create( /* parent */ bt->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ PAGE_SIZE, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ NULL, /* lockarg */ NULL, &bt->sg_dmat) != 0) { goto error_exit; } bt->init_level++; /* Perform initial CCB allocation */ bzero(bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb)); btallocccbs(bt); if (bt->num_ccbs == 0) { device_printf(dev, "bt_init - Unable to allocate initial ccbs\n"); goto error_exit; } /* * Note that we are going and return (to attach) */ return 0; error_exit: return (ENXIO); } int bt_attach(device_t dev) { struct bt_softc *bt = device_get_softc(dev); int tagged_dev_openings; struct cam_devq *devq; int error; /* * We reserve 1 ccb for error recovery, so don't * tell the XPT about it. */ if (bt->tag_capable != 0) tagged_dev_openings = bt->max_ccbs - 1; else tagged_dev_openings = 0; /* * Create the device queue for our SIM. 
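 * Sized to max_ccbs - 1 because one CCB is held back for error
 * recovery (see the comment above), so the XPT can never queue
 * more transactions than we can field.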
*/ devq = cam_simq_alloc(bt->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ bt->sim = cam_sim_alloc(btaction, btpoll, "bt", bt, device_get_unit(bt->dev), &bt->lock, 2, tagged_dev_openings, devq); if (bt->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } mtx_lock(&bt->lock); if (xpt_bus_register(bt->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(bt->sim, /*free_devq*/TRUE); mtx_unlock(&bt->lock); return (ENXIO); } if (xpt_create_path(&bt->path, /*periph*/NULL, cam_sim_path(bt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(bt->sim)); cam_sim_free(bt->sim, /*free_devq*/TRUE); mtx_unlock(&bt->lock); return (ENXIO); } mtx_unlock(&bt->lock); /* * Setup interrupt. */ error = bus_setup_intr(dev, bt->irq, INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, bt_intr, bt, &bt->ih); if (error) { device_printf(dev, "bus_setup_intr() failed: %d\n", error); return (error); } return (0); } int bt_check_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (bt_isa_ports[i].addr == ioport) { if (bt_isa_ports[i].probed != 0) return (1); else { return (0); } } } return (1); } void bt_mark_probed_bio(isa_compat_io_t port) { if (port < BIO_DISABLED) bt_mark_probed_iop(bt_board_ports[port]); } void bt_mark_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (ioport == bt_isa_ports[i].addr) { bt_isa_ports[i].probed = 1; break; } } } void bt_find_probe_range(int ioport, int *port_index, int *max_port_index) { if (ioport > 0) { int i; for (i = 0;i < BT_NUM_ISAPORTS; i++) if (ioport <= bt_isa_ports[i].addr) break; if ((i >= BT_NUM_ISAPORTS) || (ioport != bt_isa_ports[i].addr)) { printf( "bt_find_probe_range: Invalid baseport of 0x%x specified.\n" "bt_find_probe_range: Nearest valid baseport is 0x%x.\n" "bt_find_probe_range: Failing probe.\n", ioport, (i < BT_NUM_ISAPORTS) ? 
bt_isa_ports[i].addr : bt_isa_ports[BT_NUM_ISAPORTS - 1].addr); *port_index = *max_port_index = -1; return; } *port_index = *max_port_index = bt_isa_ports[i].bio; } else { *port_index = 0; *max_port_index = BT_NUM_ISAPORTS - 1; } } int bt_iop_from_bio(isa_compat_io_t bio_index) { if (bio_index < BT_NUM_ISAPORTS) return (bt_board_ports[bio_index]); return (-1); } static void btallocccbs(struct bt_softc *bt) { struct bt_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; bt_sg_t *segs; int newcount; int i; if (bt->num_ccbs >= bt->max_ccbs) /* Can't allocate any more */ return; next_ccb = &bt->bt_ccb_array[bt->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) goto error_exit; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(bt->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); goto error_exit; } SLIST_INSERT_HEAD(&bt->sg_maps, sg_map, links); bus_dmamap_load(bt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, btmapsgs, bt, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (BT_NSEG * sizeof(bt_sg_t))); for (i = 0; bt->num_ccbs < bt->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = BCCB_FREE; callout_init_mtx(&next_ccb->timer, &bt->lock, 0); error = bus_dmamap_create(bt->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, next_ccb, links); segs += BT_NSEG; physaddr += (BT_NSEG * sizeof(bt_sg_t)); next_ccb++; bt->num_ccbs++; } /* Reserve a CCB for error recovery */ if (bt->recovery_bccb == NULL) { bt->recovery_bccb = SLIST_FIRST(&bt->free_bt_ccbs); SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); } if (SLIST_FIRST(&bt->free_bt_ccbs) != NULL) return; error_exit: device_printf(bt->dev, "Can't malloc BCCBs\n"); } static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb) { if (!dumping) mtx_assert(&bt->lock, MA_OWNED); if ((bccb->flags & BCCB_ACTIVE) != 0) LIST_REMOVE(&bccb->ccb->ccb_h, sim_links.le); if (bt->resource_shortage != 0 && (bccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { bccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; bt->resource_shortage = FALSE; } bccb->flags = BCCB_FREE; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, bccb, links); bt->active_ccbs--; } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt) { struct bt_ccb* bccb; if (!dumping) mtx_assert(&bt->lock, MA_OWNED); if ((bccb = SLIST_FIRST(&bt->free_bt_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } else { btallocccbs(bt); bccb = SLIST_FIRST(&bt->free_bt_ccbs); if (bccb != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } } return (bccb); } static void btaction(struct cam_sim *sim, union ccb *ccb) { struct bt_softc *bt; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("btaction\n")); bt = (struct bt_softc *)cam_sim_softc(sim); mtx_assert(&bt->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct bt_ccb *bccb; struct bt_hccb *hccb; /* * get a bccb to use. 
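 * If the free list is empty even after btgetccb() has asked
 * btallocccbs() to grow it, we note the resource shortage, freeze
 * the SIM queue and have CAM requeue the CCB; btfreeccb() lifts
 * the freeze when a CCB is returned.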
*/ if ((bccb = btgetccb(bt)) == NULL) { bt->resource_shortage = TRUE; xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &bccb->hccb; /* * So we can find the BCCB when an abort is requested */ bccb->ccb = ccb; ccb->ccb_h.ccb_bccb_ptr = bccb; ccb->ccb_h.ccb_bt_ptr = bt; /* * Put all the arguments for the xfer in the bccb */ hccb->target_id = ccb->ccb_h.target_id; hccb->target_lun = ccb->ccb_h.target_lun; hccb->btstat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; int error; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = INITIATOR_CCB_WRESID; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; hccb->dataout =(ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_TAG_ACTION_VALID) != 0 && ccb->csio.tag_action != CAM_TAG_ACTION_NONE) { hccb->tag_enable = TRUE; hccb->tag_type = (ccb->csio.tag_action & 0x3); } else { hccb->tag_enable = FALSE; hccb->tag_type = 0; } if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* If need be, bounce our sense buffer */ if (bt->sense_buffers != NULL) { hccb->sense_addr = btsensepaddr(bt, bccb); } else { hccb->sense_addr = vtophys(&csio->sense_data); } /* * If we have any data to send with this command, * map it into bus space. */ error = bus_dmamap_load_ccb( bt->buffer_dmat, bccb->dmamap, ccb, btexecuteccb, bccb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the * controller queue until our mapping is * returned. 
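*
* (Editor's note: bus_dmamap_load_ccb() returns EINPROGRESS when the
* mapping had to be deferred for lack of DMA resources; the callback,
* btexecuteccb() here, then runs later, once resources free up.
* Setting CAM_RELEASE_SIMQ on the CCB below makes the queue thaw
* automatically when this request finally completes.)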
*/ xpt_freeze_simq(bt->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; hccb->tag_enable = FALSE; hccb->tag_type = 0; btexecuteccb(bccb, NULL, 0, 0); } break; } - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((bt->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((bt->tags_permitted & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if ((bt->ultra_permitted & target_mask) != 0) spi->sync_period = 12; else if ((bt->fast_permitted & target_mask) != 0) spi->sync_period = 25; else if ((bt->sync_permitted & target_mask) != 0) spi->sync_period = 50; else spi->sync_period = 0; if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if ((bt->wide_permitted & target_mask) != 0) spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; } else { btfetchtransinfo(bt, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (bt->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { btreset(bt, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (bt->tag_capable != 0) cpi->hba_inquiry |= PI_TAG_ABLE; if (bt->wide_bus != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = bt->wide_bus ? 
15 : 7; cpi->max_lun = 7; cpi->initiator_id = bt->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "BusLogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void btexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; if (error != 0) { if (error != EFBIG) device_printf(bt->dev, "Unexpected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } btfreeccb(bt, bccb); xpt_done(ccb); return; } if (nseg != 0) { bt_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = bccb->sg_list; while (dm_segs < end_seg) { sg->len = dm_segs->ds_len; sg->addr = dm_segs->ds_addr; sg++; dm_segs++; } if (nseg > 1) { bccb->hccb.opcode = INITIATOR_SG_CCB_WRESID; bccb->hccb.data_len = sizeof(bt_sg_t) * nseg; bccb->hccb.data_addr = bccb->sg_list_phys; } else { bccb->hccb.data_len = bccb->sg_list->len; bccb->hccb.data_addr = bccb->sg_list->addr; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); } else { bccb->hccb.opcode = INITIATOR_CCB; bccb->hccb.data_len = 0; bccb->hccb.data_addr = 0; } /* * Last chance to check whether this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); xpt_done(ccb); return; } bccb->flags = BCCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&bt->pending_ccbs, &ccb->ccb_h, sim_links.le); callout_reset_sbt(&bccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0, bttimeout, bccb, 0); /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bccb); if (bt->cur_outbox->action_code != BMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. If the controller is * hung, one of the pending transactions will * time out, causing us to start recovery operations.
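*
* (Editor's note: the out-mailboxes form a ring. btinitmboxes()
* points cur_outbox at out_boxes[0] and last_outbox at the final
* entry; btnextoutbox(), defined elsewhere in this driver, advances
* with wrap-around, roughly:
*
*	if (bt->cur_outbox == bt->last_outbox)
*		bt->cur_outbox = bt->out_boxes;
*	else
*		bt->cur_outbox++;
*
* A mailbox whose action_code is not BMBO_FREE is still owned by the
* adapter, hence the check above.)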
*/ device_printf(bt->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!\n", bt->active_ccbs, bt->max_ccbs); callout_stop(&bccb->timer); if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); bt->resource_shortage = TRUE; xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); } void bt_intr(void *arg) { struct bt_softc *bt; bt = arg; mtx_lock(&bt->lock); bt_intr_locked(bt); mtx_unlock(&bt->lock); } void bt_intr_locked(struct bt_softc *bt) { u_int intstat; while (((intstat = bt_inb(bt, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { bt->latched_status = bt_inb(bt, STATUS_REG); bt->command_cmp = TRUE; } bt_outb(bt, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (bt->cur_inbox->comp_code != BMBI_FREE) { btdone(bt, btccbptov(bt, bt->cur_inbox->ccb_addr), bt->cur_inbox->comp_code); bt->cur_inbox->comp_code = BMBI_FREE; btnextinbox(bt); } } if ((intstat & SCSI_BUS_RESET) != 0) { btreset(bt, /*hardreset*/FALSE); } } } static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = bccb->ccb; csio = &bccb->ccb->csio; if ((bccb->flags & BCCB_ACTIVE) == 0) { device_printf(bt->dev, "btdone - Attempt to free non-active BCCB %p\n", (void *)bccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); } if (bccb == bt->recovery_bccb) { /* * The recovery BCCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCB's action. * We also reinstate timeouts for all remaining pending * CCBs. */ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(bt->sim), bccb->hccb.target_id, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; if (pending_bccb->hccb.target_id == bccb->hccb.target_id) { pending_bccb->hccb.btstat = BTSTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); btdone(bt, pending_bccb, BMBI_ERROR); } else { callout_reset_sbt(&pending_bccb->timer, SBT_1MS * ccb_h->timeout, 0, bttimeout, pending_bccb, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } device_printf(bt->dev, "No longer in timeout\n"); return; } callout_stop(&bccb->timer); switch (comp_code) { case BMBI_FREE: device_printf(bt->dev, "btdone - CCB completed with free status!\n"); break; case BMBI_NOT_FOUND: device_printf(bt->dev, "btdone - CCB Abort failed to find CCB\n"); break; case BMBI_ABORT: case BMBI_ERROR: if (bootverbose) { printf("bt: ccb %p - error %x occurred. " "btstat = %x, sdstat = %x\n", (void *)bccb, comp_code, bccb->hccb.btstat, bccb->hccb.sdstat); } /* An error occurred */ switch(bccb->hccb.btstat) { case BTSTAT_DATARUN_ERROR: if (bccb->hccb.data_len == 0) { /* * At least firmware 4.22 does this * for a QUEUE FULL condition.
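*
* (Editor's note: reporting SCSI_STATUS_QUEUE_FULL here lets the
* generic CAM error-recovery code treat the failure as a tag-depth
* problem and throttle the number of transactions it keeps open,
* rather than failing the request outright.)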
*/ bccb->hccb.sdstat = SCSI_STATUS_QUEUE_FULL; } else if (bccb->hccb.data_len < 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ case BTSTAT_NOERROR: case BTSTAT_LINKED_CMD_COMPLETE: case BTSTAT_LINKED_CMD_FLAG_COMPLETE: case BTSTAT_DATAUNDERUN_ERROR: csio->scsi_status = bccb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* Bounce sense back if necessary */ if (bt->sense_buffers != NULL) { csio->sense_data = *btsensevaddr(bt, bccb); } break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } csio->resid = bccb->hccb.data_len; break; case BTSTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case BTSTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case BTSTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case BTSTAT_INVALID_ACTION_CODE: panic("%s: Invalid Action code", bt_name(bt)); break; case BTSTAT_INVALID_OPCODE: panic("%s: Invalid CCB Opcode", bt_name(bt)); break; case BTSTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", bt_name(bt)); break; case BTSTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", bt_name(bt)); break; case BTSTAT_AUTOSENSE_FAILED: csio->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case BTSTAT_TAGGED_MSG_REJECTED: { struct ccb_trans_settings neg; struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi; neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = 0; xpt_print_path(csio->ccb_h.path); printf("refuses tagged commands. Performing " "non-tagged I/O\n"); xpt_setup_ccb(&neg.ccb_h, csio->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, csio->ccb_h.path, &neg); bt->tags_permitted &= ~(0x01 << csio->ccb_h.target_id); csio->ccb_h.status = CAM_MSG_REJECT_REC; break; } case BTSTAT_UNSUPPORTED_MSG_RECEIVED: /* * XXX You would think that this is * a recoverable error... Hmmm. */ csio->ccb_h.status = CAM_REQ_CMP_ERR; break; case BTSTAT_HA_SOFTWARE_ERROR: case BTSTAT_HA_WATCHDOG_ERROR: case BTSTAT_HARDWARE_FAILURE: /* Hardware reset ??? Can we recover ???
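* (Editor's note: probably not from here -- CAM_NO_HBA below reports
* the controller itself as failed, the most conservative answer for
* a firmware, watchdog, or hardware fault.)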
*/ csio->ccb_h.status = CAM_NO_HBA; break; case BTSTAT_TARGET_IGNORED_ATN: case BTSTAT_OTHER_SCSI_BUS_RESET: case BTSTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case BTSTAT_HA_BDR: if ((bccb->flags & BCCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; case BTSTAT_INVALID_RECONNECT: case BTSTAT_ABORT_QUEUE_GENERATED: csio->ccb_h.status = CAM_REQ_TERMIO; break; case BTSTAT_SCSI_PERROR_DETECTED: csio->ccb_h.status = CAM_UNCOR_PARITY; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; case BMBI_OK: /* All completed without incident */ ccb->ccb_h.status |= CAM_REQ_CMP; if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; } } static int btreset(struct bt_softc* bt, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; u_int8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; bt_outb(bt, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { if (bootverbose) device_printf(bt->dev, "btreset - Diagnostic Active failed to " "assert. status = 0x%x\n", status); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: btreset - Diagnostic Active failed to drop. " "status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { device_printf(bt->dev, "btreset - Host adapter failed to come ready. " "status = 0x%x\n", status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { device_printf(bt->dev, "btreset - Adapter failed diagnostics\n"); if ((status & DATAIN_REG_READY) != 0) device_printf(bt->dev, "btreset - Host Adapter Error code = 0x%x\n", bt_inb(bt, DATAIN_REG)); return (ENXIO); } /* If we've allocated mailboxes, initialize them */ if (bt->init_level > 4) btinitmboxes(bt); /* If we've attached to the XPT, tell it about the event */ if (bt->path != NULL) xpt_async(AC_BUS_RESET, bt->path, NULL); /* * Perform completion processing for all outstanding CCBs. */ while ((ccb_h = LIST_FIRST(&bt->pending_ccbs)) != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; pending_bccb->hccb.btstat = BTSTAT_HA_SCSI_BUS_RESET; btdone(bt, pending_bccb, BMBI_ERROR); } return (0); } /* * Send a command to the adapter. 
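*
* Editor's sketch of a typical caller, mirroring btfetchtransinfo()
* further down in this file:
*
*	u_int8_t param = sizeof(setup_info);
*	error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1,
*	    (u_int8_t *)&setup_info, sizeof(setup_info),
*	    DEFAULT_CMD_TIMEOUT);
*
* cmd_timeout is counted in 100us polls (one DELAY(100) per loop
* iteration below), so 10000 ticks correspond to one second.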
*/ int bt_cmd(struct bt_softc *bt, bt_op_t opcode, u_int8_t *params, u_int param_len, u_int8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; bt->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. */ timeout = 100000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & HA_READY) != 0 && (status & CMD_REG_BUSY) == 0) break; /* * Throw away any pending data which may be * left over from earlier commands that we * timed out on. */ if ((status & DATAIN_REG_READY) != 0) (void)bt_inb(bt, DATAIN_REG); DELAY(100); } if (timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout waiting for adapter ready, " "status = 0x%x\n", status); return (ETIMEDOUT); } /* * Send the opcode followed by any necessary parameter bytes. */ bt_outb(bt, COMMAND_REG, opcode); /* * Wait up to 1 sec. for each byte of the * parameter list to be sent. */ timeout = 10000; while (param_len && --timeout) { DELAY(100); status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { saved_status = status; cmd_complete = 1; break; } if (bt->command_cmp != 0) { saved_status = bt->latched_status; cmd_complete = 1; break; } if ((status & DATAIN_REG_READY) != 0) break; if ((status & CMD_REG_BUSY) == 0) { bt_outb(bt, COMMAND_REG, *params++); param_len--; timeout = 10000; } } if (timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout sending parameters, " "status = 0x%x\n", status); cmd_complete = 1; saved_status = status; error = ETIMEDOUT; } /* * Wait for the command to complete. */ while (cmd_complete == 0 && --cmd_timeout) { status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); /* * It may be that this command was issued with * controller interrupts disabled. We'll never * get to our command if an incoming mailbox * interrupt is pending, so take care of completed * mailbox commands by calling our interrupt handler. */ if ((intstat & (INTR_PENDING|IMB_LOADED)) == (INTR_PENDING|IMB_LOADED)) bt_intr_locked(bt); if (bt->command_cmp != 0) { /* * Our interrupt handler saw CMD_COMPLETE * status before we did. */ cmd_complete = 1; saved_status = bt->latched_status; } else if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { /* * Our poll (in case interrupts are blocked) * saw the CMD_COMPLETE interrupt. */ cmd_complete = 1; saved_status = status; } else if (opcode == BOP_MODIFY_IO_ADDR && (status & CMD_REG_BUSY) == 0) { /* * The BOP_MODIFY_IO_ADDR does not issue a CMD_COMPLETE, * but it should update the status register. So, we * consider this command complete when the CMD_REG_BUSY * status clears. */ saved_status = status; cmd_complete = 1; } else if ((status & DATAIN_REG_READY) != 0) { u_int8_t data; data = bt_inb(bt, DATAIN_REG); if (reply_len < reply_buf_size) { *reply_data++ = data; } else { device_printf(bt->dev, "bt_cmd - Discarded reply data byte " "for opcode 0x%x\n", opcode); } /* * Reset timeout to ensure at least a second * between response bytes.
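* (10000 remaining polls x DELAY(100) = 10000 x 100us = 1 second.)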
*/ cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } else if ((opcode == BOP_FETCH_LRAM) && (status & HA_READY) != 0) { saved_status = status; cmd_complete = 1; } DELAY(100); } if (cmd_timeout == 0) { device_printf(bt->dev, "bt_cmd: Timeout waiting for command (%x) " "to complete.\n", opcode); device_printf(bt->dev, "status = 0x%x, intstat = 0x%x, " "rlen %d\n", status, intstat, reply_len); error = (ETIMEDOUT); } /* * Clear any pending interrupts. */ bt_intr_locked(bt); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset. */ if (bootverbose) device_printf(bt->dev, "Invalid Command 0x%x\n", opcode); DELAY(1000); status = bt_inb(bt, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) { btreset(bt, /*hard_reset*/FALSE); } return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ return (EMSGSIZE); } /* We were successful */ return (0); } static int btinitmboxes(struct bt_softc *bt) { init_32b_mbox_params_t init_mbox; int error; bzero(bt->in_boxes, sizeof(bt_mbox_in_t) * bt->num_boxes); bzero(bt->out_boxes, sizeof(bt_mbox_out_t) * bt->num_boxes); bt->cur_inbox = bt->in_boxes; bt->last_inbox = bt->in_boxes + bt->num_boxes - 1; bt->cur_outbox = bt->out_boxes; bt->last_outbox = bt->out_boxes + bt->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_boxes = bt->num_boxes; init_mbox.base_addr[0] = bt->mailbox_physbase & 0xFF; init_mbox.base_addr[1] = (bt->mailbox_physbase >> 8) & 0xFF; init_mbox.base_addr[2] = (bt->mailbox_physbase >> 16) & 0xFF; init_mbox.base_addr[3] = (bt->mailbox_physbase >> 24) & 0xFF; error = bt_cmd(bt, BOP_INITIALIZE_32BMBOX, (u_int8_t *)&init_mbox, /*parmlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) printf("btinitmboxes: Initialization command failed\n"); else if (bt->strict_rr != 0) { /* * If the controller supports * strict round robin mode, * enable it */ u_int8_t param; param = 0; error = bt_cmd(bt, BOP_ENABLE_STRICT_RR, &param, 1, /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("btinitmboxes: Unable to enable strict RR\n"); error = 0; } else if (bootverbose) { device_printf(bt->dev, "Using Strict Round Robin Mailbox Mode\n"); } } return (error); } /* * Update the XPT's idea of the negotiated transfer * parameters for a particular target. */ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings *cts) { setup_data_t setup_info; u_int target; u_int targ_offset; u_int targ_mask; u_int sync_period; u_int sync_offset; u_int bus_width; int error; u_int8_t param; targ_syncinfo_t sync_info; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; spi->valid = 0; scsi->valid = 0; target = cts->ccb_h.target_id; targ_offset = (target & 0x7); targ_mask = (0x01 << targ_offset); /* * Inquire Setup Information. This command retrieves the * Wide negotiation status for recent adapters as well as * the sync info for older models.
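*
* (Editor's note: the single parameter byte passed below is the
* number of reply bytes the caller expects, which is why param is
* loaded with sizeof(setup_info) before the bt_cmd() call.)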
*/ param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(bt->dev, "btfetchtransinfo - Inquire Setup Info Failed %x\n", error); return; } sync_info = (target < 8) ? setup_info.low_syncinfo[targ_offset] : setup_info.high_syncinfo[targ_offset]; if (sync_info.sync == 0) sync_offset = 0; else sync_offset = sync_info.offset; bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (strcmp(bt->firmware_ver, "5.06L") >= 0) { u_int wide_active; wide_active = (target < 8) ? (setup_info.low_wide_active & targ_mask) : (setup_info.high_wide_active & targ_mask); if (wide_active) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else if ((bt->wide_permitted & targ_mask) != 0) { struct ccb_getdev cgd; /* * Prior to rev 5.06L, wide status isn't provided, * so we "guess" that wide transfers are in effect * if the user settings allow for wide and the inquiry * data for the device indicates that it can handle * wide transfers. */ xpt_setup_ccb(&cgd.ccb_h, cts->ccb_h.path, /*priority*/1); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (cgd.inq_data.flags & SID_WBus16) != 0) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } if (bt->firmware_ver[0] >= '3') { /* * For adapters that can do fast or ultra speeds, * use the more exact Target Sync Information command. */ target_sync_info_data_t sync_info; param = sizeof(sync_info); error = bt_cmd(bt, BOP_TARG_SYNC_INFO, &param, /*paramlen*/1, (u_int8_t*)&sync_info, sizeof(sync_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(bt->dev, "btfetchtransinfo - Inquire Sync " "Info Failed 0x%x\n", error); return; } sync_period = sync_info.sync_rate[target] * 100; } else { sync_period = 2000 + (500 * sync_info.period); } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->sync_period = sync_period; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->sync_offset = sync_offset; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts); } static void btmapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->mailbox_physbase = segs->ds_addr; } static void btmapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->bt_ccb_physbase = segs->ds_addr; } static void btmapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; SLIST_FIRST(&bt->sg_maps)->sg_physaddr = segs->ds_addr; } static void btpoll(struct cam_sim *sim) { bt_intr_locked(cam_sim_softc(sim)); } void bttimeout(void *arg) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; mtx_assert(&bt->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out\n", (void *)bccb); if ((bccb->flags & BCCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out CCB already completed\n", (void *)bccb); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list
of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((bccb->flags & BCCB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((bccb->flags & BCCB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(bt->sim, /*count*/1); bccb->flags |= BCCB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; callout_stop(&pending_bccb->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } if ((bccb->flags & BCCB_DEVICE_RESET) != 0 || bt->cur_outbox->action_code != BMBO_FREE || ((bccb->hccb.tag_enable == TRUE) && (bt->firmware_ver[0] < '5'))) { /* * Try a full host adapter/SCSI bus reset. * We do this only if we have already attempted * to clear the condition with a BDR, or we cannot * attempt a BDR for lack of mailbox resources * or because of faulty firmware. It turns out * that firmware versions prior to 5.xx treat BDRs * as untagged commands that cannot be sent until * all outstanding tagged commands have been processed. * This makes it somewhat difficult to use a BDR to * clear up a problem with an uncompleted tagged command. */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; btreset(bt, /*hardreset*/TRUE); device_printf(bt->dev, "No longer in timeout\n"); } else { /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ bccb->flags |= BCCB_DEVICE_RESET; callout_reset(&bccb->timer, 2 * hz, bttimeout, bccb); bt->recovery_bccb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ bt->recovery_bccb->hccb.datain = TRUE; bt->recovery_bccb->hccb.dataout = TRUE; bt->recovery_bccb->hccb.btstat = 0; bt->recovery_bccb->hccb.sdstat = 0; bt->recovery_bccb->hccb.target_id = ccb->ccb_h.target_id; /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bt->recovery_bccb); bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); } } MODULE_VERSION(bt, 1); MODULE_DEPEND(bt, cam, 1, 1, 1); Index: stable/11/sys/dev/hptmv/entry.c =================================================================== --- stable/11/sys/dev/hptmv/entry.c (revision 314380) +++ stable/11/sys/dev/hptmv/entry.c (revision 314381) @@ -1,2992 +1,2988 @@ /* * Copyright (c) 2004-2005 HighPoint Technologies, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __KERNEL__ #define __KERNEL__ #endif #include #include #include #include #ifdef DEBUG #ifdef DEBUG_LEVEL int hpt_dbg_level = DEBUG_LEVEL; #else int hpt_dbg_level = 0; #endif #endif #define MV_ERROR printf /* * CAM SIM entry points */ static int hpt_probe (device_t dev); static void launch_worker_thread(void); static int hpt_attach(device_t dev); static int hpt_detach(device_t dev); static int hpt_shutdown(device_t dev); static void hpt_poll(struct cam_sim *sim); static void hpt_intr(void *arg); static void hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hpt_action(struct cam_sim *sim, union ccb *ccb); static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hpt_probe), DEVMETHOD(device_attach, hpt_attach), DEVMETHOD(device_detach, hpt_detach), DEVMETHOD(device_shutdown, hpt_shutdown), DEVMETHOD_END }; static driver_t hpt_pci_driver = { __str(PROC_DIR_NAME), driver_methods, sizeof(IAL_ADAPTER_T) }; static devclass_t hpt_devclass; #define __DRIVER_MODULE(p1, p2, p3, p4, p5, p6) DRIVER_MODULE(p1, p2, p3, p4, p5, p6) __DRIVER_MODULE(PROC_DIR_NAME, pci, hpt_pci_driver, hpt_devclass, 0, 0); MODULE_DEPEND(PROC_DIR_NAME, cam, 1, 1, 1); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static void SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev); static void HPTLIBAPI OsSendCommand (_VBUS_ARG union ccb * ccb); static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd); static void ccb_done(union ccb *ccb); static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb); static void hpt_intr_locked(IAL_ADAPTER_T *pAdapter); static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void handleEdmaError(_VBUS_ARG PCommand pCmd); static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static int fResetActiveCommands(PVBus _vbus_p); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter); static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter); static void hptmv_handle_event_disconnect(void *data); static void hptmv_handle_event_connect(void *data); static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum); static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel); static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel); static int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical); static MV_BOOLEAN 
CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum, MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags, MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct); static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2); #define ccb_ccb_ptr spriv_ptr0 #define ccb_adapter ccb_h.spriv_ptr1 static struct sx hptmv_list_lock; SX_SYSINIT(hptmv_list_lock, &hptmv_list_lock, "hptmv list"); IAL_ADAPTER_T *gIal_Adapter = 0; IAL_ADAPTER_T *pCurAdapter = 0; static MV_SATA_CHANNEL gMvSataChannels[MAX_VBUS][MV_SATA_CHANNELS_NUM]; typedef struct st_HPT_DPC { IAL_ADAPTER_T *pAdapter; void (*dpc)(IAL_ADAPTER_T *, void *, UCHAR); void *arg; UCHAR flags; } ST_HPT_DPC; #define MAX_DPC 16 UCHAR DPC_Request_Nums = 0; static ST_HPT_DPC DpcQueue[MAX_DPC]; static int DpcQueue_First=0; static int DpcQueue_Last = 0; static struct mtx DpcQueue_Lock; MTX_SYSINIT(hpmtv_dpc_lock, &DpcQueue_Lock, "hptmv dpc", MTX_DEF); char DRIVER_VERSION[] = "v1.16"; /******************************************************************************* * Name: hptmv_free_channel * * Description: free allocated queues for the given channel * * Parameters: pAdapter - pointer to the RR18xx controller this * channel is connected to. * channelNum - channel number. * ******************************************************************************/ static void hptmv_free_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { HPT_ASSERT(channelNum < MV_SATA_CHANNELS_NUM); pAdapter->mvSataAdapter.sataChannel[channelNum] = NULL; } static void failDevice(PVDevice pVDev) { PVBus _vbus_p = pVDev->pVBus; IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt; pVDev->u.disk.df_on_line = 0; pVDev->vf_online = 0; if (pVDev->pfnDeviceFailed) CallWhenIdle(_VBUS_P (DPC_PROC)pVDev->pfnDeviceFailed, pVDev); fNotifyGUI(ET_DEVICE_REMOVED, pVDev); #ifndef FOR_DEMO if (pAdapter->ver_601==2 && !pAdapter->beeping) { pAdapter->beeping = 1; BeepOn(pAdapter->mvSataAdapter.adapterIoBaseAddress); set_fail_led(&pAdapter->mvSataAdapter, pVDev->u.disk.mv->channelNumber, 1); } #endif } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel); static void handleEdmaError(_VBUS_ARG PCommand pCmd) { PDevice pDevice = &pCmd->pVDevice->u.disk; MV_SATA_ADAPTER * pSataAdapter = pDevice->mv->mvSataAdapter; if (!pDevice->df_on_line) { KdPrint(("Device is offline")); pCmd->Result = RETURN_BAD_DEVICE; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } if (pCmd->RetryCount++>5) { hpt_printk(("too many retries on channel(%d)\n", pDevice->mv->channelNumber)); failed: failDevice(pCmd->pVDevice); pCmd->Result = RETURN_IDE_ERROR; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* reset the channel and retry the command */ if (MvSataResetChannel(pSataAdapter, pDevice->mv->channelNumber)) goto failed; fNotifyGUI(ET_DEVICE_ERROR, Map2pVDevice(pDevice)); hpt_printk(("Retry on channel(%d)\n", pDevice->mv->channelNumber)); fDeviceSendCommand(_VBUS_P pCmd); } /**************************************************************** * Name: hptmv_init_channel * * Description: allocate request and response queues for the EDMA of the * given channel and set other fields. * * Parameters: * pAdapter - pointer to the emulated adapter data structure * channelNum - channel number.
* Return: 0 on success, non-zero on failure ****************************************************************/ static int hptmv_init_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_CHANNEL *pMvSataChannel; dma_addr_t req_dma_addr; dma_addr_t rsp_dma_addr; if (channelNum >= MV_SATA_CHANNELS_NUM) { MV_ERROR("RR18xx[%d]: Bad channelNum=%d", pAdapter->mvSataAdapter.adapterId, channelNum); return -1; } pMvSataChannel = &gMvSataChannels[pAdapter->mvSataAdapter.adapterId][channelNum]; pAdapter->mvSataAdapter.sataChannel[channelNum] = pMvSataChannel; pMvSataChannel->channelNumber = channelNum; pMvSataChannel->lba48Address = MV_FALSE; pMvSataChannel->maxReadTransfer = MV_FALSE; pMvSataChannel->requestQueue = (struct mvDmaRequestQueueEntry *) (pAdapter->requestsArrayBaseAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE)); req_dma_addr = pAdapter->requestsArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_REQUEST_QUEUE_SIZE); KdPrint(("requestQueue addr is 0x%llX", (HPT_U64)(ULONG_PTR)req_dma_addr)); /* check the 1K alignment of the request queue*/ if (req_dma_addr & 0x3ff) { MV_ERROR("RR18xx[%d]: request queue allocated isn't 1 K aligned," " dma_addr=%llx channel=%d\n", pAdapter->mvSataAdapter.adapterId, (HPT_U64)(ULONG_PTR)req_dma_addr, channelNum); return -1; } pMvSataChannel->requestQueuePciLowAddress = req_dma_addr; pMvSataChannel->requestQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: request queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->requestQueue)); pMvSataChannel->responseQueue = (struct mvDmaResponseQueueEntry *) (pAdapter->responsesArrayBaseAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE)); rsp_dma_addr = pAdapter->responsesArrayBaseDmaAlignedAddr + (channelNum * MV_EDMA_RESPONSE_QUEUE_SIZE); /* check the 256 alignment of the response queue*/ if (rsp_dma_addr & 0xff) { MV_ERROR("RR18xx[%d,%d]: response queue allocated isn't 256 byte " "aligned, dma_addr=%llx\n", pAdapter->mvSataAdapter.adapterId, channelNum, (HPT_U64)(ULONG_PTR)rsp_dma_addr); return -1; } pMvSataChannel->responseQueuePciLowAddress = rsp_dma_addr; pMvSataChannel->responseQueuePciHiAddress = 0; KdPrint(("RR18xx[%d,%d]: response queue allocated: 0x%p", pAdapter->mvSataAdapter.adapterId, channelNum, pMvSataChannel->responseQueue)); pAdapter->mvChannel[channelNum].online = MV_TRUE; return 0; } /****************************************************************************** * Name: hptmv_parse_identify_results * * Description: this function parses the identify command results, checks * that the connected devices can be accessed by RR18xx EDMA, * and updates the channel structure accordingly. * * Parameters: pMvSataChannel, pointer to the channel data structure. * * Returns: =0 ->success, < 0 ->failure. * ******************************************************************************/ static int hptmv_parse_identify_results(MV_SATA_CHANNEL *pMvSataChannel) { MV_U16 *iden = pMvSataChannel->identifyDevice; /*LBA addressing*/ if (! (iden[IDEN_CAPACITY_1_OFFSET] & 0x200)) { KdPrint(("IAL Error in IDENTIFY info: LBA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "LBA supported")); } /*DMA support*/ if (!
(iden[IDEN_CAPACITY_1_OFFSET] & 0x100)) { KdPrint(("IAL Error in IDENTIFY info: DMA not supported\n")); return -1; } else { KdPrint(("%25s - %s\n", "Capabilities", "DMA supported")); } /* PIO */ if ((iden[IDEN_VALID] & 2) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find PIO mode\n")); return -1; } KdPrint(("%25s - 0x%02x\n", "PIO modes supported", iden[IDEN_PIO_MODE_SPPORTED] & 0xff)); /*UDMA*/ if ((iden[IDEN_VALID] & 4) == 0) { KdPrint(("IAL Error in IDENTIFY info: not able to find UDMA mode\n")); return -1; } /* 48 bit address */ if ((iden[IDEN_SUPPORTED_COMMANDS2] & 0x400)) { KdPrint(("%25s - %s\n", "LBA48 addressing", "supported")); pMvSataChannel->lba48Address = MV_TRUE; } else { KdPrint(("%25s - %s\n", "LBA48 addressing", "Not supported")); pMvSataChannel->lba48Address = MV_FALSE; } return 0; } static void init_vdev_params(IAL_ADAPTER_T *pAdapter, MV_U8 channel) { PVDevice pVDev = &pAdapter->VDevices[channel]; MV_SATA_CHANNEL *pMvSataChannel = pAdapter->mvSataAdapter.sataChannel[channel]; MV_U16_PTR IdentifyData = pMvSataChannel->identifyDevice; pMvSataChannel->outstandingCommands = 0; pVDev->u.disk.mv = pMvSataChannel; pVDev->u.disk.df_on_line = 1; pVDev->u.disk.pVBus = &pAdapter->VBus; pVDev->pVBus = &pAdapter->VBus; #ifdef SUPPORT_48BIT_LBA if (pMvSataChannel->lba48Address == MV_TRUE) pVDev->u.disk.dDeRealCapacity = ((IdentifyData[101]<<16) | IdentifyData[100]) - 1; else #endif if(IdentifyData[53] & 1) { pVDev->u.disk.dDeRealCapacity = (((IdentifyData[58]<<16 | IdentifyData[57]) < (IdentifyData[61]<<16 | IdentifyData[60])) ? (IdentifyData[61]<<16 | IdentifyData[60]) : (IdentifyData[58]<<16 | IdentifyData[57])) - 1; } else pVDev->u.disk.dDeRealCapacity = (IdentifyData[61]<<16 | IdentifyData[60]) - 1; pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxPioModeSupported - MV_ATA_TRANSFER_PIO_0; if (pAdapter->mvChannel[channel].maxUltraDmaModeSupported!=0xFF) { pVDev->u.disk.bDeUsable_Mode = pVDev->u.disk.bDeModeSetting = pAdapter->mvChannel[channel].maxUltraDmaModeSupported - MV_ATA_TRANSFER_UDMA_0 + 8; } } static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plugged) { PVDevice pVDev; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelIndex]; if (!pMvSataChannel) return; if (plugged) { pVDev = &(pAdapter->VDevices[channelIndex]); init_vdev_params(pAdapter, channelIndex); pVDev->VDeviceType = pVDev->u.disk.df_atapi? VD_ATAPI : pVDev->u.disk.df_removable_drive? 
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->VDeviceCapacity = pVDev->u.disk.dDeRealCapacity-SAVE_FOR_RAID_INFO; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; pVDev->vf_online = 1; #ifdef SUPPORT_ARRAY if(pVDev->pParent) { int iMember; for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++) if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev) pVDev->pParent->u.array.pMember[iMember] = NULL; pVDev->pParent = NULL; } #endif fNotifyGUI(ET_DEVICE_PLUGGED,pVDev); fCheckBootable(pVDev); RegisterVDevice(pVDev); #ifndef FOR_DEMO if (pAdapter->beeping) { pAdapter->beeping = 0; BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress); } #endif } else { pVDev = &(pAdapter->VDevices[channelIndex]); failDevice(pVDev); } } static int start_channel(IAL_ADAPTER_T *pAdapter, MV_U8 channelNum) { MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channelNum]; MV_CHANNEL *pChannelInfo = &(pAdapter->mvChannel[channelNum]); MV_U32 udmaMode,pioMode; KdPrint(("RR18xx [%d]: start channel (%d)", pMvSataAdapter->adapterId, channelNum)); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { /* If failed, try again - this is when trying to hardreset a channel */ /* when drive is just spinning up */ StallExec(5000000); /* wait 5 sec before trying again */ if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Hard reset\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* identify device*/ if (mvStorageDevATAIdentifyDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform ATA Identify command\n" , pMvSataAdapter->adapterId, channelNum); return -1; } if (hptmv_parse_identify_results(pMvSataChannel)) { MV_ERROR("RR18xx [%d,%d]: Error in parsing ATA Identify message\n" , pMvSataAdapter->adapterId, channelNum); return -1; } /* mvStorageDevATASetFeatures */ /* Disable 8 bit PIO in case CFA enabled */ if (pMvSataChannel->identifyDevice[86] & 4) { KdPrint(("RR18xx [%d]: Disable 8 bit PIO (CFA enabled) \n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_8_BIT_PIO, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures" " failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Write cache */ #ifdef ENABLE_WRITE_CACHE if (pMvSataChannel->identifyDevice[82] & 0x20) { if (!(pMvSataChannel->identifyDevice[85] & 0x20)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel %d, write cache enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, write cache not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else /* disable write cache */ { if (pMvSataChannel->identifyDevice[85] & 0x20) { KdPrint(("RR18xx [%d]: channel =%d, disable write cache\n", pMvSataAdapter->adapterId, channelNum)); 
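/*
 * Editor's note: word 85 bit 5 (0x20) of the ATA IDENTIFY data
 * reports whether write cache is currently enabled; word 82 bit 5,
 * tested in the #ifdef ENABLE_WRITE_CACHE branch above, advertises
 * support.  SET FEATURES is therefore only issued when the drive's
 * current state differs from the compiled-in policy.
 */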
if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]: channel %d: mvStorageDevATASetFeatures failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, write cache disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif /* Set transfer mode */ KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_SLOW\n", pMvSataAdapter->adapterId)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 1) { pioMode = MV_ATA_TRANSFER_PIO_4; } else if (pMvSataChannel->identifyDevice[IDEN_PIO_MODE_SPPORTED] & 2) { pioMode = MV_ATA_TRANSFER_PIO_3; } else { MV_ERROR("IAL Error in IDENTIFY info: PIO modes 3 and 4 not supported\n"); pioMode = MV_ATA_TRANSFER_PIO_SLOW; } KdPrint(("RR18xx [%d] Set transfer mode XFER_PIO_4\n", pMvSataAdapter->adapterId)); pAdapter->mvChannel[channelNum].maxPioModeSupported = pioMode; if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pioMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } udmaMode = MV_ATA_TRANSFER_UDMA_0; if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x40) { udmaMode = MV_ATA_TRANSFER_UDMA_6; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x20) { udmaMode = MV_ATA_TRANSFER_UDMA_5; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 0x10) { udmaMode = MV_ATA_TRANSFER_UDMA_4; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 8) { udmaMode = MV_ATA_TRANSFER_UDMA_3; } else if (pMvSataChannel->identifyDevice[IDEN_UDMA_MODE] & 4) { udmaMode = MV_ATA_TRANSFER_UDMA_2; } KdPrint(("RR18xx [%d] Set transfer mode XFER_UDMA_%d\n", pMvSataAdapter->adapterId, udmaMode & 0xf)); pChannelInfo->maxUltraDmaModeSupported = udmaMode; /*if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, udmaMode, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; }*/ if (pChannelInfo->maxUltraDmaModeSupported == 0xFF) return TRUE; else do { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_TRANSFER, pChannelInfo->maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) { if (pChannelInfo->maxUltraDmaModeSupported > MV_ATA_TRANSFER_UDMA_0) { if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) { MV_REG_WRITE_BYTE(pMvSataAdapter->adapterIoBaseAddress, pMvSataChannel->eDmaRegsOffset + 0x11c, /* command reg */ MV_ATA_COMMAND_IDLE_IMMEDIATE); mvMicroSecondsDelay(10000); mvSataChannelHardReset(pMvSataAdapter, channelNum); if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; } if (mvSataChannelHardReset(pMvSataAdapter, channelNum) == MV_FALSE) return FALSE; pChannelInfo->maxUltraDmaModeSupported--; continue; } else return FALSE; } break; }while (1); /* Read look ahead */ #ifdef ENABLE_READ_AHEAD if (pMvSataChannel->identifyDevice[82] & 0x40) { if (!(pMvSataChannel->identifyDevice[85] & 0x40)) /* if not enabled by default */ { if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] 
channel %d: Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]: channel=%d, read look ahead enabled\n", pMvSataAdapter->adapterId, channelNum)); } else { KdPrint(("RR18xx [%d]: channel %d, Read Look Ahead not supported\n", pMvSataAdapter->adapterId, channelNum)); } #else { if (pMvSataChannel->identifyDevice[86] & 0x20) { KdPrint(("RR18xx [%d]:channel %d, disable read look ahead\n", pMvSataAdapter->adapterId, channelNum)); if (mvStorageDevATASetFeatures(pMvSataAdapter, channelNum, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d]:channel %d: ATA Set Features failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } KdPrint(("RR18xx [%d]:channel %d, read look ahead disabled\n", pMvSataAdapter->adapterId, channelNum)); } #endif { KdPrint(("RR18xx [%d]: channel %d config EDMA, Non Queued Mode\n", pMvSataAdapter->adapterId, channelNum)); if (mvSataConfigEdmaMode(pMvSataAdapter, channelNum, MV_EDMA_MODE_NOT_QUEUED, 0) == MV_FALSE) { MV_ERROR("RR18xx [%d] channel %d Error: mvSataConfigEdmaMode failed\n", pMvSataAdapter->adapterId, channelNum); return -1; } } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channelNum) == MV_FALSE) { MV_ERROR("RR18xx [%d] Failed to enable DMA, channel=%d\n", pMvSataAdapter->adapterId, channelNum); return -1; } MV_ERROR("RR18xx [%d,%d]: channel started successfully\n", pMvSataAdapter->adapterId, channelNum); #ifndef FOR_DEMO set_fail_led(pMvSataAdapter, channelNum, 0); #endif return 0; } static void hptmv_handle_event(void * data, int flag) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)data; MV_SATA_ADAPTER *pMvSataAdapter = &pAdapter->mvSataAdapter; MV_U8 channelIndex; mtx_assert(&pAdapter->lock, MA_OWNED); /* mvOsSemTake(&pMvSataAdapter->semaphore); */ for (channelIndex = 0; channelIndex < MV_SATA_CHANNELS_NUM; channelIndex++) { switch(pAdapter->sataEvents[channelIndex]) { case SATA_EVENT_CHANNEL_CONNECTED: /* Handle only connects */ if (flag == 1) break; KdPrint(("RR18xx [%d,%d]: new device connected\n", pMvSataAdapter->adapterId, channelIndex)); hptmv_init_channel(pAdapter, channelIndex); if (mvSataConfigureChannel( pMvSataAdapter, channelIndex) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to configure\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { /*mvSataChannelHardReset(pMvSataAdapter, channel);*/ if (start_channel( pAdapter, channelIndex)) { MV_ERROR("RR18xx [%d,%d] Failed to start channel\n", pMvSataAdapter->adapterId, channelIndex); hptmv_free_channel(pAdapter, channelIndex); } else { device_change(pAdapter, channelIndex, TRUE); } } pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_CHANNEL_DISCONNECTED: /* Handle only disconnects */ if (flag == 0) break; KdPrint(("RR18xx [%d,%d]: device disconnected\n", pMvSataAdapter->adapterId, channelIndex)); /* Flush pending commands */ if(pMvSataAdapter->sataChannel[channelIndex]) { _VBUS_INST(&pAdapter->VBus) mvSataFlushDmaQueue (pMvSataAdapter, channelIndex, MV_FLUSH_TYPE_CALLBACK); CheckPendingCall(_VBUS_P0); mvSataRemoveChannel(pMvSataAdapter,channelIndex); hptmv_free_channel(pAdapter, channelIndex); pMvSataAdapter->sataChannel[channelIndex] = NULL; KdPrint(("RR18xx [%d,%d]: channel removed\n", pMvSataAdapter->adapterId, channelIndex)); if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); } else { KdPrint(("RR18xx [%d,%d]: channel already removed!!\n", pMvSataAdapter->adapterId, channelIndex)); }
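/*
 * Editor's note: the teardown order above matters -- the EDMA queue
 * is flushed with MV_FLUSH_TYPE_CALLBACK so that every outstanding
 * command completes through its normal callback before the channel
 * is removed and freed; tearing the channel down first would strand
 * those commands.
 */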
pAdapter->sataEvents[channelIndex] = SATA_EVENT_NO_CHANGE; break; case SATA_EVENT_NO_CHANGE: break; default: break; } } /* mvOsSemRelease(&pMvSataAdapter->semaphore); */ } #define EVENT_CONNECT 1 #define EVENT_DISCONNECT 0 static void hptmv_handle_event_connect(void *data) { hptmv_handle_event (data, 0); } static void hptmv_handle_event_disconnect(void *data) { hptmv_handle_event (data, 1); } static MV_BOOLEAN hptmv_event_notify(MV_SATA_ADAPTER *pMvSataAdapter, MV_EVENT_TYPE eventType, MV_U32 param1, MV_U32 param2) { IAL_ADAPTER_T *pAdapter = pMvSataAdapter->IALData; switch (eventType) { case MV_EVENT_TYPE_SATA_CABLE: { MV_U8 channel = param2; if (param1 == EVENT_CONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_CONNECTED; KdPrint(("RR18xx [%d,%d]: device connected event received\n", pMvSataAdapter->adapterId, channel)); /* Delete previous timers (if multiple drives are connected at the same time) */ callout_reset(&pAdapter->event_timer_connect, 10 * hz, hptmv_handle_event_connect, pAdapter); } else if (param1 == EVENT_DISCONNECT) { pAdapter->sataEvents[channel] = SATA_EVENT_CHANNEL_DISCONNECTED; KdPrint(("RR18xx [%d,%d]: device disconnected event received \n", pMvSataAdapter->adapterId, channel)); device_change(pAdapter, channel, FALSE); /* Delete previous timers (if multiple drives are disconnected at the same time) */ /*callout_reset(&pAdapter->event_timer_disconnect, 10 * hz, hptmv_handle_event_disconnect, pAdapter); */ /*It is not necessary to wait, handle it directly*/ hptmv_handle_event_disconnect(pAdapter); } else { MV_ERROR("RR18xx: illegal value for param1(%d) at " "connect/disconnect event, host=%d\n", param1, pMvSataAdapter->adapterId ); } } break; case MV_EVENT_TYPE_ADAPTER_ERROR: KdPrint(("RR18xx: DEVICE error event received, pci cause " "reg=%x, don't know how to handle this\n", param1)); return MV_TRUE; default: MV_ERROR("RR18xx[%d]: unknown event type (%d)\n", pMvSataAdapter->adapterId, eventType); return MV_FALSE; } return MV_TRUE; } static int hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter) { pAdapter->requestsArrayBaseAddr = (MV_U8 *)contigmalloc(REQUESTS_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->requestsArrayBaseAddr == NULL) { MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA request" " queues\n", pAdapter->mvSataAdapter.adapterId); return -1; } pAdapter->requestsArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->requestsArrayBaseAddr); pAdapter->requestsArrayBaseAlignedAddr = pAdapter->requestsArrayBaseAddr; pAdapter->requestsArrayBaseAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->requestsArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1)); pAdapter->requestsArrayBaseDmaAlignedAddr = pAdapter->requestsArrayBaseDmaAddr; pAdapter->requestsArrayBaseDmaAlignedAddr += MV_EDMA_REQUEST_QUEUE_SIZE; pAdapter->requestsArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_REQUEST_QUEUE_SIZE - 1); if ((pAdapter->requestsArrayBaseDmaAlignedAddr - pAdapter->requestsArrayBaseDmaAddr) != (pAdapter->requestsArrayBaseAlignedAddr - pAdapter->requestsArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Request Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); return -1; } /* response queues */ pAdapter->responsesArrayBaseAddr = (MV_U8 *)contigmalloc(RESPONSES_ARRAY_SIZE, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); if (pAdapter->responsesArrayBaseAddr == NULL) {
MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response" " queues\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr); pAdapter->responsesArrayBaseAlignedAddr = pAdapter->responsesArrayBaseAddr; pAdapter->responsesArrayBaseAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseAlignedAddr = (MV_U8 *) (((ULONG_PTR)pAdapter->responsesArrayBaseAlignedAddr) & ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1)); pAdapter->responsesArrayBaseDmaAlignedAddr = pAdapter->responsesArrayBaseDmaAddr; pAdapter->responsesArrayBaseDmaAlignedAddr += MV_EDMA_RESPONSE_QUEUE_SIZE; pAdapter->responsesArrayBaseDmaAlignedAddr &= ~(ULONG_PTR)(MV_EDMA_RESPONSE_QUEUE_SIZE - 1); if ((pAdapter->responsesArrayBaseDmaAlignedAddr - pAdapter->responsesArrayBaseDmaAddr) != (pAdapter->responsesArrayBaseAlignedAddr - pAdapter->responsesArrayBaseAddr)) { MV_ERROR("RR18xx[%d]: Error in Response Queues Alignment\n", pAdapter->mvSataAdapter.adapterId); contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); return -1; } return 0; } static void hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter) { contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF); contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF); } static PVOID AllocatePRDTable(IAL_ADAPTER_T *pAdapter) { PVOID ret; if (pAdapter->pFreePRDLink) { KdPrint(("pAdapter->pFreePRDLink:%p\n",pAdapter->pFreePRDLink)); ret = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = *(void**)ret; return ret; } return NULL; } static void FreePRDTable(IAL_ADAPTER_T *pAdapter, PVOID PRDTable) { *(void**)PRDTable = pAdapter->pFreePRDLink; pAdapter->pFreePRDLink = PRDTable; } extern PVDevice fGetFirstChild(PVDevice pLogical); extern void fResetBootMark(PVDevice pLogical); static void fRegisterVdevice(IAL_ADAPTER_T *pAdapter) { PVDevice pPhysical, pLogical; PVBus pVBus; int i,j; for(i=0;iVDevices[i]); pLogical = pPhysical; while (pLogical->pParent) pLogical = pLogical->pParent; if (pLogical->vf_online==0) { pPhysical->vf_bootmark = pLogical->vf_bootmark = 0; continue; } if (pLogical->VDeviceType==VD_SPARE || pPhysical!=fGetFirstChild(pLogical)) continue; pVBus = &pAdapter->VBus; if(pVBus) { j=0; while(jpVDevice[j]) j++; if(jpVDevice[j] = pLogical; pLogical->pVBus = pVBus; if (j>0 && pLogical->vf_bootmark) { if (pVBus->pVDevice[0]->vf_bootmark) { fResetBootMark(pLogical); } else { do { pVBus->pVDevice[j] = pVBus->pVDevice[j-1]; } while (--j); pVBus->pVDevice[0] = pLogical; } } } } } } PVDevice GetSpareDisk(_VBUS_ARG PVDevice pArray) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pArray->pVBus->OsExt; LBA_T capacity = LongDiv(pArray->VDeviceCapacity, pArray->u.array.bArnMember-1); LBA_T thiscap, maxcap = MAX_LBA_T; PVDevice pVDevice, pFind = NULL; int i; for(i=0;iVDevices[i]; if(!pVDevice) continue; thiscap = pArray->vf_format_v2? 
pVDevice->u.disk.dDeRealCapacity : pVDevice->VDeviceCapacity; /* find the smallest usable spare disk */ if (pVDevice->VDeviceType==VD_SPARE && pVDevice->u.disk.df_on_line && thiscap < maxcap && thiscap >= capacity) { maxcap = pVDevice->VDeviceCapacity; pFind = pVDevice; } } return pFind; } /****************************************************************** * IO ATA Command *******************************************************************/ int HPTLIBAPI fDeReadWrite(PDevice pDev, ULONG Lba, UCHAR Cmd, void *tmpBuffer) { return mvReadWrite(pDev->mv, Lba, Cmd, tmpBuffer); } void HPTLIBAPI fDeSelectMode(PDevice pDev, UCHAR NewMode) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; UCHAR mvMode; /* 508x don't use MW-DMA? */ if (NewMode>4 && NewMode<8) NewMode = 4; pDev->bDeModeSetting = NewMode; if (NewMode<=4) mvMode = MV_ATA_TRANSFER_PIO_0 + NewMode; else mvMode = MV_ATA_TRANSFER_UDMA_0 + (NewMode-8); /*To fix 88i8030 bug*/ if (mvMode > MV_ATA_TRANSFER_UDMA_0 && mvMode < MV_ATA_TRANSFER_UDMA_4) mvMode = MV_ATA_TRANSFER_UDMA_0; mvSataDisableChannelDma(pSataAdapter, channelIndex); /* Flush pending commands */ mvSataFlushDmaQueue (pSataAdapter, channelIndex, MV_FLUSH_TYPE_NONE); if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_TRANSFER, mvMode, 0, 0, 0) == MV_FALSE) { KdPrint(("channel %d: Set Features failed\n", channelIndex)); } /* Enable EDMA */ if (mvSataEnableChannelDma(pSataAdapter, channelIndex) == MV_FALSE) KdPrint(("Failed to enable DMA, channel=%d", channelIndex)); } int HPTLIBAPI fDeSetTCQ(PDevice pDev, int enable, int depth) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if (enable) { if (pSataChannel->queuedDMA == MV_EDMA_MODE_NOT_QUEUED && (pSataChannel->identifyDevice[IDEN_SUPPORTED_COMMANDS2] & (0x2))) { UCHAR depth = ((pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH]) & 0x1f) + 1; channelInfo->queueDepth = (depth==32)? 
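/*
 * [Editor's note: aside, not part of this commit; the ternary above
 * continues below.] The tag depth comes from the ATA IDENTIFY data: the
 * queue-depth word (word 75, which IDEN_QUEUE_DEPTH is assumed to index)
 * holds (depth - 1) in bits 4:0, so the decode is:
 */
#if 0
    MV_U16 w = pSataChannel->identifyDevice[IDEN_QUEUE_DEPTH];
    UCHAR depth = (w & 0x1f) + 1;    /* 1..32 */
    /* the clamp of 32 to 31 continuing below suggests one tag is
       kept in reserve by the EDMA queued mode */
#endif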
31 : depth; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_QUEUED, depth); ret = 1; } } else { if (pSataChannel->queuedDMA != MV_EDMA_MODE_NOT_QUEUED) { channelInfo->queueDepth = 2; mvSataConfigEdmaMode(pSataAdapter, channelIndex, MV_EDMA_MODE_NOT_QUEUED, 0); ret = 1; } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetNCQ(PDevice pDev, int enable, int depth) { return 0; } int HPTLIBAPI fDeSetWriteCache(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x20))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_WCACHE, 0, 0, 0, 0)) { channelInfo->writeCacheEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } int HPTLIBAPI fDeSetReadAhead(PDevice pDev, int enable) { MV_SATA_CHANNEL *pSataChannel = pDev->mv; MV_SATA_ADAPTER *pSataAdapter = pSataChannel->mvSataAdapter; MV_U8 channelIndex = pSataChannel->channelNumber; IAL_ADAPTER_T *pAdapter = pSataAdapter->IALData; MV_CHANNEL *channelInfo = &(pAdapter->mvChannel[channelIndex]); int dmaActive = pSataChannel->queueCommandsEnabled; int ret = 0; if (dmaActive) { mvSataDisableChannelDma(pSataAdapter, channelIndex); mvSataFlushDmaQueue(pSataAdapter,channelIndex,MV_FLUSH_TYPE_CALLBACK); } if ((pSataChannel->identifyDevice[82] & (0x40))) { if (enable) { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_ENABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_TRUE; ret = 1; } } else { if (mvStorageDevATASetFeatures(pSataAdapter, channelIndex, MV_ATA_SET_FEATURES_DISABLE_RLA, 0, 0, 0, 0)) { channelInfo->readAheadEnabled = MV_FALSE; ret = 1; } } } if (dmaActive) mvSataEnableChannelDma(pSataAdapter,channelIndex); return ret; } #ifdef SUPPORT_ARRAY #define IdeRegisterVDevice fCheckArray #else void IdeRegisterVDevice(PDevice pDev) { PVDevice pVDev = Map2pVDevice(pDev); pVDev->VDeviceType = pDev->df_atapi? VD_ATAPI : pDev->df_removable_drive? 
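/*
 * [Editor's note: sketch only; the expression above continues below.]
 * fDeSetWriteCache() and fDeSetReadAhead() above share one bracket
 * around the SET FEATURES command: quiesce EDMA first, issue the
 * non-UDMA command, then restore EDMA. The generic shape, with "subcmd"
 * standing for whichever MV_ATA_SET_FEATURES_* value applies:
 */
#if 0
    if (dmaActive) {
        mvSataDisableChannelDma(pSataAdapter, channelIndex);
        mvSataFlushDmaQueue(pSataAdapter, channelIndex,
            MV_FLUSH_TYPE_CALLBACK);
    }
    ok = mvStorageDevATASetFeatures(pSataAdapter, channelIndex,
        subcmd, 0, 0, 0, 0);
    if (dmaActive)
        mvSataEnableChannelDma(pSataAdapter, channelIndex);
#endif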
VD_REMOVABLE : VD_SINGLE_DISK; pVDev->vf_online = 1; pVDev->VDeviceCapacity = pDev->dDeRealCapacity; pVDev->pfnSendCommand = pfnSendCommand[pVDev->VDeviceType]; pVDev->pfnDeviceFailed = pfnDeviceFailed[pVDev->VDeviceType]; } #endif static __inline PBUS_DMAMAP dmamap_get(struct IALAdapter * pAdapter) { PBUS_DMAMAP p = pAdapter->pbus_dmamap_list; if (p) pAdapter->pbus_dmamap_list = p-> next; return p; } static __inline void dmamap_put(PBUS_DMAMAP p) { p->next = p->pAdapter->pbus_dmamap_list; p->pAdapter->pbus_dmamap_list = p; } static int num_adapters = 0; static int init_adapter(IAL_ADAPTER_T *pAdapter) { PVBus _vbus_p = &pAdapter->VBus; MV_SATA_ADAPTER *pMvSataAdapter; int i, channel, rid; PVDevice pVDev; mtx_init(&pAdapter->lock, "hptsleeplock", NULL, MTX_DEF); callout_init_mtx(&pAdapter->event_timer_connect, &pAdapter->lock, 0); callout_init_mtx(&pAdapter->event_timer_disconnect, &pAdapter->lock, 0); sx_xlock(&hptmv_list_lock); pAdapter->next = 0; if(gIal_Adapter == 0){ gIal_Adapter = pAdapter; pCurAdapter = gIal_Adapter; } else { pCurAdapter->next = pAdapter; pCurAdapter = pAdapter; } sx_xunlock(&hptmv_list_lock); pAdapter->outstandingCommands = 0; pMvSataAdapter = &(pAdapter->mvSataAdapter); _vbus_p->OsExt = (void *)pAdapter; pMvSataAdapter->IALData = pAdapter; if (bus_dma_tag_create(bus_get_dma_tag(pAdapter->hpt_dev),/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (MAX_SG_DESCRIPTORS-1), /* maxsize */ MAX_SG_DESCRIPTORS, /* nsegments */ 0x10000, /* maxsegsize */ BUS_DMA_WAITOK, /* flags */ busdma_lock_mutex, /* lockfunc */ &pAdapter->lock, /* lockfuncarg */ &pAdapter->io_dma_parent /* tag */)) { return ENXIO; } if (hptmv_allocate_edma_queues(pAdapter)) { MV_ERROR("RR18xx: Failed to allocate memory for EDMA queues\n"); return ENOMEM; } /* also map EPROM address */ rid = 0x10; if (!(pAdapter->mem_res = bus_alloc_resource_any(pAdapter->hpt_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) || !(pMvSataAdapter->adapterIoBaseAddress = rman_get_virtual(pAdapter->mem_res))) { MV_ERROR("RR18xx: Failed to remap memory space\n"); hptmv_free_edma_queues(pAdapter); return ENXIO; } else { KdPrint(("RR18xx: io base address 0x%p\n", pMvSataAdapter->adapterIoBaseAddress)); } pMvSataAdapter->adapterId = num_adapters++; /* get the revision ID */ pMvSataAdapter->pciConfigRevisionId = pci_read_config(pAdapter->hpt_dev, PCIR_REVID, 1); pMvSataAdapter->pciConfigDeviceId = pci_get_device(pAdapter->hpt_dev); /* init RR18xx */ pMvSataAdapter->intCoalThre[0]= 1; pMvSataAdapter->intCoalThre[1]= 1; pMvSataAdapter->intTimeThre[0] = 1; pMvSataAdapter->intTimeThre[1] = 1; pMvSataAdapter->pciCommand = 0x0107E371; pMvSataAdapter->pciSerrMask = 0xd77fe6ul; pMvSataAdapter->pciInterruptMask = 0xd77fe6ul; pMvSataAdapter->mvSataEventNotify = hptmv_event_notify; if (mvSataInitAdapter(pMvSataAdapter) == MV_FALSE) { MV_ERROR("RR18xx[%d]: core failed to initialize the adapter\n", pMvSataAdapter->adapterId); unregister: bus_release_resource(pAdapter->hpt_dev, SYS_RES_MEMORY, rid, pAdapter->mem_res); hptmv_free_edma_queues(pAdapter); return ENXIO; } pAdapter->ver_601 = pMvSataAdapter->pcbVersion; #ifndef FOR_DEMO set_fail_leds(pMvSataAdapter, 0); #endif /* setup command blocks */ KdPrint(("Allocate command blocks\n")); _vbus_(pFreeCommands) = 0; pAdapter->pCommandBlocks = malloc(sizeof(struct _Command) * MAX_COMMAND_BLOCKS_FOR_EACH_VBUS, M_DEVBUF, M_NOWAIT); 
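/*
 * [Editor's note: example only, not part of this commit.]
 * dmamap_get()/dmamap_put() above, like the PRD-table pool
 * (AllocatePRDTable()/FreePRDTable()), are plain LIFO free lists guarded
 * in practice by the adapter lock; the PRD variant stores the "next"
 * link inside the free block itself:
 */
#if 0
void *pop(void **head)
{
    void *p = *head;
    if (p)
        *head = *(void **)p;    /* next link lives in the free block */
    return p;
}
void push(void **head, void *p)
{
    *(void **)p = *head;
    *head = p;
}
#endif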
KdPrint(("pCommandBlocks:%p\n",pAdapter->pCommandBlocks)); if (!pAdapter->pCommandBlocks) { MV_ERROR("insufficient memory\n"); goto unregister; } for (i=0; ipCommandBlocks[i])); } /*Set up the bus_dmamap*/ pAdapter->pbus_dmamap = (PBUS_DMAMAP)malloc (sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM, M_DEVBUF, M_NOWAIT); if(!pAdapter->pbus_dmamap) { MV_ERROR("insufficient memory\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); goto unregister; } memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM); pAdapter->pbus_dmamap_list = 0; for (i=0; i < MAX_QUEUE_COMM; i++) { PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]); pmap->pAdapter = pAdapter; dmamap_put(pmap); if(bus_dmamap_create(pAdapter->io_dma_parent, 0, &pmap->dma_map)) { MV_ERROR("Can not allocate dma map\n"); free(pAdapter->pCommandBlocks, M_DEVBUF); free(pAdapter->pbus_dmamap, M_DEVBUF); goto unregister; } callout_init_mtx(&pmap->timeout, &pAdapter->lock, 0); } /* setup PRD Tables */ KdPrint(("Allocate PRD Tables\n")); pAdapter->pFreePRDLink = 0; pAdapter->prdTableAddr = (PUCHAR)contigmalloc( (PRD_ENTRIES_SIZE*PRD_TABLES_FOR_VBUS + 32), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0ul); KdPrint(("prdTableAddr:%p\n",pAdapter->prdTableAddr)); if (!pAdapter->prdTableAddr) { MV_ERROR("insufficient PRD Tables\n"); goto unregister; } pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL); { PUCHAR PRDTable = pAdapter->prdTableAlignedAddr; for (i=0; ipFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */ FreePRDTable(pAdapter, PRDTable); PRDTable += PRD_ENTRIES_SIZE; } } /* enable the adapter interrupts */ /* configure and start the connected channels*/ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { pAdapter->mvChannel[channel].online = MV_FALSE; if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_TRUE) { KdPrint(("RR18xx[%d]: channel %d is connected\n", pMvSataAdapter->adapterId, channel)); if (hptmv_init_channel(pAdapter, channel) == 0) { if (mvSataConfigureChannel(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx[%d]: Failed to configure channel" " %d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } else { if (start_channel(pAdapter, channel)) { MV_ERROR("RR18xx[%d]: Failed to start channel," " channel=%d\n",pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); } pAdapter->mvChannel[channel].online = MV_TRUE; /* mvSataChannelSetEdmaLoopBackMode(pMvSataAdapter, channel, MV_TRUE);*/ } } } KdPrint(("pAdapter->mvChannel[channel].online:%x, channel:%d\n", pAdapter->mvChannel[channel].online, channel)); } #ifdef SUPPORT_ARRAY for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) { pVDev = ArrayTables(i); mArFreeArrayTable(pVDev); } #endif KdPrint(("Initialize Devices\n")); for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) { MV_SATA_CHANNEL *pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel) { init_vdev_params(pAdapter, channel); IdeRegisterVDevice(&pAdapter->VDevices[channel].u.disk); } } #ifdef SUPPORT_ARRAY CheckArrayCritical(_VBUS_P0); #endif _vbus_p->nInstances = 1; fRegisterVdevice(pAdapter); for (channel=0;channelpVDevice[channel]; if (pVDev && pVDev->vf_online) fCheckBootable(pVDev); } #if defined(SUPPORT_ARRAY) && defined(_RAID5N_) init_raid5_memory(_VBUS_P0); _vbus_(r5).enable_write_back = 1; printf("RR18xx: RAID5 write-back %s\n", _vbus_(r5).enable_write_back? 
"enabled" : "disabled"); #endif mvSataUnmaskAdapterInterrupt(pMvSataAdapter); return 0; } int MvSataResetChannel(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channel) { IAL_ADAPTER_T *pAdapter = (IAL_ADAPTER_T *)pMvSataAdapter->IALData; mvSataDisableChannelDma(pMvSataAdapter, channel); /* Flush pending commands */ mvSataFlushDmaQueue (pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); /* Software reset channel */ if (mvStorageDevATASoftResetDevice(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d]: failed to perform Software reset\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Hardware reset channel */ if (mvSataChannelHardReset(pMvSataAdapter, channel)== MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Hard reser the SATA channel\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; } if (mvSataIsStorageDeviceConnected(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("RR18xx [%d,%d] Failed to Connect Device\n", pMvSataAdapter->adapterId, channel); hptmv_free_channel(pAdapter, channel); return -1; }else { MV_ERROR("channel %d: perform recalibrate command", channel); if (!mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, MV_NON_UDMA_PROTOCOL_NON_DATA, MV_FALSE, NULL, /* pBuffer*/ 0, /* count */ 0, /*features*/ /* sectorCount */ 0, 0, /* lbaLow */ 0, /* lbaMid */ /* lbaHigh */ 0, 0, /* device */ /* command */ 0x10)) MV_ERROR("channel %d: recalibrate failed", channel); /* Set transfer mode */ if((mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, MV_ATA_TRANSFER_PIO_SLOW, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxPioModeSupported, 0, 0, 0) == MV_FALSE) || (mvStorageDevATASetFeatures(pMvSataAdapter, channel, MV_ATA_SET_FEATURES_TRANSFER, pAdapter->mvChannel[channel].maxUltraDmaModeSupported, 0, 0, 0) == MV_FALSE) ) { MV_ERROR("channel %d: Set Features failed", channel); hptmv_free_channel(pAdapter, channel); return -1; } /* Enable EDMA */ if (mvSataEnableChannelDma(pMvSataAdapter, channel) == MV_FALSE) { MV_ERROR("Failed to enable DMA, channel=%d", channel); hptmv_free_channel(pAdapter, channel); return -1; } } return 0; } static int fResetActiveCommands(PVBus _vbus_p) { MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_U8 channel; for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands) MvSataResetChannel(pMvSataAdapter,channel); } return 0; } void fCompleteAllCommandsSynchronously(PVBus _vbus_p) { UINT cont; ULONG ticks = 0; MV_U8 channel; MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter; MV_SATA_CHANNEL *pMvSataChannel; do { check_cmds: cont = 0; CheckPendingCall(_VBUS_P0); #ifdef _RAID5N_ dataxfer_poll(); xor_poll(); #endif for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) { pMvSataChannel = pMvSataAdapter->sataChannel[channel]; if (pMvSataChannel && pMvSataChannel->outstandingCommands) { while (pMvSataChannel->outstandingCommands) { if (!mvSataInterruptServiceRoutine(pMvSataAdapter)) { StallExec(1000); if (ticks++ > 3000) { MvSataResetChannel(pMvSataAdapter,channel); goto check_cmds; } } else ticks = 0; } cont = 1; } } } while (cont); } void fResetVBus(_VBUS_ARG0) { KdPrint(("fMvResetBus(%p)", _vbus_p)); /* some commands may already finished. 
     */
    CheckPendingCall(_VBUS_P0);

    fResetActiveCommands(_vbus_p);
    /*
     * the other pending commands may still be finished successfully.
     */
    fCompleteAllCommandsSynchronously(_vbus_p);

    /* Now there should be no pending commands. No more action needed. */
    CheckIdleCall(_VBUS_P0);

    KdPrint(("fMvResetBus() done"));
}

/* No rescan function */
void
fRescanAllDevice(_VBUS_ARG0)
{
}

static MV_BOOLEAN
CommandCompletionCB(MV_SATA_ADAPTER *pMvSataAdapter, MV_U8 channelNum,
    MV_COMPLETION_TYPE comp_type, MV_VOID_PTR commandId, MV_U16 responseFlags,
    MV_U32 timeStamp, MV_STORAGE_DEVICE_REGISTERS *registerStruct)
{
    PCommand pCmd = (PCommand)commandId;
    _VBUS_INST(pCmd->pVDevice->pVBus)

    if (pCmd->uScratch.sata_param.prdAddr)
        FreePRDTable(pMvSataAdapter->IALData,
            pCmd->uScratch.sata_param.prdAddr);

    switch (comp_type) {
    case MV_COMPLETION_TYPE_NORMAL:
        pCmd->Result = RETURN_SUCCESS;
        break;
    case MV_COMPLETION_TYPE_ABORT:
        pCmd->Result = RETURN_BUS_RESET;
        break;
    case MV_COMPLETION_TYPE_ERROR:
        MV_ERROR("IAL: COMPLETION ERROR, adapter %d, channel %d, flags=%x\n",
            pMvSataAdapter->adapterId, channelNum, responseFlags);

        if (responseFlags & 4) {
            MV_ERROR("ATA regs: error %x, sector count %x, LBA low %x, LBA mid %x,"
                " LBA high %x, device %x, status %x\n",
                registerStruct->errorRegister,
                registerStruct->sectorCountRegister,
                registerStruct->lbaLowRegister,
                registerStruct->lbaMidRegister,
                registerStruct->lbaHighRegister,
                registerStruct->deviceRegister,
                registerStruct->statusRegister);
        }
        /* We can't call handleEdmaError directly here, because
         * CommandCompletionCB is called by mv's ISR; if we retried the
         * command here, the internal data structures might be destroyed. */
        pCmd->uScratch.sata_param.responseFlags = responseFlags;
        pCmd->uScratch.sata_param.bIdeStatus = registerStruct->statusRegister;
        pCmd->uScratch.sata_param.errorRegister = registerStruct->errorRegister;
        pCmd->pVDevice->u.disk.QueueLength--;
        CallAfterReturn(_VBUS_P (DPC_PROC)handleEdmaError, pCmd);
        return TRUE;

    default:
        MV_ERROR(" Unknown completion type (%d)\n", comp_type);
        return MV_FALSE;
    }

    if (pCmd->uCmd.Ide.Command == IDE_COMMAND_VERIFY &&
        pCmd->uScratch.sata_param.cmd_priv > 1) {
        pCmd->uScratch.sata_param.cmd_priv--;
        return TRUE;
    }
    pCmd->pVDevice->u.disk.QueueLength--;
    CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
    return TRUE;
}

void
fDeviceSendCommand(_VBUS_ARG PCommand pCmd)
{
    MV_SATA_EDMA_PRD_ENTRY *pPRDTable = 0;
    MV_SATA_ADAPTER *pMvSataAdapter;
    MV_SATA_CHANNEL *pMvSataChannel;
    PVDevice pVDevice = pCmd->pVDevice;
    PDevice pDevice = &pVDevice->u.disk;
    LBA_T Lba = pCmd->uCmd.Ide.Lba;
    USHORT nSector = pCmd->uCmd.Ide.nSectors;

    MV_QUEUE_COMMAND_RESULT result;
    MV_QUEUE_COMMAND_INFO commandInfo;
    MV_UDMA_COMMAND_PARAMS *pUdmaParams =
        &commandInfo.commandParams.udmaCommand;
    MV_NONE_UDMA_COMMAND_PARAMS *pNoUdmaParams =
        &commandInfo.commandParams.NoneUdmaCommand;

    MV_BOOLEAN is48bit;
    MV_U8 channel;
    int i = 0;

    DECLARE_BUFFER(FPSCAT_GATH, tmpSg);

    if (!pDevice->df_on_line) {
        MV_ERROR("Device is offline");
        pCmd->Result = RETURN_BAD_DEVICE;
        CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
        return;
    }

    pDevice->HeadPosition = pCmd->uCmd.Ide.Lba + pCmd->uCmd.Ide.nSectors;
    pMvSataChannel = pDevice->mv;
    pMvSataAdapter = pMvSataChannel->mvSataAdapter;
    channel = pMvSataChannel->channelNumber;

    /* Old RAID0 arrays have a hidden LBA offset. Remember to clear
     * dDeHiddenLba when deleting the array!
*/ Lba += pDevice->dDeHiddenLba; /* check LBA */ if (Lba+nSector-1 > pDevice->dDeRealCapacity) { pCmd->Result = RETURN_INVALID_REQUEST; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } /* * always use 48bit LBA if drive supports it. * Some Seagate drives report error if you use a 28-bit command * to access sector 0xfffffff. */ is48bit = pMvSataChannel->lba48Address; switch (pCmd->uCmd.Ide.Command) { case IDE_COMMAND_READ: case IDE_COMMAND_WRITE: if (pDevice->bDeModeSetting<8) goto pio; commandInfo.type = MV_QUEUED_COMMAND_TYPE_UDMA; pUdmaParams->isEXT = is48bit; pUdmaParams->numOfSectors = nSector; pUdmaParams->lowLBAAddress = Lba; pUdmaParams->highLBAAddress = 0; pUdmaParams->prdHighAddr = 0; pUdmaParams->callBack = CommandCompletionCB; pUdmaParams->commandId = (MV_VOID_PTR )pCmd; if(pCmd->uCmd.Ide.Command == IDE_COMMAND_READ) pUdmaParams->readWrite = MV_UDMA_TYPE_READ; else pUdmaParams->readWrite = MV_UDMA_TYPE_WRITE; if (pCmd->pSgTable && pCmd->cf_physical_sg) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 0)) { pio: mvSataDisableChannelDma(pMvSataAdapter, channel); mvSataFlushDmaQueue(pMvSataAdapter, channel, MV_FLUSH_TYPE_CALLBACK); if (pCmd->pSgTable && pCmd->cf_physical_sg==0) { FPSCAT_GATH sg1=tmpSg, sg2=pCmd->pSgTable; do { *sg1++=*sg2; } while ((sg2++->wSgFlag & SG_FLAG_EOT)==0); } else { if (!pCmd->pfnBuildSgl || !pCmd->pfnBuildSgl(_VBUS_P pCmd, tmpSg, 1)) { pCmd->Result = RETURN_NEED_LOGICAL_SG; goto finish_cmd; } } do { ULONG size = tmpSg->wSgSize? tmpSg->wSgSize : 0x10000; ULONG_PTR addr = tmpSg->dSgAddress; if (size & 0x1ff) { pCmd->Result = RETURN_INVALID_REQUEST; goto finish_cmd; } if (mvStorageDevATAExecuteNonUDMACommand(pMvSataAdapter, channel, (pCmd->cf_data_out)?MV_NON_UDMA_PROTOCOL_PIO_DATA_OUT:MV_NON_UDMA_PROTOCOL_PIO_DATA_IN, is48bit, (MV_U16_PTR)addr, size >> 1, /* count */ 0, /* features N/A */ (MV_U16)(size>>9), /*sector count*/ (MV_U16)( (is48bit? (MV_U16)((Lba >> 16) & 0xFF00) : 0 ) | (UCHAR)(Lba & 0xFF) ), /*lbalow*/ (MV_U16)((Lba >> 8) & 0xFF), /* lbaMid */ (MV_U16)((Lba >> 16) & 0xFF),/* lbaHigh */ (MV_U8)(0x40 | (is48bit ? 0 : (UCHAR)(Lba >> 24) & 0xFF )),/* device */ (MV_U8)(is48bit ? (pCmd->cf_data_in?IDE_COMMAND_READ_EXT:IDE_COMMAND_WRITE_EXT):pCmd->uCmd.Ide.Command) )==MV_FALSE) { pCmd->Result = RETURN_IDE_ERROR; goto finish_cmd; } Lba += size>>9; if(Lba & 0xF0000000) is48bit = MV_TRUE; } while ((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pCmd->Result = RETURN_SUCCESS; finish_cmd: mvSataEnableChannelDma(pMvSataAdapter,channel); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); return; } } pPRDTable = (MV_SATA_EDMA_PRD_ENTRY *) AllocatePRDTable(pMvSataAdapter->IALData); KdPrint(("pPRDTable:%p\n",pPRDTable)); if (!pPRDTable) { pCmd->Result = RETURN_DEVICE_BUSY; CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); HPT_ASSERT(0); return; } do{ pPRDTable[i].highBaseAddr = (sizeof(tmpSg->dSgAddress)>4 ? 
(MV_U32)(tmpSg->dSgAddress>>32) : 0); pPRDTable[i].flags = (MV_U16)tmpSg->wSgFlag; pPRDTable[i].byteCount = (MV_U16)tmpSg->wSgSize; pPRDTable[i].lowBaseAddr = (MV_U32)tmpSg->dSgAddress; pPRDTable[i].reserved = 0; i++; }while((tmpSg++->wSgFlag & SG_FLAG_EOT)==0); pUdmaParams->prdLowAddr = (ULONG)fOsPhysicalAddress(pPRDTable); if ((pUdmaParams->numOfSectors == 256) && (pMvSataChannel->lba48Address == MV_FALSE)) { pUdmaParams->numOfSectors = 0; } pCmd->uScratch.sata_param.prdAddr = (PVOID)pPRDTable; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK) { queue_failed: switch (result) { case MV_QUEUE_COMMAND_RESULT_BAD_LBA_ADDRESS: MV_ERROR("IAL Error: Edma Queue command failed. Bad LBA " "LBA[31:0](0x%08x)\n", pUdmaParams->lowLBAAddress); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_QUEUED_MODE_DISABLED: MV_ERROR("IAL Error: Edma Queue command failed. EDMA" " disabled adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); mvSataEnableChannelDma(pMvSataAdapter,channel); pCmd->Result = RETURN_IDE_ERROR; break; case MV_QUEUE_COMMAND_RESULT_FULL: MV_ERROR("IAL Error: Edma Queue command failed. Queue is" " Full adapter %d channel %d\n", pMvSataAdapter->adapterId, channel); pCmd->Result = RETURN_DEVICE_BUSY; break; case MV_QUEUE_COMMAND_RESULT_BAD_PARAMS: MV_ERROR("IAL Error: Edma Queue command failed. (Bad " "Params), pMvSataAdapter: %p, pSataChannel: %p.\n", pMvSataAdapter, pMvSataAdapter->sataChannel[channel]); pCmd->Result = RETURN_IDE_ERROR; break; default: MV_ERROR("IAL Error: Bad result value (%d) from queue" " command\n", result); pCmd->Result = RETURN_IDE_ERROR; } if(pPRDTable) FreePRDTable(pMvSataAdapter->IALData,pPRDTable); CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd); } pDevice->QueueLength++; return; case IDE_COMMAND_VERIFY: commandInfo.type = MV_QUEUED_COMMAND_TYPE_NONE_UDMA; pNoUdmaParams->bufPtr = NULL; pNoUdmaParams->callBack = CommandCompletionCB; pNoUdmaParams->commandId = (MV_VOID_PTR)pCmd; pNoUdmaParams->count = 0; pNoUdmaParams->features = 0; pNoUdmaParams->protocolType = MV_NON_UDMA_PROTOCOL_NON_DATA; pCmd->uScratch.sata_param.cmd_priv = 1; if (pMvSataChannel->lba48Address == MV_TRUE){ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS_EXT; pNoUdmaParams->isEXT = MV_TRUE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(((Lba & 0xff000000) >> 16)| (Lba & 0xff)); pNoUdmaParams->sectorCount = nSector; pNoUdmaParams->device = 0x40; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); if (result != MV_QUEUE_COMMAND_RESULT_OK){ goto queue_failed; } return; } else{ pNoUdmaParams->command = MV_ATA_COMMAND_READ_VERIFY_SECTORS; pNoUdmaParams->isEXT = MV_FALSE; pNoUdmaParams->lbaHigh = (MV_U16)((Lba & 0xff0000) >> 16); pNoUdmaParams->lbaMid = (MV_U16)((Lba & 0xff00) >> 8); pNoUdmaParams->lbaLow = (MV_U16)(Lba & 0xff); pNoUdmaParams->sectorCount = 0xff & nSector; pNoUdmaParams->device = (MV_U8)(0x40 | ((Lba & 0xf000000) >> 24)); pNoUdmaParams->callBack = CommandCompletionCB; result = mvSataQueueCommand(pMvSataAdapter, channel, &commandInfo); /*FIXME: how about the commands already queued? 
       but Marvell also forgot to consider this */
            if (result != MV_QUEUE_COMMAND_RESULT_OK) {
                goto queue_failed;
            }
        }
        break;
    default:
        pCmd->Result = RETURN_INVALID_REQUEST;
        CallAfterReturn(_VBUS_P (DPC_PROC)pCmd->pfnCompletion, pCmd);
        break;
    }
}

/**********************************************************
 *
 *  Probe the host adapter.
 *
 **********************************************************/
static int
hpt_probe(device_t dev)
{
    if ((pci_get_vendor(dev) == MV_SATA_VENDOR_ID) &&
        (pci_get_device(dev) == MV_SATA_DEVICE_ID_5081
#ifdef FOR_DEMO
        || pci_get_device(dev) == MV_SATA_DEVICE_ID_5080
#endif
        )) {
        KdPrintI((CONTROLLER_NAME " found\n"));
        device_set_desc(dev, CONTROLLER_NAME);
        return (BUS_PROBE_DEFAULT);
    }
    else
        return(ENXIO);
}

/***********************************************************
 *
 *  Auto configuration: attach and init a host adapter.
 *
 ***********************************************************/
static int
hpt_attach(device_t dev)
{
    IAL_ADAPTER_T *pAdapter = device_get_softc(dev);
    int rid;
    union ccb *ccb;
    struct cam_devq *devq;
    struct cam_sim *hpt_vsim;

    device_printf(dev, "%s Version %s\n", DRIVER_NAME, DRIVER_VERSION);

    pAdapter->hpt_dev = dev;

    rid = init_adapter(pAdapter);
    if (rid)
        return rid;

    rid = 0;
    if ((pAdapter->hpt_irq = bus_alloc_resource_any(pAdapter->hpt_dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
        hpt_printk(("can't allocate interrupt\n"));
        return(ENXIO);
    }

    if (bus_setup_intr(pAdapter->hpt_dev, pAdapter->hpt_irq,
        INTR_TYPE_CAM | INTR_MPSAFE, NULL, hpt_intr, pAdapter,
        &pAdapter->hpt_intr)) {
        hpt_printk(("can't set up interrupt\n"));
        free(pAdapter, M_DEVBUF);
        return(ENXIO);
    }

    if ((ccb = (union ccb *)malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK))
        != (union ccb *)NULL) {
        bzero(ccb, sizeof(*ccb));
        ccb->ccb_h.pinfo.priority = 1;
        ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
    }
    else {
        return ENOMEM;
    }

    /*
     * Create the device queue for our SIM(s).
     */
    if ((devq = cam_simq_alloc(8/*MAX_QUEUE_COMM*/)) == NULL) {
        KdPrint(("ENXIO\n"));
        return ENOMEM;
    }

    /*
     * Construct our SIM entry
     */
    hpt_vsim = cam_sim_alloc(hpt_action, hpt_poll, __str(PROC_DIR_NAME),
        pAdapter, device_get_unit(pAdapter->hpt_dev), &pAdapter->lock,
        1, 8, devq);
    if (hpt_vsim == NULL) {
        cam_simq_free(devq);
        return ENOMEM;
    }

    mtx_lock(&pAdapter->lock);
    if (xpt_bus_register(hpt_vsim, dev, 0) != CAM_SUCCESS) {
        cam_sim_free(hpt_vsim, /*free devq*/ TRUE);
        mtx_unlock(&pAdapter->lock);
        hpt_vsim = NULL;
        return ENXIO;
    }

    if (xpt_create_path(&pAdapter->path, /*periph */ NULL,
        cam_sim_path(hpt_vsim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
        != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(hpt_vsim));
        cam_sim_free(hpt_vsim, /*free_devq*/TRUE);
        mtx_unlock(&pAdapter->lock);
        hpt_vsim = NULL;
        return ENXIO;
    }
    mtx_unlock(&pAdapter->lock);

    xpt_setup_ccb(&(ccb->ccb_h), pAdapter->path, /*priority*/5);
    ccb->ccb_h.func_code = XPT_SASYNC_CB;
    ccb->csa.event_enable = AC_LOST_DEVICE;
    ccb->csa.callback = hpt_async;
    ccb->csa.callback_arg = hpt_vsim;
    xpt_action((union ccb *)ccb);
    free(ccb, M_DEVBUF);

    if (device_get_unit(dev) == 0) {
        /* Start the work thread.  XXX */
        launch_worker_thread();
    }

    return 0;
}

static int
hpt_detach(device_t dev)
{
    return (EBUSY);
}

/***************************************************************
 * The poll function is used to simulate the interrupt when
 * the interrupt subsystem is not functioning.
 ***************************************************************/
static void
hpt_poll(struct cam_sim *sim)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = cam_sim_softc(sim);

    hpt_intr_locked((void *)cam_sim_softc(sim));
}

/****************************************************************
 *  Name:           hpt_intr
 *  Description:    Interrupt handler.
 ****************************************************************/
static void
hpt_intr(void *arg)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = arg;
    mtx_lock(&pAdapter->lock);
    hpt_intr_locked(pAdapter);
    mtx_unlock(&pAdapter->lock);
}

static void
hpt_intr_locked(IAL_ADAPTER_T *pAdapter)
{
    mtx_assert(&pAdapter->lock, MA_OWNED);
    /* KdPrintI(("----- Entering Isr() -----\n")); */
    if (mvSataInterruptServiceRoutine(&pAdapter->mvSataAdapter) == MV_TRUE) {
        _VBUS_INST(&pAdapter->VBus)
        CheckPendingCall(_VBUS_P0);
    }

    /* KdPrintI(("----- Leaving Isr() -----\n")); */
}

/**********************************************************
 *          Asynchronous Events
 *********************************************************/
#if (!defined(UNREFERENCED_PARAMETER))
#define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

static void
hpt_async(void *callback_arg, u_int32_t code, struct cam_path *path,
    void *arg)
{
    /* debug XXXX */
    panic("Here");
    UNREFERENCED_PARAMETER(callback_arg);
    UNREFERENCED_PARAMETER(code);
    UNREFERENCED_PARAMETER(path);
    UNREFERENCED_PARAMETER(arg);
}

static void
FlushAdapter(IAL_ADAPTER_T *pAdapter)
{
    int i;

    hpt_printk(("flush all devices\n"));

    /* flush all devices */
    for (i = 0; i < MAX_VDEVICE_PER_VBUS; i++) {
        PVDevice pVDev = pAdapter->VBus.pVDevice[i];
        if (pVDev) fFlushVDev(pVDev);
    }
}

static int
hpt_shutdown(device_t dev)
{
    IAL_ADAPTER_T *pAdapter;

    pAdapter = device_get_softc(dev);

    EVENTHANDLER_DEREGISTER(shutdown_final, pAdapter->eh);
    mtx_lock(&pAdapter->lock);
    FlushAdapter(pAdapter);
    mtx_unlock(&pAdapter->lock);
    /* give the flush some time to happen, otherwise "shutdown -p now"
     * would corrupt the file system */
    DELAY(1000 * 1000 * 5);
    return 0;
}

void
Check_Idle_Call(IAL_ADAPTER_T *pAdapter)
{
    _VBUS_INST(&pAdapter->VBus)

    if (mWaitingForIdle(_VBUS_P0)) {
        CheckIdleCall(_VBUS_P0);
#ifdef SUPPORT_ARRAY
        {
            int i;
            PVDevice pArray;
            for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
                if ((pArray = ArrayTables(i))->u.array.dArStamp == 0)
                    continue;
                else if (pArray->u.array.rf_auto_rebuild) {
                    KdPrint(("auto rebuild.\n"));
                    pArray->u.array.rf_auto_rebuild = 0;
                    hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block,
                        pAdapter, pArray, DUPLICATE);
                }
            }
        }
#endif
    }

    /* launch the awaiting commands blocked by mWaitingForIdle */
    while (pAdapter->pending_Q != NULL) {
        _VBUS_INST(&pAdapter->VBus)
        union ccb *ccb = (union ccb *)pAdapter->pending_Q->ccb_h.ccb_ccb_ptr;
        hpt_free_ccb(&pAdapter->pending_Q, ccb);
        CallAfterReturn(_VBUS_P (DPC_PROC)OsSendCommand, ccb);
    }
}

static void
ccb_done(union ccb *ccb)
{
    PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter;
    IAL_ADAPTER_T *pAdapter = pmap->pAdapter;

    KdPrintI(("ccb_done: ccb %p status %x\n", ccb, ccb->ccb_h.status));

    dmamap_put(pmap);
    xpt_done(ccb);

    pAdapter->outstandingCommands--;

    if (pAdapter->outstandingCommands == 0) {
        if (DPC_Request_Nums == 0)
            Check_Idle_Call(pAdapter);
        wakeup(pAdapter);
    }
}

/****************************************************************
 *  Name:           hpt_action
 *  Description:    Process a queued command from the CAM layer.
 *  Parameters:     sim - Pointer to SIM object
 *                  ccb - Pointer to SCSI command structure.
****************************************************************/ void hpt_action(struct cam_sim *sim, union ccb *ccb) { IAL_ADAPTER_T * pAdapter = (IAL_ADAPTER_T *) cam_sim_softc(sim); PBUS_DMAMAP pmap; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("hpt_action\n")); KdPrint(("hpt_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ { /* ccb->ccb_h.path_id is not our bus id - don't check it */ if (ccb->ccb_h.target_lun) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (ccb->ccb_h.target_id >= MAX_VDEVICE_PER_VBUS || pAdapter->VBus.pVDevice[ccb->ccb_h.target_id]==0) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if (pAdapter->outstandingCommands==0 && DPC_Request_Nums==0) Check_Idle_Call(pAdapter); pmap = dmamap_get(pAdapter); HPT_ASSERT(pmap); ccb->ccb_adapter = pmap; memset((void *)pmap->psg, 0, sizeof(pmap->psg)); if (mWaitingForIdle(_VBUS_P0)) hpt_queue_ccb(&pAdapter->pending_Q, ccb); else OsSendCommand(_VBUS_P ccb); /* KdPrint(("leave scsiio\n")); */ break; } case XPT_RESET_BUS: KdPrint(("reset bus\n")); fResetVBus(_VBUS_P0); xpt_done(ccb); break; case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: /* XXX Implement */ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; /* Not necessary to reset bus */ cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = MAX_VDEVICE_PER_VBUS; cpi->max_lun = 0; cpi->initiator_id = MAX_VDEVICE_PER_VBUS; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: KdPrint(("invalid cmd\n")); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } /* KdPrint(("leave hpt_action..............\n")); */ } /* shall be called at lock_driver() */ static void hpt_queue_ccb(union ccb **ccb_Q, union ccb *ccb) { if(*ccb_Q == NULL) ccb->ccb_h.ccb_ccb_ptr = ccb; else { ccb->ccb_h.ccb_ccb_ptr = (*ccb_Q)->ccb_h.ccb_ccb_ptr; (*ccb_Q)->ccb_h.ccb_ccb_ptr = (char *)ccb; } *ccb_Q = ccb; } /* shall be called at lock_driver() */ static void hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb) { union ccb *TempCCB; TempCCB = *ccb_Q; if(ccb->ccb_h.ccb_ccb_ptr == ccb) /*it means SCpnt is the last one in CURRCMDs*/ *ccb_Q = NULL; else { while(TempCCB->ccb_h.ccb_ccb_ptr != (char *)ccb) TempCCB = (union ccb *)TempCCB->ccb_h.ccb_ccb_ptr; TempCCB->ccb_h.ccb_ccb_ptr = ccb->ccb_h.ccb_ccb_ptr; if(*ccb_Q == ccb) *ccb_Q = TempCCB; } } #ifdef SUPPORT_ARRAY /*************************************************************************** * Function: hpt_worker_thread * Description: Do background rebuilding. Execute in kernel thread context. * Returns: None ***************************************************************************/ static void hpt_worker_thread(void) { for(;;) { mtx_lock(&DpcQueue_Lock); while (DpcQueue_First!=DpcQueue_Last) { ST_HPT_DPC p; p = DpcQueue[DpcQueue_First]; DpcQueue_First++; DpcQueue_First %= MAX_DPC; DPC_Request_Nums++; mtx_unlock(&DpcQueue_Lock); p.dpc(p.pAdapter, p.arg, p.flags); mtx_lock(&p.pAdapter->lock); mtx_lock(&DpcQueue_Lock); DPC_Request_Nums--; /* since we may have prevented Check_Idle_Call, do it here */ if (DPC_Request_Nums==0) { if (p.pAdapter->outstandingCommands == 0) { _VBUS_INST(&p.pAdapter->VBus); Check_Idle_Call(p.pAdapter); CheckPendingCall(_VBUS_P0); } } mtx_unlock(&p.pAdapter->lock); mtx_unlock(&DpcQueue_Lock); /*Schedule out*/ if (SIGISMEMBER(curproc->p_siglist, SIGSTOP)) { /* abort rebuilding process. 
             */
            IAL_ADAPTER_T *pAdapter;
            PVDevice pArray;
            PVBus _vbus_p;
            int i;

            sx_slock(&hptmv_list_lock);
            pAdapter = gIal_Adapter;

            while (pAdapter != 0) {
                mtx_lock(&pAdapter->lock);
                _vbus_p = &pAdapter->VBus;

                for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
                    if ((pArray = ArrayTables(i))->u.array.dArStamp == 0)
                        continue;
                    else if (pArray->u.array.rf_rebuilding ||
                        pArray->u.array.rf_verifying ||
                        pArray->u.array.rf_initializing) {
                        pArray->u.array.rf_abort_rebuild = 1;
                    }
                }
                mtx_unlock(&pAdapter->lock);
                pAdapter = pAdapter->next;
            }
            sx_sunlock(&hptmv_list_lock);
        }
        mtx_lock(&DpcQueue_Lock);
    }
    mtx_unlock(&DpcQueue_Lock);

    /*Remove this debug option*/
    /*
    #ifdef DEBUG
    if (SIGISMEMBER(curproc->p_siglist, SIGSTOP))
        pause("hptrdy", 2*hz);
    #endif
    */
    kproc_suspend_check(curproc);
    pause("-", 2*hz);  /* wait for something to do */
    }
}

static struct proc *hptdaemonproc;
static struct kproc_desc hpt_kp = {
    "hpt_wt",
    hpt_worker_thread,
    &hptdaemonproc
};

/* Start this thread in hpt_attach, to prevent the kernel from loading it
   without our controller. */
static void
launch_worker_thread(void)
{
    IAL_ADAPTER_T *pAdapTemp;

    kproc_start(&hpt_kp);

    sx_slock(&hptmv_list_lock);
    for (pAdapTemp = gIal_Adapter; pAdapTemp; pAdapTemp = pAdapTemp->next) {
        _VBUS_INST(&pAdapTemp->VBus)
        int i;
        PVDevice pVDev;

        for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
            if ((pVDev = ArrayTables(i))->u.array.dArStamp == 0)
                continue;
            else {
                if (pVDev->u.array.rf_need_rebuild &&
                    !pVDev->u.array.rf_rebuilding)
                    hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block,
                        pAdapTemp, pVDev,
                        (UCHAR)((pVDev->u.array.CriticalMembers ||
                        pVDev->VDeviceType == VD_RAID_1) ?
                        DUPLICATE : REBUILD_PARITY));
            }
    }
    sx_sunlock(&hptmv_list_lock);

    /*
     * hpt_worker_thread needs to be suspended after shutdown sync, when
     * the fs sync has finished.
     */
    EVENTHANDLER_REGISTER(shutdown_post_sync, kproc_shutdown,
        hptdaemonproc, SHUTDOWN_PRI_LAST);
}
/*
 *SYSINIT(hptwt, SI_SUB_KTHREAD_IDLE, SI_ORDER_FIRST, launch_worker_thread, NULL);
*/

#endif

/********************************************************************************/

int HPTLIBAPI
fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical)
{
    union ccb *ccb = (union ccb *)pCmd->pOrgCommand;

    if (logical) {
        pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr;
        pSg->wSgSize = ccb->csio.dxfer_len;
        pSg->wSgFlag = SG_FLAG_EOT;
        return TRUE;
    }
    /* since we have provided physical sg, nobody will ask us to build
       physical sg */
    HPT_ASSERT(0);
    return FALSE;
}

/*******************************************************************************/
ULONG HPTLIBAPI
GetStamp(void)
{
    /*
     * the system variable, ticks, can't be used since it hasn't yet been
     * active when our driver starts (ticks==0, it's an invalid stamp value)
     */
    ULONG stamp;
    do { stamp = random(); } while (stamp == 0);
    return stamp;
}

static void
SetInquiryData(PINQUIRYDATA inquiryData, PVDevice pVDev)
{
    int i;
    IDENTIFY_DATA2 *pIdentify = (IDENTIFY_DATA2 *)pVDev->u.disk.mv->identifyDevice;

    inquiryData->DeviceType = T_DIRECT; /*DIRECT_ACCESS_DEVICE*/
    inquiryData->AdditionalLength = (UCHAR)(sizeof(INQUIRYDATA) - 5);
#ifndef SERIAL_CMDS
    inquiryData->CommandQueue = 1;
#endif

    switch (pVDev->VDeviceType) {
    case VD_SINGLE_DISK:
    case VD_ATAPI:
    case VD_REMOVABLE:
        /* Set the removable bit, if applicable. */
        if ((pVDev->u.disk.df_removable_drive) ||
            (pIdentify->GeneralConfiguration & 0x80))
            inquiryData->RemovableMedia = 1;

        /* Fill in vendor identification fields. */
        for (i = 0; i < 20; i += 2) {
            inquiryData->VendorId[i] =
                ((PUCHAR)pIdentify->ModelNumber)[i + 1];
            inquiryData->VendorId[i+1] =
                ((PUCHAR)pIdentify->ModelNumber)[i];
        }

        /* Initialize unused portion of product id.
*/ for (i = 0; i < 4; i++) inquiryData->ProductId[12+i] = ' '; /* firmware revision */ for (i = 0; i < 4; i += 2) { inquiryData->ProductRevisionLevel[i] = ((PUCHAR)pIdentify->FirmwareRevision)[i+1]; inquiryData->ProductRevisionLevel[i+1] = ((PUCHAR)pIdentify->FirmwareRevision)[i]; } break; default: memcpy(&inquiryData->VendorId, "RR18xx ", 8); #ifdef SUPPORT_ARRAY switch(pVDev->VDeviceType){ case VD_RAID_0: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 1/0 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 0 Array ", 16); break; case VD_RAID_1: if ((pVDev->u.array.pMember[0] && mIsArray(pVDev->u.array.pMember[0])) || (pVDev->u.array.pMember[1] && mIsArray(pVDev->u.array.pMember[1]))) memcpy(&inquiryData->ProductId, "RAID 0/1 Array ", 16); else memcpy(&inquiryData->ProductId, "RAID 1 Array ", 16); break; case VD_RAID_5: memcpy(&inquiryData->ProductId, "RAID 5 Array ", 16); break; case VD_JBOD: memcpy(&inquiryData->ProductId, "JBOD Array ", 16); break; } #endif memcpy(&inquiryData->ProductRevisionLevel, "3.00", 4); break; } } static void hpt_timeout(void *arg) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)((union ccb *)arg)->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; _VBUS_INST(&pAdapter->VBus) mtx_assert(&pAdapter->lock, MA_OWNED); fResetVBus(_VBUS_P0); } static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { PCommand pCmd = (PCommand)arg; union ccb *ccb = pCmd->pOrgCommand; struct ccb_hdr *ccb_h = &ccb->ccb_h; PBUS_DMAMAP pmap = (PBUS_DMAMAP) ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; FPSCAT_GATH psg = pCmd->pSgTable; int idx; _VBUS_INST(pVDev->pVBus) HPT_ASSERT(pCmd->cf_physical_sg); if (error) panic("busdma error"); HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS); if (nsegs != 0) { for (idx = 0; idx < nsegs; idx++, psg++) { psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr; psg->wSgSize = segs[idx].ds_len; psg->wSgFlag = (idx == nsegs-1)? 
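/*
 * [Editor's note: restatement only, not new driver code; the ternary
 * above continues below.] The busdma callback converts each
 * bus_dma_segment_t into the driver's scatter/gather entry and marks
 * only the last entry with SG_FLAG_EOT:
 */
#if 0
    for (idx = 0; idx < nsegs; idx++) {
        psg[idx].dSgAddress = (ULONG_PTR)segs[idx].ds_addr;
        psg[idx].wSgSize = segs[idx].ds_len;
        psg[idx].wSgFlag = (idx == nsegs - 1) ? SG_FLAG_EOT : 0;
    }
#endif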
SG_FLAG_EOT: 0; /* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */ } /* psg[-1].wSgFlag = SG_FLAG_EOT; */ if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE); } } callout_reset(&pmap->timeout, 20 * hz, hpt_timeout, ccb); pVDev->pfnSendCommand(_VBUS_P pCmd); CheckPendingCall(_VBUS_P0); } static void HPTLIBAPI OsSendCommand(_VBUS_ARG union ccb *ccb) { PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; struct ccb_hdr *ccb_h = &ccb->ccb_h; struct ccb_scsiio *csio = &ccb->csio; PVDevice pVDev = pAdapter->VBus.pVDevice[ccb_h->target_id]; KdPrintI(("OsSendCommand: ccb %p cdb %x-%x-%x\n", ccb, *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[0], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[4], *(ULONG *)&ccb->csio.cdb_io.cdb_bytes[8] )); pAdapter->outstandingCommands++; if (pVDev == NULL || pVDev->vf_online == 0) { ccb->ccb_h.status = CAM_REQ_INVALID; ccb_done(ccb); goto Command_Complished; } switch(ccb->csio.cdb_io.cdb_bytes[0]) { case TEST_UNIT_READY: case START_STOP_UNIT: case SYNCHRONIZE_CACHE: /* FALLTHROUGH */ ccb->ccb_h.status = CAM_REQ_CMP; break; case INQUIRY: ZeroMemory(ccb->csio.data_ptr, ccb->csio.dxfer_len); SetInquiryData((PINQUIRYDATA)ccb->csio.data_ptr, pVDev); ccb_h->status = CAM_REQ_CMP; break; case READ_CAPACITY: { UCHAR *rbuf=csio->data_ptr; unsigned int cap; if (pVDev->VDeviceCapacity > 0xfffffffful) { cap = 0xfffffffful; } else { cap = pVDev->VDeviceCapacity - 1; } rbuf[0] = (UCHAR)(cap>>24); rbuf[1] = (UCHAR)(cap>>16); rbuf[2] = (UCHAR)(cap>>8); rbuf[3] = (UCHAR)cap; /* Claim 512 byte blocks (big-endian). 
*/ rbuf[4] = 0; rbuf[5] = 0; rbuf[6] = 2; rbuf[7] = 0; ccb_h->status = CAM_REQ_CMP; break; } case 0x9e: /*SERVICE_ACTION_IN*/ { UCHAR *rbuf = csio->data_ptr; LBA_T cap = pVDev->VDeviceCapacity - 1; rbuf[0] = (UCHAR)(cap>>56); rbuf[1] = (UCHAR)(cap>>48); rbuf[2] = (UCHAR)(cap>>40); rbuf[3] = (UCHAR)(cap>>32); rbuf[4] = (UCHAR)(cap>>24); rbuf[5] = (UCHAR)(cap>>16); rbuf[6] = (UCHAR)(cap>>8); rbuf[7] = (UCHAR)cap; rbuf[8] = 0; rbuf[9] = 0; rbuf[10] = 2; rbuf[11] = 0; ccb_h->status = CAM_REQ_CMP; break; } case READ_6: case WRITE_6: case READ_10: case WRITE_10: case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ case 0x13: case 0x2f: { UCHAR Cdb[16]; UCHAR CdbLength; _VBUS_INST(pVDev->pVBus) PCommand pCmd = AllocateCommand(_VBUS_P0); int error; HPT_ASSERT(pCmd); CdbLength = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, Cdb, CdbLength); } else { KdPrintE(("ERROR!!!\n")); ccb->ccb_h.status = CAM_REQ_INVALID; break; } } else { bcopy(csio->cdb_io.cdb_bytes, Cdb, CdbLength); } pCmd->pOrgCommand = ccb; pCmd->pVDevice = pVDev; pCmd->pfnCompletion = fOsCommandDone; pCmd->pfnBuildSgl = fOsBuildSgl; pCmd->pSgTable = pmap->psg; switch (Cdb[0]) { case READ_6: case WRITE_6: case 0x13: pCmd->uCmd.Ide.Lba = ((ULONG)Cdb[1] << 16) | ((ULONG)Cdb[2] << 8) | (ULONG)Cdb[3]; pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[4]; break; case 0x88: /* READ_16 */ case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Lba = (HPT_U64)Cdb[2] << 56 | (HPT_U64)Cdb[3] << 48 | (HPT_U64)Cdb[4] << 40 | (HPT_U64)Cdb[5] << 32 | (HPT_U64)Cdb[6] << 24 | (HPT_U64)Cdb[7] << 16 | (HPT_U64)Cdb[8] << 8 | (HPT_U64)Cdb[9]; pCmd->uCmd.Ide.nSectors = (USHORT)Cdb[12] << 8 | (USHORT)Cdb[13]; break; default: pCmd->uCmd.Ide.Lba = (ULONG)Cdb[5] | ((ULONG)Cdb[4] << 8) | ((ULONG)Cdb[3] << 16) | ((ULONG)Cdb[2] << 24); pCmd->uCmd.Ide.nSectors = (USHORT) Cdb[8] | ((USHORT)Cdb[7]<<8); break; } switch (Cdb[0]) { case READ_6: case READ_10: case 0x88: /* READ_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_READ; pCmd->cf_data_in = 1; break; case WRITE_6: case WRITE_10: case 0x8a: /* WRITE_16 */ pCmd->uCmd.Ide.Command = IDE_COMMAND_WRITE; pCmd->cf_data_out = 1; break; case 0x13: case 0x2f: pCmd->uCmd.Ide.Command = IDE_COMMAND_VERIFY; break; } /*///////////////////////// */ pCmd->cf_physical_sg = 1; error = bus_dmamap_load_ccb(pAdapter->io_dma_parent, pmap->dma_map, ccb, hpt_io_dmamap_callback, pCmd, BUS_DMA_WAITOK ); KdPrint(("bus_dmamap_load return %d\n", error)); if (error && error!=EINPROGRESS) { hpt_printk(("bus_dmamap_load error %d\n", error)); FreeCommand(_VBUS_P pCmd); ccb->ccb_h.status = CAM_REQ_CMP_ERR; dmamap_put(pmap); pAdapter->outstandingCommands--; if (pAdapter->outstandingCommands == 0) wakeup(pAdapter); xpt_done(ccb); } goto Command_Complished; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ccb_done(ccb); Command_Complished: CheckPendingCall(_VBUS_P0); return; } static void HPTLIBAPI fOsCommandDone(_VBUS_ARG PCommand pCmd) { union ccb *ccb = pCmd->pOrgCommand; PBUS_DMAMAP pmap = (PBUS_DMAMAP)ccb->ccb_adapter; IAL_ADAPTER_T *pAdapter = pmap->pAdapter; KdPrint(("fOsCommandDone(pcmd=%p, result=%d)\n", pCmd, pCmd->Result)); callout_stop(&pmap->timeout); switch(pCmd->Result) { case RETURN_SUCCESS: ccb->ccb_h.status = CAM_REQ_CMP; break; case RETURN_BAD_DEVICE: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case RETURN_DEVICE_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case RETURN_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case RETURN_SELECTION_TIMEOUT: 
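/*
 * [Editor's summary of the switch in fOsCommandDone(); informational
 * only.] Driver results map onto CAM status codes as follows; note that
 * both DEVICE_BUSY and RETRY surface as CAM_BUSY so CAM will requeue:
 *
 *   RETURN_SUCCESS            -> CAM_REQ_CMP
 *   RETURN_BAD_DEVICE         -> CAM_DEV_NOT_THERE
 *   RETURN_DEVICE_BUSY        -> CAM_BUSY
 *   RETURN_INVALID_REQUEST    -> CAM_REQ_INVALID
 *   RETURN_SELECTION_TIMEOUT  -> CAM_SEL_TIMEOUT
 *   RETURN_RETRY              -> CAM_BUSY
 *   anything else             -> CAM_SCSI_STATUS_ERROR
 */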
ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case RETURN_RETRY: ccb->ccb_h.status = CAM_BUSY; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } if (pCmd->cf_data_in) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTREAD); } else if (pCmd->cf_data_out) { bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(pAdapter->io_dma_parent, pmap->dma_map); FreeCommand(_VBUS_P pCmd); ccb_done(ccb); } int hpt_queue_dpc(HPT_DPC dpc, IAL_ADAPTER_T * pAdapter, void *arg, UCHAR flags) { int p; mtx_lock(&DpcQueue_Lock); p = (DpcQueue_Last + 1) % MAX_DPC; if (p==DpcQueue_First) { KdPrint(("DPC Queue full!\n")); mtx_unlock(&DpcQueue_Lock); return -1; } DpcQueue[DpcQueue_Last].dpc = dpc; DpcQueue[DpcQueue_Last].pAdapter = pAdapter; DpcQueue[DpcQueue_Last].arg = arg; DpcQueue[DpcQueue_Last].flags = flags; DpcQueue_Last = p; mtx_unlock(&DpcQueue_Lock); return 0; } #ifdef _RAID5N_ /* * Allocate memory above 16M, otherwise we may eat all low memory for ISA devices. * How about the memory for 5081 request/response array and PRD table? */ void *os_alloc_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void *os_alloc_dma_page(_VBUS_ARG0) { return (void *)contigmalloc(0x1000, M_DEVBUF, M_NOWAIT, 0x1000000, 0xffffffff, PAGE_SIZE, 0ul); } void os_free_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void os_free_dma_page(_VBUS_ARG void *p) { contigfree(p, 0x1000, M_DEVBUF); } void DoXor1(ULONG *p0, ULONG *p1, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ = *p1++ ^ *p2++; } void DoXor2(ULONG *p0, ULONG *p2, UINT nBytes) { UINT i; for (i = 0; i < nBytes / 4; i++) *p0++ ^= *p2++; } #endif Index: stable/11/sys/dev/mvs/mvs.c =================================================================== --- stable/11/sys/dev/mvs/mvs.c (revision 314380) +++ stable/11/sys/dev/mvs/mvs.c (revision 314381) @@ -1,2460 +1,2456 @@ /*- * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" #include #include #include #include #include /* local prototypes */ static int mvs_ch_init(device_t dev); static int mvs_ch_deinit(device_t dev); static int mvs_ch_suspend(device_t dev); static int mvs_ch_resume(device_t dev); static void mvs_dmainit(device_t dev); static void mvs_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void mvs_dmafini(device_t dev); static void mvs_slotsalloc(device_t dev); static void mvs_slotsfree(device_t dev); static void mvs_setup_edma_queues(device_t dev); static void mvs_set_edma_mode(device_t dev, enum mvs_edma_mode mode); static void mvs_ch_pm(void *arg); static void mvs_ch_intr_locked(void *data); static void mvs_ch_intr(void *data); static void mvs_reset(device_t dev); static void mvs_softreset(device_t dev, union ccb *ccb); static int mvs_sata_connect(struct mvs_channel *ch); static int mvs_sata_phy_reset(device_t dev); static int mvs_wait(device_t dev, u_int s, u_int c, int t); static void mvs_tfd_read(device_t dev, union ccb *ccb); static void mvs_tfd_write(device_t dev, union ccb *ccb); static void mvs_legacy_intr(device_t dev, int poll); static void mvs_crbq_intr(device_t dev); static void mvs_begin_transaction(device_t dev, union ccb *ccb); static void mvs_legacy_execute_transaction(struct mvs_slot *slot); static void mvs_timeout(struct mvs_slot *slot); static void mvs_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void mvs_requeue_frozen(device_t dev); static void mvs_execute_transaction(struct mvs_slot *slot); static void mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et); static void mvs_issue_recovery(device_t dev); static void mvs_process_read_log(device_t dev, union ccb *ccb); static void mvs_process_request_sense(device_t dev, union ccb *ccb); static void mvsaction(struct cam_sim *sim, union ccb *ccb); static void mvspoll(struct cam_sim *sim); static MALLOC_DEFINE(M_MVS, "MVS driver", "MVS driver data buffers"); #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static int mvs_ch_probe(device_t dev) { device_set_desc_copy(dev, "Marvell SATA channel"); return (BUS_PROBE_DEFAULT); } static int mvs_ch_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(device_get_parent(dev)); struct mvs_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->quirks = ctlr->quirks; mtx_init(&ch->mtx, "MVS channel lock", NULL, MTX_DEF); ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = (ch->quirks & MVS_Q_GENIIE) ? 
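/*
 * [Editor's note: usage example, not part of the driver; the hint names
 * are inferred from the resource_int_value() calls above and the "mvsch"
 * sim name, and the values are only examples.] Per-channel settings such
 * as pm_level and sata_rev are read from device hints, e.g. in
 * /boot/device.hints:
 *
 *   hint.mvsch.0.pm_level="0"
 *   hint.mvsch.0.sata_rev="2"
 */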
8192 : 2048; ch->user[i].tags = MVS_MAX_SLOTS; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_AN; } rid = ch->unit; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); mvs_dmainit(dev); mvs_slotsalloc(dev); mvs_ch_init(dev); mtx_lock(&ch->mtx); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, mvs_ch_intr_locked, dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. */ devq = cam_simq_alloc(MVS_MAX_SLOTS - 1); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(mvsaction, mvspoll, "mvsch", ch, device_get_unit(dev), &ch->mtx, 2, (ch->quirks & MVS_Q_GENI) ? 0 : MVS_MAX_SLOTS - 1, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, mvs_ch_pm, dev); } mtx_unlock(&ch->mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int mvs_ch_detach(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); mvs_ch_deinit(dev); mvs_slotsfree(dev); mvs_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int mvs_ch_init(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); uint32_t reg; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, EDMA_IEM, 0); /* Stop EDMA */ ch->curr_mode = MVS_EDMA_UNKNOWN; mvs_set_edma_mode(dev, MVS_EDMA_OFF); /* Clear and configure FIS interrupts. */ ATA_OUTL(ch->r_mem, SATA_FISIC, 0); reg = ATA_INL(ch->r_mem, SATA_FISC); reg |= SATA_FISC_FISWAIT4HOSTRDYEN_B1; ATA_OUTL(ch->r_mem, SATA_FISC, reg); reg = ATA_INL(ch->r_mem, SATA_FISIM); reg |= SATA_FISC_FISWAIT4HOSTRDYEN_B1; ATA_OUTL(ch->r_mem, SATA_FISC, reg); /* Clear SATA error register. */ ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff); /* Clear any outstanding error interrupts. 
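Bits latched in EDMA_IEC seem to be cleared by writing zeroes to them (the interrupt handler relies on the same behaviour when it writes ~iec), so storing 0 acknowledges every pending cause at once.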
*/ ATA_OUTL(ch->r_mem, EDMA_IEC, 0); /* Unmask all error interrupts */ ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT); return (0); } static int mvs_ch_deinit(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); /* Stop EDMA */ mvs_set_edma_mode(dev, MVS_EDMA_OFF); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, EDMA_IEM, 0); return (0); } static int mvs_ch_suspend(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "mvssusp", hz/100); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } mvs_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int mvs_ch_resume(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); mvs_ch_init(dev); mvs_reset(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } struct mvs_dc_cb_args { bus_addr_t maddr; int error; }; static void mvs_dmainit(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); struct mvs_dc_cb_args dcba; /* EDMA command request area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MVS_WORKRQ_SIZE, 1, MVS_WORKRQ_SIZE, 0, NULL, NULL, &ch->dma.workrq_tag)) goto error; if (bus_dmamem_alloc(ch->dma.workrq_tag, (void **)&ch->dma.workrq, 0, &ch->dma.workrq_map)) goto error; if (bus_dmamap_load(ch->dma.workrq_tag, ch->dma.workrq_map, ch->dma.workrq, MVS_WORKRQ_SIZE, mvs_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.workrq_tag, ch->dma.workrq, ch->dma.workrq_map); goto error; } ch->dma.workrq_bus = dcba.maddr; /* EDMA command response area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 256, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MVS_WORKRP_SIZE, 1, MVS_WORKRP_SIZE, 0, NULL, NULL, &ch->dma.workrp_tag)) goto error; if (bus_dmamem_alloc(ch->dma.workrp_tag, (void **)&ch->dma.workrp, 0, &ch->dma.workrp_map)) goto error; if (bus_dmamap_load(ch->dma.workrp_tag, ch->dma.workrp_map, ch->dma.workrp, MVS_WORKRP_SIZE, mvs_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.workrp_tag, ch->dma.workrp, ch->dma.workrp_map); goto error; } ch->dma.workrp_bus = dcba.maddr; /* Data area. 
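One tag serves the payload buffers of all slots: 2-byte alignment, at most MVS_SG_ENTRIES segments per request, and segments bounded by MVS_EPRD_MAX so that each one fits a single EPRD entry.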
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, MVS_EPRD_MAX, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MVS_SG_ENTRIES * PAGE_SIZE * MVS_MAX_SLOTS, MVS_SG_ENTRIES, MVS_EPRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); mvs_dmafini(dev); } static void mvs_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct mvs_dc_cb_args *dcba = (struct mvs_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void mvs_dmafini(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.workrp_bus) { bus_dmamap_unload(ch->dma.workrp_tag, ch->dma.workrp_map); bus_dmamem_free(ch->dma.workrp_tag, ch->dma.workrp, ch->dma.workrp_map); ch->dma.workrp_bus = 0; ch->dma.workrp = NULL; } if (ch->dma.workrp_tag) { bus_dma_tag_destroy(ch->dma.workrp_tag); ch->dma.workrp_tag = NULL; } if (ch->dma.workrq_bus) { bus_dmamap_unload(ch->dma.workrq_tag, ch->dma.workrq_map); bus_dmamem_free(ch->dma.workrq_tag, ch->dma.workrq, ch->dma.workrq_map); ch->dma.workrq_bus = 0; ch->dma.workrq = NULL; } if (ch->dma.workrq_tag) { bus_dma_tag_destroy(ch->dma.workrq_tag); ch->dma.workrq_tag = NULL; } } static void mvs_slotsalloc(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < MVS_MAX_SLOTS; i++) { struct mvs_slot *slot = &ch->slot[i]; slot->dev = dev; slot->slot = i; slot->state = MVS_SLOT_EMPTY; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void mvs_slotsfree(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < MVS_MAX_SLOTS; i++) { struct mvs_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static void mvs_setup_edma_queues(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); uint64_t work; /* Requests queue. */ work = ch->dma.workrq_bus; ATA_OUTL(ch->r_mem, EDMA_REQQBAH, work >> 32); ATA_OUTL(ch->r_mem, EDMA_REQQIP, work & 0xffffffff); ATA_OUTL(ch->r_mem, EDMA_REQQOP, work & 0xffffffff); bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map, BUS_DMASYNC_PREWRITE); /* Responses queue. */ memset(ch->dma.workrp, 0xff, MVS_WORKRP_SIZE); work = ch->dma.workrp_bus; ATA_OUTL(ch->r_mem, EDMA_RESQBAH, work >> 32); ATA_OUTL(ch->r_mem, EDMA_RESQIP, work & 0xffffffff); ATA_OUTL(ch->r_mem, EDMA_RESQOP, work & 0xffffffff); bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map, BUS_DMASYNC_PREREAD); ch->out_idx = 0; ch->in_idx = 0; } static void mvs_set_edma_mode(device_t dev, enum mvs_edma_mode mode) { struct mvs_channel *ch = device_get_softc(dev); int timeout; uint32_t ecfg, fcfg, hc, ltm, unkn; if (mode == ch->curr_mode) return; /* If we are running, we should stop first. */ if (ch->curr_mode != MVS_EDMA_OFF) { ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EDSEDMA); timeout = 0; while (ATA_INL(ch->r_mem, EDMA_CMD) & EDMA_CMD_EENEDMA) { DELAY(1000); if (timeout++ > 1000) { device_printf(dev, "stopping EDMA engine failed\n"); break; } } } ch->curr_mode = mode; ch->fbs_enabled = 0; ch->fake_busy = 0; /* Report mode to controller. 
Needed for correct CCC operation. */ MVS_EDMA(device_get_parent(dev), dev, mode); /* Configure new mode. */ ecfg = EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2 | EDMA_CFG_EHOSTQUEUECACHEEN; if (ch->pm_present) { ecfg |= EDMA_CFG_EMASKRXPM; if (ch->quirks & MVS_Q_GENIIE) { ecfg |= EDMA_CFG_EEDMAFBS; ch->fbs_enabled = 1; } } if (ch->quirks & MVS_Q_GENI) ecfg |= EDMA_CFG_ERDBSZ; else if (ch->quirks & MVS_Q_GENII) ecfg |= EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN; if (ch->quirks & MVS_Q_CT) ecfg |= EDMA_CFG_ECUTTHROUGHEN; if (mode != MVS_EDMA_OFF) ecfg |= EDMA_CFG_EEARLYCOMPLETIONEN; if (mode == MVS_EDMA_QUEUED) ecfg |= EDMA_CFG_EQUE; else if (mode == MVS_EDMA_NCQ) ecfg |= EDMA_CFG_ESATANATVCMDQUE; ATA_OUTL(ch->r_mem, EDMA_CFG, ecfg); mvs_setup_edma_queues(dev); if (ch->quirks & MVS_Q_GENIIE) { /* Configure FBS-related registers */ fcfg = ATA_INL(ch->r_mem, SATA_FISC); ltm = ATA_INL(ch->r_mem, SATA_LTM); hc = ATA_INL(ch->r_mem, EDMA_HC); if (ch->fbs_enabled) { fcfg |= SATA_FISC_FISDMAACTIVATESYNCRESP; if (mode == MVS_EDMA_NCQ) { fcfg &= ~SATA_FISC_FISWAIT4HOSTRDYEN_B0; hc &= ~EDMA_IE_EDEVERR; } else { fcfg |= SATA_FISC_FISWAIT4HOSTRDYEN_B0; hc |= EDMA_IE_EDEVERR; } ltm |= (1 << 8); } else { fcfg &= ~SATA_FISC_FISDMAACTIVATESYNCRESP; fcfg &= ~SATA_FISC_FISWAIT4HOSTRDYEN_B0; hc |= EDMA_IE_EDEVERR; ltm &= ~(1 << 8); } ATA_OUTL(ch->r_mem, SATA_FISC, fcfg); ATA_OUTL(ch->r_mem, SATA_LTM, ltm); ATA_OUTL(ch->r_mem, EDMA_HC, hc); /* This is some magic, required to handle several DRQs * with basic DMA. */ unkn = ATA_INL(ch->r_mem, EDMA_UNKN_RESD); if (mode == MVS_EDMA_OFF) unkn |= 1; else unkn &= ~1; ATA_OUTL(ch->r_mem, EDMA_UNKN_RESD, unkn); } /* Run EDMA. */ if (mode != MVS_EDMA_OFF) ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EENEDMA); } devclass_t mvs_devclass; devclass_t mvsch_devclass; static device_method_t mvsch_methods[] = { DEVMETHOD(device_probe, mvs_ch_probe), DEVMETHOD(device_attach, mvs_ch_attach), DEVMETHOD(device_detach, mvs_ch_detach), DEVMETHOD(device_suspend, mvs_ch_suspend), DEVMETHOD(device_resume, mvs_ch_resume), { 0, 0 } }; static driver_t mvsch_driver = { "mvsch", mvsch_methods, sizeof(struct mvs_channel) }; DRIVER_MODULE(mvsch, mvs, mvsch_driver, mvsch_devclass, 0, 0); DRIVER_MODULE(mvsch, sata, mvsch_driver, mvsch_devclass, 0, 0); static void mvs_phy_check_events(device_t dev, u_int32_t serr) { struct mvs_channel *ch = device_get_softc(dev); if (ch->pm_level == 0) { u_int32_t status = ATA_INL(ch->r_mem, SATA_SS); union ccb *ccb; if (bootverbose) { if (((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_ONLINE) && ((status & SATA_SS_SPD_MASK) != SATA_SS_SPD_NO_SPEED) && ((status & SATA_SS_IPM_MASK) == SATA_SS_IPM_ACTIVE)) { device_printf(dev, "CONNECT requested\n"); } else device_printf(dev, "DISCONNECT requested\n"); } mvs_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } } static void mvs_notify_events(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); struct cam_path *dpath; uint32_t fis; int d; /* Try to read PMP field from SDB FIS. Present only for Gen-IIe. */ fis = ATA_INL(ch->r_mem, SATA_FISDW0); if ((fis & 0x80ff) == 0x80a1) d = (fis & 0x0f00) >> 8; else d = ch->pm_present ? 
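/* No usable PMP field in the FIS register: attribute the notification
 * to the PMP control port (15) when a port multiplier is present,
 * otherwise to the only device (0). */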
15 : 0; if (bootverbose) device_printf(dev, "SNTF %d\n", d); if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), d, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } static void mvs_ch_intr_locked(void *data) { struct mvs_intr_arg *arg = (struct mvs_intr_arg *)data; device_t dev = (device_t)arg->arg; struct mvs_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); mvs_ch_intr(data); mtx_unlock(&ch->mtx); } static void mvs_ch_pm(void *arg) { device_t dev = (device_t)arg; struct mvs_channel *ch = device_get_softc(dev); uint32_t work; if (ch->numrslots != 0) return; /* If we are idle - request power state transition. */ work = ATA_INL(ch->r_mem, SATA_SC); work &= ~SATA_SC_SPM_MASK; if (ch->pm_level == 4) work |= SATA_SC_SPM_PARTIAL; else work |= SATA_SC_SPM_SLUMBER; ATA_OUTL(ch->r_mem, SATA_SC, work); } static void mvs_ch_pm_wake(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); uint32_t work; int timeout = 0; work = ATA_INL(ch->r_mem, SATA_SS); if (work & SATA_SS_IPM_ACTIVE) return; /* If we are not in active state - request power state transition. */ work = ATA_INL(ch->r_mem, SATA_SC); work &= ~SATA_SC_SPM_MASK; work |= SATA_SC_SPM_ACTIVE; ATA_OUTL(ch->r_mem, SATA_SC, work); /* Wait for transition to happen. */ while ((ATA_INL(ch->r_mem, SATA_SS) & SATA_SS_IPM_ACTIVE) == 0 && timeout++ < 100) { DELAY(100); } } static void mvs_ch_intr(void *data) { struct mvs_intr_arg *arg = (struct mvs_intr_arg *)data; device_t dev = (device_t)arg->arg; struct mvs_channel *ch = device_get_softc(dev); uint32_t iec, serr = 0, fisic = 0; enum mvs_err_type et; int i, ccs, port = -1, selfdis = 0; int edma = (ch->numtslots != 0 || ch->numdslots != 0); /* New item in response queue. */ if ((arg->cause & 2) && edma) mvs_crbq_intr(dev); /* Some error or special event. */ if (arg->cause & 1) { iec = ATA_INL(ch->r_mem, EDMA_IEC); if (iec & EDMA_IE_SERRINT) { serr = ATA_INL(ch->r_mem, SATA_SE); ATA_OUTL(ch->r_mem, SATA_SE, serr); } /* EDMA self-disabled due to error. */ if (iec & EDMA_IE_ESELFDIS) selfdis = 1; /* Transport interrupt. */ if (iec & EDMA_IE_ETRANSINT) { /* For Gen-I this bit means self-disable. */ if (ch->quirks & MVS_Q_GENI) selfdis = 1; /* For Gen-II this bit means SDB-N. */ else if (ch->quirks & MVS_Q_GENII) fisic = SATA_FISC_FISWAIT4HOSTRDYEN_B1; else /* For Gen-IIe - read FIS interrupt cause. */ fisic = ATA_INL(ch->r_mem, SATA_FISIC); } if (selfdis) ch->curr_mode = MVS_EDMA_UNKNOWN; ATA_OUTL(ch->r_mem, EDMA_IEC, ~iec); /* Interface errors or Device error. */ if (iec & (0xfc1e9000 | EDMA_IE_EDEVERR)) { port = -1; if (ch->numpslots != 0) { ccs = 0; } else { if (ch->quirks & MVS_Q_GENIIE) ccs = EDMA_S_EIOID(ATA_INL(ch->r_mem, EDMA_S)); else ccs = EDMA_S_EDEVQUETAG(ATA_INL(ch->r_mem, EDMA_S)); /* Check if error is one-PMP-port-specific, */ if (ch->fbs_enabled) { /* Which ports were active. */ for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } /* If several ports were active and EDMA still enabled - * other ports are probably unaffected and may continue. */ if (port == -2 && !selfdis) { uint16_t p = ATA_INL(ch->r_mem, SATA_SATAITC) >> 16; port = ffs(p) - 1; if (port != (fls(p) - 1)) port = -2; } } } mvs_requeue_frozen(dev); for (i = 0; i < MVS_MAX_SLOTS; i++) { /* XXX: reqests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (iec & EDMA_IE_EDEVERR) { /* Device error. 
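With no tagged commands outstanding only the command in service (ccs) can own the taskfile error, so the others are requeued as innocent; with NCQ the culprit is unknown at this point and completion is deferred to READ LOG EXT recovery via MVS_ERR_NCQ.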
*/ if (port != -2) { if (ch->numtslots == 0) { /* Untagged operation. */ if (i == ccs) et = MVS_ERR_TFE; else et = MVS_ERR_INNOCENT; } else { /* Tagged operation. */ et = MVS_ERR_NCQ; } } else { et = MVS_ERR_TFE; ch->fatalerr = 1; } } else if (iec & 0xfc1e9000) { if (ch->numtslots == 0 && i != ccs && port != -2) et = MVS_ERR_INNOCENT; else et = MVS_ERR_SATA; } else et = MVS_ERR_INVALID; mvs_end_transaction(&ch->slot[i], et); } } /* Process SDB-N. */ if (fisic & SATA_FISC_FISWAIT4HOSTRDYEN_B1) mvs_notify_events(dev); if (fisic) ATA_OUTL(ch->r_mem, SATA_FISIC, ~fisic); /* Process hot-plug. */ if ((iec & (EDMA_IE_EDEVDIS | EDMA_IE_EDEVCON)) || (serr & SATA_SE_PHY_CHANGED)) mvs_phy_check_events(dev, serr); } /* Legacy mode device interrupt. */ if ((arg->cause & 2) && !edma) mvs_legacy_intr(dev, arg->cause & 4); } static uint8_t mvs_getstatus(device_t dev, int clear) { struct mvs_channel *ch = device_get_softc(dev); uint8_t status = ATA_INB(ch->r_mem, clear ? ATA_STATUS : ATA_ALTSTAT); if (ch->fake_busy) { if (status & (ATA_S_BUSY | ATA_S_DRQ | ATA_S_ERROR)) ch->fake_busy = 0; else status |= ATA_S_BUSY; } return (status); } static void mvs_legacy_intr(device_t dev, int poll) { struct mvs_channel *ch = device_get_softc(dev); struct mvs_slot *slot = &ch->slot[0]; /* PIO is always in slot 0. */ union ccb *ccb = slot->ccb; enum mvs_err_type et = MVS_ERR_NONE; int port; u_int length, resid, size; uint8_t buf[2]; uint8_t status, ireason; /* Clear interrupt and get status. */ status = mvs_getstatus(dev, 1); if (slot->state < MVS_SLOT_RUNNING) return; port = ccb->ccb_h.target_id & 0x0f; /* Wait a bit for late !BUSY status update. */ if (status & ATA_S_BUSY) { if (poll) return; DELAY(100); if ((status = mvs_getstatus(dev, 1)) & ATA_S_BUSY) { DELAY(1000); if ((status = mvs_getstatus(dev, 1)) & ATA_S_BUSY) return; } } /* If we got an error, we are done. */ if (status & ATA_S_ERROR) { et = MVS_ERR_TFE; goto end_finished; } if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* ATA PIO */ ccb->ataio.res.status = status; /* Are we moving data? */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { /* If data read command - get them. */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) { device_printf(dev, "timeout waiting for read DRQ\n"); et = MVS_ERR_TIMEOUT; xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); goto end_finished; } ATA_INSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->ataio.data_ptr + ch->donecount), ch->transfersize / 2); } /* Update how far we've gotten. */ ch->donecount += ch->transfersize; /* Do we need more? */ if (ccb->ataio.dxfer_len > ch->donecount) { /* Set this transfer size according to HW capabilities */ ch->transfersize = min(ccb->ataio.dxfer_len - ch->donecount, ch->transfersize); /* If data write command - put them */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) { device_printf(dev, "timeout waiting for write DRQ\n"); et = MVS_ERR_TIMEOUT; xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); goto end_finished; } ATA_OUTSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->ataio.data_ptr + ch->donecount), ch->transfersize / 2); return; } /* If data read command, return & wait for interrupt */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) return; } } } else if (ch->basic_dma) { /* ATAPI DMA */ if (status & ATA_S_DWF) et = MVS_ERR_TFE; else if (ATA_INL(ch->r_mem, DMA_S) & DMA_S_ERR) et = MVS_ERR_TFE; /* Stop basic DMA. 
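The error state was already sampled from DMA_S above; clearing DMA_C halts the bDMA engine before mvs_end_transaction() unloads this slot's data map.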
*/ ATA_OUTL(ch->r_mem, DMA_C, 0); goto end_finished; } else { /* ATAPI PIO */ length = ATA_INB(ch->r_mem,ATA_CYL_LSB) | (ATA_INB(ch->r_mem,ATA_CYL_MSB) << 8); size = min(ch->transfersize, length); ireason = ATA_INB(ch->r_mem,ATA_IREASON); switch ((ireason & (ATA_I_CMD | ATA_I_IN)) | (status & ATA_S_DRQ)) { case ATAPI_P_CMDOUT: device_printf(dev, "ATAPI CMDOUT\n"); /* Return wait for interrupt */ return; case ATAPI_P_WRITE: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { device_printf(dev, "trying to write on read buffer\n"); et = MVS_ERR_TFE; goto end_finished; break; } ATA_OUTSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->csio.data_ptr + ch->donecount), (size + 1) / 2); for (resid = ch->transfersize + (size & 1); resid < length; resid += sizeof(int16_t)) ATA_OUTW(ch->r_mem, ATA_DATA, 0); ch->donecount += length; /* Set next transfer size according to HW capabilities */ ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount, ch->curr[ccb->ccb_h.target_id].bytecount); /* Return wait for interrupt */ return; case ATAPI_P_READ: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { device_printf(dev, "trying to read on write buffer\n"); et = MVS_ERR_TFE; goto end_finished; } if (size >= 2) { ATA_INSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->csio.data_ptr + ch->donecount), size / 2); } if (size & 1) { ATA_INSW_STRM(ch->r_mem, ATA_DATA, (void*)buf, 1); ((uint8_t *)ccb->csio.data_ptr + ch->donecount + (size & ~1))[0] = buf[0]; } for (resid = ch->transfersize + (size & 1); resid < length; resid += sizeof(int16_t)) ATA_INW(ch->r_mem, ATA_DATA); ch->donecount += length; /* Set next transfer size according to HW capabilities */ ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount, ch->curr[ccb->ccb_h.target_id].bytecount); /* Return wait for interrupt */ return; case ATAPI_P_DONEDRQ: device_printf(dev, "WARNING - DONEDRQ non conformant device\n"); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ATA_INSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->csio.data_ptr + ch->donecount), length / 2); ch->donecount += length; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { ATA_OUTSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->csio.data_ptr + ch->donecount), length / 2); ch->donecount += length; } else et = MVS_ERR_TFE; /* FALLTHROUGH */ case ATAPI_P_ABORT: case ATAPI_P_DONE: if (status & (ATA_S_ERROR | ATA_S_DWF)) et = MVS_ERR_TFE; goto end_finished; default: device_printf(dev, "unknown transfer phase" " (status %02x, ireason %02x)\n", status, ireason); et = MVS_ERR_TFE; } } end_finished: mvs_end_transaction(slot, et); } static void mvs_crbq_intr(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); struct mvs_crpb *crpb; union ccb *ccb; int in_idx, fin_idx, cin_idx, slot; uint32_t val; uint16_t flags; val = ATA_INL(ch->r_mem, EDMA_RESQIP); if (val == 0) val = ATA_INL(ch->r_mem, EDMA_RESQIP); in_idx = (val & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT; bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map, BUS_DMASYNC_POSTREAD); fin_idx = cin_idx = ch->in_idx; ch->in_idx = in_idx; while (in_idx != cin_idx) { crpb = (struct mvs_crpb *) (ch->dma.workrp + MVS_CRPB_OFFSET + (MVS_CRPB_SIZE * cin_idx)); slot = le16toh(crpb->id) & MVS_CRPB_TAG_MASK; flags = le16toh(crpb->rspflg); /* * Handle only successful completions here. * Errors will be handled by main intr handler. 
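* The walk runs from the last index seen by software (cin_idx) up to
* the hardware in-pointer (in_idx); each CRPB carries the slot tag
* and the ATA status byte of the completed command.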
*/ #if defined(__i386__) || defined(__amd64__) if (crpb->id == 0xffff && crpb->rspflg == 0xffff) { device_printf(dev, "Unfilled CRPB " "%d (%d->%d) tag %d flags %04x rs %08x\n", cin_idx, fin_idx, in_idx, slot, flags, ch->rslots); } else #endif if (ch->numtslots != 0 || (flags & EDMA_IE_EDEVERR) == 0) { #if defined(__i386__) || defined(__amd64__) crpb->id = 0xffff; crpb->rspflg = 0xffff; #endif if (ch->slot[slot].state >= MVS_SLOT_RUNNING) { ccb = ch->slot[slot].ccb; ccb->ataio.res.status = (flags & MVS_CRPB_ATASTS_MASK) >> MVS_CRPB_ATASTS_SHIFT; mvs_end_transaction(&ch->slot[slot], MVS_ERR_NONE); } else { device_printf(dev, "Unused tag in CRPB " "%d (%d->%d) tag %d flags %04x rs %08x\n", cin_idx, fin_idx, in_idx, slot, flags, ch->rslots); } } else { device_printf(dev, "CRPB with error %d tag %d flags %04x\n", cin_idx, slot, flags); } cin_idx = (cin_idx + 1) & (MVS_MAX_SLOTS - 1); } bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map, BUS_DMASYNC_PREREAD); if (cin_idx == ch->in_idx) { ATA_OUTL(ch->r_mem, EDMA_RESQOP, ch->dma.workrp_bus | (cin_idx << EDMA_RESQP_ERPQP_SHIFT)); } } /* Must be called with channel locked. */ static int mvs_check_collision(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* NCQ DMA */ if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { /* Can't mix NCQ and non-NCQ DMA commands. */ if (ch->numdslots != 0) return (1); /* Can't mix NCQ and PIO commands. */ if (ch->numpslots != 0) return (1); /* If we have no FBS */ if (!ch->fbs_enabled) { /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } /* Non-NCQ DMA */ } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) { /* Can't mix non-NCQ DMA and NCQ commands. */ if (ch->numtslots != 0) return (1); /* Can't mix non-NCQ DMA and PIO commands. */ if (ch->numpslots != 0) return (1); /* PIO */ } else { /* Can't mix PIO with anything. */ if (ch->numrslots != 0) return (1); } if (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } } else { /* ATAPI */ /* ATAPI goes without EDMA, so can't mix it with anything. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. 
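Atomic here means CAM_ATAIO_CONTROL or CAM_ATAIO_NEEDRESULT (see mvs_begin_transaction()); nothing else may be issued until it completes, since its shadow registers must be read back intact.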
*/ if (ch->aslots != 0) return (1); return (0); } static void mvs_tfd_read(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); struct ata_res *res = &ccb->ataio.res; res->status = ATA_INB(ch->r_mem, ATA_ALTSTAT); res->error = ATA_INB(ch->r_mem, ATA_ERROR); res->device = ATA_INB(ch->r_mem, ATA_DRIVE); ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_HOB); res->sector_count_exp = ATA_INB(ch->r_mem, ATA_COUNT); res->lba_low_exp = ATA_INB(ch->r_mem, ATA_SECTOR); res->lba_mid_exp = ATA_INB(ch->r_mem, ATA_CYL_LSB); res->lba_high_exp = ATA_INB(ch->r_mem, ATA_CYL_MSB); ATA_OUTB(ch->r_mem, ATA_CONTROL, 0); res->sector_count = ATA_INB(ch->r_mem, ATA_COUNT); res->lba_low = ATA_INB(ch->r_mem, ATA_SECTOR); res->lba_mid = ATA_INB(ch->r_mem, ATA_CYL_LSB); res->lba_high = ATA_INB(ch->r_mem, ATA_CYL_MSB); } static void mvs_tfd_write(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); struct ata_cmd *cmd = &ccb->ataio.cmd; ATA_OUTB(ch->r_mem, ATA_DRIVE, cmd->device); ATA_OUTB(ch->r_mem, ATA_CONTROL, cmd->control); ATA_OUTB(ch->r_mem, ATA_FEATURE, cmd->features_exp); ATA_OUTB(ch->r_mem, ATA_FEATURE, cmd->features); ATA_OUTB(ch->r_mem, ATA_COUNT, cmd->sector_count_exp); ATA_OUTB(ch->r_mem, ATA_COUNT, cmd->sector_count); ATA_OUTB(ch->r_mem, ATA_SECTOR, cmd->lba_low_exp); ATA_OUTB(ch->r_mem, ATA_SECTOR, cmd->lba_low); ATA_OUTB(ch->r_mem, ATA_CYL_LSB, cmd->lba_mid_exp); ATA_OUTB(ch->r_mem, ATA_CYL_LSB, cmd->lba_mid); ATA_OUTB(ch->r_mem, ATA_CYL_MSB, cmd->lba_high_exp); ATA_OUTB(ch->r_mem, ATA_CYL_MSB, cmd->lba_high); ATA_OUTB(ch->r_mem, ATA_COMMAND, cmd->command); } /* Must be called with channel locked. */ static void mvs_begin_transaction(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); struct mvs_slot *slot; int slotn, tag; if (ch->pm_level > 0) mvs_ch_pm_wake(dev); /* Softreset is a special case. */ if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { mvs_softreset(dev, ccb); return; } /* Choose empty slot. */ slotn = ffs(~ch->oslots) - 1; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { if (ch->quirks & MVS_Q_GENIIE) tag = ffs(~ch->otagspd[ccb->ccb_h.target_id]) - 1; else tag = slotn; } else tag = 0; /* Occupy chosen slot. */ slot = &ch->slot[slotn]; slot->ccb = ccb; slot->tag = tag; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. */ ch->oslots |= (1 << slot->slot); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if (ccb->ccb_h.func_code == XPT_ATA_IO) { if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { ch->otagspd[ccb->ccb_h.target_id] |= (1 << slot->tag); ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; mvs_set_edma_mode(dev, MVS_EDMA_NCQ); } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) { ch->numdslots++; mvs_set_edma_mode(dev, MVS_EDMA_ON); } else { ch->numpslots++; mvs_set_edma_mode(dev, MVS_EDMA_OFF); } if (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)) { ch->aslots |= (1 << slot->slot); } } else { uint8_t *cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes; ch->numpslots++; /* Use ATAPI DMA only for commands without under-/overruns. 
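Basic DMA cannot recover from a transfer shorter or longer than programmed, so it is enabled only for fixed-size block commands (READ/WRITE(6/10/12/16) and READ CD, the opcodes listed below), and only when the target runs a DMA mode on a non-SoC part.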
*/ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA && (ch->quirks & MVS_Q_SOC) == 0 && (cdb[0] == 0x08 || cdb[0] == 0x0a || cdb[0] == 0x28 || cdb[0] == 0x2a || cdb[0] == 0x88 || cdb[0] == 0x8a || cdb[0] == 0xa8 || cdb[0] == 0xaa || cdb[0] == 0xbe)) { ch->basic_dma = 1; } mvs_set_edma_mode(dev, MVS_EDMA_OFF); } if (ch->numpslots == 0 || ch->basic_dma) { slot->state = MVS_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, mvs_dmasetprd, slot, 0); } else mvs_legacy_execute_transaction(slot); } /* Locked by busdma engine. */ static void mvs_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mvs_slot *slot = arg; struct mvs_channel *ch = device_get_softc(slot->dev); struct mvs_eprd *eprd; int i; if (error) { device_printf(slot->dev, "DMA load error\n"); mvs_end_transaction(slot, MVS_ERR_INVALID); return; } KASSERT(nsegs <= MVS_SG_ENTRIES, ("too many DMA segment entries\n")); /* If there is only one segment - no need to use S/G table on Gen-IIe. */ if (nsegs == 1 && ch->basic_dma == 0 && (ch->quirks & MVS_Q_GENIIE)) { slot->dma.addr = segs[0].ds_addr; slot->dma.len = segs[0].ds_len; } else { slot->dma.addr = 0; /* Get a piece of the workspace for this EPRD */ eprd = (struct mvs_eprd *) (ch->dma.workrq + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot)); /* Fill S/G table */ for (i = 0; i < nsegs; i++) { eprd[i].prdbal = htole32(segs[i].ds_addr); eprd[i].bytecount = htole32(segs[i].ds_len & MVS_EPRD_MASK); eprd[i].prdbah = htole32((segs[i].ds_addr >> 16) >> 16); } eprd[i - 1].bytecount |= htole32(MVS_EPRD_EOF); } bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); if (ch->basic_dma) mvs_legacy_execute_transaction(slot); else mvs_execute_transaction(slot); } static void mvs_legacy_execute_transaction(struct mvs_slot *slot) { device_t dev = slot->dev; struct mvs_channel *ch = device_get_softc(dev); bus_addr_t eprd; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int timeout; slot->state = MVS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); ATA_OUTB(ch->r_mem, SATA_SATAICTL, port << SATA_SATAICTL_PMPTX_SHIFT); if (ccb->ccb_h.func_code == XPT_ATA_IO) { mvs_tfd_write(dev, ccb); /* Device reset doesn't interrupt. 
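Completion is instead polled here (up to roughly 10s in 10us steps), after which the normal completion path is entered by calling mvs_legacy_intr() directly.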
*/ if (ccb->ataio.cmd.command == ATA_DEVICE_RESET) { int timeout = 1000000; do { DELAY(10); ccb->ataio.res.status = ATA_INB(ch->r_mem, ATA_STATUS); } while (ccb->ataio.res.status & ATA_S_BUSY && timeout--); mvs_legacy_intr(dev, 1); return; } ch->donecount = 0; if (ccb->ataio.cmd.command == ATA_READ_MUL || ccb->ataio.cmd.command == ATA_READ_MUL48 || ccb->ataio.cmd.command == ATA_WRITE_MUL || ccb->ataio.cmd.command == ATA_WRITE_MUL48) { ch->transfersize = min(ccb->ataio.dxfer_len, ch->curr[port].bytecount); } else ch->transfersize = min(ccb->ataio.dxfer_len, 512); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) ch->fake_busy = 1; /* If data write command - output the data */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) { device_printf(dev, "timeout waiting for write DRQ\n"); xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); mvs_end_transaction(slot, MVS_ERR_TIMEOUT); return; } ATA_OUTSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)(ccb->ataio.data_ptr + ch->donecount), ch->transfersize / 2); } } else { ch->donecount = 0; ch->transfersize = min(ccb->csio.dxfer_len, ch->curr[port].bytecount); /* Write ATA PACKET command. */ if (ch->basic_dma) { ATA_OUTB(ch->r_mem, ATA_FEATURE, ATA_F_DMA); ATA_OUTB(ch->r_mem, ATA_CYL_LSB, 0); ATA_OUTB(ch->r_mem, ATA_CYL_MSB, 0); } else { ATA_OUTB(ch->r_mem, ATA_FEATURE, 0); ATA_OUTB(ch->r_mem, ATA_CYL_LSB, ch->transfersize); ATA_OUTB(ch->r_mem, ATA_CYL_MSB, ch->transfersize >> 8); } ATA_OUTB(ch->r_mem, ATA_COMMAND, ATA_PACKET_CMD); ch->fake_busy = 1; /* Wait for ready to write ATAPI command block */ if (mvs_wait(dev, 0, ATA_S_BUSY, 1000) < 0) { device_printf(dev, "timeout waiting for ATAPI !BUSY\n"); xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); mvs_end_transaction(slot, MVS_ERR_TIMEOUT); return; } timeout = 5000; while (timeout--) { int reason = ATA_INB(ch->r_mem, ATA_IREASON); int status = ATA_INB(ch->r_mem, ATA_STATUS); if (((reason & (ATA_I_CMD | ATA_I_IN)) | (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT) break; DELAY(20); } if (timeout <= 0) { device_printf(dev, "timeout waiting for ATAPI command ready\n"); xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); mvs_end_transaction(slot, MVS_ERR_TIMEOUT); return; } /* Write ATAPI command. */ ATA_OUTSW_STRM(ch->r_mem, ATA_DATA, (uint16_t *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes), ch->curr[port].atapi / 2); DELAY(10); if (ch->basic_dma) { /* Start basic DMA. */ eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot); ATA_OUTL(ch->r_mem, DMA_DTLBA, eprd); ATA_OUTL(ch->r_mem, DMA_DTHBA, (eprd >> 16) >> 16); ATA_OUTL(ch->r_mem, DMA_C, DMA_C_START | (((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ? DMA_C_READ : 0)); } } /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, (timeout_t*)mvs_timeout, slot, 0); } /* Must be called with channel locked. */ static void mvs_execute_transaction(struct mvs_slot *slot) { device_t dev = slot->dev; struct mvs_channel *ch = device_get_softc(dev); bus_addr_t eprd; struct mvs_crqb *crqb; struct mvs_crqb_gen2e *crqb2e; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int i; /* Get address of the prepared EPRD */ eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot); /* Prepare CRQB. Gen IIe uses different CRQB format. 
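Gen-IIe takes a raw image of the shadow registers in cmd[], while older parts take (value, register) byte pairs, apparently with an end-of-list flag in bit 7 of the final register number (0x97 below).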
*/ if (ch->quirks & MVS_Q_GENIIE) { crqb2e = (struct mvs_crqb_gen2e *) (ch->dma.workrq + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx)); crqb2e->ctrlflg = htole32( ((ccb->ccb_h.flags & CAM_DIR_IN) ? MVS_CRQB2E_READ : 0) | (slot->tag << MVS_CRQB2E_DTAG_SHIFT) | (port << MVS_CRQB2E_PMP_SHIFT) | (slot->slot << MVS_CRQB2E_HTAG_SHIFT)); /* If there is only one segment - no need to use S/G table. */ if (slot->dma.addr != 0) { eprd = slot->dma.addr; crqb2e->ctrlflg |= htole32(MVS_CRQB2E_CPRD); crqb2e->drbc = slot->dma.len; } crqb2e->cprdbl = htole32(eprd); crqb2e->cprdbh = htole32((eprd >> 16) >> 16); crqb2e->cmd[0] = 0; crqb2e->cmd[1] = 0; crqb2e->cmd[2] = ccb->ataio.cmd.command; crqb2e->cmd[3] = ccb->ataio.cmd.features; crqb2e->cmd[4] = ccb->ataio.cmd.lba_low; crqb2e->cmd[5] = ccb->ataio.cmd.lba_mid; crqb2e->cmd[6] = ccb->ataio.cmd.lba_high; crqb2e->cmd[7] = ccb->ataio.cmd.device; crqb2e->cmd[8] = ccb->ataio.cmd.lba_low_exp; crqb2e->cmd[9] = ccb->ataio.cmd.lba_mid_exp; crqb2e->cmd[10] = ccb->ataio.cmd.lba_high_exp; crqb2e->cmd[11] = ccb->ataio.cmd.features_exp; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { crqb2e->cmd[12] = slot->tag << 3; crqb2e->cmd[13] = 0; } else { crqb2e->cmd[12] = ccb->ataio.cmd.sector_count; crqb2e->cmd[13] = ccb->ataio.cmd.sector_count_exp; } crqb2e->cmd[14] = 0; crqb2e->cmd[15] = 0; } else { crqb = (struct mvs_crqb *) (ch->dma.workrq + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx)); crqb->cprdbl = htole32(eprd); crqb->cprdbh = htole32((eprd >> 16) >> 16); crqb->ctrlflg = htole16( ((ccb->ccb_h.flags & CAM_DIR_IN) ? MVS_CRQB_READ : 0) | (slot->slot << MVS_CRQB_TAG_SHIFT) | (port << MVS_CRQB_PMP_SHIFT)); i = 0; /* * Controller can handle only 11 of 12 ATA registers, * so we have to choose which one to skip. */ if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { crqb->cmd[i++] = ccb->ataio.cmd.features_exp; crqb->cmd[i++] = 0x11; } crqb->cmd[i++] = ccb->ataio.cmd.features; crqb->cmd[i++] = 0x11; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { crqb->cmd[i++] = slot->tag << 3; crqb->cmd[i++] = 0x12; } else { crqb->cmd[i++] = ccb->ataio.cmd.sector_count_exp; crqb->cmd[i++] = 0x12; crqb->cmd[i++] = ccb->ataio.cmd.sector_count; crqb->cmd[i++] = 0x12; } crqb->cmd[i++] = ccb->ataio.cmd.lba_low_exp; crqb->cmd[i++] = 0x13; crqb->cmd[i++] = ccb->ataio.cmd.lba_low; crqb->cmd[i++] = 0x13; crqb->cmd[i++] = ccb->ataio.cmd.lba_mid_exp; crqb->cmd[i++] = 0x14; crqb->cmd[i++] = ccb->ataio.cmd.lba_mid; crqb->cmd[i++] = 0x14; crqb->cmd[i++] = ccb->ataio.cmd.lba_high_exp; crqb->cmd[i++] = 0x15; crqb->cmd[i++] = ccb->ataio.cmd.lba_high; crqb->cmd[i++] = 0x15; crqb->cmd[i++] = ccb->ataio.cmd.device; crqb->cmd[i++] = 0x16; crqb->cmd[i++] = ccb->ataio.cmd.command; crqb->cmd[i++] = 0x97; } bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map, BUS_DMASYNC_PREREAD); slot->state = MVS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); /* Issue command to the controller. */ ch->out_idx = (ch->out_idx + 1) & (MVS_MAX_SLOTS - 1); ATA_OUTL(ch->r_mem, EDMA_REQQIP, ch->dma.workrq_bus + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx)); /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, (timeout_t*)mvs_timeout, slot, 0); return; } /* Must be called with channel locked. */ static void mvs_process_timeout(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. 
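mvs_timeout() only calls here once every still-running slot has also timed out, so all of them can be finished with MVS_ERR_TIMEOUT in one pass.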
*/ for (i = 0; i < MVS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < MVS_SLOT_RUNNING) continue; mvs_end_transaction(&ch->slot[i], MVS_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void mvs_rearm_timeout(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < MVS_MAX_SLOTS; i++) { struct mvs_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < MVS_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, (timeout_t*)mvs_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void mvs_timeout(struct mvs_slot *slot) { device_t dev = slot->dev; struct mvs_channel *ch = device_get_softc(dev); /* Check for stale timeout. */ if (slot->state < MVS_SLOT_RUNNING) return; device_printf(dev, "Timeout on slot %d\n", slot->slot); device_printf(dev, "iec %08x sstat %08x serr %08x edma_s %08x " "dma_c %08x dma_s %08x rs %08x status %02x\n", ATA_INL(ch->r_mem, EDMA_IEC), ATA_INL(ch->r_mem, SATA_SS), ATA_INL(ch->r_mem, SATA_SE), ATA_INL(ch->r_mem, EDMA_S), ATA_INL(ch->r_mem, DMA_C), ATA_INL(ch->r_mem, DMA_S), ch->rslots, ATA_INB(ch->r_mem, ATA_ALTSTAT)); /* Handle frozen command. */ mvs_requeue_frozen(dev); /* We wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) mvs_process_timeout(dev); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } /* Must be called with channel locked. */ static void mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et) { device_t dev = slot->dev; struct mvs_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; int lastto; bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map, BUS_DMASYNC_POSTWRITE); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == MVS_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { mvs_tfd_read(dev, ccb); } else bzero(res, sizeof(*res)); } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->basic_dma == 0) ccb->csio.resid = ccb->csio.dxfer_len - ch->donecount; } if (ch->numpslots == 0 || ch->basic_dma) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } } if (et != MVS_ERR_NONE) ch->eslots |= (1 << slot->slot); /* In case of error, freeze device for proper recovery. */ if ((et != MVS_ERR_NONE) && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* Set proper result status. 
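Each internal error class maps onto a CAM status below; the fatal ones also set ch->fatalerr so that the channel is reset once the last running command drains.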
*/ ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case MVS_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case MVS_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case MVS_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case MVS_ERR_TFE: case MVS_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case MVS_ERR_SATA: ch->fatalerr = 1; if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case MVS_ERR_TIMEOUT: if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = MVS_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; ch->numrslotspd[ccb->ccb_h.target_id]--; if (ccb->ccb_h.func_code == XPT_ATA_IO) { if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { ch->otagspd[ccb->ccb_h.target_id] &= ~(1 << slot->tag); ch->numtslots--; ch->numtslotspd[ccb->ccb_h.target_id]--; } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) { ch->numdslots--; } else { ch->numpslots--; } } else { ch->numpslots--; ch->basic_dma = 0; } /* Cancel timeout state if request completed normally. */ if (et != MVS_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { mvs_process_read_log(dev, ccb); /* If it was our REQUEST SENSE command - process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { mvs_process_request_sense(dev, ccb); /* If it was NCQ or ATAPI command error, put result on hold. */ } else if (et == MVS_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->holdtag[slot->slot] = slot->tag; ch->numhslots++; } else xpt_done(ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { mvs_reset(dev); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) { mvs_set_edma_mode(dev, MVS_EDMA_OFF); ch->eslots = 0; } /* if there commands on hold, we can do READ LOG. */ if (!ch->recoverycmd && ch->numhslots) mvs_issue_recovery(dev); } /* If all the rest of commands are in timeout - give them chance. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != MVS_ERR_TIMEOUT) mvs_rearm_timeout(dev); /* Unfreeze frozen command. */ if (ch->frozen && !mvs_check_collision(dev, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; mvs_begin_transaction(dev, fccb); xpt_release_simq(ch->sim, TRUE); } /* Start PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3 && (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? 
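/* pm_level 4 requests PARTIAL, which is cheap to resume, hence the
 * ~1ms rearm; deeper levels use SLUMBER and wait ~125ms (see
 * mvs_ch_pm()). */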
hz / 1000 : hz / 8); } } static void mvs_issue_recovery(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < MVS_MAX_SLOTS; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < MVS_MAX_SLOTS; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } mvs_reset(dev); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_MVS, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery. 
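Only one recovery command may be outstanding, and no new I/O must slip in before the error details are fetched; the queue is released again in mvs_process_read_log() or mvs_process_request_sense().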
*/ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); mvs_begin_transaction(dev, ccb); } static void mvs_process_read_log(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < MVS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; if ((data[0] & 0x1F) == ch->holdtag[i]) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < MVS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_MVS); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void mvs_process_request_sense(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static int mvs_wait(device_t dev, u_int s, u_int c, int t) { int timeout = 0; uint8_t st; while (((st = mvs_getstatus(dev, 0)) & (s | c)) != s) { if (timeout >= t) { if (t != 0) device_printf(dev, "Wait status %02x\n", st); return (-1); } DELAY(1000); timeout++; } return (timeout); } static void mvs_requeue_frozen(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); union ccb *fccb = ch->frozen; if (fccb) { ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } } static void mvs_reset_to(void *arg) { device_t dev = arg; struct mvs_channel *ch = device_get_softc(dev); int t; if (ch->resetting == 0) return; ch->resetting--; if ((t = mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, 0)) >= 0) { if (bootverbose) { device_printf(dev, "MVS reset: device ready after %dms\n", (310 - ch->resetting) * 100); } ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { device_printf(dev, "MVS reset: device not ready after 31000ms\n"); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void mvs_errata(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); uint32_t val; if (ch->quirks & MVS_Q_SOC65) { val = ATA_INL(ch->r_mem, SATA_PHYM3); val &= ~(0x3 << 27); /* SELMUPF = 1 */ val |= (0x1 << 27); val &= ~(0x3 << 29); /* SELMUPI = 1 */ val |= (0x1 << 
29); ATA_OUTL(ch->r_mem, SATA_PHYM3, val); val = ATA_INL(ch->r_mem, SATA_PHYM4); val &= ~0x1; /* SATU_OD8 = 0 */ val |= (0x1 << 16); /* reserved bit 16 = 1 */ ATA_OUTL(ch->r_mem, SATA_PHYM4, val); val = ATA_INL(ch->r_mem, SATA_PHYM9_GEN2); val &= ~0xf; /* TXAMP[3:0] = 8 */ val |= 0x8; val &= ~(0x1 << 14); /* TXAMP[4] = 0 */ ATA_OUTL(ch->r_mem, SATA_PHYM9_GEN2, val); val = ATA_INL(ch->r_mem, SATA_PHYM9_GEN1); val &= ~0xf; /* TXAMP[3:0] = 8 */ val |= 0x8; val &= ~(0x1 << 14); /* TXAMP[4] = 0 */ ATA_OUTL(ch->r_mem, SATA_PHYM9_GEN1, val); } } static void mvs_reset(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(dev, "MVS reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue freezed command. */ mvs_requeue_frozen(dev); /* Kill the engine and requeue all running commands. */ mvs_set_edma_mode(dev, MVS_EDMA_OFF); ATA_OUTL(ch->r_mem, DMA_C, 0); for (i = 0; i < MVS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < MVS_SLOT_RUNNING) continue; /* XXX; Commands in loading state. */ mvs_end_transaction(&ch->slot[i], MVS_ERR_INNOCENT); } for (i = 0; i < MVS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->fatalerr = 0; ch->fake_busy = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); ATA_OUTL(ch->r_mem, EDMA_IEM, 0); ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EATARST); DELAY(25); ATA_OUTL(ch->r_mem, EDMA_CMD, 0); mvs_errata(dev); /* Reset and reconnect PHY, */ if (!mvs_sata_phy_reset(dev)) { if (bootverbose) device_printf(dev, "MVS reset: device not found\n"); ch->devices = 0; ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff); ATA_OUTL(ch->r_mem, EDMA_IEC, 0); ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(dev, "MVS reset: device found\n"); /* Wait for clearing busy status. */ if ((i = mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, dumping ? 31000 : 0)) < 0) { if (dumping) { device_printf(dev, "MVS reset: device not ready after 31000ms\n"); } else ch->resetting = 310; } else if (bootverbose) device_printf(dev, "MVS reset: device ready after %dms\n", i); ch->devices = 1; ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff); ATA_OUTL(ch->r_mem, EDMA_IEC, 0); ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, mvs_reset_to, dev); else xpt_release_simq(ch->sim, TRUE); } static void mvs_softreset(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); int port = ccb->ccb_h.target_id & 0x0f; int i, stuck; uint8_t status; mvs_set_edma_mode(dev, MVS_EDMA_OFF); ATA_OUTB(ch->r_mem, SATA_SATAICTL, port << SATA_SATAICTL_PMPTX_SHIFT); ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_RESET); DELAY(10000); ATA_OUTB(ch->r_mem, ATA_CONTROL, 0); ccb->ccb_h.status &= ~CAM_STATUS_MASK; /* Wait for clearing busy status. 
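The wait is bounded by the CCB timeout; a device that stays BUSY, or that ends up with DRQ still set, is treated as stuck, which triggers the dummy soft-reset to the PMP control port below.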
*/ if ((i = mvs_wait(dev, 0, ATA_S_BUSY, ccb->ccb_h.timeout)) < 0) { ccb->ccb_h.status |= CAM_CMD_TIMEOUT; stuck = 1; } else { status = mvs_getstatus(dev, 0); if (status & ATA_S_ERROR) ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; else ccb->ccb_h.status |= CAM_REQ_CMP; if (status & ATA_S_DRQ) stuck = 1; else stuck = 0; } mvs_tfd_read(dev, ccb); /* * XXX: If some device on PMP failed to soft-reset, * try to recover by sending dummy soft-reset to PMP. */ if (stuck && ch->pm_present && port != 15) { ATA_OUTB(ch->r_mem, SATA_SATAICTL, 15 << SATA_SATAICTL_PMPTX_SHIFT); ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_RESET); DELAY(10000); ATA_OUTB(ch->r_mem, ATA_CONTROL, 0); mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, ccb->ccb_h.timeout); } xpt_done(ccb); } static int mvs_sata_connect(struct mvs_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, SATA_SS); if ((status & SATA_SS_DET_MASK) != SATA_SS_DET_NO_DEVICE) found = 1; if (((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_ONLINE) && ((status & SATA_SS_SPD_MASK) != SATA_SS_SPD_NO_SPEED) && ((status & SATA_SS_IPM_MASK) == SATA_SS_IPM_ACTIVE)) break; if ((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff); return (1); } static int mvs_sata_phy_reset(device_t dev) { struct mvs_channel *ch = device_get_softc(dev); int sata_rev; uint32_t val; sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = SATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = SATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = SATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, SATA_SC, SATA_SC_DET_RESET | val | SATA_SC_IPM_DIS_PARTIAL | SATA_SC_IPM_DIS_SLUMBER); DELAY(1000); ATA_OUTL(ch->r_mem, SATA_SC, SATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (SATA_SC_IPM_DIS_PARTIAL | SATA_SC_IPM_DIS_SLUMBER))); if (!mvs_sata_connect(ch)) { if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, SATA_SC, SATA_SC_DET_DISABLE); return (0); } return (1); } static int mvs_check_ids(device_t dev, union ccb *ccb) { struct mvs_channel *ch = device_get_softc(dev); if (ccb->ccb_h.target_id > ((ch->quirks & MVS_Q_GENI) ? 0 : 15)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } /* * It's a programming error to see AUXILIARY register requests. 
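* This driver has no way to convey the AUX field of a host-to-device
* FIS, so such a CCB presumably indicates a bug in the caller; hence
* the assertion rather than an error return.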
*/ KASSERT(ccb->ccb_h.func_code != XPT_ATA_IO || ((ccb->ataio.ata_flags & ATA_FLAG_AUX) == 0), ("AUX register unsupported")); return (0); } static void mvsaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct mvs_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mvsaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct mvs_channel *)cam_sim_softc(sim); dev = ch->dev; switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (mvs_check_ids(dev, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (mvs_check_collision(dev, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. */ xpt_freeze_simq(ch->sim, 1); return; } mvs_begin_transaction(dev, ccb); return; - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct mvs_device *d; if (mvs_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) { d->bytecount = min((ch->quirks & MVS_Q_GENIIE) ? 
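/* Clamp the requested DRQ size to the generation limit, matching the
 * defaults chosen at attach time. */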
8192 : 2048, cts->xport_specific.sata.bytecount); } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(MVS_MAX_SLOTS, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct mvs_device *d; uint32_t status; if (mvs_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, SATA_SS) & SATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; // if (ch->pm_level) // cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; if (cts->type == CTS_TYPE_CURRENT_SETTINGS/* && (ch->quirks & MVS_Q_GENIIE) == 0*/) cts->xport_specific.sata.caps &= ~CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ mvs_reset(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (!(ch->quirks & MVS_Q_GENI)) { cpi->hba_inquiry |= PI_SATAPM; /* Gen-II is extremely slow with NCQ on PMP. 
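For reference, the revision reported to CAM in XPT_GET_TRAN_SETTINGS above is just the SPD field of SStatus shifted down. A sketch of that decode, using the standard SATA SStatus layout rather than driver symbols:

#include <stdint.h>

/* SStatus bits 7:4 (SPD) hold the negotiated speed: 1 = Gen1 (1.5Gb/s),
 * 2 = Gen2 (3Gb/s), 3 = Gen3 (6Gb/s); 0 means nothing was negotiated. */
static int
sata_revision_from_sstatus(uint32_t sstatus)
{
	return ((int)((sstatus & 0x0f0) >> 4));
}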
*/ if ((ch->quirks & MVS_Q_GENIIE) || ch->pm_present == 0) cpi->hba_inquiry |= PI_TAG_ABLE; } cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN; cpi->hba_eng_cnt = 0; if (!(ch->quirks & MVS_Q_GENI)) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Marvell", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = MAXPHYS; if ((ch->quirks & MVS_Q_SOC) == 0) { cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); } cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void mvspoll(struct cam_sim *sim) { struct mvs_channel *ch = (struct mvs_channel *)cam_sim_softc(sim); struct mvs_intr_arg arg; arg.arg = ch->dev; arg.cause = 2 | 4; /* XXX */ mvs_ch_intr(&arg); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; mvs_reset_to(ch->dev); } } Index: stable/11/sys/dev/ncr/ncr.c =================================================================== --- stable/11/sys/dev/ncr/ncr.c (revision 314380) +++ stable/11/sys/dev/ncr/ncr.c (revision 314381) @@ -1,7119 +1,7115 @@ /************************************************************************** ** ** ** Device driver for the NCR 53C8XX PCI-SCSI-Controller Family. ** **------------------------------------------------------------------------- ** ** Written for 386bsd and FreeBSD by ** Wolfgang Stanglmeier ** Stefan Esser ** **------------------------------------------------------------------------- */ /*- ** Copyright (c) 1994 Wolfgang Stanglmeier. All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
** *************************************************************************** */ #include __FBSDID("$FreeBSD$"); #define NCR_GETCC_WITHMSG #if defined (__FreeBSD__) && defined(_KERNEL) #include "opt_ncr.h" #endif /*========================================================== ** ** Configuration and Debugging ** ** May be overwritten in ** **========================================================== */ /* ** SCSI address of this device. ** The boot routines should have set it. ** If not, use this. */ #ifndef SCSI_NCR_MYADDR #define SCSI_NCR_MYADDR (7) #endif /* SCSI_NCR_MYADDR */ /* ** The default synchronous period factor ** (0=asynchronous) ** If maximum synchronous frequency is defined, use it instead. */ #ifndef SCSI_NCR_MAX_SYNC #ifndef SCSI_NCR_DFLT_SYNC #define SCSI_NCR_DFLT_SYNC (12) #endif /* SCSI_NCR_DFLT_SYNC */ #else #if SCSI_NCR_MAX_SYNC == 0 #define SCSI_NCR_DFLT_SYNC 0 #else #define SCSI_NCR_DFLT_SYNC (250000 / SCSI_NCR_MAX_SYNC) #endif #endif /* ** The minimal asynchronous pre-scaler period (ns) ** Shall be 40. */ #ifndef SCSI_NCR_MIN_ASYNC #define SCSI_NCR_MIN_ASYNC (40) #endif /* SCSI_NCR_MIN_ASYNC */ /* ** The maximal bus width (in log2 bytes) ** (0=8 bit, 1=16 bit) */ #ifndef SCSI_NCR_MAX_WIDE #define SCSI_NCR_MAX_WIDE (1) #endif /* SCSI_NCR_MAX_WIDE */ /*========================================================== ** ** Configuration and Debugging ** **========================================================== */ /* ** Number of targets supported by the driver. ** n permits target numbers 0..n-1. ** Default is 7, meaning targets #0..#6. ** #7 .. is myself. */ #define MAX_TARGET (16) /* ** Number of logical units supported by the driver. ** n enables logical unit numbers 0..n-1. ** The common SCSI devices require only ** one lun, so take 1 as the default. */ #ifndef MAX_LUN #define MAX_LUN (8) #endif /* MAX_LUN */ /* ** The maximum number of jobs scheduled for starting. ** There should be one slot per target, and one slot ** for each tag of each target in use. */ #define MAX_START (256) /* ** The maximum number of segments a transfer is split into. */ #define MAX_SCATTER (33) /* ** The maximum transfer length (should be >= 64k). ** MUST NOT be greater than (MAX_SCATTER-1) * PAGE_SIZE. */ #define MAX_SIZE ((MAX_SCATTER-1) * (long) PAGE_SIZE) /* ** other */ #define NCR_SNOOP_TIMEOUT (1000000) /*========================================================== ** ** Include files ** **========================================================== */ #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include /*========================================================== ** ** Debugging tags ** **========================================================== */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_FREEZE (0x0800) #define DEBUG_RESTART (0x1000) /* ** Enable/Disable debug messages. ** Can be changed at runtime too.
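A note on the arithmetic above: 250000 / f[kHz] is the transfer period expressed in 4ns units, which is exactly the period factor carried by an SDTR message. A worked sketch under that reading (sync_factor_from_khz is an illustrative name):

/* 250000 / f[kHz] == 10^9 / f[Hz] / 4, i.e. the period in 4ns steps. */
static int
sync_factor_from_khz(long khz)
{
	return (khz > 0 ? (int)(250000 / khz) : 0);	/* 0 = asynchronous */
}
/* Example: 10000 kHz (fast-10 SCSI) -> factor 25 -> 100ns period. */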
*/ #ifdef SCSI_NCR_DEBUG #define DEBUG_FLAGS ncr_debug #else /* SCSI_NCR_DEBUG */ #define SCSI_NCR_DEBUG 0 #define DEBUG_FLAGS 0 #endif /* SCSI_NCR_DEBUG */ /*========================================================== ** ** assert () ** **========================================================== ** ** modified copy from 386bsd:/usr/include/sys/assert.h ** **---------------------------------------------------------- */ #ifdef DIAGNOSTIC #define assert(expression) { \ KASSERT(expression, ("%s", #expression)); \ } #else #define assert(expression) { \ if (!(expression)) { \ (void)printf("assertion \"%s\" failed: " \ "file \"%s\", line %d\n", \ #expression, __FILE__, __LINE__); \ } \ } #endif /*========================================================== ** ** Access to the controller chip. ** **========================================================== */ #define INB(r) bus_read_1(np->reg_res, offsetof(struct ncr_reg, r)) #define INW(r) bus_read_2(np->reg_res, offsetof(struct ncr_reg, r)) #define INL(r) bus_read_4(np->reg_res, offsetof(struct ncr_reg, r)) #define OUTB(r, val) bus_write_1(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTW(r, val) bus_write_2(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL(r, val) bus_write_4(np->reg_res, offsetof(struct ncr_reg, r), val) #define OUTL_OFF(o, val) bus_write_4(np->reg_res, o, val) #define INB_OFF(o) bus_read_1(np->reg_res, o) #define INW_OFF(o) bus_read_2(np->reg_res, o) #define INL_OFF(o) bus_read_4(np->reg_res, o) #define READSCRIPT_OFF(base, off) \ (base ? *((volatile u_int32_t *)((volatile char *)base + (off))) : \ bus_read_4(np->sram_res, off)) #define WRITESCRIPT_OFF(base, off, val) \ do { \ if (base) \ *((volatile u_int32_t *) \ ((volatile char *)base + (off))) = (val); \ else \ bus_write_4(np->sram_res, off, val); \ } while (0) #define READSCRIPT(r) \ READSCRIPT_OFF(np->script, offsetof(struct script, r)) #define WRITESCRIPT(r, val) \ WRITESCRIPT_OFF(np->script, offsetof(struct script, r), val) /* ** Set bit field ON, OFF */ #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /*========================================================== ** ** Command control block states. 
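The INB/OUTB family above resolves register offsets at compile time via offsetof into the chip's register layout. The same pattern in a generic memory-mapped sketch (struct regs and REG_RD8 are illustrative names, not driver symbols):

#include <stddef.h>
#include <stdint.h>

struct regs {
	uint8_t	scntl0, scntl1, scntl2, scntl3;	/* mirrors hardware layout */
};

/* Read one byte at the offset of field `r' within the register window. */
#define REG_RD8(base, r) \
	(((volatile uint8_t *)(base))[offsetof(struct regs, r)])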
** **========================================================== */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_COMPLETE (4) #define HS_SEL_TIMEOUT (5) /* Selection timeout */ #define HS_RESET (6) /* SCSI reset */ #define HS_ABORTED (7) /* Transfer aborted */ #define HS_TIMEOUT (8) /* Software timeout */ #define HS_FAIL (9) /* SCSI or PCI bus errors */ #define HS_UNEXPECTED (10) /* Unexpected disconnect */ #define HS_STALL (11) /* QUEUE FULL or BUSY */ #define HS_DONEMASK (0xfc) /*========================================================== ** ** Software Interrupt Codes ** **========================================================== */ #define SIR_SENSE_RESTART (1) #define SIR_SENSE_FAILED (2) #define SIR_STALL_RESTART (3) #define SIR_STALL_QUEUE (4) #define SIR_NEGO_SYNC (5) #define SIR_NEGO_WIDE (6) #define SIR_NEGO_FAILED (7) #define SIR_NEGO_PROTO (8) #define SIR_REJECT_RECEIVED (9) #define SIR_REJECT_SENT (10) #define SIR_IGN_RESIDUE (11) #define SIR_MISSING_SAVE (12) #define SIR_MAX (12) /*========================================================== ** ** Extended error codes. ** xerr_status field of struct nccb. ** **========================================================== */ #define XE_OK (0) #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (2) /* illegal phase (4/5) */ /*========================================================== ** ** Negotiation status. ** nego_status field of struct nccb. ** **========================================================== */ #define NS_SYNC (1) #define NS_WIDE (2) /*========================================================== ** ** XXX These are no longer used. Remove once the ** script is updated. ** "Special features" of targets. ** quirks field of struct tcb. ** actualquirks field of struct nccb. ** **========================================================== */ #define QUIRK_AUTOSAVE (0x01) #define QUIRK_NOMSG (0x02) #define QUIRK_NOSYNC (0x10) #define QUIRK_NOWIDE16 (0x20) #define QUIRK_NOTAGS (0x40) #define QUIRK_UPDATE (0x80) /*========================================================== ** ** Misc. ** **========================================================== */ #define CCB_MAGIC (0xf2691ad2) #define MAX_TAGS (32) /* hard limit */ /*========================================================== ** ** OS dependencies. ** **========================================================== */ #define PRINT_ADDR(ccb) xpt_print_path((ccb)->ccb_h.path) /*========================================================== ** ** Declaration of structs. ** **========================================================== */ struct tcb; struct lcb; struct nccb; struct ncb; struct script; typedef struct ncb * ncb_p; typedef struct tcb * tcb_p; typedef struct lcb * lcb_p; typedef struct nccb * nccb_p; struct link { ncrcmd l_cmd; ncrcmd l_paddr; }; struct usrcmd { u_long target; u_long lun; u_long data; u_long cmd; }; #define UC_SETSYNC 10 #define UC_SETTAGS 11 #define UC_SETDEBUG 12 #define UC_SETORDER 13 #define UC_SETWIDE 14 #define UC_SETFLAG 15 #define UF_TRACE (0x01) /*--------------------------------------- ** ** Timestamps for profiling ** **--------------------------------------- */ /* Type of the kernel variable `ticks'. XXX should be declared with the var. 
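Note how HS_DONEMASK works: the in-flight states (HS_IDLE through HS_DISCONNECT, values 0..3) set no bits in 0xfc, while every final state (HS_COMPLETE and up) does. The resulting test, restated as a sketch:

#include <stdint.h>

#define HS_DONEMASK	(0xfc)

/* True once host_status holds any final (completed/failed) state. */
static int
job_finished(uint8_t host_status)
{
	return ((host_status & HS_DONEMASK) != 0);
}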
*/ typedef int ticks_t; struct tstamp { ticks_t start; ticks_t end; ticks_t select; ticks_t command; ticks_t data; ticks_t status; ticks_t disconnect; }; /* ** profiling data (per device) */ struct profile { u_long num_trans; u_long num_bytes; u_long num_disc; u_long num_break; u_long num_int; u_long num_fly; u_long ms_setup; u_long ms_data; u_long ms_disc; u_long ms_post; }; /*========================================================== ** ** Declaration of structs: target control block ** **========================================================== */ #define NCR_TRANS_CUR 0x01 /* Modify current neogtiation status */ #define NCR_TRANS_ACTIVE 0x03 /* Assume this is the active target */ #define NCR_TRANS_GOAL 0x04 /* Modify negotiation goal */ #define NCR_TRANS_USER 0x08 /* Modify user negotiation settings */ struct ncr_transinfo { u_int8_t width; u_int8_t period; u_int8_t offset; }; struct ncr_target_tinfo { /* Hardware version of our sync settings */ u_int8_t disc_tag; #define NCR_CUR_DISCENB 0x01 #define NCR_CUR_TAGENB 0x02 #define NCR_USR_DISCENB 0x04 #define NCR_USR_TAGENB 0x08 u_int8_t sval; struct ncr_transinfo current; struct ncr_transinfo goal; struct ncr_transinfo user; /* Hardware version of our wide settings */ u_int8_t wval; }; struct tcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the encoded target number ** with bit 7 set. ** if it's not this target, jump to the next. ** ** JUMP IF (SFBR != #target#) ** @(next tcb) */ struct link jump_tcb; /* ** load the actual values for the sxfer and the scntl3 ** register (sync/wide mode). ** ** SCR_COPY (1); ** @(sval field of this tcb) ** @(sxfer register) ** SCR_COPY (1); ** @(wval field of this tcb) ** @(scntl3 register) */ ncrcmd getscr[6]; /* ** if next message is "identify" ** then load the message to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_lun; /* ** now look for the right lun. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_lcb; /* ** pointer to interrupted getcc nccb */ nccb_p hold_cp; /* ** pointer to nccb used for negotiating. ** Avoid to start a nego for all queued commands ** when tagged command queuing is enabled. */ nccb_p nego_cp; /* ** statistical data */ u_long transfers; u_long bytes; /* ** user settable limits for sync transfer ** and tagged commands. */ struct ncr_target_tinfo tinfo; /* ** the lcb's of this tcb */ lcb_p lp[MAX_LUN]; }; /*========================================================== ** ** Declaration of structs: lun control block ** **========================================================== */ struct lcb { /* ** during reselection the ncr jumps to this point ** with SFBR set to the "Identify" message. ** if it's not this lun, jump to the next. ** ** JUMP IF (SFBR != #lun#) ** @(next lcb of this target) */ struct link jump_lcb; /* ** if next message is "simple tag", ** then load the tag to SFBR, ** else load 0 to SFBR. ** ** CALL ** */ struct link call_tag; /* ** now look for the right nccb. ** ** JUMP ** @(first nccb of this lun) */ struct link jump_nccb; /* ** start of the nccb chain */ nccb_p next_nccb; /* ** Control of tagged queueing */ u_char reqnccbs; u_char reqlink; u_char actlink; u_char usetags; u_char lasttag; }; /*========================================================== ** ** Declaration of structs: COMMAND control block ** **========================================================== ** ** This substructure is copied from the nccb to a ** global address after selection (or reselection) ** and copied back before disconnect. 
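The jump_tcb/jump_lcb links described above make the script walk what is, in effect, a linked list keyed on the reselecting target id (and later the IDENTIFY lun). The tcb lookup, restated as a C sketch with illustrative names:

#include <stddef.h>
#include <stdint.h>

struct tcb_sketch {
	struct tcb_sketch *next;	/* the "JUMP @(next tcb)" link */
	uint8_t		id;		/* compared against SFBR */
};

static struct tcb_sketch *
find_reselecting_target(struct tcb_sketch *head, uint8_t sfbr)
{
	struct tcb_sketch *t;

	for (t = head; t != NULL; t = t->next)
		if (t->id == sfbr)
			return (t);
	return (NULL);			/* unknown target: bad reselection */
}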
** ** These fields are accessible to the script processor. ** **---------------------------------------------------------- */ struct head { /* ** Execution of a nccb starts at this point. ** It's a jump to the "SELECT" label ** of the script. ** ** After successful selection the script ** processor overwrites it with a jump to ** the IDLE label of the script. */ struct link launch; /* ** Saved data pointer. ** Points to the position in the script ** responsible for the actual transfer ** of data. ** It's written after reception of a ** "SAVE_DATA_POINTER" message. ** The goalpointer points after ** the last transfer command. */ u_int32_t savep; u_int32_t lastp; u_int32_t goalp; /* ** The virtual address of the nccb ** containing this header. */ nccb_p cp; /* ** space for some timestamps to gather ** profiling data about devices and this driver. */ struct tstamp stamp; /* ** status fields. */ u_char status[8]; }; /* ** The status bytes are used by the host and the script processor. ** ** The first four byte are copied to the scratchb register ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, ** and copied back just after disconnecting. ** Inside the script the XX_REG are used. ** ** The last four bytes are used inside the script by "COPY" commands. ** Because source and destination must have the same alignment ** in a longword, the fields HAVE to be at the chosen offsets. ** xerr_st (4) 0 (0x34) scratcha ** sync_st (5) 1 (0x05) sxfer ** wide_st (7) 3 (0x03) scntl3 */ /* ** First four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define PS_REG scr3 /* ** First four bytes (host) */ #define actualquirks phys.header.status[0] #define host_status phys.header.status[1] #define s_status phys.header.status[2] #define parity_status phys.header.status[3] /* ** Last four bytes (script) */ #define xerr_st header.status[4] /* MUST be ==0 mod 4 */ #define sync_st header.status[5] /* MUST be ==1 mod 4 */ #define nego_st header.status[6] #define wide_st header.status[7] /* MUST be ==3 mod 4 */ /* ** Last four bytes (host) */ #define xerr_status phys.xerr_st #define sync_status phys.sync_st #define nego_status phys.nego_st #define wide_status phys.wide_st /*========================================================== ** ** Declaration of structs: Data structure block ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. ** This substructure contains the header with ** the script-processor-changable data and ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct dsb { /* ** Header. ** Has to be the first entry, ** because it's jumped to by the ** script processor */ struct head header; /* ** Table data for Script */ struct scr_tblsel select; struct scr_tblmove smsg ; struct scr_tblmove smsg2 ; struct scr_tblmove cmd ; struct scr_tblmove scmd ; struct scr_tblmove sense ; struct scr_tblmove data [MAX_SCATTER]; }; /*========================================================== ** ** Declaration of structs: Command control block. ** **========================================================== ** ** During execution of a nccb by the script processor, ** the DSA (data structure address) register points ** to this substructure of the nccb. 
** This substructure contains the header with ** the script-processor-changable data and then ** data blocks for the indirect move commands. ** **---------------------------------------------------------- */ struct nccb { /* ** This filler ensures that the global header is ** cache line size aligned. */ ncrcmd filler[4]; /* ** during reselection the ncr jumps to this point. ** If a "SIMPLE_TAG" message was received, ** then SFBR is set to the tag. ** else SFBR is set to 0 ** If looking for another tag, jump to the next nccb. ** ** JUMP IF (SFBR != #TAG#) ** @(next nccb of this lun) */ struct link jump_nccb; /* ** After execution of this call, the return address ** (in the TEMP register) points to the following ** data structure block. ** So copy it to the DSA register, and start ** processing of this data structure. ** ** CALL ** */ struct link call_tmp; /* ** This is the data structure which is ** to be executed by the script processor. */ struct dsb phys; /* ** If a data transfer phase is terminated too early ** (after reception of a message (i.e. DISCONNECT)), ** we have to prepare a mini script to transfer ** the rest of the data. */ ncrcmd patch[8]; /* ** The general SCSI driver provides a ** pointer to a control block. */ union ccb *ccb; /* ** We prepare a message to be sent after selection, ** and a second one to be sent after getcc selection. ** Contents are IDENTIFY and SIMPLE_TAG. ** While negotiating sync or wide transfer, ** a SDTM or WDTM message is appended. */ u_char scsi_smsg [8]; u_char scsi_smsg2[8]; /* ** Lock this nccb. ** Flag is used while looking for a free nccb. */ u_long magic; /* ** Physical address of this instance of nccb */ u_long p_nccb; /* ** Completion time out for this job. ** It's set to time of start + allowed number of seconds. */ time_t tlimit; /* ** All nccbs of one hostadapter are chained. */ nccb_p link_nccb; /* ** All nccbs of one target/lun are chained. */ nccb_p next_nccb; /* ** Sense command */ u_char sensecmd[6]; /* ** Tag for this transfer. ** It's patched into jump_nccb. ** If it's not zero, a SIMPLE_TAG ** message is included in smsg. */ u_char tag; }; #define CCB_PHYS(cp,lbl) (cp->p_nccb + offsetof(struct nccb, lbl)) /*========================================================== ** ** Declaration of structs: NCR device descriptor ** **========================================================== */ struct ncb { /* ** The global header. ** Accessible to both the host and the ** script-processor. ** We assume it is cache line size aligned. */ struct head header; device_t dev; /*----------------------------------------------- ** Scripts .. **----------------------------------------------- ** ** During reselection the ncr jumps to this point. ** The SFBR register is loaded with the encoded target id. ** ** Jump to the first target. ** ** JUMP ** @(next tcb) */ struct link jump_tcb; /*----------------------------------------------- ** Configuration .. **----------------------------------------------- ** ** virtual and physical addresses ** of the 53c810 chip. */ int reg_rid; struct resource *reg_res; int sram_rid; struct resource *sram_res; struct resource *irq_res; void *irq_handle; /* ** Scripts instance virtual address. */ struct script *script; struct scripth *scripth; /* ** Scripts instance physical address. */ u_long p_script; u_long p_scripth; /* ** The SCSI address of the host adapter. 
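CCB_PHYS above (and NCB_SCRIPT_PHYS just below) turn the physical base of a contiguous block plus a field name into the bus address the script processor will use. The same arithmetic as a generic sketch (FIELD_PHYS is an illustrative name):

#include <stddef.h>

/* Bus address of `field' inside an object whose physical base is known;
 * valid only because the object is physically contiguous. */
#define FIELD_PHYS(base_phys, type, field) \
	((base_phys) + offsetof(type, field))

/* e.g. FIELD_PHYS(cp->p_nccb, struct nccb, phys) == CCB_PHYS(cp, phys) */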
*/ u_char myaddr; /* ** timing parameters */ u_char minsync; /* Minimum sync period factor */ u_char maxsync; /* Maximum sync period factor */ u_char maxoffs; /* Max scsi offset */ u_char clock_divn; /* Number of clock divisors */ u_long clock_khz; /* SCSI clock frequency in KHz */ u_long features; /* Chip features map */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char maxburst; /* log base 2 of dwords burst */ /* ** BIOS supplied PCI bus options */ u_char rv_scntl3; u_char rv_dcntl; u_char rv_dmode; u_char rv_ctest3; u_char rv_ctest4; u_char rv_ctest5; u_char rv_gpcntl; u_char rv_stest2; /*----------------------------------------------- ** CAM SIM information for this instance **----------------------------------------------- */ struct cam_sim *sim; struct cam_path *path; /*----------------------------------------------- ** Job control **----------------------------------------------- ** ** Commands from user */ struct usrcmd user; /* ** Target data */ struct tcb target[MAX_TARGET]; /* ** Start queue. */ u_int32_t squeue [MAX_START]; u_short squeueput; /* ** Timeout handler */ time_t heartbeat; u_short ticks; u_short latetime; time_t lasttime; struct callout timer; /*----------------------------------------------- ** Debug and profiling **----------------------------------------------- ** ** register dump */ struct ncr_reg regdump; time_t regtime; /* ** Profiling data */ struct profile profile; u_long disc_phys; u_long disc_ref; /* ** Head of list of all nccbs for this controller. */ nccb_p link_nccb; /* ** message buffers. ** Should be longword aligned, ** because they're written with a ** COPY script command. */ u_char msgout[8]; u_char msgin [8]; u_int32_t lastmsg; /* ** Buffer for STATUS_IN phase. */ u_char scratch; /* ** controller chip dependent maximal transfer width. */ u_char maxwide; struct mtx lock; #ifdef NCR_IOMAPPED /* ** address of the ncr control registers in io space */ pci_port_t port; #endif }; #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl)) #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl)) /*========================================================== ** ** ** Script for NCR-Processor. ** ** Use ncr_script_fill() to create the variable parts. ** Use ncr_script_copy_and_bind() to make a copy and ** bind to physical addresses. ** ** **========================================================== ** ** We have to know the offsets of all labels before ** we reach them (for forward jumps). ** Therefore we declare a struct here. ** If you make changes inside the script, ** DONT FORGET TO CHANGE THE LENGTHS HERE! ** **---------------------------------------------------------- */ /* ** Script fragments which are loaded into the on-board RAM ** of 825A, 875 and 895 chips. 
*/ struct script { ncrcmd start [ 7]; ncrcmd start0 [ 2]; ncrcmd start1 [ 3]; ncrcmd startpos [ 1]; ncrcmd trysel [ 8]; ncrcmd skip [ 8]; ncrcmd skip2 [ 3]; ncrcmd idle [ 2]; ncrcmd select [ 18]; ncrcmd prepare [ 4]; ncrcmd loadpos [ 14]; ncrcmd prepare2 [ 24]; ncrcmd setmsg [ 5]; ncrcmd clrack [ 2]; ncrcmd dispatch [ 33]; ncrcmd no_data [ 17]; ncrcmd checkatn [ 10]; ncrcmd command [ 15]; ncrcmd status [ 27]; ncrcmd msg_in [ 26]; ncrcmd msg_bad [ 6]; ncrcmd complete [ 13]; ncrcmd cleanup [ 12]; ncrcmd cleanup0 [ 9]; ncrcmd signal [ 12]; ncrcmd save_dp [ 5]; ncrcmd restore_dp [ 5]; ncrcmd disconnect [ 12]; ncrcmd disconnect0 [ 5]; ncrcmd disconnect1 [ 23]; ncrcmd msg_out [ 9]; ncrcmd msg_out_done [ 7]; ncrcmd badgetcc [ 6]; ncrcmd reselect [ 8]; ncrcmd reselect1 [ 8]; ncrcmd reselect2 [ 8]; ncrcmd resel_tmp [ 5]; ncrcmd resel_lun [ 18]; ncrcmd resel_tag [ 24]; ncrcmd data_in [MAX_SCATTER * 4 + 7]; ncrcmd data_out [MAX_SCATTER * 4 + 7]; }; /* ** Script fragments which stay in main memory for all chips. */ struct scripth { ncrcmd tryloop [MAX_START*5+2]; ncrcmd msg_parity [ 6]; ncrcmd msg_reject [ 8]; ncrcmd msg_ign_residue [ 32]; ncrcmd msg_extended [ 18]; ncrcmd msg_ext_2 [ 18]; ncrcmd msg_wdtr [ 27]; ncrcmd msg_ext_3 [ 18]; ncrcmd msg_sdtr [ 27]; ncrcmd msg_out_abort [ 10]; ncrcmd getcc [ 4]; ncrcmd getcc1 [ 5]; #ifdef NCR_GETCC_WITHMSG ncrcmd getcc2 [ 29]; #else ncrcmd getcc2 [ 14]; #endif ncrcmd getcc3 [ 6]; ncrcmd aborttag [ 4]; ncrcmd abort [ 22]; ncrcmd snooptest [ 9]; ncrcmd snoopend [ 2]; }; /*========================================================== ** ** ** Function headers. ** ** **========================================================== */ #ifdef _KERNEL static nccb_p ncr_alloc_nccb(ncb_p np, u_long target, u_long lun); static void ncr_complete(ncb_p np, nccb_p cp); static int ncr_delta(int * from, int * to); static void ncr_exception(ncb_p np); static void ncr_free_nccb(ncb_p np, nccb_p cp); static void ncr_freeze_devq(ncb_p np, struct cam_path *path); static void ncr_selectclock(ncb_p np, u_char scntl3); static void ncr_getclock(ncb_p np, u_char multiplier); static nccb_p ncr_get_nccb(ncb_p np, u_long t,u_long l); #if 0 static u_int32_t ncr_info(int unit); #endif static void ncr_init(ncb_p np, char * msg, u_long code); static void ncr_intr(void *vnp); static void ncr_intr_locked(ncb_p np); static void ncr_int_ma(ncb_p np, u_char dstat); static void ncr_int_sir(ncb_p np); static void ncr_int_sto(ncb_p np); #if 0 static void ncr_min_phys(struct buf *bp); #endif static void ncr_poll(struct cam_sim *sim); static void ncb_profile(ncb_p np, nccb_p cp); static void ncr_script_copy_and_bind(ncb_p np, ncrcmd *src, ncrcmd *dst, int len); static void ncr_script_fill(struct script * scr, struct scripth *scrh); static int ncr_scatter(struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen); static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p); static void ncr_setsync(ncb_p np, nccb_p cp,u_char scntl3,u_char sxfer, u_char period); static void ncr_setwide(ncb_p np, nccb_p cp, u_char wide, u_char ack); static int ncr_show_msg(u_char * msg); static int ncr_snooptest(ncb_p np); static void ncr_action(struct cam_sim *sim, union ccb *ccb); static void ncr_timeout(void *arg); static void ncr_wakeup(ncb_p np, u_long code); static int ncr_probe(device_t dev); static int ncr_attach(device_t dev); #endif /* _KERNEL */ /*========================================================== ** ** ** Global static data. 
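The hand-maintained lengths warned about above are checked only at runtime, by the asserts in ncr_script_fill(). A compile-time guard in the same spirit could look like this sketch (C11 static_assert; the expression merely restates the declared size of one array):

#include <assert.h>

/* Catch a data_in[] length that fell out of sync with MAX_SCATTER at
 * compile time instead of at first boot. */
static_assert(sizeof(((struct script *)0)->data_in) ==
    (MAX_SCATTER * 4 + 7) * sizeof(ncrcmd),
    "struct script data_in length is out of date");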
** ** **========================================================== */ #ifdef _KERNEL static int ncr_debug = SCSI_NCR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, ncr_debug, CTLFLAG_RW, &ncr_debug, 0, ""); static int ncr_cache; /* to be aligned _NOT_ static */ /*========================================================== ** ** ** Global static data: auto configure ** ** **========================================================== */ #define NCR_810_ID (0x00011000ul) #define NCR_815_ID (0x00041000ul) #define NCR_820_ID (0x00021000ul) #define NCR_825_ID (0x00031000ul) #define NCR_860_ID (0x00061000ul) #define NCR_875_ID (0x000f1000ul) #define NCR_875_ID2 (0x008f1000ul) #define NCR_885_ID (0x000d1000ul) #define NCR_895_ID (0x000c1000ul) #define NCR_896_ID (0x000b1000ul) #define NCR_895A_ID (0x00121000ul) #define NCR_1510D_ID (0x000a1000ul) /*========================================================== ** ** ** Scripts for NCR-Processor. ** ** Use ncr_script_bind for binding to physical addresses. ** ** **========================================================== ** ** NADDR generates a reference to a field of the controller data. ** PADDR generates a reference to another part of the script. ** RADDR generates a reference to a script processor register. ** FADDR generates a reference to a script processor register ** with offset. ** **---------------------------------------------------------- */ #define RELOC_SOFTC 0x40000000 #define RELOC_LABEL 0x50000000 #define RELOC_REGISTER 0x60000000 #define RELOC_KVAR 0x70000000 #define RELOC_LABELH 0x80000000 #define RELOC_MASK 0xf0000000 #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label)) #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label)) #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label)) #define RADDR(label) (RELOC_REGISTER | REG(label)) #define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs))) #define KVAR(which) (RELOC_KVAR | (which)) #define KVAR_SECOND (0) #define KVAR_TICKS (1) #define KVAR_NCR_CACHE (2) #define SCRIPT_KVAR_FIRST (0) #define SCRIPT_KVAR_LAST (3) /* * Kernel variables referenced in the scripts. * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. */ static volatile void *script_kvars[] = { &time_second, &ticks, &ncr_cache }; static struct script script0 = { /*--------------------------< START >-----------------------*/ { /* ** Claim to be still alive ... */ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)), KVAR (KVAR_SECOND), NADDR (heartbeat), /* ** Make data structure address invalid. ** clear SIGP. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_FROM_REG (ctest2), 0, }/*-------------------------< START0 >----------------------*/,{ /* ** Hook for interrupted GetConditionCode. ** Will be patched to ... IFTRUE by ** the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_SENSE_RESTART, }/*-------------------------< START1 >----------------------*/,{ /* ** Hook for stalled start queue. ** Will be patched to IFTRUE by the interrupt handler. */ SCR_INT ^ IFFALSE (0), SIR_STALL_RESTART, /* ** Then jump to a certain point in tryloop. ** Due to the lack of indirect addressing the code ** is self modifying here. */ SCR_JUMP, }/*-------------------------< STARTPOS >--------------------*/,{ PADDRH(tryloop), }/*-------------------------< TRYSEL >----------------------*/,{ /* ** Now: ** DSA: Address of a Data Structure ** or Address of the IDLE-Label. ** ** TEMP: Address of a script, which tries to ** start the NEXT entry. ** ** Save the TEMP register into the SCRATCHA register. 
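The RELOC_* tags above mark script words for the copy-and-bind pass: the top nibble says what kind of address a word is, the remaining bits carry an offset or register number. A decoding sketch of that convention (bind_word() is illustrative and the KVAR/LABELH cases are omitted; the real work is done by ncr_script_copy_and_bind()):

#include <stdint.h>

#define RELOC_MASK	0xf0000000
#define RELOC_SOFTC	0x40000000
#define RELOC_LABEL	0x50000000
#define RELOC_REGISTER	0x60000000

static uint32_t
bind_word(uint32_t word, uint32_t np_phys, uint32_t script_phys,
    uint32_t reg_phys)
{
	uint32_t off = word & ~RELOC_MASK;

	switch (word & RELOC_MASK) {
	case RELOC_SOFTC:	return (np_phys + off);
	case RELOC_LABEL:	return (script_phys + off);
	case RELOC_REGISTER:	return (reg_phys + off);
	default:		return (word);	/* plain constant */
	}
}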
** Then copy the DSA to TEMP and RETURN. ** This is kind of an indirect jump. ** (The script processor has NO stack, so the ** CALL is actually a jump and link, and the ** RETURN is an indirect jump.) ** ** If the slot was empty, DSA contains the address ** of the IDLE part of this script. The processor ** jumps to IDLE and waits for a reselect. ** It will wake up and try the same slot again ** after the SIGP bit becomes set by the host. ** ** If the slot was not empty, DSA contains ** the address of the phys-part of a nccb. ** The processor jumps to this address. ** phys starts with head, ** head starts with launch, ** so actually the processor jumps to ** the lauch part. ** If the entry is scheduled for execution, ** then launch contains a jump to SELECT. ** If it's not scheduled, it contains a jump to IDLE. */ SCR_COPY (4), RADDR (temp), RADDR (scratcha), SCR_COPY (4), RADDR (dsa), RADDR (temp), SCR_RETURN, 0 }/*-------------------------< SKIP >------------------------*/,{ /* ** This entry has been canceled. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), /* ** patch the launch field. ** should look like an idle process. */ SCR_COPY_F (4), RADDR (dsa), PADDR (skip2), SCR_COPY (8), PADDR (idle), }/*-------------------------< SKIP2 >-----------------------*/,{ 0, SCR_JUMP, PADDR(start), }/*-------------------------< IDLE >------------------------*/,{ /* ** Nothing to do? ** Wait for reselect. */ SCR_JUMP, PADDR(reselect), }/*-------------------------< SELECT >----------------------*/,{ /* ** DSA contains the address of a scheduled ** data structure. ** ** SCRATCHA contains the address of the script, ** which starts the next entry. ** ** Set Initiator mode. ** ** (Target mode is left as an exercise for the reader) */ SCR_CLR (SCR_TRG), 0, SCR_LOAD_REG (HS_REG, 0xff), 0, /* ** And try to select this target. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR (reselect), /* ** Now there are 4 possibilities: ** ** (1) The ncr loses arbitration. ** This is ok, because it will try again, ** when the bus becomes idle. ** (But beware of the timeout function!) ** ** (2) The ncr is reselected. ** Then the script processor takes the jump ** to the RESELECT label. ** ** (3) The ncr completes the selection. ** Then it will execute the next statement. ** ** (4) There is a selection timeout. ** Then the ncr should interrupt the host and stop. ** Unfortunately, it seems to continue execution ** of the script. But it will fail with an ** IID-interrupt on the next WHEN. */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)), 0, /* ** Send the IDENTIFY and SIMPLE_TAG messages ** (and the MSG_EXT_SDTR message) */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg), #ifdef undef /* XXX better fail than try to deal with this ... */ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)), -16, #endif SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** Selection complete. ** Next time use the next slot. */ SCR_COPY (4), RADDR (scratcha), PADDR (startpos), }/*-------------------------< PREPARE >----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can access it. ** ** We patch the address part of a ** COPY command with the DSA-register. */ SCR_COPY_F (4), RADDR (dsa), PADDR (loadpos), /* ** then we do the actual copy. */ SCR_COPY (sizeof (struct head)), /* ** continued after the next label ... 
*/ }/*-------------------------< LOADPOS >---------------------*/,{ 0, NADDR (header), /* ** Mark this nccb as not scheduled. */ SCR_COPY (8), PADDR (idle), NADDR (header.launch), /* ** Set a time stamp for this selection */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.select), /* ** load the savep (saved pointer) into ** the TEMP register (actual pointer) */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< PREPARE2 >---------------------*/,{ /* ** Load the synchronous mode register */ SCR_COPY (1), NADDR (sync_st), RADDR (sxfer), /* ** Load the wide mode and timing register */ SCR_COPY (1), NADDR (wide_st), RADDR (scntl3), /* ** Initialize the msgout buffer with a NOOP message. */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_COPY (1), RADDR (scratcha), NADDR (msgin), /* ** Message in phase ? */ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** Extended or reject message ? */ SCR_FROM_REG (sbdl), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), /* ** normal processing */ SCR_JUMP, PADDR (dispatch), }/*-------------------------< SETMSG >----------------------*/,{ SCR_COPY (1), RADDR (scratcha), NADDR (msgout), SCR_SET (SCR_ATN), 0, }/*-------------------------< CLRACK >----------------------*/,{ /* ** Terminate possible pending message phase. */ SCR_CLR (SCR_ACK), 0, }/*-----------------------< DISPATCH >----------------------*/,{ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** remove bogus output signals */ SCR_REG_REG (socl, SCR_AND, CACK|CATN), 0, SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)), 0, SCR_RETURN ^ IFTRUE (IF (SCR_DATA_IN)), 0, SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)), PADDR (msg_out), SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)), PADDR (msg_in), SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)), PADDR (command), SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)), PADDR (status), /* ** Discard one illegal phase byte, if required. */ SCR_LOAD_REG (scratcha, XE_BAD_PHASE), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_ILG_IN, NADDR (scratch), SCR_JUMP, PADDR (dispatch), }/*-------------------------< NO_DATA >--------------------*/,{ /* ** The target wants to transfer too much data ** or in the wrong direction. ** Remember that in extended error. */ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA), 0, SCR_COPY (1), RADDR (scratcha), NADDR (xerr_st), /* ** Discard one data byte, if required. */ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_OUT, NADDR (scratch), SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)), 8, SCR_MOVE_ABS (1) ^ SCR_DATA_IN, NADDR (scratch), /* ** .. and repeat as required. */ SCR_CALL, PADDR (dispatch), SCR_JUMP, PADDR (no_data), }/*-------------------------< CHECKATN >--------------------*/,{ /* ** If AAP (bit 1 of scntl0 register) is set ** and a parity error is detected, ** the script processor asserts ATN. ** ** The target should switch to a MSG_OUT phase ** to get the message. */ SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)), PADDR (dispatch), /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 1), 0, /* ** Prepare a MSG_INITIATOR_DET_ERR message ** (initiator detected error). 
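DISPATCH above is the script's phase decoder: return to the interrupted data mover for DATA phases, jump to the matching handler for the others, and discard one byte on an illegal phase. The same control flow in a C sketch (all names here are illustrative):

enum phase { PH_DATA_OUT, PH_DATA_IN, PH_MSG_OUT, PH_MSG_IN,
    PH_COMMAND, PH_STATUS, PH_ILLEGAL };
enum action { RESUME_TRANSFER, DO_MSG_OUT, DO_MSG_IN, DO_COMMAND,
    DO_STATUS, DISCARD_BYTE };

static enum action
dispatch(enum phase ph)
{
	switch (ph) {
	case PH_DATA_OUT:
	case PH_DATA_IN:	return (RESUME_TRANSFER); /* SCR_RETURN */
	case PH_MSG_OUT:	return (DO_MSG_OUT);
	case PH_MSG_IN:		return (DO_MSG_IN);
	case PH_COMMAND:	return (DO_COMMAND);
	case PH_STATUS:		return (DO_STATUS);
	default:		return (DISCARD_BYTE);	/* XE_BAD_PHASE */
	}
}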
** The target should retry the transfer. */ SCR_LOAD_REG (scratcha, MSG_INITIATOR_DET_ERR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMMAND >--------------------*/,{ /* ** If this is not a GETCC transfer ... */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), 28, /* ** ... set a timestamp ... */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.command), /* ** ... and send the command */ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, cmd), SCR_JUMP, PADDR (dispatch), /* ** Send the GETCC command */ /*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND, offsetof (struct dsb, scmd), SCR_JUMP, PADDR (dispatch), }/*-------------------------< STATUS >--------------------*/,{ /* ** set the timestamp. */ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.status), /* ** If this is a GETCC transfer, */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (SCSI_STATUS_CHECK_COND)), 40, /* ** get the status */ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), /* ** Save status to scsi_status. ** Mark as complete. ** And wait for disconnect. */ SCR_TO_REG (SS_REG), 0, SCR_REG_REG (SS_REG, SCR_OR, SCSI_STATUS_SENSE), 0, SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), /* ** If it was no GETCC transfer, ** save the status to scsi_status. */ /*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS, NADDR (scratch), SCR_TO_REG (SS_REG), 0, /* ** if it was no check condition ... */ SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDR (checkatn), /* ** ... mark as complete. */ SCR_LOAD_REG (HS_REG, HS_COMPLETE), 0, SCR_JUMP, PADDR (checkatn), }/*-------------------------< MSG_IN >--------------------*/,{ /* ** Get the first byte of the message ** and save it to SCRATCHA. ** ** The script processor doesn't negate the ** ACK signal after this transfer. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[0]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Parity was ok, handle this message. */ SCR_JUMP ^ IFTRUE (DATA (MSG_CMDCOMPLETE)), PADDR (complete), SCR_JUMP ^ IFTRUE (DATA (MSG_SAVEDATAPOINTER)), PADDR (save_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_RESTOREPOINTERS)), PADDR (restore_dp), SCR_JUMP ^ IFTRUE (DATA (MSG_DISCONNECT)), PADDR (disconnect), SCR_JUMP ^ IFTRUE (DATA (MSG_EXTENDED)), PADDRH (msg_extended), SCR_JUMP ^ IFTRUE (DATA (MSG_NOOP)), PADDR (clrack), SCR_JUMP ^ IFTRUE (DATA (MSG_MESSAGE_REJECT)), PADDRH (msg_reject), SCR_JUMP ^ IFTRUE (DATA (MSG_IGN_WIDE_RESIDUE)), PADDRH (msg_ign_residue), /* ** Rest of the messages left as ** an exercise ... ** ** Unimplemented messages: ** fall through to MSG_BAD. */ }/*-------------------------< MSG_BAD >------------------*/,{ /* ** unimplemented message - reject it. */ SCR_INT, SIR_REJECT_SENT, SCR_LOAD_REG (scratcha, MSG_MESSAGE_REJECT), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< COMPLETE >-----------------*/,{ /* ** Complete message. ** ** If it's not the get condition code, ** copy TEMP register to LASTP in header. */ SCR_FROM_REG (SS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (SCSI_STATUS_SENSE, SCSI_STATUS_SENSE)), 12, SCR_COPY (4), RADDR (temp), NADDR (header.lastp), /*>>>*/ /* ** When we terminate the cycle by clearing ACK, ** the target may disconnect immediately. ** ** We don't want to be told of an ** "unexpected disconnect", ** so we disable this feature. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, /* ** Terminate cycle ... 
*/ SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** ... and wait for the disconnect. */ SCR_WAIT_DISC, 0, }/*-------------------------< CLEANUP >-------------------*/,{ /* ** dsa: Pointer to nccb ** or xxxxxxFF (no nccb) ** ** HS_REG: Host-Status (<>0!) */ SCR_FROM_REG (dsa), 0, SCR_JUMP ^ IFTRUE (DATA (0xff)), PADDR (signal), /* ** dsa is valid. ** save the status registers */ SCR_COPY (4), RADDR (scr0), NADDR (header.status), /* ** and copy back the header to the nccb. */ SCR_COPY_F (4), RADDR (dsa), PADDR (cleanup0), SCR_COPY (sizeof (struct head)), NADDR (header), }/*-------------------------< CLEANUP0 >--------------------*/,{ 0, /* ** If command resulted in "check condition" ** status and is not yet completed, ** try to get the condition code. */ SCR_FROM_REG (HS_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)), 16, SCR_FROM_REG (SS_REG), 0, SCR_JUMP ^ IFTRUE (DATA (SCSI_STATUS_CHECK_COND)), PADDRH(getcc2), }/*-------------------------< SIGNAL >----------------------*/,{ /* ** if status = queue full, ** reinsert in startqueue and stall queue. */ /*>>>*/ SCR_FROM_REG (SS_REG), 0, SCR_INT ^ IFTRUE (DATA (SCSI_STATUS_QUEUE_FULL)), SIR_STALL_QUEUE, /* ** And make the DSA register invalid. */ SCR_LOAD_REG (dsa, 0xff), /* invalid */ 0, /* ** if job completed ... */ SCR_FROM_REG (HS_REG), 0, /* ** ... signal completion to the host */ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)), 0, /* ** Auf zu neuen Schandtaten! */ SCR_JUMP, PADDR(start), }/*-------------------------< SAVE_DP >------------------*/,{ /* ** SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), SCR_JUMP, PADDR (clrack), }/*-------------------------< RESTORE_DP >---------------*/,{ /* ** RESTORE_DP message: ** Copy SAVEP in header to TEMP register. */ SCR_COPY (4), NADDR (header.savep), RADDR (temp), SCR_JUMP, PADDR (clrack), }/*-------------------------< DISCONNECT >---------------*/,{ /* ** If QUIRK_AUTOSAVE is set, ** do a "save pointer" operation. */ SCR_FROM_REG (QU_REG), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)), 12, /* ** like SAVE_DP message: ** Copy TEMP register to SAVEP in header. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /*>>>*/ /* ** Check if temp==savep or temp==goalp: ** if not, log a missing save pointer message. ** In fact, it's a comparison mod 256. ** ** Hmmm, I hadn't thought that I would be urged to ** write this kind of ugly self modifying code. ** ** It's unbelievable, but the ncr53c8xx isn't able ** to subtract one register from another. */ SCR_FROM_REG (temp), 0, /* ** You are not expected to understand this .. ** ** CAUTION: only little endian architectures supported! XXX */ SCR_COPY_F (1), NADDR (header.savep), PADDR (disconnect0), }/*-------------------------< DISCONNECT0 >--------------*/,{ /*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)), 20, /* ** neither this */ SCR_COPY_F (1), NADDR (header.goalp), PADDR (disconnect1), }/*-------------------------< DISCONNECT1 >--------------*/,{ SCR_INT ^ IFFALSE (DATA (1)), SIR_MISSING_SAVE, /*>>>*/ /* ** DISCONNECTing ... ** ** disable the "unexpected disconnect" feature, ** and remove the ACK signal. */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, /* ** Wait for the disconnect. */ SCR_WAIT_DISC, 0, /* ** Profiling: ** Set a time stamp, ** and count the disconnects. 
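The self-modifying COPY_F sequence above implements, as the comment admits, nothing more than an equality test modulo 256 between the current pointer and savep/goalp, because the chip can only compare single bytes. Stated directly in C:

#include <stdint.h>

/* Report a missing SAVE DATA POINTER when temp matches neither value
 * (comparison is mod 256, exactly as in the script). */
static int
missing_save_pointer(uint32_t temp, uint32_t savep, uint32_t goalp)
{
	uint8_t t = temp & 0xff;

	return (t != (savep & 0xff) && t != (goalp & 0xff));
}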
*/ SCR_COPY (sizeof (ticks)), KVAR (KVAR_TICKS), NADDR (header.stamp.disconnect), SCR_COPY (4), NADDR (disc_phys), RADDR (temp), SCR_REG_REG (temp, SCR_ADD, 0x01), 0, SCR_COPY (4), RADDR (temp), NADDR (disc_phys), /* ** Status is: DISCONNECTED. */ SCR_LOAD_REG (HS_REG, HS_DISCONNECT), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< MSG_OUT >-------------------*/,{ /* ** The target requests a message. */ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT, NADDR (msgout), SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), /* ** If it was no ABORT message ... */ SCR_JUMP ^ IFTRUE (DATA (MSG_ABORT)), PADDRH (msg_out_abort), /* ** ... wait for the next phase ** if it's a message out, send it again, ... */ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)), PADDR (msg_out), }/*-------------------------< MSG_OUT_DONE >--------------*/,{ /* ** ... else clear the message ... */ SCR_LOAD_REG (scratcha, MSG_NOOP), 0, SCR_COPY (4), RADDR (scratcha), NADDR (msgout), /* ** ... and process the next phase */ SCR_JUMP, PADDR (dispatch), }/*------------------------< BADGETCC >---------------------*/,{ /* ** If SIGP was set, clear it and try again. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDRH (getcc2), SCR_INT, SIR_SENSE_FAILED, }/*-------------------------< RESELECT >--------------------*/,{ /* ** This NOP will be patched with LED OFF ** SCR_REG_REG (gpreg, SCR_OR, 0x01) */ SCR_NO_OP, 0, /* ** make the DSA invalid. */ SCR_LOAD_REG (dsa, 0xff), 0, SCR_CLR (SCR_TRG), 0, /* ** Sleep waiting for a reselection. ** If SIGP is set, special treatment. ** ** Zu allem bereit .. */ SCR_WAIT_RESEL, PADDR(reselect2), }/*-------------------------< RESELECT1 >--------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** ... zu nichts zu gebrauchen ? ** ** load the target id into the SFBR ** and jump to the control block. ** ** Look at the declarations of ** - struct ncb ** - struct tcb ** - struct lcb ** - struct nccb ** to understand what's going on. */ SCR_REG_SFBR (ssid, SCR_AND, 0x8F), 0, SCR_TO_REG (sdid), 0, SCR_JUMP, NADDR (jump_tcb), }/*-------------------------< RESELECT2 >-------------------*/,{ /* ** This NOP will be patched with LED ON ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) */ SCR_NO_OP, 0, /* ** If it's not connected :( ** -> interrupted by SIGP bit. ** Jump to start. */ SCR_FROM_REG (ctest2), 0, SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)), PADDR (start), SCR_JUMP, PADDR (reselect), }/*-------------------------< RESEL_TMP >-------------------*/,{ /* ** The return address in TEMP ** is in fact the data structure address, ** so copy it to the DSA register. */ SCR_COPY (4), RADDR (temp), RADDR (dsa), SCR_JUMP, PADDR (prepare), }/*-------------------------< RESEL_LUN >-------------------*/,{ /* ** come back to this point ** to get an IDENTIFY message ** Wait for a msg_in phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 48, /* ** message phase ** It's not a sony, it's a trick: ** read the data without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (MSG_IDENTIFYFLAG, 0x98)), 32, /* ** It WAS an Identify message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Mask out the lun. */ SCR_REG_REG (sfbr, SCR_AND, 0x07), 0, SCR_RETURN, 0, /* ** No message phase or no IDENTIFY message: ** return 0. 
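RESEL_LUN above peeks at the message byte on the bus (through sbdl, without ACKing it), matches it against the IDENTIFY pattern under mask 0x98, and keeps the low three bits as the lun. The same test as a C sketch, with the constants copied from the script:

#include <stdint.h>

/* IDENTIFY has bit 7 set; mask 0x98 also demands the reserved bits 4..3
 * be clear.  Returns the lun (0..7), or -1 for a non-IDENTIFY byte. */
static int
identify_to_lun(uint8_t msg)
{
	if ((msg & 0x98) != 0x80)	/* MSG_IDENTIFYFLAG */
		return (-1);
	return (msg & 0x07);
}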
*/ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_RETURN, 0, }/*-------------------------< RESEL_TAG >-------------------*/,{ /* ** come back to this point ** to get a SIMPLE_TAG message ** Wait for a MSG_IN phase. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 64, /* ** message phase ** It's a trick - read the data ** without acknowledging it. */ SCR_FROM_REG (sbdl), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (MSG_SIMPLE_Q_TAG)), 48, /* ** It WAS a SIMPLE_TAG message. ** get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK), 0, /* ** Wait for the second byte (the tag) */ /*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)), 24, /* ** Get it and ack it! */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin), SCR_CLR (SCR_ACK|SCR_CARRY), 0, SCR_RETURN, 0, /* ** No message phase or no SIMPLE_TAG message ** or no second byte: return 0. */ /*>>>*/ SCR_LOAD_SFBR (0), 0, SCR_SET (SCR_CARRY), 0, SCR_RETURN, 0, }/*-------------------------< DATA_IN >--------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_IN, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)), ** || PADDR (checkatn), ** || SCR_MOVE_TBL ^ SCR_DATA_IN, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (checkatn), ** SCR_JUMP, ** PADDR (no_data), */ 0 }/*-------------------------< DATA_OUT >-------------------*/,{ /* ** Because the size depends on the ** #define MAX_SCATTER parameter, ** it is filled in at runtime. ** ** SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** PADDR (no_data), ** SCR_COPY (sizeof (ticks)), ** KVAR (KVAR_TICKS), ** NADDR (header.stamp.data), ** SCR_MOVE_TBL ^ SCR_DATA_OUT, ** offsetof (struct dsb, data[ 0]), ** ** ##===========< i=1; i<MAX_SCATTER >========= ** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)), ** || PADDR (dispatch), ** || SCR_MOVE_TBL ^ SCR_DATA_OUT, ** || offsetof (struct dsb, data[ i]), ** ##========================================== ** ** SCR_CALL, ** PADDR (dispatch), ** SCR_JUMP, ** PADDR (no_data), ** **--------------------------------------------------------- */ (u_long)0 }/*--------------------------------------------------------*/ }; static struct scripth scripth0 = { /*-------------------------< TRYLOOP >---------------------*/{ /* ** Load an entry of the start queue into dsa ** and try to start it by jumping to TRYSEL. ** ** Because the size depends on the ** #define MAX_START parameter, it is filled ** in at runtime. ** **----------------------------------------------------------- ** ** ##===========< I=0; i<MAX_START >=========== ** || SCR_COPY (4), ** || NADDR (squeue[i]), ** || RADDR (dsa), ** || SCR_CALL, ** || PADDR (trysel), ** ##========================================== ** ** SCR_JUMP, ** PADDRH(tryloop), ** **----------------------------------------------------------- */ 0 }/*-------------------------< MSG_PARITY >---------------*/,{ /* ** count it */ SCR_REG_REG (PS_REG, SCR_ADD, 0x01), 0, /* ** send a "message parity error" message. */ SCR_LOAD_REG (scratcha, MSG_PARITY_ERROR), 0, SCR_JUMP, PADDR (setmsg), }/*-------------------------< MSG_MESSAGE_REJECT >---------------*/,{ /* ** If a negotiation was in progress, ** negotiation failed.
*/ SCR_FROM_REG (HS_REG), 0, SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)), SIR_NEGO_FAILED, /* ** else make host log this message */ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)), SIR_REJECT_RECEIVED, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_IGN_RESIDUE >----------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get residue size. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* ** Size is 0 .. ignore message. */ SCR_JUMP ^ IFTRUE (DATA (0)), PADDR (clrack), /* ** Size is not 1 .. have to interrupt. */ /*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)), 40, /* ** Check for residue byte in swide register */ SCR_FROM_REG (scntl2), 0, /*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)), 16, /* ** There IS data in the swide register. ** Discard it. */ SCR_REG_REG (scntl2, SCR_OR, WSR), 0, SCR_JUMP, PADDR (clrack), /* ** Load again the size to the sfbr register. */ /*>>>*/ SCR_FROM_REG (scratcha), 0, /*>>>*/ SCR_INT, SIR_IGN_RESIDUE, SCR_JUMP, PADDR (clrack), }/*-------------------------< MSG_EXTENDED >-------------*/,{ /* ** Terminate cycle */ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get length. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[1]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, /* */ SCR_JUMP ^ IFTRUE (DATA (3)), PADDRH (msg_ext_3), SCR_JUMP ^ IFFALSE (DATA (2)), PADDR (msg_bad), }/*-------------------------< MSG_EXT_2 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. */ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_WDTR)), PADDRH (msg_wdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_WDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get data bus width */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_WIDE, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_WDTR */ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_EXT_3 >----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get extended message code. */ SCR_MOVE_ABS (1) ^ SCR_MSG_IN, NADDR (msgin[2]), /* ** Check for message parity error. 
*/ SCR_TO_REG (scratcha), 0, SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), SCR_FROM_REG (scratcha), 0, SCR_JUMP ^ IFTRUE (DATA (MSG_EXT_SDTR)), PADDRH (msg_sdtr), /* ** unknown extended message */ SCR_JUMP, PADDR (msg_bad) }/*-------------------------< MSG_SDTR >-----------------*/,{ SCR_CLR (SCR_ACK), 0, SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)), PADDR (dispatch), /* ** get period and offset */ SCR_MOVE_ABS (2) ^ SCR_MSG_IN, NADDR (msgin[3]), SCR_FROM_REG (socl), 0, SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)), PADDRH (msg_parity), /* ** let the host do the real work. */ SCR_INT, SIR_NEGO_SYNC, /* ** let the target fetch our answer. */ SCR_SET (SCR_ATN), 0, SCR_CLR (SCR_ACK), 0, SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)), SIR_NEGO_PROTO, /* ** Send the MSG_EXT_SDTR */ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT, NADDR (msgout), SCR_CLR (SCR_ATN), 0, SCR_COPY (1), RADDR (sfbr), NADDR (lastmsg), SCR_JUMP, PADDR (msg_out_done), }/*-------------------------< MSG_OUT_ABORT >-------------*/,{ /* ** After ABORT message, ** ** expect an immediate disconnect, ... */ SCR_REG_REG (scntl2, SCR_AND, 0x7f), 0, SCR_CLR (SCR_ACK|SCR_ATN), 0, SCR_WAIT_DISC, 0, /* ** ... and set the status to "ABORTED" */ SCR_LOAD_REG (HS_REG, HS_ABORTED), 0, SCR_JUMP, PADDR (cleanup), }/*-------------------------< GETCC >-----------------------*/,{ /* ** The ncr doesn't have an indirect load ** or store command. So we have to ** copy part of the control block to a ** fixed place, where we can modify it. ** ** We patch the address part of a COPY command ** with the address of the dsa register ... */ SCR_COPY_F (4), RADDR (dsa), PADDRH (getcc1), /* ** ... then we do the actual copy. */ SCR_COPY (sizeof (struct head)), }/*-------------------------< GETCC1 >----------------------*/,{ 0, NADDR (header), /* ** Initialize the status registers */ SCR_COPY (4), NADDR (header.status), RADDR (scr0), }/*-------------------------< GETCC2 >----------------------*/,{ /* ** Get the condition code from a target. ** ** DSA points to a data structure. ** Set TEMP to the script location ** that receives the condition code. ** ** Because there is no script command ** to load a longword into a register, ** we use a CALL command. */ /*<<<*/ SCR_CALLR, 24, /* ** Get the condition code. */ SCR_MOVE_TBL ^ SCR_DATA_IN, offsetof (struct dsb, sense), /* ** No data phase may follow! */ SCR_CALL, PADDR (checkatn), SCR_JUMP, PADDR (no_data), /*>>>*/ /* ** The CALL jumps to this point. ** Prepare for a RESTORE_POINTER message. ** Save the TEMP register into the saved pointer. */ SCR_COPY (4), RADDR (temp), NADDR (header.savep), /* ** Load scratcha, because in case of a selection timeout, ** the host will expect a new value for startpos in ** the scratcha register. */ SCR_COPY (4), PADDR (startpos), RADDR (scratcha), #ifdef NCR_GETCC_WITHMSG /* ** If QUIRK_NOMSG is set, select without ATN. ** and don't send a message. */ SCR_FROM_REG (QU_REG), 0, SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)), PADDRH(getcc3), /* ** Then try to connect to the target. ** If we are reselected, special treatment ** of the current job is required before ** accepting the reselection. */ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select), PADDR(badgetcc), /* ** Send the IDENTIFY message. ** In case of short transfer, remove ATN. */ SCR_MOVE_TBL ^ SCR_MSG_OUT, offsetof (struct dsb, smsg2), SCR_CLR (SCR_ATN), 0, /* ** save the first byte of the message. 
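**
**	(lastmsg is kept up to date from sfbr so the rest of the
**	driver can see which message byte went out last.)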
*/
	SCR_COPY (1),
		RADDR (sfbr),
		NADDR (lastmsg),
	SCR_JUMP,
		PADDR (prepare2),
#endif
}/*-------------------------< GETCC3 >----------------------*/,{
	/*
	**	Try to connect to the target.
	**	If we are reselected, special treatment
	**	of the current job is required before
	**	accepting the reselection.
	**
	**	Silly target won't accept a message.
	**	Select without ATN.
	*/
	SCR_SEL_TBL ^ offsetof (struct dsb, select),
		PADDR(badgetcc),
	/*
	**	Force error if selection timeout
	*/
	SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)),
		0,
	/*
	**	don't negotiate.
	*/
	SCR_JUMP,
		PADDR (prepare2),
}/*-------------------------< ABORTTAG >-------------------*/,{
	/*
	**	Abort a bad reselection.
	**	Set the message to ABORT vs. ABORT_TAG
	*/
	SCR_LOAD_REG (scratcha, MSG_ABORT_TAG),
		0,
	SCR_JUMPR ^ IFFALSE (CARRYSET),
		8,
}/*-------------------------< ABORT >----------------------*/,{
	SCR_LOAD_REG (scratcha, MSG_ABORT),
		0,
	SCR_COPY (1),
		RADDR (scratcha),
		NADDR (msgout),
	SCR_SET (SCR_ATN),
		0,
	SCR_CLR (SCR_ACK),
		0,
	/*
	**	and send it.
	**	we expect an immediate disconnect
	*/
	SCR_REG_REG (scntl2, SCR_AND, 0x7f),
		0,
	SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
		NADDR (msgout),
	SCR_COPY (1),
		RADDR (sfbr),
		NADDR (lastmsg),
	SCR_CLR (SCR_ACK|SCR_ATN),
		0,
	SCR_WAIT_DISC,
		0,
	SCR_JUMP,
		PADDR (start),
}/*-------------------------< SNOOPTEST >-------------------*/,{
	/*
	**	Read the variable.
	*/
	SCR_COPY (4),
		KVAR (KVAR_NCR_CACHE),
		RADDR (scratcha),
	/*
	**	Write the variable.
	*/
	SCR_COPY (4),
		RADDR (temp),
		KVAR (KVAR_NCR_CACHE),
	/*
	**	Read back the variable.
	*/
	SCR_COPY (4),
		KVAR (KVAR_NCR_CACHE),
		RADDR (temp),
}/*-------------------------< SNOOPEND >-------------------*/,{
	/*
	**	And stop.
	*/
	SCR_INT,
		99,
}/*--------------------------------------------------------*/
};

/*==========================================================
**
**
**	Fill in #define dependent parts of the script
**	(the loop bodies below follow the templates given in
**	the DATA_IN, DATA_OUT and TRYLOOP comments above).
**
**
**==========================================================
*/

static void ncr_script_fill (struct script * scr, struct scripth * scrh)
{
	int	i;
	ncrcmd	*p;

	p = scrh->tryloop;
	for (i=0; i<MAX_START; i++) {
		*p++ =SCR_COPY (4);
		*p++ =NADDR (squeue[i]);
		*p++ =RADDR (dsa);
		*p++ =SCR_CALL;
		*p++ =PADDR (trysel);
	}
	*p++ =SCR_JUMP;
	*p++ =PADDRH(tryloop);

	assert ((char *)p == (char *)&scrh->tryloop + sizeof (scrh->tryloop));

	p = scr->data_in;

	*p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN));
	*p++ =PADDR (no_data);
	*p++ =SCR_COPY (sizeof (ticks));
	*p++ =(ncrcmd) KVAR (KVAR_TICKS);
	*p++ =NADDR (header.stamp.data);
	*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
	*p++ =offsetof (struct dsb, data[ 0]);

	for (i=1; i<MAX_SCATTER; i++) {
		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
		*p++ =PADDR (checkatn);
		*p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
		*p++ =offsetof (struct dsb, data[ i]);
	}

	*p++ =SCR_CALL;
	*p++ =PADDR (checkatn);
	*p++ =SCR_JUMP;
	*p++ =PADDR (no_data);

	assert ((char *)p == (char *)&scr->data_in + sizeof (scr->data_in));

	p = scr->data_out;

	*p++ =SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_OUT));
	*p++ =PADDR (no_data);
	*p++ =SCR_COPY (sizeof (ticks));
	*p++ =(ncrcmd) KVAR (KVAR_TICKS);
	*p++ =NADDR (header.stamp.data);
	*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
	*p++ =offsetof (struct dsb, data[ 0]);

	for (i=1; i<MAX_SCATTER; i++) {
		*p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
		*p++ =PADDR (dispatch);
		*p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
		*p++ =offsetof (struct dsb, data[ i]);
	}

	*p++ =SCR_CALL;
	*p++ =PADDR (dispatch);
	*p++ =SCR_JUMP;
	*p++ =PADDR (no_data);

	assert ((char *)p == (char *)&scr->data_out + sizeof (scr->data_out));
}

/*==========================================================
**
**
**	Copy and rebind a script.
**
**
**==========================================================
*/

static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len)
{
	ncrcmd  opcode, new, old, tmp1, tmp2;
	ncrcmd	*start, *end;
	int relocs, offset;

	start = src;
	end = src + len/4;
	offset = 0;

	while (src < end) {
		opcode = *src++;
		WRITESCRIPT_OFF(dst, offset, opcode);
		offset += 4;

		/*
		**	If we forget to change the length
		**	in struct script, a field will be
		**	padded with 0. This is an illegal
		**	command.
*/ if (opcode == 0) { device_printf(np->dev, "ERROR0 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%p: <%x>\n", (src-1), (unsigned)opcode); /* ** We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xc: /* ** COPY has TWO arguments. */ relocs = 2; tmp1 = src[0]; if ((tmp1 & RELOC_MASK) == RELOC_KVAR) tmp1 = 0; tmp2 = src[1]; if ((tmp2 & RELOC_MASK) == RELOC_KVAR) tmp2 = 0; if ((tmp1 ^ tmp2) & 3) { device_printf(np->dev, "ERROR1 IN SCRIPT at %d.\n", (int)(src - start - 1)); DELAY (1000000); } /* ** If PREFETCH feature not enabled, remove ** the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features&FE_PFEN)) WRITESCRIPT_OFF(dst, offset - 4, (opcode & ~SCR_NO_FLUSH)); break; case 0x0: /* ** MOVE (absolute address) */ relocs = 1; break; case 0x8: /* ** JUMP / CALL ** dont't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } if (relocs) { while (relocs--) { old = *src++; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + rman_get_start(np->reg_res); break; case RELOC_LABEL: new = (old & ~RELOC_MASK) + np->p_script; break; case RELOC_LABELH: new = (old & ~RELOC_MASK) + np->p_scripth; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + vtophys(np); break; case RELOC_KVAR: if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) || ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST)) panic("ncr KVAR out of range"); new = vtophys(script_kvars[old & ~RELOC_MASK]); break; case 0: /* Don't relocate a 0 address. */ if (old == 0) { new = old; break; } /* FALLTHROUGH */ default: panic("ncr_script_copy_and_bind: weird relocation %x @ %d\n", old, (int)(src - start)); break; } WRITESCRIPT_OFF(dst, offset, new); offset += 4; } } else { WRITESCRIPT_OFF(dst, offset, *src++); offset += 4; } } } /*========================================================== ** ** ** Auto configuration. ** ** **========================================================== */ #if 0 /*---------------------------------------------------------- ** ** Reduce the transfer length to the max value ** we can transfer safely. ** ** Reading a block greater then MAX_SIZE from the ** raw (character) device exercises a memory leak ** in the vm subsystem. This is common to ALL devices. ** We have submitted a description of this bug to ** . ** It should be fixed in the current release. ** **---------------------------------------------------------- */ void ncr_min_phys (struct buf *bp) { if ((unsigned long)bp->b_bcount > MAX_SIZE) bp->b_bcount = MAX_SIZE; } #endif #if 0 /*---------------------------------------------------------- ** ** Maximal number of outstanding requests per target. ** **---------------------------------------------------------- */ u_int32_t ncr_info (int unit) { return (1); /* may be changed later */ } #endif /*---------------------------------------------------------- ** ** NCR chip devices table and chip look up function. ** Features bit are defined in ncrreg.h. Is it the ** right place? 
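**
**	ncr_chip_lookup() below returns the entry with the largest
**	minrevid that does not exceed the chip's actual revision,
**	so a later stepping inherits the most capable feature set
**	known for it.  For example (illustrative) a 53c875 with
**	revision id 0x03 matches both 875 entries and gets the
**	minrevid 0x02 one, which advertises the clock doubler
**	(FE_DBLR) instead of assuming a fixed 80 MHz clock
**	(FE_CLK80).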
** **---------------------------------------------------------- */ typedef struct { unsigned long device_id; unsigned short minrevid; char *name; unsigned char maxburst; unsigned char maxoffs; unsigned char clock_divn; unsigned int features; } ncr_chip; static ncr_chip ncr_chip_table[] = { {NCR_810_ID, 0x00, "ncr 53c810 fast10 scsi", 4, 8, 4, FE_ERL} , {NCR_810_ID, 0x10, "ncr 53c810a fast10 scsi", 4, 8, 4, FE_ERL|FE_LDSTR|FE_PFEN|FE_BOF} , {NCR_815_ID, 0x00, "ncr 53c815 fast10 scsi", 4, 8, 4, FE_ERL|FE_BOF} , {NCR_820_ID, 0x00, "ncr 53c820 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL} , {NCR_825_ID, 0x00, "ncr 53c825 fast10 wide scsi", 4, 8, 4, FE_WIDE|FE_ERL|FE_BOF} , {NCR_825_ID, 0x10, "ncr 53c825a fast10 wide scsi", 7, 8, 4, FE_WIDE|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_860_ID, 0x00, "ncr 53c860 fast20 scsi", 4, 8, 5, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_LDSTR|FE_PFEN} , {NCR_875_ID, 0x00, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID, 0x02, "ncr 53c875 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_875_ID2, 0x00, "ncr 53c875j fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_885_ID, 0x00, "ncr 53c885 fast20 wide scsi", 7, 16, 5, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895_ID, 0x00, "ncr 53c895 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_896_ID, 0x00, "ncr 53c896 fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_895A_ID, 0x00, "ncr 53c895a fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} , {NCR_1510D_ID, 0x00, "ncr 53c1510d fast40 wide scsi", 7, 31, 7, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} }; static int ncr_chip_lookup(u_long device_id, u_char revision_id) { int i, found; found = -1; for (i = 0; i < nitems(ncr_chip_table); i++) { if (device_id == ncr_chip_table[i].device_id && ncr_chip_table[i].minrevid <= revision_id) { if (found < 0 || ncr_chip_table[found].minrevid < ncr_chip_table[i].minrevid) { found = i; } } } return found; } /*---------------------------------------------------------- ** ** Probe the hostadapter. ** **---------------------------------------------------------- */ static int ncr_probe (device_t dev) { int i; i = ncr_chip_lookup(pci_get_devid(dev), pci_get_revid(dev)); if (i >= 0) { device_set_desc(dev, ncr_chip_table[i].name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /*========================================================== ** ** NCR chip clock divisor table. ** Divisors are multiplied by 10,000,000 in order to make ** calculations more simple. ** **========================================================== */ #define _5M 5000000 static u_long div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /*=============================================================== ** ** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128 ** transfers. 32,64,128 are only supported by 875 and 895 chips. ** We use log base 2 (burst length) as internal code, with ** value 0 meaning "burst disabled". ** **=============================================================== */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. 
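*
*	Worked example (illustrative): burst code 5 gives
*	burst_length(5) = 1 << 5 = 32 transfers, and code 0 means
*	bursting is disabled.  Going the other way, dmode = 0x40
*	with ctest4 = 0x00 and ctest5 = 0x00 yields
*
*		burst_code() = ((0x40 & 0xc0) >> 6) + 0 + 1 = 2
*
*	i.e. 4-transfer bursts.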
*/ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static void ncr_init_burst(ncb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /*========================================================== ** ** ** Auto configuration: attach and init a host adapter. ** ** **========================================================== */ static int ncr_attach (device_t dev) { ncb_p np = (struct ncb*) device_get_softc(dev); u_char rev = 0; u_long period; int i, rid; u_int8_t usrsync; u_int8_t usrwide; struct cam_devq *devq; /* ** allocate and initialize structures. */ np->dev = dev; mtx_init(&np->lock, "ncr", NULL, MTX_DEF); callout_init_mtx(&np->timer, &np->lock, 0); /* ** Try to map the controller chip to ** virtual and physical memory. */ np->reg_rid = PCIR_BAR(1); np->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->reg_rid, RF_ACTIVE); if (!np->reg_res) { device_printf(dev, "could not map memory\n"); return ENXIO; } /* ** Now the INB INW INL OUTB OUTW OUTL macros ** can be used safely. */ #ifdef NCR_IOMAPPED /* ** Try to map the controller chip into iospace. */ if (!pci_map_port (config_id, 0x10, &np->port)) return; #endif /* ** Save some controller register default values */ np->rv_scntl3 = INB(nc_scntl3) & 0x77; np->rv_dmode = INB(nc_dmode) & 0xce; np->rv_dcntl = INB(nc_dcntl) & 0xa9; np->rv_ctest3 = INB(nc_ctest3) & 0x01; np->rv_ctest4 = INB(nc_ctest4) & 0x88; np->rv_ctest5 = INB(nc_ctest5) & 0x24; np->rv_gpcntl = INB(nc_gpcntl); np->rv_stest2 = INB(nc_stest2) & 0x20; if (bootverbose >= 2) { printf ("\tBIOS values: SCNTL3:%02x DMODE:%02x DCNTL:%02x\n", np->rv_scntl3, np->rv_dmode, np->rv_dcntl); printf ("\t CTEST3:%02x CTEST4:%02x CTEST5:%02x\n", np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } np->rv_dcntl |= NOCOM; /* ** Do chip dependent initialization. */ rev = pci_get_revid(dev); /* ** Get chip features from chips table. */ i = ncr_chip_lookup(pci_get_devid(dev), rev); if (i >= 0) { np->maxburst = ncr_chip_table[i].maxburst; np->maxoffs = ncr_chip_table[i].maxoffs; np->clock_divn = ncr_chip_table[i].clock_divn; np->features = ncr_chip_table[i].features; } else { /* Should'nt happen if probe() is ok */ np->maxburst = 4; np->maxoffs = 8; np->clock_divn = 4; np->features = FE_ERL; } np->maxwide = np->features & FE_WIDE ? 1 : 0; np->clock_khz = np->features & FE_CLK80 ? 80000 : 40000; if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; /* ** Get the frequency of the chip's clock. ** Find the right value for scntl3. */ if (np->features & (FE_ULTRA|FE_ULTRA2)) ncr_getclock(np, np->multiplier); #ifdef NCR_TEKRAM_EEPROM if (bootverbose) { device_printf(dev, "Tekram EEPROM read %s\n", read_tekram_eeprom (np, NULL) ? "succeeded" : "failed"); } #endif /* NCR_TEKRAM_EEPROM */ /* * If scntl3 != 0, we assume BIOS is present. */ if (np->rv_scntl3) np->features |= FE_BIOS; /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (i >= 0) { --i; if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. 
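*
*	Worked example (illustrative): with an 80 MHz SCSI clock
*	(clock_khz = 80000) the fastest divisor gives
*
*		period = 4 * div_10M[0] / clock_khz
*		       = 4 * 10000000 / 80000 = 500	(50 ns)
*
*	so minsync becomes 12 (Fast-20), while a plain 40 MHz part
*	gets period = 1000 (100 ns) and minsync 25 (Fast-10).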
*/ period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & FE_ULTRA2)) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * Now, some features available with Symbios compatible boards. * LED support through GPIO0 and DIFF support. */ #ifdef SCSI_NCR_SYMBIOS_COMPAT if (!(np->rv_gpcntl & 0x01)) np->features |= FE_LED0; #if 0 /* Not safe enough without NVRAM support or user settable option */ if (!(INB(nc_gpreg) & 0x08)) np->features |= FE_DIFF; #endif #endif /* SCSI_NCR_SYMBIOS_COMPAT */ /* * Prepare initial IO registers settings. * Trust BIOS only if we believe we have one and if we want to. */ #ifdef SCSI_NCR_TRUST_BIOS if (!(np->features & FE_BIOS)) { #else if (1) { #endif np->rv_dmode = 0; np->rv_dcntl = NOCOM; np->rv_ctest3 = 0; np->rv_ctest4 = MPEE; np->rv_ctest5 = 0; np->rv_stest2 = 0; if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_PFEN) np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ if (np->features & FE_DIFF) np->rv_stest2 |= 0x20; /* Differential mode */ ncr_init_burst(np, np->maxburst); /* Max dwords burst length */ } else { np->maxburst = burst_code(np->rv_dmode, np->rv_ctest4, np->rv_ctest5); } /* ** Get on-chip SRAM address, if supported */ if ((np->features & FE_RAM) && sizeof(struct script) <= 4096) { np->sram_rid = PCIR_BAR(2); np->sram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &np->sram_rid, RF_ACTIVE); } /* ** Allocate structure for script relocation. */ if (np->sram_res != NULL) { np->script = NULL; np->p_script = rman_get_start(np->sram_res); } else if (sizeof (struct script) > PAGE_SIZE) { np->script = (struct script*) contigmalloc (round_page(sizeof (struct script)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->script = (struct script *) malloc (sizeof (struct script), M_DEVBUF, M_WAITOK); } if (sizeof (struct scripth) > PAGE_SIZE) { np->scripth = (struct scripth*) contigmalloc (round_page(sizeof (struct scripth)), M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0); } else { np->scripth = (struct scripth *) malloc (sizeof (struct scripth), M_DEVBUF, M_WAITOK); } #ifdef SCSI_NCR_PCI_CONFIG_FIXUP /* ** If cache line size is enabled, check PCI config space and ** try to fix it up if necessary. 
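**
**	The chip only emits Memory Write and Invalidate cycles
**	when the PCI cache line size register is non-zero and
**	PCIM_CMD_MWRICEN is set in the command register; the code
**	below supplies conservative defaults when the BIOS left
**	either of them clear.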
*/
#ifdef PCIR_CACHELNSZ	/* To be sure that new PCI stuff is present */
	{
		u_char cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
		u_short command  = pci_read_config(dev, PCIR_COMMAND, 2);

		if (!cachelnsz) {
			cachelnsz = 8;
			device_printf(dev,
			    "setting PCI cache line size register to %d.\n",
			    (int)cachelnsz);
			pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1);
		}

		if (!(command & PCIM_CMD_MWRICEN)) {
			command |= PCIM_CMD_MWRICEN;
			device_printf(dev,
			    "setting PCI command write and invalidate.\n");
			pci_write_config(dev, PCIR_COMMAND, command, 2);
		}
	}
#endif /* PCIR_CACHELNSZ */
#endif /* SCSI_NCR_PCI_CONFIG_FIXUP */

	/* Initialize per-target user settings */
	usrsync = 0;
	if (SCSI_NCR_DFLT_SYNC) {
		usrsync = SCSI_NCR_DFLT_SYNC;
		if (usrsync > np->maxsync)
			usrsync = np->maxsync;
		if (usrsync < np->minsync)
			usrsync = np->minsync;
	}

	usrwide = (SCSI_NCR_MAX_WIDE);
	if (usrwide > np->maxwide)
		usrwide = np->maxwide;

	for (i=0;i<MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.user.period = usrsync;
		tp->tinfo.user.offset = usrsync != 0 ? np->maxoffs : 0;
		tp->tinfo.user.width = usrwide;
		tp->tinfo.disc_tag = NCR_CUR_DISCENB
				   | NCR_CUR_TAGENB
				   | NCR_USR_DISCENB
				   | NCR_USR_TAGENB;
	}

	/*
	**	Bells and whistles ;-)
	*/
	if (bootverbose)
		device_printf(dev,
	"minsync=%d, maxsync=%d, maxoffs=%d, %d dwords burst, %s dma fifo\n",
		np->minsync, np->maxsync, np->maxoffs,
		burst_length(np->maxburst),
		(np->rv_ctest5 & DFS) ? "large" : "normal");

	/*
	**	Print some complementary information that can be helpful.
	*/
	if (bootverbose)
		device_printf(dev, "%s, %s IRQ driver%s\n",
			np->rv_stest2 & 0x20 ? "differential" : "single-ended",
			np->rv_dcntl & IRQM ? "totem pole" : "open drain",
			np->sram_res ? ", using on-chip SRAM" : "");

	/*
	**	Patch scripts to physical addresses
	*/
	ncr_script_fill (&script0, &scripth0);

	if (np->script)
		np->p_script = vtophys(np->script);
	np->p_scripth = vtophys(np->scripth);

	ncr_script_copy_and_bind (np, (ncrcmd *) &script0,
			(ncrcmd *) np->script, sizeof(struct script));
	ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0,
			(ncrcmd *) np->scripth, sizeof(struct scripth));

	/*
	**	Patch the script for LED support.
	*/
	if (np->features & FE_LED0) {
		WRITESCRIPT(reselect[0],  SCR_REG_REG(gpreg, SCR_OR,  0x01));
		WRITESCRIPT(reselect1[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe));
		WRITESCRIPT(reselect2[0], SCR_REG_REG(gpreg, SCR_AND, 0xfe));
	}

	/*
	**	init data structure
	*/
	np->jump_tcb.l_cmd	= SCR_JUMP;
	np->jump_tcb.l_paddr	= NCB_SCRIPTH_PHYS (np, abort);

	/*
	**	Get SCSI addr of host adapter (set by bios?).
	*/
	np->myaddr = INB(nc_scid) & 0x07;
	if (!np->myaddr)
		np->myaddr = SCSI_NCR_MYADDR;

#ifdef NCR_DUMP_REG
	/*
	**	Log the initial register contents
	*/
	{
		int reg;
		for (reg=0; reg<256; reg+=4) {
			if (reg%16==0) printf ("reg[%2x]", reg);
			printf (" %08x", (int)pci_conf_read (config_id, reg));
			if (reg%16==12) printf ("\n");
		}
	}
#endif /* NCR_DUMP_REG */

	/*
	**	Reset chip.
	*/
	OUTB (nc_istat,  SRST);
	DELAY (1000);
	OUTB (nc_istat, 0 );

	/*
	**	Now check the cache handling of the pci chipset.
	*/
	if (ncr_snooptest (np)) {
		printf ("CACHE INCORRECTLY CONFIGURED.\n");
		return EINVAL;
	}

	/*
	**	Install the interrupt handler.
	*/
	rid = 0;
	np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (np->irq_res == NULL) {
		device_printf(dev,
		    "interruptless mode: reduced performance.\n");
	} else {
		bus_setup_intr(dev, np->irq_res, INTR_TYPE_CAM | INTR_ENTROPY |
		    INTR_MPSAFE, NULL, ncr_intr, np, &np->irq_handle);
	}

	/*
	**	Create the device queue.
We only allow MAX_START-1 concurrent ** transactions so we can be sure to have one element free in our ** start queue to reset to the idle loop. */ devq = cam_simq_alloc(MAX_START - 1); if (devq == NULL) return ENOMEM; /* ** Now tell the generic SCSI layer ** about our bus. */ np->sim = cam_sim_alloc(ncr_action, ncr_poll, "ncr", np, device_get_unit(dev), &np->lock, 1, MAX_TAGS, devq); if (np->sim == NULL) { cam_simq_free(devq); return ENOMEM; } mtx_lock(&np->lock); if (xpt_bus_register(np->sim, dev, 0) != CAM_SUCCESS) { cam_sim_free(np->sim, /*free_devq*/ TRUE); mtx_unlock(&np->lock); return ENOMEM; } if (xpt_create_path(&np->path, /*periph*/NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/TRUE); mtx_unlock(&np->lock); return ENOMEM; } /* ** start the timeout daemon */ ncr_timeout (np); np->lasttime=0; mtx_unlock(&np->lock); return 0; } /*========================================================== ** ** ** Process pending device interrupts. ** ** **========================================================== */ static void ncr_intr(vnp) void *vnp; { ncb_p np = vnp; mtx_lock(&np->lock); ncr_intr_locked(np); mtx_unlock(&np->lock); } static void ncr_intr_locked(ncb_p np) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Repeat until no outstanding ints */ do { ncr_exception (np); } while (INB(nc_istat) & (INTF|SIP|DIP)); np->ticks = 100; } if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n"); } /*========================================================== ** ** ** Start execution of a SCSI command. ** This is called from the generic SCSI driver. ** ** **========================================================== */ static void ncr_action (struct cam_sim *sim, union ccb *ccb) { ncb_p np; np = (ncb_p) cam_sim_softc(sim); mtx_assert(&np->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { nccb_p cp; lcb_p lp; tcb_p tp; struct ccb_scsiio *csio; u_int8_t *msgptr; u_int msglen; u_int msglen2; int segments; u_int8_t nego; u_int8_t idmsg; int qidx; tp = &np->target[ccb->ccb_h.target_id]; csio = &ccb->csio; /* * Make sure we support this request. We can't do * PHYS pointers. */ if (ccb->ccb_h.flags & CAM_CDB_PHYS) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * Last time we need to check if this CCB needs to * be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; /*--------------------------------------------------- ** ** Assign an nccb / bind ccb ** **---------------------------------------------------- */ cp = ncr_get_nccb (np, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (cp == NULL) { /* XXX JGibbs - Freeze SIMQ */ ccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(ccb); return; } cp->ccb = ccb; /*--------------------------------------------------- ** ** timestamp ** **---------------------------------------------------- */ /* ** XXX JGibbs - Isn't this expensive ** enough to be conditionalized?? 
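**
**	(It costs a bzero of the stamp area plus one ticks read
**	per command.  Note also, for the negotiation setup just
**	below: wide is checked before sync because a WDTR
**	agreement resets any synchronous agreement, so wide must
**	settle first.)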
*/ bzero (&cp->phys.header.stamp, sizeof (struct tstamp)); cp->phys.header.stamp.start = ticks; nego = 0; if (tp->nego_cp == NULL) { if (tp->tinfo.current.width != tp->tinfo.goal.width) { tp->nego_cp = cp; nego = NS_WIDE; } else if ((tp->tinfo.current.period != tp->tinfo.goal.period) || (tp->tinfo.current.offset != tp->tinfo.goal.offset)) { tp->nego_cp = cp; nego = NS_SYNC; } } /*--------------------------------------------------- ** ** choose a new tag ... ** **---------------------------------------------------- */ lp = tp->lp[ccb->ccb_h.target_lun]; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && (ccb->csio.tag_action != CAM_TAG_ACTION_NONE) && (nego == 0)) { /* ** assign a tag to this nccb */ while (!cp->tag) { nccb_p cp2 = lp->next_nccb; lp->lasttag = lp->lasttag % 255 + 1; while (cp2 && cp2->tag != lp->lasttag) cp2 = cp2->next_nccb; if (cp2) continue; cp->tag=lp->lasttag; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_ADDR(ccb); printf ("using tag #%d.\n", cp->tag); } } } else { cp->tag=0; } /*---------------------------------------------------- ** ** Build the identify / tag / sdtr message ** **---------------------------------------------------- */ idmsg = MSG_IDENTIFYFLAG | ccb->ccb_h.target_lun; if (tp->tinfo.disc_tag & NCR_CUR_DISCENB) idmsg |= MSG_IDENTIFY_DISCFLAG; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; if (cp->tag) { msgptr[msglen++] = ccb->csio.tag_action; msgptr[msglen++] = cp->tag; } switch (nego) { case NS_SYNC: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_SDTR_LEN; msgptr[msglen++] = MSG_EXT_SDTR; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("sync msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-5]); printf (".\n"); }; break; case NS_WIDE: msgptr[msglen++] = MSG_EXTENDED; msgptr[msglen++] = MSG_EXT_WDTR_LEN; msgptr[msglen++] = MSG_EXT_WDTR; msgptr[msglen++] = tp->tinfo.goal.width; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(ccb); printf ("wide msgout: "); ncr_show_msg (&cp->scsi_smsg [msglen-4]); printf (".\n"); }; break; } /*---------------------------------------------------- ** ** Build the identify message for getcc. 
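**
**	(getcc, the automatic REQUEST SENSE, always runs untagged,
**	so scsi_smsg2 carries only the identify byte built above;
**	no tag bytes follow.)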
** **---------------------------------------------------- */ cp->scsi_smsg2 [0] = idmsg; msglen2 = 1; /*---------------------------------------------------- ** ** Build the data descriptors ** **---------------------------------------------------- */ /* XXX JGibbs - Handle other types of I/O */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { segments = ncr_scatter(&cp->phys, (vm_offset_t)csio->data_ptr, (vm_size_t)csio->dxfer_len); if (segments < 0) { ccb->ccb_h.status = CAM_REQ_TOO_BIG; ncr_free_nccb(np, cp); xpt_done(ccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_in); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } else { /* CAM_DIR_OUT */ cp->phys.header.savep = NCB_SCRIPT_PHYS (np, data_out); cp->phys.header.goalp = cp->phys.header.savep +20 +segments*16; } } else { cp->phys.header.savep = NCB_SCRIPT_PHYS (np, no_data); cp->phys.header.goalp = cp->phys.header.savep; } cp->phys.header.lastp = cp->phys.header.savep; /*---------------------------------------------------- ** ** fill in nccb ** **---------------------------------------------------- ** ** ** physical -> virtual backlink ** Generic SCSI command */ cp->phys.header.cp = cp; /* ** Startqueue */ cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, select); cp->phys.header.launch.l_cmd = SCR_JUMP; /* ** select */ cp->phys.select.sel_id = ccb->ccb_h.target_id; cp->phys.select.sel_scntl3 = tp->tinfo.wval; cp->phys.select.sel_sxfer = tp->tinfo.sval; /* ** message */ cp->phys.smsg.addr = CCB_PHYS (cp, scsi_smsg); cp->phys.smsg.size = msglen; cp->phys.smsg2.addr = CCB_PHYS (cp, scsi_smsg2); cp->phys.smsg2.size = msglen2; /* ** command */ cp->phys.cmd.addr = vtophys (scsiio_cdb_ptr(csio)); cp->phys.cmd.size = csio->cdb_len; /* ** sense command */ cp->phys.scmd.addr = CCB_PHYS (cp, sensecmd); cp->phys.scmd.size = 6; /* ** patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = ccb->ccb_h.target_lun << 5; cp->sensecmd[4] = csio->sense_len; /* ** sense data */ cp->phys.sense.addr = vtophys (&csio->sense_data); cp->phys.sense.size = csio->sense_len; /* ** status */ cp->actualquirks = QUIRK_NOMSG; cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY; cp->s_status = SCSI_STATUS_ILLEGAL; cp->parity_status = 0; cp->xerr_status = XE_OK; cp->sync_status = tp->tinfo.sval; cp->nego_status = nego; cp->wide_status = tp->tinfo.wval; /*---------------------------------------------------- ** ** Critical region: start this job. ** **---------------------------------------------------- */ /* ** reselect pattern and activate this job. */ cp->jump_nccb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (cp->tag))); cp->tlimit = time_second + ccb->ccb_h.timeout / 1000 + 2; cp->magic = CCB_MAGIC; /* ** insert into start queue. */ qidx = np->squeueput + 1; if (qidx >= MAX_START) qidx = 0; np->squeue [qidx ] = NCB_SCRIPT_PHYS (np, idle); np->squeue [np->squeueput] = CCB_PHYS (cp, phys); np->squeueput = qidx; if(DEBUG_FLAGS & DEBUG_QUEUE) device_printf(np->dev, "queuepos=%d tryoffset=%d.\n", np->squeueput, (unsigned)(READSCRIPT(startpos[0]) - (NCB_SCRIPTH_PHYS (np, tryloop)))); /* ** Script processor may be waiting for reselect. ** Wake it up. 
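**
**	(Setting SIGP below kicks the script out of its
**	wait-for-reselection idle loop so that it rescans the
**	start queue and finds the entry just inserted.)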
*/ OUTB (nc_istat, SIGP); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; tcb_p tp; u_int update_type; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= NCR_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= NCR_TRANS_USER; tp = &np->target[ccb->ccb_h.target_id]; /* Tag and disc enables */ if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_CUR_DISCENB; } if (update_type & NCR_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_DISCENB; else tp->tinfo.disc_tag &= ~NCR_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if (update_type & NCR_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_CUR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_CUR_TAGENB; } if (update_type & NCR_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) tp->tinfo.disc_tag |= NCR_USR_TAGENB; else tp->tinfo.disc_tag &= ~NCR_USR_TAGENB; } } /* Filter bus width and sync negotiation settings */ if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width > np->maxwide) spi->bus_width = np->maxwide; } if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { if (spi->sync_period != 0 && (spi->sync_period < np->minsync)) spi->sync_period = np->minsync; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; if (spi->sync_offset > np->maxoffs) spi->sync_offset = np->maxoffs; } } if ((update_type & NCR_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.user.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.user.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.user.width = spi->bus_width; } if ((update_type & NCR_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tp->tinfo.goal.period = spi->sync_period; if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tp->tinfo.goal.offset = spi->sync_offset; if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tp->tinfo.goal.width = spi->bus_width; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ncr_transinfo *tinfo; tcb_p tp = &np->target[ccb->ccb_h.target_id]; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tinfo = &tp->tinfo.current; if (tp->tinfo.disc_tag & 
NCR_CUR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } else { tinfo = &tp->tinfo.user; if (tp->tinfo.disc_tag & NCR_USR_DISCENB) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; else spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (tp->tinfo.disc_tag & NCR_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { /* XXX JGibbs - I'm sure the NCR uses a different strategy, * but it should be able to deal with Adaptec * geometry too. */ cam_calc_geometry(&ccb->ccg, /*extended*/1); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { OUTB (nc_scntl1, CRST); ccb->ccb_h.status = CAM_REQ_CMP; DELAY(10000); /* Wait until our interrupt handler sees it */ xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 15 : 7; cpi->max_lun = MAX_LUN - 1; cpi->initiator_id = np->myaddr; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /*========================================================== ** ** ** Complete execution of a SCSI command. ** Signal completion to the generic SCSI driver. ** ** **========================================================== */ static void ncr_complete (ncb_p np, nccb_p cp) { union ccb *ccb; tcb_p tp; /* ** Sanity check */ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->ccb) return; cp->magic = 1; cp->tlimit= 0; /* ** No Reselect anymore. */ cp->jump_nccb.l_cmd = (SCR_JUMP); /* ** No starting. */ cp->phys.header.launch.l_paddr= NCB_SCRIPT_PHYS (np, idle); /* ** timestamp */ ncb_profile (np, cp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("CCB=%x STAT=%x/%x\n", (int)(intptr_t)cp & 0xfff, cp->host_status,cp->s_status); ccb = cp->ccb; cp->ccb = NULL; tp = &np->target[ccb->ccb_h.target_id]; /* ** We do not queue more than 1 nccb per target ** with negotiation at any time. If this nccb was ** used for negotiation, clear this info in the tcb. */ if (cp == tp->nego_cp) tp->nego_cp = NULL; /* ** Check for parity errors. */ /* XXX JGibbs - What about reporting them??? */ if (cp->parity_status) { PRINT_ADDR(ccb); printf ("%d parity error(s), fallback.\n", cp->parity_status); /* ** fallback to asynch transfer. 
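**
**	(Zeroing the goal period and offset below makes the next
**	command to this target renegotiate asynchronous transfer.)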
*/ tp->tinfo.goal.period = 0; tp->tinfo.goal.offset = 0; } /* ** Check for extended errors. */ if (cp->xerr_status != XE_OK) { PRINT_ADDR(ccb); switch (cp->xerr_status) { case XE_EXTRA_DATA: printf ("extraneous data discarded.\n"); break; case XE_BAD_PHASE: printf ("illegal scsi phase (4/5).\n"); break; default: printf ("extended error %d.\n", cp->xerr_status); break; } if (cp->host_status==HS_COMPLETE) cp->host_status = HS_FAIL; } /* ** Check the status. */ if (cp->host_status == HS_COMPLETE) { if (cp->s_status == SCSI_STATUS_OK) { /* ** All went well. */ /* XXX JGibbs - Properly calculate residual */ tp->bytes += ccb->csio.dxfer_len; tp->transfers ++; ccb->ccb_h.status = CAM_REQ_CMP; } else if ((cp->s_status & SCSI_STATUS_SENSE) != 0) { /* * XXX Could be TERMIO too. Should record * original status. */ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; cp->s_status &= ~SCSI_STATUS_SENSE; if (cp->s_status == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR; } else { ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; } } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = cp->s_status; } } else if (cp->host_status == HS_SEL_TIMEOUT) { /* ** Device failed selection */ ccb->ccb_h.status = CAM_SEL_TIMEOUT; } else if (cp->host_status == HS_TIMEOUT) { /* ** No response */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; } else if (cp->host_status == HS_STALL) { ccb->ccb_h.status = CAM_REQUEUE_REQ; } else { /* ** Other protocol messes */ PRINT_ADDR(ccb); printf ("COMMAND FAILED (%x %x) @%p.\n", cp->host_status, cp->s_status, cp); ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* ** Free this nccb */ ncr_free_nccb (np, cp); /* ** signal completion to generic driver. */ xpt_done (ccb); } /*========================================================== ** ** ** Signal all (or one) control block done. ** ** **========================================================== */ static void ncr_wakeup (ncb_p np, u_long code) { /* ** Starting at the default nccb and following ** the links, complete all jobs with a ** host_status greater than "disconnect". ** ** If the "code" parameter is not zero, ** complete all jobs that are not IDLE. */ nccb_p cp = np->link_nccb; while (cp) { switch (cp->host_status) { case HS_IDLE: break; case HS_DISCONNECT: if(DEBUG_FLAGS & DEBUG_TINY) printf ("D"); /* FALLTHROUGH */ case HS_BUSY: case HS_NEGOTIATE: if (!code) break; cp->host_status = code; /* FALLTHROUGH */ default: ncr_complete (np, cp); break; } cp = cp -> link_nccb; } } static void ncr_freeze_devq (ncb_p np, struct cam_path *path) { nccb_p cp; int i; int count; int firstskip; /* ** Starting at the first nccb and following ** the links, complete all jobs that match ** the passed in path and are in the start queue. 
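**
**	Matching jobs still sitting in squeue[] are overwritten
**	with the address of the "skip" script and completed with
**	HS_STALL (reported to CAM as CAM_REQUEUE_REQ); the queue
**	is then compacted so the script never sees the holes.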
*/
	cp = np->link_nccb;
	count = 0;
	firstskip = 0;
	while (cp) {
		switch (cp->host_status) {
		case HS_BUSY:
		case HS_NEGOTIATE:
			if ((cp->phys.header.launch.l_paddr
			    == NCB_SCRIPT_PHYS (np, select))
			 && (xpt_path_comp(path, cp->ccb->ccb_h.path) >= 0)) {
				/* Mark for removal from the start queue */
				for (i = 1; i < MAX_START; i++) {
					int idx;

					idx = np->squeueput - i;
					if (idx < 0)
						idx = MAX_START + idx;
					if (np->squeue[idx]
					    == CCB_PHYS(cp, phys)) {
						np->squeue[idx] =
						    NCB_SCRIPT_PHYS (np, skip);
						if (i > firstskip)
							firstskip = i;
						break;
					}
				}
				cp->host_status=HS_STALL;
				ncr_complete (np, cp);
				count++;
			}
			break;
		default:
			break;
		}
		cp = cp->link_nccb;
	}
	if (count > 0) {
		int j;
		int bidx;

		/* Compress the start queue */
		j = 0;
		bidx = np->squeueput;
		i = np->squeueput - firstskip;
		if (i < 0)
			i = MAX_START + i;
		for (;;) {
			bidx = i - j;
			if (bidx < 0)
				bidx = MAX_START + bidx;
			if (np->squeue[i] == NCB_SCRIPT_PHYS (np, skip)) {
				j++;
			} else if (j != 0) {
				np->squeue[bidx] = np->squeue[i];
				if (np->squeue[bidx]
				    == NCB_SCRIPT_PHYS(np, idle))
					break;
			}
			i = (i + 1) % MAX_START;
		}
		np->squeueput = bidx;
	}
}

/*==========================================================
**
**
**	Start NCR chip.
**
**
**==========================================================
*/

static void
ncr_init(ncb_p np, char * msg, u_long code)
{
	int	i;

	/*
	**	Reset chip.
	*/
	OUTB (nc_istat,  SRST);
	DELAY (1000);
	OUTB (nc_istat, 0);

	/*
	**	Message.
	*/
	if (msg)
		device_printf(np->dev, "restart (%s).\n", msg);

	/*
	**	Clear Start Queue
	*/
	for (i=0;i<MAX_START;i++)
		np->squeue [i] = NCB_SCRIPT_PHYS (np, idle);

	/*
	**	Start at first entry.
	*/
	np->squeueput = 0;
	WRITESCRIPT(startpos[0], NCB_SCRIPTH_PHYS (np, tryloop));
	WRITESCRIPT(start0  [0], SCR_INT ^ IFFALSE (0));

	/*
	**	Wakeup all pending jobs.
	*/
	ncr_wakeup (np, code);

	/*
	**	Init chip.
	*/
	OUTB (nc_istat,  0x00   );	/*  Remove Reset, abort ...	     */
	OUTB (nc_scntl0, 0xca   );	/*  full arb., ena parity, par->ATN  */
	OUTB (nc_scntl1, 0x00	);	/*  odd parity, and remove CRST!!    */
	ncr_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */
	OUTB (nc_scid  , RRE|np->myaddr);/*  host adapter SCSI address	     */
	OUTW (nc_respid, 1ul<<np->myaddr);/*  id to respond to		     */
	OUTB (nc_istat , SIGP	);	/*  Signal Process		     */
	OUTB (nc_dmode , np->rv_dmode);	/*  XXX modify burstlen ???	     */
	OUTB (nc_dcntl , np->rv_dcntl);
	OUTB (nc_ctest3, np->rv_ctest3);
	OUTB (nc_ctest5, np->rv_ctest5);
	OUTB (nc_ctest4, np->rv_ctest4);/*  enable master parity checking    */
	OUTB (nc_stest2, np->rv_stest2|EXT); /* Extended Sreq/Sack filtering */
	OUTB (nc_stest3, TE     );	/*  TolerANT enable		     */
	OUTB (nc_stime0, 0x0b	);	/*  HTH = disabled, STO = 0.1 sec.   */

	if (bootverbose >= 2) {
		printf ("\tACTUAL values:SCNTL3:%02x DMODE:%02x  DCNTL:%02x\n",
			np->rv_scntl3, np->rv_dmode, np->rv_dcntl);
		printf ("\t	     CTEST3:%02x CTEST4:%02x CTEST5:%02x\n",
			np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
	}

	/*
	**	Enable GPIO0 pin for writing if LED support.
	*/
	if (np->features & FE_LED0) {
		OUTOFFB (nc_gpcntl, 0x01);
	}

	/*
	**	Fill in target structure.
	*/
	for (i=0;i<MAX_TARGET;i++) {
		tcb_p tp = &np->target[i];

		tp->tinfo.sval = 0;
		tp->tinfo.wval = np->rv_scntl3;

		tp->tinfo.current.period = 0;
		tp->tinfo.current.offset = 0;
		tp->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT;
	}

	/*
	**	enable ints
	*/
	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST);
	OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);

	/*
	**	Start script processor.
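**
**	(Loading the physical address of the "start" label into
**	DSP below makes the chip begin fetching script
**	instructions immediately.)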
*/ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); /* * Notify the XPT of the event */ if (code == HS_RESET) xpt_async(AC_BUS_RESET, np->path, NULL); } static void ncr_poll(struct cam_sim *sim) { ncr_intr_locked(cam_sim_softc(sim)); } /*========================================================== ** ** Get clock factor and sync divisor for a given ** synchronous factor period. ** Returns the clock factor (in sxfer) and scntl3 ** synchronous divisor field. ** **========================================================== */ static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p) { u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u_long fak; /* Sync factor in sxfer */ u_long per; /* Period in tenths of ns */ u_long kpc; /* (per * clk) */ /* ** Compute the synchronous period in tenths of nano-seconds */ if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; /* ** Look for the greatest clock divisor that allows an ** input speed faster than the period. */ kpc = per * clk; while (--div >= 0) if (kpc >= (div_10M[div] * 4)) break; /* ** Calculate the lowest clock factor that allows an output ** speed not faster than the period. */ fak = (kpc - 1) / div_10M[div] + 1; #if 0 /* You can #if 1 if you think this optimization is useful */ per = (fak * div_10M[div]) / clk; /* ** Why not to try the immediate lower divisor and to choose ** the one that allows the fastest output speed ? ** We dont want input speed too much greater than output speed. */ if (div >= 1 && fak < 6) { u_long fak2, per2; fak2 = (kpc - 1) / div_10M[div-1] + 1; per2 = (fak2 * div_10M[div-1]) / clk; if (per2 < per && fak2 <= 6) { fak = fak2; per = per2; --div; } } #endif if (fak < 4) fak = 4; /* Should never happen, too bad ... */ /* ** Compute and return sync parameters for the ncr */ *fakp = fak - 4; *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); } /*========================================================== ** ** Switch sync mode for current job and its target ** **========================================================== */ static void ncr_setsync(ncb_p np, nccb_p cp, u_char scntl3, u_char sxfer, u_char period) { union ccb *ccb; struct ccb_trans_settings neg; tcb_p tp; int div; u_int target = INB (nc_sdid) & 0x0f; u_int period_10ns; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; if (!scntl3 || !(sxfer & 0x1f)) scntl3 = np->rv_scntl3; scntl3 = (scntl3 & 0xf0) | (tp->tinfo.wval & EWS) | (np->rv_scntl3 & 0x07); /* ** Deduce the value of controller sync period from scntl3. ** period is in tenths of nano-seconds. */ div = ((scntl3 >> 4) & 0x7); if ((sxfer & 0x1f) && div) period_10ns = (((sxfer>>5)+4)*div_10M[div-1])/np->clock_khz; else period_10ns = 0; tp->tinfo.goal.period = period; tp->tinfo.goal.offset = sxfer & 0x1f; tp->tinfo.current.period = period; tp->tinfo.current.offset = sxfer & 0x1f; /* ** Stop there if sync parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; if (sxfer & 0x1f) { /* ** Disable extended Sreq/Sack filtering */ if (period_10ns <= 2000) OUTOFFB (nc_stest2, EXT); } /* ** Tell the SCSI layer about the ** new transfer parameters. 
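**
**	(A ccb_trans_settings is filled in below with the agreed
**	period and offset and posted to CAM as an AC_TRANSFER_NEG
**	async event on this nccb's path.)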
*/ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.sync_period = period; neg.xport_specific.spi.sync_offset = sxfer & 0x1f; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** Switch wide mode for current job and its target ** SCSI specs say: a SCSI device that accepts a WDTR ** message shall reset the synchronous agreement to ** asynchronous mode. ** **========================================================== */ static void ncr_setwide (ncb_p np, nccb_p cp, u_char wide, u_char ack) { union ccb *ccb; struct ccb_trans_settings neg; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp; u_char scntl3; u_char sxfer; assert (cp); if (!cp) return; ccb = cp->ccb; assert (ccb); if (!ccb) return; assert (target == ccb->ccb_h.target_id); tp = &np->target[target]; tp->tinfo.current.width = wide; tp->tinfo.goal.width = wide; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; scntl3 = (tp->tinfo.wval & (~EWS)) | (wide ? EWS : 0); sxfer = ack ? 0 : tp->tinfo.sval; /* ** Stop there if sync/wide parameters are unchanged */ if (tp->tinfo.sval == sxfer && tp->tinfo.wval == scntl3) return; tp->tinfo.sval = sxfer; tp->tinfo.wval = scntl3; /* Tell the SCSI layer about the new transfer params */ memset(&neg, 0, sizeof (neg)); neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; neg.xport_specific.spi.bus_width = (scntl3 & EWS) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; neg.xport_specific.spi.sync_period = 0; neg.xport_specific.spi.sync_offset = 0; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); /* ** set actual value and sync_status */ OUTB (nc_sxfer, sxfer); np->sync_st = sxfer; OUTB (nc_scntl3, scntl3); np->wide_st = scntl3; /* ** patch ALL nccbs of this target. */ for (cp = np->link_nccb; cp; cp = cp->link_nccb) { if (!cp->ccb) continue; if (cp->ccb->ccb_h.target_id != target) continue; cp->sync_status = sxfer; cp->wide_status = scntl3; } } /*========================================================== ** ** ** ncr timeout handler. ** ** **========================================================== ** ** Misused to keep the driver running when ** interrupts are not configured correctly. ** **---------------------------------------------------------- */ static void ncr_timeout (void *arg) { ncb_p np = arg; time_t thistime = time_second; ticks_t step = np->ticks; u_long count = 0; long signed t; nccb_p cp; mtx_assert(&np->lock, MA_OWNED); if (np->lasttime != thistime) { np->lasttime = thistime; /*---------------------------------------------------- ** ** handle ncr chip timeouts ** ** Assumption: ** We have a chance to arbitrate for the ** SCSI bus at least every 10 seconds. 
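**
**	(The heartbeat check below implements this assumption: if
**	the heartbeat has not advanced for more than two
**	consecutive seconds, the script processor is assumed to be
**	stuck in SEL_WAIT_RESEL and is prodded with SIGP.)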
** **---------------------------------------------------- */ t = thistime - np->heartbeat; if (t<2) np->latetime=0; else np->latetime++; if (np->latetime>2) { /* ** If there are no requests, the script ** processor will sleep on SEL_WAIT_RESEL. ** But we have to check whether it died. ** Let's try to wake it up. */ OUTB (nc_istat, SIGP); } /*---------------------------------------------------- ** ** handle nccb timeouts ** **---------------------------------------------------- */ for (cp=np->link_nccb; cp; cp=cp->link_nccb) { /* ** look for timed out nccbs. */ if (!cp->host_status) continue; count++; if (cp->tlimit > thistime) continue; /* ** Disable reselect. ** Remove it from startqueue. */ cp->jump_nccb.l_cmd = (SCR_JUMP); if (cp->phys.header.launch.l_paddr == NCB_SCRIPT_PHYS (np, select)) { device_printf(np->dev, "timeout nccb=%p (skip)\n", cp); cp->phys.header.launch.l_paddr = NCB_SCRIPT_PHYS (np, skip); } switch (cp->host_status) { case HS_BUSY: case HS_NEGOTIATE: /* FALLTHROUGH */ case HS_DISCONNECT: cp->host_status=HS_TIMEOUT; } cp->tag = 0; /* ** wakeup this nccb. */ ncr_complete (np, cp); } } callout_reset(&np->timer, step ? step : 1, ncr_timeout, np); if (INB(nc_istat) & (INTF|SIP|DIP)) { /* ** Process pending interrupts. */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{"); ncr_exception (np); if (DEBUG_FLAGS & DEBUG_TINY) printf ("}"); } } /*========================================================== ** ** log message for real hard errors ** ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." ** ** exception register: ** ds: dstat ** si: sist ** ** SCSI bus lines: ** so: control lines as driver by NCR. ** si: control lines as seen by NCR. ** sd: scsi data lines as seen by NCR. ** ** wide/fastmode: ** sxfer: (see the manual) ** scntl3: (see the manual) ** ** current script command: ** dsp: script address (relative to start of script). ** dbc: first word of script command. ** ** First 16 register of the chip: ** r0..rf ** **========================================================== */ static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat) { u_int32_t dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (np->p_script < dsp && dsp <= np->p_script + sizeof(struct script)) { script_ofs = dsp - np->p_script; script_size = sizeof(struct script); script_base = (u_char *) np->script; script_name = "script"; } else if (np->p_scripth < dsp && dsp <= np->p_scripth + sizeof(struct scripth)) { script_ofs = dsp - np->p_scripth; script_size = sizeof(struct scripth); script_base = (u_char *) np->scripth; script_name = "scripth"; } else { script_ofs = dsp; script_size = 0; script_base = 0; script_name = "mem"; } device_printf(np->dev, "%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { device_printf(np->dev, "script cmd = %08x\n", (int)READSCRIPT_OFF(script_base, script_ofs)); } device_printf(np->dev, "regdump:"); for (i=0; i<16;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); } /*========================================================== ** ** ** ncr chip exception handler. 
** ** **========================================================== */ static void ncr_exception (ncb_p np) { u_char istat, dstat; u_short sist; /* ** interrupt on the fly ? */ while ((istat = INB (nc_istat)) & INTF) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); OUTB (nc_istat, INTF); np->profile.num_fly++; ncr_wakeup (np, 0); } if (!(istat & (SIP|DIP))) { return; } /* ** Steinbach's Guideline for Systems Programming: ** Never test for an error condition you don't know how to handle. */ sist = (istat & SIP) ? INW (nc_sist) : 0; dstat = (istat & DIP) ? INB (nc_dstat) : 0; np->profile.num_int++; if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); if ((dstat==DFE) && (sist==PAR)) return; /*========================================================== ** ** First the normal cases. ** **========================================================== */ /*------------------------------------------- ** SCSI reset **------------------------------------------- */ if (sist & RST) { ncr_init (np, bootverbose ? "scsi reset" : NULL, HS_RESET); return; } /*------------------------------------------- ** selection timeout ** ** IID excluded from dstat mask! ** (chip bug) **------------------------------------------- */ if ((sist & STO) && !(sist & (GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR))) { ncr_int_sto (np); return; } /*------------------------------------------- ** Phase mismatch. **------------------------------------------- */ if ((sist & MA) && !(sist & (STO|GEN|HTH|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { ncr_int_ma (np, dstat); return; } /*---------------------------------------- ** move command with length 0 **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_MOVE_TBL)) { /* ** Target wants more data than available. ** The "no_data" script will do it. */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, no_data)); return; } /*------------------------------------------- ** Programmed interrupt **------------------------------------------- */ if ((dstat & SIR) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|IID)) && (INB(nc_dsps) <= SIR_MAX)) { ncr_int_sir (np); return; } /*======================================== ** log message for real hard errors **======================================== */ ncr_log_hard_error(np, sist, dstat); /*======================================== ** do the register dump **======================================== */ if (time_second - np->regtime > 10) { int i; np->regtime = time_second; for (i=0; iregdump); i++) ((volatile char*)&np->regdump)[i] = INB_OFF(i); np->regdump.nc_dstat = dstat; np->regdump.nc_sist = sist; } /*---------------------------------------- ** clean up the dma fifo **---------------------------------------- */ if ( (INB(nc_sstat0) & (ILF|ORF|OLF) ) || (INB(nc_sstat1) & (FF3210) ) || (INB(nc_sstat2) & (ILF1|ORF1|OLF1)) || /* wide .. 
*/ !(dstat & DFE)) { device_printf(np->dev, "have to clear fifos.\n"); OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ } /*---------------------------------------- ** handshake timeout **---------------------------------------- */ if (sist & HTH) { device_printf(np->dev, "handshake timeout\n"); OUTB (nc_scntl1, CRST); DELAY (1000); OUTB (nc_scntl1, 0x00); OUTB (nc_scr0, HS_FAIL); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** unexpected disconnect **---------------------------------------- */ if ((sist & UDC) && !(sist & (STO|GEN|HTH|MA|SGE|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_scr0, HS_UNEXPECTED); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup)); return; } /*---------------------------------------- ** cannot disconnect **---------------------------------------- */ if ((dstat & IID) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR)) && ((INL(nc_dbc) & 0xf8000000) == SCR_WAIT_DISC)) { /* ** Unexpected data cycle while waiting for disconnect. */ if (INB(nc_sstat2) & LDSC) { /* ** It's an early reconnect. ** Let's continue ... */ OUTB (nc_dcntl, np->rv_dcntl | STD); /* ** info message */ device_printf(np->dev, "INFO: LDSC while IID.\n"); return; } device_printf(np->dev, "target %d doesn't release the bus.\n", INB (nc_sdid)&0x0f); /* ** return without restarting the NCR. ** timeout will do the real work. */ return; } /*---------------------------------------- ** single step **---------------------------------------- */ if ((dstat & SSI) && !(sist & (STO|GEN|HTH|MA|SGE|UDC|RST|PAR)) && !(dstat & (MDPE|BF|ABRT|SIR|IID))) { OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** @RECOVER@ HTH, SGE, ABRT. ** ** We should try to recover from these interrupts. ** They may occur if there are problems with synch transfers, or ** if targets are switched on or off while the driver is running. */ if (sist & SGE) { /* clear scsi offsets */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); } /* ** Freeze controller to be able to read the messages. */ if (DEBUG_FLAGS & DEBUG_FREEZE) { int i; unsigned char val; for (i=0; i<0x60; i++) { switch (i%16) { case 0: device_printf(np->dev, "reg[%d0]: ", i / 16); break; case 4: case 8: case 12: printf (" "); break; } val = bus_read_1(np->reg_res, i); printf (" %x%x", val/16, val%16); if (i%16==15) printf (".\n"); } callout_stop(&np->timer); device_printf(np->dev, "halted!\n"); /* ** don't restart controller ... */ OUTB (nc_istat, SRST); return; } #ifdef NCR_FREEZE /* ** Freeze system to be able to read the messages. */ printf ("ncr: fatal error: system halted - press reset to reboot ..."); for (;;); #endif /* ** sorry, have to kill ALL jobs ... */ ncr_init (np, "fatal error", HS_FAIL); } /*========================================================== ** ** ncr chip exception handler for selection timeout ** **========================================================== ** ** There seems to be a bug in the 53c810. ** Although a STO-Interrupt is pending, ** it continues executing script commands. ** But it will fail and interrupt (IID) on ** the next instruction where it's looking ** for a valid phase. ** **---------------------------------------------------------- */ static void ncr_int_sto (ncb_p np) { u_long dsa, scratcha, diff; nccb_p cp; if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); /* ** look for nccb and set the status. 
*/ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (cp) { cp-> host_status = HS_SEL_TIMEOUT; ncr_complete (np, cp); } /* ** repair start queue */ scratcha = INL (nc_scratcha); diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop); /* assert ((diff <= MAX_START * 20) && !(diff % 20));*/ if ((diff <= MAX_START * 20) && !(diff % 20)) { WRITESCRIPT(startpos[0], scratcha); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start)); return; } ncr_init (np, "selection timeout", HS_FAIL); } /*========================================================== ** ** ** ncr chip exception handler for phase errors. ** ** **========================================================== ** ** We have to construct a new transfer descriptor, ** to transfer the rest of the current block. ** **---------------------------------------------------------- */ static void ncr_int_ma (ncb_p np, u_char dstat) { u_int32_t dbc; u_int32_t rest; u_int32_t dsa; u_int32_t dsp; u_int32_t nxtdsp; volatile void *vdsp_base; size_t vdsp_off; u_int32_t oadr, olen; u_int32_t *tblp, *newcmd; u_char cmd, sbcl, ss0, ss2, ctest5; u_short delta; nccb_p cp; dsp = INL (nc_dsp); dsa = INL (nc_dsa); dbc = INL (nc_dbc); ss0 = INB (nc_sstat0); ss2 = INB (nc_sstat2); sbcl= INB (nc_sbcl); cmd = dbc >> 24; rest= dbc & 0xffffff; ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0; if (ctest5 & DFS) delta=(((ctest5<<8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff; else delta=(INB (nc_dfifo) - rest) & 0x7f; /* ** The data in the dma fifo has not been transferred to ** the target -> add the amount to the rest ** and clear the data. ** Check the sstat2 register in case of wide transfer. */ if (!(dstat & DFE)) rest += delta; if (ss0 & OLF) rest++; if (ss0 & ORF) rest++; if (INB(nc_scntl3) & EWS) { if (ss2 & OLF1) rest++; if (ss2 & ORF1) rest++; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* ** locate matching cp */ cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; if (!cp) { device_printf(np->dev, "SCSI phase error fixup: CCB already dequeued (%p)\n", (void *)np->header.cp); return; } if (cp != np->header.cp) { device_printf(np->dev, "SCSI phase error fixup: CCB address mismatch " "(%p != %p) np->nccb = %p\n", (void *)cp, (void *)np->header.cp, (void *)np->link_nccb); /* return;*/ } /* ** find the interrupted script command, ** and the address at which to continue. */ if (dsp == vtophys (&cp->patch[2])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[0]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp == vtophys (&cp->patch[6])) { vdsp_base = cp; vdsp_off = offsetof(struct nccb, patch[4]); nxtdsp = READSCRIPT_OFF(vdsp_base, vdsp_off + 3*4); } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { vdsp_base = np->script; vdsp_off = dsp - np->p_script - 8; nxtdsp = dsp; } else { vdsp_base = np->scripth; vdsp_off = dsp - np->p_scripth - 8; nxtdsp = dsp; } /* ** log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) { printf ("P%x%x ",cmd&7, sbcl&7); printf ("RL=%d D=%d SS0=%x ", (unsigned) rest, (unsigned) delta, ss0); } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, np->header.cp, dsp, nxtdsp, (volatile char*)vdsp_base+vdsp_off, cmd); } /* ** get old startaddress and old length. 
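** (If bit 0x10 of the script command is set, the MOVE was table ** indirect: oadr is then an offset into cp->phys where a {length, ** address} pair is stored, and both values are re-read from that ** table just below.)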
*/ oadr = READSCRIPT_OFF(vdsp_base, vdsp_off + 1*4); if (cmd & 0x10) { /* Table indirect */ tblp = (u_int32_t *) ((char*) &cp->phys + oadr); olen = tblp[0]; oadr = tblp[1]; } else { tblp = (u_int32_t *) 0; olen = READSCRIPT_OFF(vdsp_base, vdsp_off) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%lx OADR=%lx\n", (unsigned) (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24), (void *) tblp, (u_long) olen, (u_long) oadr); } /* ** if old phase not dataphase, leave here. */ if (cmd != (READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24)) { PRINT_ADDR(cp->ccb); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)READSCRIPT_OFF(vdsp_base, vdsp_off) >> 24); return; } if (cmd & 0x06) { PRINT_ADDR(cp->ccb); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, sbcl&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); OUTB (nc_dcntl, np->rv_dcntl | STD); return; } /* ** choose the correct patch area. ** if savep points to one, choose the other. */ newcmd = cp->patch; if (cp->phys.header.savep == vtophys (newcmd)) newcmd+=4; /* ** fillin the commands */ newcmd[0] = ((cmd & 0x0f) << 24) | rest; newcmd[1] = oadr + olen - rest; newcmd[2] = SCR_JUMP; newcmd[3] = nxtdsp; if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp->ccb); printf ("newcmd[%d] %x %x %x %x.\n", (int)(newcmd - cp->patch), (unsigned)newcmd[0], (unsigned)newcmd[1], (unsigned)newcmd[2], (unsigned)newcmd[3]); } /* ** fake the return address (to the patch). ** and restart script processor at dispatcher. */ np->profile.num_break++; OUTL (nc_temp, vtophys (newcmd)); if ((cmd & 7) == 0) OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); else OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn)); } /*========================================================== ** ** ** ncr chip exception handler for programmed interrupts. ** ** **========================================================== */ static int ncr_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==MSG_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void ncr_int_sir (ncb_p np) { u_char scntl3; u_char chg, ofs, per, fak, wide; u_char num = INB (nc_dsps); nccb_p cp=0; u_long dsa; u_int target = INB (nc_sdid) & 0x0f; tcb_p tp = &np->target[target]; int i; if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); switch (num) { case SIR_SENSE_RESTART: case SIR_STALL_RESTART: break; default: /* ** lookup the nccb */ dsa = INL (nc_dsa); cp = np->link_nccb; while (cp && (CCB_PHYS (cp, phys) != dsa)) cp = cp->link_nccb; assert (cp); if (!cp) goto out; assert (cp == np->header.cp); if (cp != np->header.cp) goto out; } switch (num) { /*-------------------------------------------------------------------- ** ** Processing of interrupted getcc selects ** **-------------------------------------------------------------------- */ case SIR_SENSE_RESTART: /*------------------------------------------ ** Script processor is idle. 
** Look for interrupted "check cond" **------------------------------------------ */ if (DEBUG_FLAGS & DEBUG_RESTART) device_printf(np->dev, "int#%d", num); cp = (nccb_p) 0; for (i=0; i<MAX_TARGET; i++) { tp = &np->target[i]; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); cp = tp->hold_cp; if (!cp) continue; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+"); if ((cp->host_status==HS_BUSY) && (cp->s_status==SCSI_STATUS_CHECK_COND)) break; if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)"); tp->hold_cp = cp = (nccb_p) 0; } if (cp) { if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+ restart job ..\n"); OUTL (nc_dsa, CCB_PHYS (cp, phys)); OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc)); return; } /* ** no job, resume normal processing */ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n"); WRITESCRIPT(start0[0], SCR_INT ^ IFFALSE (0)); break; case SIR_SENSE_FAILED: /*------------------------------------------- ** While trying to select for ** getting the condition code, ** a target reselected us. **------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_RESTART) { PRINT_ADDR(cp->ccb); printf ("in getcc reselect by t%d.\n", INB(nc_ssid) & 0x0f); } /* ** Mark this job */ cp->host_status = HS_BUSY; cp->s_status = SCSI_STATUS_CHECK_COND; np->target[cp->ccb->ccb_h.target_id].hold_cp = cp; /* ** And patch code to restart it. */ WRITESCRIPT(start0[0], SCR_INT); break; /*----------------------------------------------------------------------------- ** ** What you always wanted to know about transfer mode negotiation ... ** ** We try to negotiate sync and wide transfer only after ** a successful inquire command. We look at byte 7 of the ** inquire data to determine the capabilities of the target. ** ** When we try to negotiate, we append the negotiation message ** to the identify and (maybe) simple tag message. ** The host status field is set to HS_NEGOTIATE to mark this ** situation. ** ** If the target doesn't answer this message immediately ** (as required by the standard), the SIR_NEGO_FAIL interrupt ** will be raised eventually. ** The handler removes the HS_NEGOTIATE status, and sets the ** negotiated value to the default (async / nowide). ** ** If we receive a matching answer immediately, we check it ** for validity, and set the values. ** ** If we receive a Reject message immediately, we assume the ** negotiation has failed, and fall back to standard values. ** ** If we receive a negotiation message while not in HS_NEGOTIATE ** state, it's a target initiated negotiation. We prepare a ** (hopefully) valid answer, set our parameters, and send back ** this answer to the target. ** ** If the target doesn't fetch the answer (no message out phase), ** we assume the negotiation has failed, and fall back to default ** settings. ** ** When we set the values, we adjust them in all nccbs belonging ** to this target, in the controller's register, and in the "phys" ** field of the controller's struct ncb. ** ** Possible cases: hs sir msg_in value send goto ** We try to negotiate: ** -> target doesn't msgin NEG FAIL noop defa. - dispatch ** -> target rejected our msg NEG FAIL reject defa. - dispatch ** -> target answered (ok) NEG SYNC sdtr set - clrack ** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad ** -> target answered (ok) NEG WIDE wdtr set - clrack ** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad ** -> any other msgin NEG FAIL noop defa.
- dispatch ** ** Target tries to negotiate: ** -> incoming message --- SYNC sdtr set SDTR - ** -> incoming message --- WIDE wdtr set WDTR - ** We sent our answer: ** -> target doesn't msgout --- PROTO ? defa. - dispatch ** **----------------------------------------------------------------------------- */ case SIR_NEGO_FAILED: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't send an answer message, ** or target rejected our message. ** ** Remove negotiation request. ** **------------------------------------------------------- */ OUTB (HS_PRT, HS_BUSY); /* FALLTHROUGH */ case SIR_NEGO_PROTO: /*------------------------------------------------------- ** ** Negotiation failed. ** Target doesn't fetch the answer message. ** **------------------------------------------------------- */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("negotiation failed sir=%x status=%x.\n", num, cp->nego_status); } /* ** any error in negotiation: ** fall back to default mode. */ switch (cp->nego_status) { case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } np->msgin [0] = MSG_NOOP; np->msgout[0] = MSG_NOOP; cp->nego_status = 0; OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch)); break; case SIR_NEGO_SYNC: /* ** Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; if (ofs==0) per=255; /* ** check values against driver limits. */ if (per < np->minsync) {chg = 1; per = np->minsync;} if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} /* ** Check against controller limits. */ fak = 7; scntl3 = 0; if (ofs != 0) { ncr_getsync(np, per, &fak, &scntl3); if (fak > 7) { chg = 1; ofs = 0; } } if (ofs == 0) { fak = 7; per = 0; scntl3 = 0; } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n", per, scntl3, ofs, fak, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_SYNC: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setsync (np, cp, 0, 0xe0, 0); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setsync (np,cp,scntl3,(fak<<5)|ofs, per); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_WIDE: ncr_setwide (np, cp, 0, 0); break; } } /* ** It was a request. Set value and ** prepare an answer message */ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs, per); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 3; np->msgout[2] = MSG_EXT_SDTR; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("sync msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } if (!ofs) { OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); return; } np->msgin [0] = MSG_NOOP; break; case SIR_NEGO_WIDE: /* ** Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgin: "); (void) ncr_show_msg (np->msgin); printf (".\n"); } /* ** get requested values. */ chg = 0; wide = np->msgin[3]; /* ** check values against driver limits. 
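** (wide is the WDTR width exponent: 0 = 8 bit, 1 = 16 bit, 2 = 32 bit; ** the negotiated transfer width is 8 << wide bits.)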
*/ if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide: wide=%d chg=%d.\n", wide, chg); } if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); switch (cp->nego_status) { case NS_WIDE: /* ** This was an answer message */ if (chg) { /* ** Answer wasn't acceptable. */ ncr_setwide (np, cp, 0, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad)); } else { /* ** Answer is ok. */ ncr_setwide (np, cp, wide, 1); OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack)); } return; case NS_SYNC: ncr_setsync (np, cp, 0, 0xe0, 0); break; } } /* ** It was a request, set value and ** prepare an answer message */ ncr_setwide (np, cp, wide, 1); np->msgout[0] = MSG_EXTENDED; np->msgout[1] = 2; np->msgout[2] = MSG_EXT_WDTR; np->msgout[3] = wide; np->msgin [0] = MSG_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp->ccb); printf ("wide msgout: "); (void) ncr_show_msg (np->msgout); printf (".\n"); } break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_REJECT_RECEIVED: /*----------------------------------------------- ** ** We received a MSG_MESSAGE_REJECT message. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT received (%x:%x).\n", (unsigned)np->lastmsg, np->msgout[0]); break; case SIR_REJECT_SENT: /*----------------------------------------------- ** ** We received an unknown message ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_MESSAGE_REJECT sent for "); (void) ncr_show_msg (np->msgin); printf (".\n"); break; /*-------------------------------------------------------------------- ** ** Processing of special messages ** **-------------------------------------------------------------------- */ case SIR_IGN_RESIDUE: /*----------------------------------------------- ** ** We received an IGNORE RESIDUE message, ** which couldn't be handled by the script. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_IGN_WIDE_RESIDUE received, but not yet implemented.\n"); break; case SIR_MISSING_SAVE: /*----------------------------------------------- ** ** We received a DISCONNECT message, ** but the data pointer wasn't saved before. ** **----------------------------------------------- */ PRINT_ADDR(cp->ccb); printf ("MSG_DISCONNECT received, but datapointer not saved:\n" "\tdata=%x save=%x goal=%x.\n", (unsigned) INL (nc_temp), (unsigned) np->header.savep, (unsigned) np->header.goalp); break; /*-------------------------------------------------------------------- ** ** Processing of a "SCSI_STATUS_QUEUE_FULL" status. ** ** XXX JGibbs - We should do the same thing for BUSY status. ** ** The current command has been rejected, ** because there are too many in the command queue. ** We have started too many commands for that target. ** **-------------------------------------------------------------------- */ case SIR_STALL_QUEUE: cp->xerr_status = XE_OK; cp->host_status = HS_COMPLETE; cp->s_status = SCSI_STATUS_QUEUE_FULL; ncr_freeze_devq(np, cp->ccb->ccb_h.path); ncr_complete(np, cp); /* FALLTHROUGH */ case SIR_STALL_RESTART: /*----------------------------------------------- ** ** Enable selecting again, ** if NO disconnected jobs. ** **----------------------------------------------- */ /* ** Look for a disconnected job.
*/ cp = np->link_nccb; while (cp && cp->host_status != HS_DISCONNECT) cp = cp->link_nccb; /* ** if there is one, ... */ if (cp) { /* ** wait for reselection */ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect)); return; } /* ** else remove the interrupt. */ device_printf(np->dev, "queue empty.\n"); WRITESCRIPT(start1[0], SCR_INT ^ IFFALSE (0)); break; } out: OUTB (nc_dcntl, np->rv_dcntl | STD); } /*========================================================== ** ** ** Acquire a control block ** ** **========================================================== */ static nccb_p ncr_get_nccb (ncb_p np, u_long target, u_long lun) { lcb_p lp; nccb_p cp = NULL; /* ** Lun structure available ? */ lp = np->target[target].lp[lun]; if (lp) { cp = lp->next_nccb; /* ** Look for free CCB */ while (cp && cp->magic) { cp = cp->next_nccb; } } /* ** if nothing available, create one. */ if (cp == NULL) cp = ncr_alloc_nccb(np, target, lun); if (cp != NULL) { if (cp->magic) { device_printf(np->dev, "Bogus free cp found\n"); return (NULL); } cp->magic = 1; } return (cp); } /*========================================================== ** ** ** Release one control block ** ** **========================================================== */ static void ncr_free_nccb (ncb_p np, nccb_p cp) { /* ** sanity */ assert (cp != NULL); cp -> host_status = HS_IDLE; cp -> magic = 0; } /*========================================================== ** ** ** Allocation of resources for Targets/Luns/Tags. ** ** **========================================================== */ static nccb_p ncr_alloc_nccb (ncb_p np, u_long target, u_long lun) { tcb_p tp; lcb_p lp; nccb_p cp; assert (np != NULL); if (target>=MAX_TARGET) return(NULL); if (lun >=MAX_LUN ) return(NULL); tp=&np->target[target]; if (!tp->jump_tcb.l_cmd) { /* ** initialize it. */ tp->jump_tcb.l_cmd = (SCR_JUMP^IFFALSE (DATA (0x80 + target))); tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr; tp->getscr[0] = (np->features & FE_PFEN)? SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[1] = vtophys (&tp->tinfo.sval); tp->getscr[2] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_sxfer); tp->getscr[3] = (np->features & FE_PFEN)? 
SCR_COPY(1) : SCR_COPY_F(1); tp->getscr[4] = vtophys (&tp->tinfo.wval); tp->getscr[5] = rman_get_start(np->reg_res) + offsetof (struct ncr_reg, nc_scntl3); assert (((offsetof(struct ncr_reg, nc_sxfer) ^ (offsetof(struct tcb ,tinfo) + offsetof(struct ncr_target_tinfo, sval))) & 3) == 0); assert (((offsetof(struct ncr_reg, nc_scntl3) ^ (offsetof(struct tcb, tinfo) + offsetof(struct ncr_target_tinfo, wval))) &3) == 0); tp->call_lun.l_cmd = (SCR_CALL); tp->call_lun.l_paddr = NCB_SCRIPT_PHYS (np, resel_lun); tp->jump_lcb.l_cmd = (SCR_JUMP); tp->jump_lcb.l_paddr = NCB_SCRIPTH_PHYS (np, abort); np->jump_tcb.l_paddr = vtophys (&tp->jump_tcb); } /* ** Logic unit control block */ lp = tp->lp[lun]; if (!lp) { /* ** Allocate a lcb */ lp = (lcb_p) malloc (sizeof (struct lcb), M_DEVBUF, M_NOWAIT | M_ZERO); if (!lp) return(NULL); /* ** Initialize it */ lp->jump_lcb.l_cmd = (SCR_JUMP ^ IFFALSE (DATA (lun))); lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr; lp->call_tag.l_cmd = (SCR_CALL); lp->call_tag.l_paddr = NCB_SCRIPT_PHYS (np, resel_tag); lp->jump_nccb.l_cmd = (SCR_JUMP); lp->jump_nccb.l_paddr = NCB_SCRIPTH_PHYS (np, aborttag); lp->actlink = 1; /* ** Chain into LUN list */ tp->jump_lcb.l_paddr = vtophys (&lp->jump_lcb); tp->lp[lun] = lp; } /* ** Allocate a nccb */ cp = (nccb_p) malloc (sizeof (struct nccb), M_DEVBUF, M_NOWAIT|M_ZERO); if (!cp) return (NULL); if (DEBUG_FLAGS & DEBUG_ALLOC) { printf ("new nccb @%p.\n", cp); } /* ** Fill in physical addresses */ cp->p_nccb = vtophys (cp); /* ** Chain into reselect list */ cp->jump_nccb.l_cmd = SCR_JUMP; cp->jump_nccb.l_paddr = lp->jump_nccb.l_paddr; lp->jump_nccb.l_paddr = CCB_PHYS (cp, jump_nccb); cp->call_tmp.l_cmd = SCR_CALL; cp->call_tmp.l_paddr = NCB_SCRIPT_PHYS (np, resel_tmp); /* ** Chain into wakeup list */ cp->link_nccb = np->link_nccb; np->link_nccb = cp; /* ** Chain into CCB list */ cp->next_nccb = lp->next_nccb; lp->next_nccb = cp; return (cp); } /*========================================================== ** ** ** Build Scatter Gather Block ** ** **========================================================== ** ** The transfer area may be scattered among ** several non-adjacent physical pages. ** ** We may use MAX_SCATTER blocks. ** **---------------------------------------------------------- */ static int ncr_scatter (struct dsb* phys, vm_offset_t vaddr, vm_size_t datalen) { u_long paddr, pnext; u_short segment = 0; u_long segsize, segaddr; u_long size, csize = 0; u_long chunk = MAX_SIZE; int free; bzero (&phys->data, sizeof (phys->data)); if (!datalen) return (0); paddr = vtophys (vaddr); /* ** insert extra break points at a distance of chunk. ** We try to reduce the number of interrupts caused ** by unexpected phase changes due to disconnects. ** A typical harddisk may disconnect before ANY block. ** If we wanted to avoid unexpected phase changes at all, ** we would have to use a break point every 512 bytes. ** Of course the number of scatter/gather blocks is ** limited. */ free = MAX_SCATTER - 1; if (vaddr & PAGE_MASK) free -= datalen / PAGE_SIZE; if (free>1) while ((chunk * free >= 2 * datalen) && (chunk>=1024)) chunk /= 2; if(DEBUG_FLAGS & DEBUG_SCATTER) printf("ncr?:\tscattering virtual=%p size=%d chunk=%d.\n", (void *) vaddr, (unsigned) datalen, (unsigned) chunk); /* ** Build data descriptors.
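** (Illustrative example, numbers not taken from the driver: with ** free = 32 and a page aligned datalen of 64K, chunk is halved while ** chunk * 32 >= 128K, ending at 2K, so the transfer is split into ** roughly 32 break points of 2K each.)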
*/ while (datalen && (segment < MAX_SCATTER)) { /* ** this segment is empty */ segsize = 0; segaddr = paddr; pnext = paddr; if (!csize) csize = chunk; while ((datalen) && (paddr == pnext) && (csize)) { /* ** continue this segment */ pnext = (paddr & (~PAGE_MASK)) + PAGE_SIZE; /* ** Compute max size */ size = pnext - paddr; /* page size */ if (size > datalen) size = datalen; /* data size */ if (size > csize ) size = csize ; /* chunksize */ segsize += size; vaddr += size; csize -= size; datalen -= size; paddr = vtophys (vaddr); } if(DEBUG_FLAGS & DEBUG_SCATTER) printf ("\tseg #%d addr=%x size=%d (rest=%d).\n", segment, (unsigned) segaddr, (unsigned) segsize, (unsigned) datalen); phys->data[segment].addr = segaddr; phys->data[segment].size = segsize; segment++; } if (datalen) { printf("ncr?: scatter/gather failed (residue=%d).\n", (unsigned) datalen); return (-1); } return (segment); } /*========================================================== ** ** ** Test the pci bus snoop logic :-( ** ** Has to be called with interrupts disabled. ** ** **========================================================== */ #ifndef NCR_IOMAPPED static int ncr_regtest (struct ncb* np) { register volatile u_int32_t data; /* ** ncr registers may NOT be cached. ** write 0xffffffff to a read only register area, ** and try to read it back. */ data = 0xffffffff; OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data); data = INL_OFF(offsetof(struct ncr_reg, nc_dstat)); #if 1 if (data == 0xffffffff) { #else if ((data & 0xe2f0fffd) != 0x02000080) { #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); return (0x10); } return (0); } #endif static int ncr_snooptest (struct ncb* np) { u_int32_t ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; int i, err=0; #ifndef NCR_IOMAPPED err |= ncr_regtest (np); if (err) return (err); #endif /* ** init */ pc = NCB_SCRIPTH_PHYS (np, snooptest); host_wr = 1; ncr_wr = 2; /* ** Set memory and register. */ ncr_cache = host_wr; OUTL (nc_temp, ncr_wr); /* ** Start script (exchange values) */ OUTL (nc_dsp, pc); /* ** Wait 'til done (with timeout) */ for (i=0; i<NCR_SNOOP_TIMEOUT; i++) if (INB(nc_istat) & (INTF|SIP|DIP)) break; /* ** Save termination position. */ pc = INL (nc_dsp); /* ** Read memory and register. */ host_rd = ncr_cache; ncr_rd = INL (nc_scratcha); ncr_bk = INL (nc_temp); /* ** check for timeout */ if (i>=NCR_SNOOP_TIMEOUT) { printf ("CACHE TEST FAILED: timeout.\n"); return (0x20); } /* ** Check termination position. */ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) NCB_SCRIPTH_PHYS (np, snooptest), (u_long) pc, (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8); return (0x40); } /* ** Show results. */ if (host_wr != ncr_rd) { printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", (int) host_wr, (int) ncr_rd); err |= 1; } if (host_rd != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", (int) ncr_wr, (int) host_rd); err |= 2; } if (ncr_bk != ncr_wr) { printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", (int) ncr_wr, (int) ncr_bk); err |= 4; } return (err); } /*========================================================== ** ** ** Profiling the drivers and targets performance. ** ** **========================================================== */ /* ** Compute the difference in milliseconds.
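** For example, with hz = 100 a difference of 50 ticks comes out as ** 50 * 1000 / 100 = 500 milliseconds.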
**/ static int ncr_delta (int *from, int *to) { if (!from) return (-1); if (!to) return (-2); return ((*to - *from) * 1000 / hz); } #define PROFILE cp->phys.header.stamp static void ncb_profile (ncb_p np, nccb_p cp) { int co, da, st, en, di, se, post,work,disc; u_long diff; PROFILE.end = ticks; st = ncr_delta (&PROFILE.start,&PROFILE.status); if (st<0) return; /* status not reached */ da = ncr_delta (&PROFILE.start,&PROFILE.data); if (da<0) return; /* No data transfer phase */ co = ncr_delta (&PROFILE.start,&PROFILE.command); if (co<0) return; /* command not executed */ en = ncr_delta (&PROFILE.start,&PROFILE.end), di = ncr_delta (&PROFILE.start,&PROFILE.disconnect), se = ncr_delta (&PROFILE.start,&PROFILE.select); post = en - st; /* ** @PROFILE@ Disconnect time invalid if multiple disconnects */ if (di>=0) disc = se-di; else disc = 0; work = (st - co) - disc; diff = (np->disc_phys - np->disc_ref) & 0xff; np->disc_ref += diff; np->profile.num_trans += 1; if (cp->ccb) np->profile.num_bytes += cp->ccb->csio.dxfer_len; np->profile.num_disc += diff; np->profile.ms_setup += co; np->profile.ms_data += work; np->profile.ms_disc += disc; np->profile.ms_post += post; } #undef PROFILE /*========================================================== ** ** Determine the ncr's clock frequency. ** This is essential for the negotiation ** of the synchronous transfer rate. ** **========================================================== ** ** Note: we have to return the correct value. ** THERE IS NO SAFE DEFAULT VALUE. ** ** Most NCR/SYMBIOS boards are delivered with a 40 MHz clock. ** 53C860 and 53C875 rev. 1 support fast20 transfers but ** do not have a clock doubler and so are provided with a ** 80 MHz clock. All other fast20 boards incorporate a doubler ** and so should be delivered with a 40 MHz clock. ** The future fast40 chips (895/895) use a 40 MHz base clock ** and provide a clock quadrupler (160 MHz). The code below ** tries to deal as cleverly as possible with all this stuff. ** **---------------------------------------------------------- */ /* * Select NCR SCSI clock frequency */ static void ncr_selectclock(ncb_p np, u_char scntl3) { if (np->multiplier < 2) { OUTB(nc_scntl3, scntl3); return; } if (bootverbose >= 2) device_printf(np->dev, "enabling clock multiplier\n"); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */ int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) DELAY(20); if (!i) device_printf(np->dev, "the chip cannot lock the frequency\n"); } else /* Wait 20 micro-seconds for doubler */ DELAY(20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate NCR SCSI clock frequency (in KHz) */ static unsigned ncrgetfreq (ncb_p np, int gen) { int ms = 0; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason, if multiple * measurements are performed, we trust * the higher delay (the lower returned * frequency).
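* As a worked example (illustrative numbers): with gen = 11 the * timer expires after (1 << 11) * 125us of SCSI clock, and a measured * delay of about 227 msec yields (1 << 11) * 4440 / 227 ~= 40000 KHz, * i.e. a 40 MHz clock.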
*/ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */ OUTW (nc_sien , 0); /* mask all scsi interrupts */ (void) INW (nc_sist); /* clear pending scsi interrupt */ OUTB (nc_dien , 0); /* mask all dma interrupts */ (void) INW (nc_sist); /* another one, just to be sure :) */ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */ OUTB (nc_stime1, 0); /* disable general purpose timer */ OUTB (nc_stime1, gen); /* set to nominal delay of (1<<gen) * 125us */ while (!(INW (nc_sist) & GEN) && ms++ < 100000) DELAY(1000); /* count ms */ OUTB (nc_stime1, 0); /* disable general purpose timer */ if (bootverbose >= 2) printf ("\tDelay (GEN=%d): %u msec\n", gen, ms); /* * adjust for prescaler, and convert into KHz */ return ms ? ((1 << gen) * 4440) / ms : 0; } static void ncr_getclock (ncb_p np, u_char multiplier) { unsigned char scntl3; unsigned char stest1; scntl3 = INB(nc_scntl3); stest1 = INB(nc_stest1); np->multiplier = 1; if (multiplier > 1) { np->multiplier = multiplier; np->clock_khz = 40000 * multiplier; } else { if ((scntl3 & 7) == 0) { unsigned f1, f2; /* throw away first result */ (void) ncrgetfreq (np, 11); f1 = ncrgetfreq (np, 11); f2 = ncrgetfreq (np, 11); if (bootverbose >= 2) printf ("\tNCR clock is %uKHz, %uKHz\n", f1, f2); if (f1 > f2) f1 = f2; /* trust lower result */ if (f1 > 45000) { scntl3 = 5; /* >45MHz: assume 80MHz */ } else { scntl3 = 3; /* <45MHz: assume 40MHz */ } } else if ((scntl3 & 7) == 5) np->clock_khz = 80000; /* Probably a 875 rev. 1 ? */ } } /*=========================================================================*/ #ifdef NCR_TEKRAM_EEPROM struct tekram_eeprom_dev { u_char devmode; #define TKR_PARCHK 0x01 #define TKR_TRYSYNC 0x02 #define TKR_ENDISC 0x04 #define TKR_STARTUNIT 0x08 #define TKR_USETAGS 0x10 #define TKR_TRYWIDE 0x20 u_char syncparam; /* max. sync transfer rate (table ?) */ u_char filler1; u_char filler2; }; struct tekram_eeprom { struct tekram_eeprom_dev dev[16]; u_char adaptid; u_char adaptmode; #define TKR_ADPT_GT2DRV 0x01 #define TKR_ADPT_GT1GB 0x02 #define TKR_ADPT_RSTBUS 0x04 #define TKR_ADPT_ACTNEG 0x08 #define TKR_ADPT_NOSEEK 0x10 #define TKR_ADPT_MORLUN 0x20 u_char delay; /* unit ? ( table ??? ) */ u_char tags; /* use 4 times as many ...
*/ u_char filler[60]; }; static void tekram_write_bit (ncb_p np, int bit) { u_char val = 0x10 + ((bit & 1) << 1); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); OUTB (nc_gpreg, val | 0x04); DELAY(10); OUTB (nc_gpreg, val); DELAY(10); } static int tekram_read_bit (ncb_p np) { OUTB (nc_gpreg, 0x10); DELAY(10); OUTB (nc_gpreg, 0x14); DELAY(10); return INB (nc_gpreg) & 1; } static u_short read_tekram_eeprom_reg (ncb_p np, int reg) { int bit; u_short result = 0; int cmd = 0x80 | reg; OUTB (nc_gpreg, 0x10); tekram_write_bit (np, 1); for (bit = 7; bit >= 0; bit--) { tekram_write_bit (np, cmd >> bit); } for (bit = 0; bit < 16; bit++) { result <<= 1; result |= tekram_read_bit (np); } OUTB (nc_gpreg, 0x00); return result; } static int read_tekram_eeprom(ncb_p np, struct tekram_eeprom *buffer) { u_short *p = (u_short *) buffer; u_short sum = 0; int i; if (INB (nc_gpcntl) != 0x09) { return 0; } for (i = 0; i < 64; i++) { u_short val; if((i&0x0f) == 0) printf ("%02x:", i*2); val = read_tekram_eeprom_reg (np, i); if (p) *p++ = val; sum += val; if((i&0x01) == 0x00) printf (" "); printf ("%02x%02x", val & 0xff, (val >> 8) & 0xff); if((i&0x0f) == 0x0f) printf ("\n"); } printf ("Sum = %04x\n", sum); return sum == 0x1234; } #endif /* NCR_TEKRAM_EEPROM */ static device_method_t ncr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ncr_probe), DEVMETHOD(device_attach, ncr_attach), { 0, 0 } }; static driver_t ncr_driver = { "ncr", ncr_methods, sizeof(struct ncb), }; static devclass_t ncr_devclass; DRIVER_MODULE(ncr, pci, ncr_driver, ncr_devclass, 0, 0); MODULE_DEPEND(ncr, cam, 1, 1, 1); MODULE_DEPEND(ncr, pci, 1, 1, 1); /*=========================================================================*/ #endif /* _KERNEL */ Index: stable/11/sys/dev/siis/siis.c =================================================================== --- stable/11/sys/dev/siis/siis.c (revision 314380) +++ stable/11/sys/dev/siis/siis.c (revision 314381) @@ -1,1991 +1,1987 @@ /*- * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "siis.h" #include #include #include #include #include /* local prototypes */ static int siis_setup_interrupt(device_t dev); static void siis_intr(void *data); static int siis_suspend(device_t dev); static int siis_resume(device_t dev); static int siis_ch_init(device_t dev); static int siis_ch_deinit(device_t dev); static int siis_ch_suspend(device_t dev); static int siis_ch_resume(device_t dev); static void siis_ch_intr_locked(void *data); static void siis_ch_intr(void *data); static void siis_ch_led(void *priv, int onoff); static void siis_begin_transaction(device_t dev, union ccb *ccb); static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void siis_execute_transaction(struct siis_slot *slot); static void siis_timeout(struct siis_slot *slot); static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et); static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag); static void siis_dmainit(device_t dev); static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void siis_dmafini(device_t dev); static void siis_slotsalloc(device_t dev); static void siis_slotsfree(device_t dev); static void siis_reset(device_t dev); static void siis_portinit(device_t dev); static int siis_wait_ready(device_t dev, int t); static int siis_sata_connect(struct siis_channel *ch); static void siis_issue_recovery(device_t dev); static void siis_process_read_log(device_t dev, union ccb *ccb); static void siis_process_request_sense(device_t dev, union ccb *ccb); static void siisaction(struct cam_sim *sim, union ccb *ccb); static void siispoll(struct cam_sim *sim); static MALLOC_DEFINE(M_SIIS, "SIIS driver", "SIIS driver data buffers"); static struct { uint32_t id; const char *name; int ports; int quirks; #define SIIS_Q_SNTF 1 #define SIIS_Q_NOMSI 2 } siis_ids[] = { {0x31241095, "SiI3124", 4, 0}, {0x31248086, "SiI3124", 4, 0}, {0x31321095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02421095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02441095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x31311095, "SiI3131", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x35311095, "SiI3531", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0, NULL, 0, 0} }; #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static int siis_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) { snprintf(buf, sizeof(buf), "%s SATA controller", siis_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int siis_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); uint32_t devid = pci_get_devid(dev); device_t child; int error, i, unit; ctlr->dev = dev; for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) break; } ctlr->quirks = siis_ids[i].quirks; /* Global memory */ ctlr->r_grid = PCIR_BAR(0); if (!(ctlr->r_gmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_grid, RF_ACTIVE))) return (ENXIO); ctlr->gctl = ATA_INL(ctlr->r_gmem, SIIS_GCTL); /* Channels memory */ ctlr->r_rid = PCIR_BAR(2); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
&ctlr->r_rid, RF_ACTIVE))) return (ENXIO); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); /* Reset controller */ siis_resume(dev); /* Number of HW channels */ ctlr->channels = siis_ids[i].ports; /* Setup interrupts. */ if (siis_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "siisch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int siis_detach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (0); } static int siis_suspend(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return 0; } static int siis_resume(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Set PCIe max read request size to at least 1024 bytes */ if (pci_get_max_read_req(dev) < 1024) pci_set_max_read_req(dev, 1024); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); DELAY(10000); /* Get controller out of reset state and enable port interrupts. */ ctlr->gctl &= ~(SIIS_GCTL_GRESET | SIIS_GCTL_I2C_IE); ctlr->gctl |= 0x0000000f; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return (bus_generic_resume(dev)); } static int siis_setup_interrupt(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); int msi = ctlr->quirks & SIIS_Q_NOMSI ? 0 : 1; /* Process hints. */ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 
1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, siis_intr, ctlr, &ctlr->irq.handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } return (0); } /* * Common case interrupt handler. */ static void siis_intr(void *data) { struct siis_controller *ctlr = (struct siis_controller *)data; u_int32_t is; void *arg; int unit; is = ATA_INL(ctlr->r_gmem, SIIS_IS); for (unit = 0; unit < ctlr->channels; unit++) { if ((is & SIIS_IS_PORT(unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* Acknowledge interrupt, if MSI enabled. */ if (ctlr->irq.r_irq_rid) { ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl | SIIS_GCTL_MSIACK); } } static struct resource * siis_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct siis_controller *ctlr = device_get_softc(dev); int unit = ((struct siis_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = unit << 13; rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + 0x2000, 0x2000, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 0x2000, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int siis_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int siis_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("siis.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int siis_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int siis_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int siis_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t siis_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } devclass_t siis_devclass; static device_method_t siis_methods[] = { DEVMETHOD(device_probe, siis_probe), DEVMETHOD(device_attach, siis_attach), DEVMETHOD(device_detach, siis_detach), 
DEVMETHOD(device_suspend, siis_suspend), DEVMETHOD(device_resume, siis_resume), DEVMETHOD(bus_print_child, siis_print_child), DEVMETHOD(bus_alloc_resource, siis_alloc_resource), DEVMETHOD(bus_release_resource, siis_release_resource), DEVMETHOD(bus_setup_intr, siis_setup_intr), DEVMETHOD(bus_teardown_intr,siis_teardown_intr), DEVMETHOD(bus_child_location_str, siis_child_location_str), DEVMETHOD(bus_get_dma_tag, siis_get_dma_tag), { 0, 0 } }; static driver_t siis_driver = { "siis", siis_methods, sizeof(struct siis_controller) }; DRIVER_MODULE(siis, pci, siis_driver, siis_devclass, 0, 0); MODULE_VERSION(siis, 1); MODULE_DEPEND(siis, cam, 1, 1, 1); static int siis_ch_probe(device_t dev) { device_set_desc_copy(dev, "SIIS channel"); return (BUS_PROBE_DEFAULT); } static int siis_ch_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(device_get_parent(dev)); struct siis_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->quirks = ctlr->quirks; ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = SIIS_MAX_SLOTS; ch->curr[i] = ch->user[i]; if (ch->pm_level) ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ; ch->user[i].caps |= CTS_SATA_CAPS_H_AN; } mtx_init(&ch->mtx, "SIIS channel lock", NULL, MTX_DEF); rid = ch->unit; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); siis_dmainit(dev); siis_slotsalloc(dev); siis_ch_init(dev); mtx_lock(&ch->mtx); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, siis_ch_intr_locked, dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. 
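* The queue depth matches SIIS_MAX_SLOTS, so CAM can keep every * command slot of the channel busy without overflowing the simq.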
*/ devq = cam_simq_alloc(SIIS_MAX_SLOTS); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(siisaction, siispoll, "siisch", ch, device_get_unit(dev), &ch->mtx, 2, SIIS_MAX_SLOTS, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } mtx_unlock(&ch->mtx); ch->led = led_create(siis_ch_led, dev, device_get_nameunit(dev)); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int siis_ch_detach(device_t dev) { struct siis_channel *ch = device_get_softc(dev); led_destroy(ch->led); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); siis_ch_deinit(dev); siis_slotsfree(dev); siis_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int siis_ch_init(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); return (0); } static int siis_ch_deinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Put port into reset state. 
*/ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); return (0); } static int siis_ch_suspend(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "siissusp", hz/100); siis_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int siis_ch_resume(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_init(dev); siis_reset(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t siisch_devclass; static device_method_t siisch_methods[] = { DEVMETHOD(device_probe, siis_ch_probe), DEVMETHOD(device_attach, siis_ch_attach), DEVMETHOD(device_detach, siis_ch_detach), DEVMETHOD(device_suspend, siis_ch_suspend), DEVMETHOD(device_resume, siis_ch_resume), { 0, 0 } }; static driver_t siisch_driver = { "siisch", siisch_methods, sizeof(struct siis_channel) }; DRIVER_MODULE(siisch, siis, siisch_driver, siis_devclass, 0, 0); static void siis_ch_led(void *priv, int onoff) { device_t dev; struct siis_channel *ch; dev = (device_t)priv; ch = device_get_softc(dev); if (onoff == 0) ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_LED_ON); else ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_LED_ON); } struct siis_dc_cb_args { bus_addr_t maddr; int error; }; static void siis_dmainit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct siis_dc_cb_args dcba; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_WORK_SIZE, 1, SIIS_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, SIIS_WORK_SIZE, siis_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* Data area. 
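* This tag covers the data buffers of all slots: byte alignment, up * to SIIS_SG_ENTRIES segments per mapping, and busdma callbacks * serialized through the channel mutex via busdma_lock_mutex.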
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_SG_ENTRIES * PAGE_SIZE * SIIS_MAX_SLOTS, SIIS_SG_ENTRIES, 0xFFFFFFFF, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); siis_dmafini(dev); } static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_dc_cb_args *dcba = (struct siis_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void siis_dmafini(device_t dev) { struct siis_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work_map = NULL; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void siis_slotsalloc(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; slot->dev = dev; slot->slot = i; slot->state = SIIS_SLOT_EMPTY; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void siis_slotsfree(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static void siis_notify_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct cam_path *dpath; u_int32_t status; int i; if (ch->quirks & SIIS_Q_SNTF) { status = ATA_INL(ch->r_mem, SIIS_P_SNTF); ATA_OUTL(ch->r_mem, SIIS_P_SNTF, status); } else { /* * Without SNTF we have no idea which device sent notification. * If PMP is connected, assume it, else - device. */ status = (ch->pm_present) ? 
0x8000 : 0x0001; } if (bootverbose) device_printf(dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void siis_phy_check_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* If we have a connection event, deal with it */ if (ch->pm_level == 0) { u_int32_t status = ATA_INL(ch->r_mem, SIIS_P_SSTS); union ccb *ccb; if (bootverbose) { if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) { device_printf(dev, "CONNECT requested\n"); } else device_printf(dev, "DISCONNECT requested\n"); } siis_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } } static void siis_ch_intr_locked(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_intr(data); mtx_unlock(&ch->mtx); } static void siis_ch_intr(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); uint32_t istatus, sstatus, ctx, estatus, ok, err = 0; enum siis_err_type et; int i, ccs, port, tslots; mtx_assert(&ch->mtx, MA_OWNED); /* Read command statuses. */ sstatus = ATA_INL(ch->r_mem, SIIS_P_SS); ok = ch->rslots & ~sstatus; /* Complete all successful commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if ((ok >> i) & 1) siis_end_transaction(&ch->slot[i], SIIS_ERR_NONE); } /* Do we have any other events? */ if ((sstatus & SIIS_P_SS_ATTN) == 0) return; /* Read and clear interrupt statuses. */ istatus = ATA_INL(ch->r_mem, SIIS_P_IS) & (0xFFFF & ~SIIS_P_IX_COMMCOMP); ATA_OUTL(ch->r_mem, SIIS_P_IS, istatus); /* Process PHY events */ if (istatus & SIIS_P_IX_PHYRDYCHG) siis_phy_check_events(dev); /* Process NOTIFY events */ if (istatus & SIIS_P_IX_SDBN) siis_notify_events(dev); /* Process command errors */ if (istatus & SIIS_P_IX_COMMERR) { estatus = ATA_INL(ch->r_mem, SIIS_P_CMDERR); ctx = ATA_INL(ch->r_mem, SIIS_P_CTX); ccs = (ctx & SIIS_P_CTX_SLOT) >> SIIS_P_CTX_SLOT_SHIFT; port = (ctx & SIIS_P_CTX_PMP) >> SIIS_P_CTX_PMP_SHIFT; err = ch->rslots & sstatus; //device_printf(dev, "%s ERROR ss %08x is %08x rs %08x es %d act %d port %d serr %08x\n", // __func__, sstatus, istatus, ch->rslots, estatus, ccs, port, // ATA_INL(ch->r_mem, SIIS_P_SERR)); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } if (estatus == SIIS_P_CMDERR_DEV || estatus == SIIS_P_CMDERR_SDB || estatus == SIIS_P_CMDERR_DATAFIS) { tslots = ch->numtslots[port]; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; if (ch->slot[i].ccb->ccb_h.target_id != port) continue; if (tslots == 0) { /* Untagged operation. */ if (i == ccs) et = SIIS_ERR_TFE; else et = SIIS_ERR_INNOCENT; } else { /* Tagged operation. 
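 * For a tagged (NCQ) failure the controller cannot tell which tag
 * actually erred, so every outstanding tagged command on this port is
 * completed with SIIS_ERR_NCQ and parked on hold; the guilty tag is
 * identified later from the NCQ error log (READ LOG EXT, log page
 * 0x10) in siis_issue_recovery()/siis_process_read_log().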
*/ et = SIIS_ERR_NCQ; } siis_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_RESUME); } else { if (estatus == SIIS_P_CMDERR_SENDFIS || estatus == SIIS_P_CMDERR_INCSTATE || estatus == SIIS_P_CMDERR_PPE || estatus == SIIS_P_CMDERR_SERVICE) { et = SIIS_ERR_SATA; } else et = SIIS_ERR_INVALID; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; siis_end_transaction(&ch->slot[i], et); } } } } /* Must be called with channel locked. */ static int siis_check_collision(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0x7fffffff >> (31 - ch->curr[ccb->ccb_h.target_id].tags))) == 0) return (1); } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void siis_begin_transaction(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); struct siis_slot *slot; int tag, tags; mtx_assert(&ch->mtx, MA_OWNED); /* Choose empty slot. */ tags = SIIS_MAX_SLOTS; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; tag = fls((~ch->oslots) & (0x7fffffff >> (31 - tags))) - 1; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Update channel stats. */ ch->oslots |= (1 << slot->slot); ch->numrslots++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]++; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; /* If request moves data, setup and load SG list */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = SIIS_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, siis_dmasetprd, slot, 0); } else siis_execute_transaction(slot); } /* Locked by busdma engine. 
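 * siis_dmasetprd() below is the bus_dmamap_load_ccb() callback; the
 * data tag was created with busdma_lock_mutex and the channel mutex,
 * so even a deferred callback is entered with the channel locked,
 * which is why an mtx_assert() suffices instead of an mtx_lock().
 *
 * Slot selection in siis_begin_transaction() above, worked through
 * with example values: with tags == 5, (0x7fffffff >> (31 - 5)) ==
 * 0x1f limits the search to slots 0..4; if oslots == 0x0b (slots 0,
 * 1 and 3 busy), then ((~oslots) & 0x1f) == 0x14 and
 * fls(0x14) - 1 == 4, i.e. the highest free slot in range is taken.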
*/ static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_slot *slot = arg; struct siis_channel *ch = device_get_softc(slot->dev); struct siis_cmd *ctp; struct siis_dma_prd *prd; int i; mtx_assert(&ch->mtx, MA_OWNED); if (error) { device_printf(slot->dev, "DMA load error\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } KASSERT(nsegs <= SIIS_SG_ENTRIES, ("too many DMA segment entries\n")); slot->dma.nsegs = nsegs; if (nsegs != 0) { /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *)(ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot)); /* Fill S/G table */ if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) prd = &ctp->u.ata.prd[0]; else prd = &ctp->u.atapi.prd[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32(segs[i].ds_len); prd[i].control = 0; } prd[nsegs - 1].control = htole32(SIIS_PRD_TRM); bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } siis_execute_transaction(slot); } /* Must be called with channel locked. */ static void siis_execute_transaction(struct siis_slot *slot) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); struct siis_cmd *ctp; union ccb *ccb = slot->ccb; u_int64_t prb_bus; mtx_assert(&ch->mtx, MA_OWNED); /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *) (ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot)); ctp->control = 0; ctp->protocol_override = 0; ctp->transfer_count = 0; /* Special handling for Soft Reset command. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { if (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) { ctp->control |= htole16(SIIS_PRB_SOFT_RESET); } else { ctp->control |= htole16(SIIS_PRB_PROTOCOL_OVERRIDE); if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_NCQ); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_READ); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_WRITE); } } } else if (ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ctp->control |= htole16(SIIS_PRB_PACKET_READ); else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ctp->control |= htole16(SIIS_PRB_PACKET_WRITE); } /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { /* Kick controller into sane state */ siis_portinit(dev); } /* Setup the FIS for this request */ if (!siis_setup_fis(dev, ctp, ccb, slot->slot)) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREWRITE); /* Issue command to the controller. */ slot->state = SIIS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); prb_bus = ch->dma.work_bus + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot); ATA_OUTL(ch->r_mem, SIIS_P_CACTL(slot->slot), prb_bus); ATA_OUTL(ch->r_mem, SIIS_P_CACTH(slot->slot), prb_bus >> 32); /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, (timeout_t*)siis_timeout, slot, 0); return; } /* Must be called with channel locked. 
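 * siis_process_timeout() runs once every still-active slot has timed
 * out: each running request is completed with SIIS_ERR_TIMEOUT, and
 * since ch->toslots is then nonzero when ch->rslots drains to zero,
 * the final siis_end_transaction() resets the port.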
*/ static void siis_process_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } /* Handle the rest of the commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; siis_end_transaction(&ch->slot[i], SIIS_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void siis_rearm_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < SIIS_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout, 0, (timeout_t*)siis_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void siis_timeout(struct siis_slot *slot) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; mtx_assert(&ch->mtx, MA_OWNED); /* Check for stale timeout. */ if (slot->state < SIIS_SLOT_RUNNING) return; /* Handle soft-reset timeouts without doing hard-reset. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { xpt_freeze_simq(ch->sim, ch->numrslots); siis_end_transaction(slot, SIIS_ERR_TFE); return; } device_printf(dev, "Timeout on slot %d\n", slot->slot); device_printf(dev, "%s is %08x ss %08x rs %08x es %08x sts %08x serr %08x\n", __func__, ATA_INL(ch->r_mem, SIIS_P_IS), ATA_INL(ch->r_mem, SIIS_P_SS), ch->rslots, ATA_INL(ch->r_mem, SIIS_P_CMDERR), ATA_INL(ch->r_mem, SIIS_P_STS), ATA_INL(ch->r_mem, SIIS_P_SERR)); if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) siis_process_timeout(dev); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } /* Must be called with channel locked. */ static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; int lastto; mtx_assert(&ch->mtx, MA_OWNED); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTWRITE); /* Read result registers to the result struct. * May be incorrect if several commands finished at the same time, * so read only when sure or have to.
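 * The per-slot LRAM area holds a copy of the PRB, whose FIS field
 * starts 8 bytes in (after the control/override/count words); hence
 * the reads below at SIIS_P_LRAM_SLOT(slot) + 8, with the status and
 * error bytes at D2H register FIS offsets 2 and 3.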
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == SIIS_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { int offs = SIIS_P_LRAM_SLOT(slot->slot) + 8; res->status = ATA_INB(ch->r_mem, offs + 2); res->error = ATA_INB(ch->r_mem, offs + 3); res->lba_low = ATA_INB(ch->r_mem, offs + 4); res->lba_mid = ATA_INB(ch->r_mem, offs + 5); res->lba_high = ATA_INB(ch->r_mem, offs + 6); res->device = ATA_INB(ch->r_mem, offs + 7); res->lba_low_exp = ATA_INB(ch->r_mem, offs + 8); res->lba_mid_exp = ATA_INB(ch->r_mem, offs + 9); res->lba_high_exp = ATA_INB(ch->r_mem, offs + 10); res->sector_count = ATA_INB(ch->r_mem, offs + 12); res->sector_count_exp = ATA_INB(ch->r_mem, offs + 13); } else bzero(res, sizeof(*res)); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->ataio.resid = ccb->ataio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->csio.resid = ccb->csio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } /* Set proper result status. */ if (et != SIIS_ERR_NONE || ch->recovery) { ch->eslots |= (1 << slot->slot); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } /* In case of error, freeze device for proper recovery. */ if (et != SIIS_ERR_NONE && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case SIIS_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case SIIS_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case SIIS_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case SIIS_ERR_TFE: case SIIS_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case SIIS_ERR_SATA: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case SIIS_ERR_TIMEOUT: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = SIIS_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != SIIS_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { siis_process_read_log(dev, ccb); /* If it was our REQUEST SENSE command - process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { siis_process_request_sense(dev, ccb); /* If it was NCQ or ATAPI command error, put result on hold. 
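 * Held CCBs stay in ch->hold[] with completion deferred so that the
 * result can be patched in afterwards: siis_issue_recovery() fetches
 * either the NCQ error log or the ATAPI sense data, and the matching
 * siis_process_*() routine then finishes the held commands.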
*/ } else if (et == SIIS_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else xpt_done(ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there were timeouts or fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { siis_reset(dev); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) siis_portinit(dev); /* if there are commands on hold, we can do recovery. */ if (!ch->recoverycmd && ch->numhslots) siis_issue_recovery(dev); } /* If all the rest of the commands are in timeout - abort them. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != SIIS_ERR_TIMEOUT) siis_rearm_timeout(dev); /* Unfreeze frozen command. */ if (ch->frozen && !siis_check_collision(dev, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; siis_begin_transaction(dev, fccb); xpt_release_simq(ch->sim, TRUE); } } static void siis_issue_recovery(device_t dev) { struct siis_channel *ch = device_get_softc(dev); union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i]) break; } if (i == SIIS_MAX_SLOTS) return; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } siis_reset(dev); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_SIIS, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough.
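 * The CDB built below is a plain 6-byte REQUEST SENSE: opcode 0x03,
 * allocation length in byte 4, reading directly into the held CCB's
 * sense buffer so no extra copy is needed afterwards.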
*/ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } ch->recoverycmd = 1; siis_begin_transaction(dev, ccb); } static void siis_process_read_log(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_SIIS); xpt_free_ccb(ccb); } static void siis_process_request_sense(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); } static void siis_portinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; ch->eslots = 0; ch->recovery = 0; ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_RESUME); for (i = 0; i < 16; i++) { ATA_OUTL(ch->r_mem, SIIS_P_PMPSTS(i), 0), ATA_OUTL(ch->r_mem, SIIS_P_PMPQACT(i), 0); } ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_INIT); siis_wait_ready(dev, 1000); } static int siis_devreset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_DEV_RESET); while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_DEV_RESET) != 0) { DELAY(100); if (timeout++ > 1000) { device_printf(dev, "device reset stuck " "(timeout 100ms) status = %08x\n", val); return (EBUSY); } } return (0); } static int siis_wait_ready(device_t dev, int t) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_READY) == 0) { DELAY(1000); if (timeout++ > t) { device_printf(dev, "port is not ready (timeout %dms) " "status = %08x\n", t, val); return (EBUSY); } } return (0); } static void siis_reset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i, retry = 0, sata_rev; uint32_t val; xpt_freeze_simq(ch->sim, 1); if (bootverbose) 
device_printf(dev, "SIIS reset...\n"); if (!ch->recoverycmd && !ch->recovery) xpt_freeze_simq(ch->sim, ch->numrslots); /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } /* Requeue all running commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; /* XXX; Commands in loading state. */ siis_end_transaction(&ch->slot[i], SIIS_ERR_INNOCENT); } /* Finish all held commands as-is. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->recovery = 0; ch->toslots = 0; ch->fatalerr = 0; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IECLR, 0x0000FFFF); /* Set speed limit. */ sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, SIIS_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); retry: siis_devreset(dev); /* Reset and reconnect PHY, */ if (!siis_sata_connect(ch)) { ch->devices = 0; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: phy reset found no device\n"); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); return; } /* Wait for port ready status. */ if (siis_wait_ready(dev, 1000)) { device_printf(dev, "port ready timeout\n"); if (!retry) { device_printf(dev, "trying full port reset ...\n"); /* Get port to the reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); DELAY(10000); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); siis_wait_ready(dev, 5000); retry = 1; goto retry; } } ch->devices = 1; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IS, 0xFFFFFFFF); ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: devices=%08x\n", ch->devices); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); } static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag) { struct siis_channel *ch = device_get_softc(dev); u_int8_t *fis = &ctp->fis[0]; bzero(fis, 24); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bzero(ctp->u.atapi.ccb, 16); bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->u.atapi.ccb, ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] = tag << 3; fis[13] = 0; } else { fis[12] = ccb->ataio.cmd.sector_count; fis[13] = ccb->ataio.cmd.sector_count_exp; } fis[15] = ATA_A_4BIT; if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } } else { /* Soft reset. */ } return (20); } static int siis_sata_connect(struct siis_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, SIIS_P_SERR, 0xffffffff); return (1); } static int siis_check_ids(device_t dev, union ccb *ccb) { if (ccb->ccb_h.target_id > 15) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } return (0); } static void siisaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct siis_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("siisaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct siis_channel *)cam_sim_softc(sim); dev = ch->dev; mtx_assert(&ch->mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (siis_check_ids(dev, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (siis_check_collision(dev, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. 
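 * There is a single ch->frozen slot, so the simq freeze keeps CAM
 * from submitting further work until siis_end_transaction() or the
 * reset path requeues or restarts the frozen CCB and releases the
 * simq again.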
*/ xpt_freeze_simq(ch->sim, 1); return; } siis_begin_transaction(dev, ccb); return; - case XPT_EN_LUN: /* Enable LUN as a target */ - case XPT_TARGET_IO: /* Execute target I/O request */ - case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ - case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(SIIS_MAX_SLOTS, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) { ch->pm_present = cts->xport_specific.sata.pm_present; if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; uint32_t status; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ch->quirks & SIIS_Q_SNTF) == 0) cts->xport_specific.sata.caps &= ~CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; 
cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ siis_reset(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_ATA_EXT; cpi->hba_eng_cnt = 0; cpi->max_target = 15; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "SIIS", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = MAXPHYS; cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void siispoll(struct cam_sim *sim) { struct siis_channel *ch = (struct siis_channel *)cam_sim_softc(sim); siis_ch_intr(ch->dev); } Index: stable/11/sys/dev/sym/sym_hipd.c =================================================================== --- stable/11/sys/dev/sym/sym_hipd.c (revision 314380) +++ stable/11/sys/dev/sym/sym_hipd.c (revision 314381) @@ -1,9625 +1,9620 @@ /*- * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 * PCI-SCSI controllers. * * Copyright (C) 1999-2001 Gerard Roudier * * This driver also supports the following Symbios/LSI PCI-SCSI chips: * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. * * * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-1999 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier * Stefan Esser * Copyright (C) 1994 Wolfgang Stanglmeier * * The initialisation code, and part of the code that addresses * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM * written by Justin T. Gibbs. * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham * *----------------------------------------------------------------------------- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define SYM_DRIVER_NAME "sym-1.6.5-20000902" /* #define SYM_DEBUG_GENERIC_SUPPORT */ #include /* * Driver configuration options. */ #include "opt_sym.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __sparc64__ #include #include #endif #include #include #include #include #include #include #include #include /* Short and quite clear integer types */ typedef int8_t s8; typedef int16_t s16; typedef int32_t s32; typedef u_int8_t u8; typedef u_int16_t u16; typedef u_int32_t u32; /* * Driver definitions. */ #include #include /* * IA32 architecture does not reorder STORES and prevents * LOADS from passing STORES. It is called `program order' * by Intel and allows device drivers to deal with memory * ordering by only ensuring that the code is not reordered * by the compiler when ordering is required. * Other architectures implement a weaker ordering that * requires memory barriers (and also IO barriers when they * make sense) to be used. */ #if defined __i386__ || defined __amd64__ #define MEMORY_BARRIER() do { ; } while(0) #elif defined __powerpc__ #define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory") #elif defined __sparc64__ #define MEMORY_BARRIER() __asm__ volatile("membar #Sync" : : : "memory") #elif defined __arm__ #define MEMORY_BARRIER() dmb() #elif defined __aarch64__ #define MEMORY_BARRIER() dmb(sy) #elif defined __riscv__ #define MEMORY_BARRIER() fence() #else #error "Not supported platform" #endif /* * A la VMS/CAM-3 queue management. 
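 * These are circular, doubly linked lists around a sentinel head.
 * Illustrative usage sketch (names invented for the example):
 *
 *	SYM_QUEHEAD head, item;
 *	sym_que_init(&head);
 *	sym_insque_tail(&item, &head);	(append at the tail)
 *	qp = sym_remque_head(&head);	(NULL once empty)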
*/ typedef struct sym_quehead { struct sym_quehead *flink; /* Forward pointer */ struct sym_quehead *blink; /* Backward pointer */ } SYM_QUEHEAD; #define sym_que_init(ptr) do { \ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ } while (0) static __inline void __sym_que_add(struct sym_quehead * new, struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = new; new->flink = flink; new->blink = blink; blink->flink = new; } static __inline void __sym_que_del(struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = blink; blink->flink = flink; } static __inline int sym_que_empty(struct sym_quehead *head) { return head->flink == head; } static __inline void sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) { struct sym_quehead *first = list->flink; if (first != list) { struct sym_quehead *last = list->blink; struct sym_quehead *at = head->flink; first->blink = head; head->flink = first; last->flink = at; at->blink = last; } } #define sym_que_entry(ptr, type, member) \ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member))) #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) { struct sym_quehead *elem = head->flink; if (elem != head) __sym_que_del(head, elem->flink); else elem = NULL; return elem; } #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) /* * This one may be useful. */ #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ for (qp = (head)->flink; qp != (head); qp = qp->flink) /* * FreeBSD does not offer our kind of queue in the CAM CCB. * So, we have to cast. */ #define sym_qptr(p) ((struct sym_quehead *) (p)) /* * Simple bitmap operations. */ #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f))) #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f))) #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f))) /* * Number of tasks per device we want to handle. */ #if SYM_CONF_MAX_TAG_ORDER > 8 #error "more than 256 tags per logical unit not allowed." #endif #define SYM_CONF_MAX_TASK (1< SYM_CONF_MAX_TASK #undef SYM_CONF_MAX_TAG #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK #endif /* * This one means 'NO TAG for this job' */ #define NO_TAG (256) /* * Number of SCSI targets. */ #if SYM_CONF_MAX_TARGET > 16 #error "more than 16 targets not allowed." #endif /* * Number of logical units per target. */ #if SYM_CONF_MAX_LUN > 64 #error "more than 64 logical units per target not allowed." #endif /* * Asynchronous pre-scaler (ns). Shall be 40 for * the SCSI timings to be compliant. */ #define SYM_CONF_MIN_ASYNC (40) /* * Number of entries in the START and DONE queues. * * We limit to 1 PAGE in order to succeed allocation of * these queues. Each entry is 8 bytes long (2 DWORDS). */ #ifdef SYM_CONF_MAX_START #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2) #else #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2) #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8 #undef SYM_CONF_MAX_QUEUE #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8 #undef SYM_CONF_MAX_START #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif /* * For this one, we want a short name :-) */ #define MAX_QUEUE SYM_CONF_MAX_QUEUE /* * Active debugging tags and verbosity. 
*/ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_POINTER (0x0800) #if 0 static int sym_debug = 0; #define DEBUG_FLAGS sym_debug #else /* #define DEBUG_FLAGS (0x0631) */ #define DEBUG_FLAGS (0x0000) #endif #define sym_verbose (np->verbose) /* * Insert a delay in micro-seconds and milli-seconds. */ static void UDELAY(int us) { DELAY(us); } static void MDELAY(int ms) { while (ms--) UDELAY(1000); } /* * Simple power of two buddy-like allocator. * * This simple code is not intended to be fast, but to * provide power of 2 aligned memory allocations. * Since the SCRIPTS processor only supplies 8 bit arithmetic, * this allocator allows simple and fast address calculations * from the SCRIPTS code. In addition, cache line alignment * is guaranteed for power of 2 cache line size. * * This allocator has been developed for the Linux sym53c8xx * driver, since this O/S does not provide naturally aligned * allocations. * It has the advantage of allowing the driver to use private * pages of memory that will be useful if we ever need to deal * with IO MMUs for PCI. */ #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ #if 0 #define MEMO_FREE_UNUSED /* Free unused pages immediately */ #endif #define MEMO_WARN 1 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT) #define free_pages(p) free((p), M_DEVBUF) typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ typedef struct m_link { /* Link between free memory chunks */ struct m_link *next; } m_link_s; typedef struct m_vtob { /* Virtual to Bus address translation */ struct m_vtob *next; bus_dmamap_t dmamap; /* Map for this chunk */ m_addr_t vaddr; /* Virtual address */ m_addr_t baddr; /* Bus physical address */ } m_vtob_s; /* Hash this stuff a bit to speed up translations */ #define VTOB_HASH_SHIFT 5 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) #define VTOB_HASH_CODE(m) \ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) typedef struct m_pool { /* Memory pool of a given kind */ bus_dma_tag_t dev_dmat; /* Identifies the pool */ bus_dma_tag_t dmat; /* Tag for our fixed allocations */ m_addr_t (*getp)(struct m_pool *); #ifdef MEMO_FREE_UNUSED void (*freep)(struct m_pool *, m_addr_t); #endif #define M_GETP() mp->getp(mp) #define M_FREEP(p) mp->freep(mp, p) int nump; m_vtob_s *(vtob[VTOB_HASH_SIZE]); struct m_pool *next; struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1]; } m_pool_s; static void *___sym_malloc(m_pool_s *mp, int size) { int i = 0; int s = (1 << MEMO_SHIFT); int j; m_addr_t a; m_link_s *h = mp->h; if (size > MEMO_CLUSTER_SIZE) return NULL; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == MEMO_CLUSTER_SIZE) { h[j].next = (m_link_s *) M_GETP(); if (h[j].next) h[j].next->next = NULL; break; } ++j; s <<= 1; } a = (m_addr_t) h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_s *) (a+s); h[j].next->next = NULL; } } #ifdef DEBUG printf("___sym_malloc(%d) = %p\n", size, (void *) a); #endif return (void *) a; } static 
void ___sym_mfree(m_pool_s *mp, void *ptr, int size) { int i = 0; int s = (1 << MEMO_SHIFT); m_link_s *q; m_addr_t a, b; m_link_s *h = mp->h; #ifdef DEBUG printf("___sym_mfree(%p, %d)\n", ptr, size); #endif if (size > MEMO_CLUSTER_SIZE) return; while (size > s) { s <<= 1; ++i; } a = (m_addr_t) ptr; while (1) { #ifdef MEMO_FREE_UNUSED if (s == MEMO_CLUSTER_SIZE) { M_FREEP(a); break; } #endif b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_s *) b) { q = q->next; } if (!q->next) { ((m_link_s *) a)->next = h[i].next; h[i].next = (m_link_s *) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags) { void *p; p = ___sym_malloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("new %-10s[%4d] @%p.\n", name, size, p); if (p) bzero(p, size); else if (uflags & MEMO_WARN) printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); return p; } #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN) static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___sym_mfree(mp, ptr, size); } /* * Default memory pool we donnot need to involve in DMA. */ /* * With the `bus dma abstraction', we use a separate pool for * memory we donnot need to involve in DMA. */ static m_addr_t ___mp0_getp(m_pool_s *mp) { m_addr_t m = (m_addr_t) get_pages(); if (m) ++mp->nump; return m; } #ifdef MEMO_FREE_UNUSED static void ___mp0_freep(m_pool_s *mp, m_addr_t m) { free_pages(m); --mp->nump; } #endif #ifdef MEMO_FREE_UNUSED static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep}; #else static m_pool_s mp0 = {0, 0, ___mp0_getp}; #endif /* * Actual memory allocation routine for non-DMAed memory. */ static void *sym_calloc(int size, char *name) { void *m; /* Lock */ m = __sym_calloc(&mp0, size, name); /* Unlock */ return m; } /* * Actual memory allocation routine for non-DMAed memory. */ static void sym_mfree(void *ptr, int size, char *name) { /* Lock */ __sym_mfree(&mp0, ptr, size, name); /* Unlock */ } /* * DMAable pools. */ /* * With `bus dma abstraction', we use a separate pool per parent * BUS handle. A reverse table (hashed) is maintained for virtual * to BUS address translation. 
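 * Lookup path: __vtobus() rounds the virtual address down to its
 * MEMO_CLUSTER_SIZE cluster (a & ~MEMO_CLUSTER_MASK), hashes the
 * cluster address into one of the VTOB_HASH_SIZE (32) buckets via
 * VTOB_HASH_CODE(), walks that bucket's m_vtob chain for a matching
 * vaddr, and returns vp->baddr plus the offset within the cluster.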
*/ static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg __unused, int error) { bus_addr_t *baddr; KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); baddr = (bus_addr_t *)arg; if (error) *baddr = 0; else *baddr = segs->ds_addr; } static m_addr_t ___dma_getp(m_pool_s *mp) { m_vtob_s *vbp; void *vaddr = NULL; bus_addr_t baddr = 0; vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); if (!vbp) goto out_err; if (bus_dmamem_alloc(mp->dmat, &vaddr, BUS_DMA_COHERENT | BUS_DMA_WAITOK, &vbp->dmamap)) goto out_err; bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, BUS_DMA_NOWAIT); if (baddr) { int hc = VTOB_HASH_CODE(vaddr); vbp->vaddr = (m_addr_t) vaddr; vbp->baddr = (m_addr_t) baddr; vbp->next = mp->vtob[hc]; mp->vtob[hc] = vbp; ++mp->nump; return (m_addr_t) vaddr; } out_err: if (baddr) bus_dmamap_unload(mp->dmat, vbp->dmamap); if (vaddr) bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap); if (vbp) __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); return 0; } #ifdef MEMO_FREE_UNUSED static void ___dma_freep(m_pool_s *mp, m_addr_t m) { m_vtob_s **vbpp, *vbp; int hc = VTOB_HASH_CODE(m); vbpp = &mp->vtob[hc]; while (*vbpp && (*vbpp)->vaddr != m) vbpp = &(*vbpp)->next; if (*vbpp) { vbp = *vbpp; *vbpp = (*vbpp)->next; bus_dmamap_unload(mp->dmat, vbp->dmamap); bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap); __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); --mp->nump; } } #endif static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp; for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next); return mp; } static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp = NULL; mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); if (mp) { mp->dev_dmat = dev_dmat; if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MEMO_CLUSTER_SIZE, 1, MEMO_CLUSTER_SIZE, 0, NULL, NULL, &mp->dmat)) { mp->getp = ___dma_getp; #ifdef MEMO_FREE_UNUSED mp->freep = ___dma_freep; #endif mp->next = mp0.next; mp0.next = mp; return mp; } } if (mp) __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL"); return NULL; } #ifdef MEMO_FREE_UNUSED static void ___del_dma_pool(m_pool_s *p) { struct m_pool **pp = &mp0.next; while (*pp && *pp != p) pp = &(*pp)->next; if (*pp) { *pp = (*pp)->next; bus_dma_tag_destroy(p->dmat); __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); } } #endif static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name) { struct m_pool *mp; void *m = NULL; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (!mp) mp = ___cre_dma_pool(dev_dmat); if (mp) m = __sym_calloc(mp, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ return m; } static void __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) { struct m_pool *mp; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) __sym_mfree(mp, m, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ } static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) { m_pool_s *mp; int hc = VTOB_HASH_CODE(m); m_vtob_s *vp = NULL; m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) { vp = mp->vtob[hc]; while (vp && (m_addr_t) vp->vaddr != a) vp = vp->next; } /* Unlock */ if (!vp) panic("sym: VTOBUS FAILED!\n"); return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; } /* * Verbs for DMAable memory handling. 
* The _uvptv_ macro avoids a nasty warning about pointer to volatile * being discarded. */ #define _uvptv_(p) ((void *)((vm_offset_t)(p))) #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) #define _sym_mfree_dma(np, p, s, n) \ __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) #define vtobus(p) _vtobus(np, p) /* * Print a buffer in hexadecimal format. */ static void sym_printb_hex (u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); } /* * Same with a label at beginning and .\n at end. */ static void sym_printl_hex (char *label, u_char *p, int n) { printf ("%s", label); sym_printb_hex (p, n); printf (".\n"); } /* * Return a string for SCSI BUS mode. */ static const char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Some poor and bogus sync table that refers to Tekram NVRAM layout. */ #ifdef SYM_CONF_NVRAM_SUPPORT static const u_char Tekram_sync[16] = {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; #endif /* * Union of supported NVRAM formats. */ struct sym_nvram { int type; #define SYM_SYMBIOS_NVRAM (1) #define SYM_TEKRAM_NVRAM (2) #ifdef SYM_CONF_NVRAM_SUPPORT union { Symbios_nvram Symbios; Tekram_nvram Tekram; } data; #endif }; /* * This one is hopefully useless, but actually useful. :-) */ #ifndef assert #define assert(expression) { \ if (!(expression)) { \ (void)panic( \ "assertion \"%s\" failed: file \"%s\", line %d\n", \ #expression, \ __FILE__, __LINE__); \ } \ } #endif /* * Some provision for a possible big endian mode supported by * Symbios chips (never seen, by the way). * For now, this stuff does not deserve any comments. :) */ #define sym_offb(o) (o) #define sym_offw(o) (o) /* * Some provision for support for BIG ENDIAN CPU. */ #define cpu_to_scr(dw) htole32(dw) #define scr_to_cpu(dw) le32toh(dw) /* * Access to the chip IO registers and on-chip RAM. * We use the `bus space' interface under FreeBSD-4 and * later kernel versions. */ #if defined(SYM_CONF_IOMAPPED) #define INB_OFF(o) bus_read_1(np->io_res, (o)) #define INW_OFF(o) bus_read_2(np->io_res, (o)) #define INL_OFF(o) bus_read_4(np->io_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->io_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->io_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->io_res, (o), (v)) #else /* Memory mapped IO */ #define INB_OFF(o) bus_read_1(np->mmio_res, (o)) #define INW_OFF(o) bus_read_2(np->mmio_res, (o)) #define INL_OFF(o) bus_read_4(np->mmio_res, (o)) #define OUTB_OFF(o, v) bus_write_1(np->mmio_res, (o), (v)) #define OUTW_OFF(o, v) bus_write_2(np->mmio_res, (o), (v)) #define OUTL_OFF(o, v) bus_write_4(np->mmio_res, (o), (v)) #endif /* SYM_CONF_IOMAPPED */ #define OUTRAM_OFF(o, a, l) \ bus_write_region_1(np->ram_res, (o), (a), (l)) /* * Common definitions for both bus space and legacy IO methods. 
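 * INB()/OUTB() and friends take a register *name*: offsetof(struct
 * sym_reg, r) turns it into the byte offset handed to the bus-space
 * accessors, so the same call sites work whether SYM_CONF_IOMAPPED
 * selected IO-port or memory-mapped access at compile time.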
*/ #define INB(r) INB_OFF(offsetof(struct sym_reg,r)) #define INW(r) INW_OFF(offsetof(struct sym_reg,r)) #define INL(r) INL_OFF(offsetof(struct sym_reg,r)) #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v)) #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v)) #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v)) #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /* * We normally want the chip to have a consistent view * of driver internal data structures when we restart it. * Thus these macros. */ #define OUTL_DSP(v) \ do { \ MEMORY_BARRIER(); \ OUTL (nc_dsp, (v)); \ } while (0) #define OUTONB_STD() \ do { \ MEMORY_BARRIER(); \ OUTONB (nc_dcntl, (STD|NOCOM)); \ } while (0) /* * Command control block states. */ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_WAIT (4) /* waiting for resource */ #define HS_DONEMASK (0x80) #define HS_COMPLETE (4|HS_DONEMASK) #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */ #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */ /* * Software Interrupt Codes */ #define SIR_BAD_SCSI_STATUS (1) #define SIR_SEL_ATN_NO_MSG_OUT (2) #define SIR_MSG_RECEIVED (3) #define SIR_MSG_WEIRD (4) #define SIR_NEGO_FAILED (5) #define SIR_NEGO_PROTO (6) #define SIR_SCRIPT_STOPPED (7) #define SIR_REJECT_TO_SEND (8) #define SIR_SWIDE_OVERRUN (9) #define SIR_SODL_UNDERRUN (10) #define SIR_RESEL_NO_MSG_IN (11) #define SIR_RESEL_NO_IDENTIFY (12) #define SIR_RESEL_BAD_LUN (13) #define SIR_TARGET_SELECTED (14) #define SIR_RESEL_BAD_I_T_L (15) #define SIR_RESEL_BAD_I_T_L_Q (16) #define SIR_ABORT_SENT (17) #define SIR_RESEL_ABORTED (18) #define SIR_MSG_OUT_DONE (19) #define SIR_COMPLETE_ERROR (20) #define SIR_DATA_OVERRUN (21) #define SIR_BAD_PHASE (22) #define SIR_MAX (22) /* * Extended error bit codes. * xerr_status field of struct sym_ccb. */ #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */ #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */ #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */ #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */ /* * Negotiation status. * nego_status field of struct sym_ccb. */ #define NS_SYNC (1) #define NS_WIDE (2) #define NS_PPR (3) /* * A CCB hashed table is used to retrieve CCB address * from DSA value. */ #define CCB_HASH_SHIFT 8 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT) #define CCB_HASH_MASK (CCB_HASH_SIZE-1) #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK) /* * Device flags. */ #define SYM_DISC_ENABLED (1) #define SYM_TAGS_ENABLED (1<<1) #define SYM_SCAN_BOOT_DISABLED (1<<2) #define SYM_SCAN_LUNS_DISABLED (1<<3) /* * Host adapter miscellaneous flags. */ #define SYM_AVOID_BUS_RESET (1) #define SYM_SCAN_TARGETS_HILO (1<<1) /* * Device quirks. * Some devices, for example the CHEETAH 2 LVD, disconnects without * saving the DATA POINTER then reselects and terminates the IO. * On reselection, the automatic RESTORE DATA POINTER makes the * CURRENT DATA POINTER not point at the end of the IO. * This behaviour just breaks our calculation of the residual. 
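 * (The residual is the requested transfer length minus the bytes
 * actually moved; it can only be derived from a CURRENT DATA POINTER
 * that was properly saved at disconnection, which is exactly what
 * such devices fail to do.)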
* For now, we just force an AUTO SAVE on disconnection and will * fix that in a further driver version. */ #define SYM_QUIRK_AUTOSAVE 1 /* * Misc. */ #define SYM_LOCK() mtx_lock(&np->mtx) #define SYM_LOCK_ASSERT(_what) mtx_assert(&np->mtx, (_what)) #define SYM_LOCK_DESTROY() mtx_destroy(&np->mtx) #define SYM_LOCK_INIT() mtx_init(&np->mtx, "sym_lock", NULL, MTX_DEF) #define SYM_LOCK_INITIALIZED() mtx_initialized(&np->mtx) #define SYM_UNLOCK() mtx_unlock(&np->mtx) #define SYM_SNOOP_TIMEOUT (10000000) #define SYM_PCI_IO PCIR_BAR(0) #define SYM_PCI_MMIO PCIR_BAR(1) #define SYM_PCI_RAM PCIR_BAR(2) #define SYM_PCI_RAM64 PCIR_BAR(3) /* * Back-pointer from the CAM CCB to our data structures. */ #define sym_hcb_ptr spriv_ptr0 /* #define sym_ccb_ptr spriv_ptr1 */ /* * We mostly have to deal with pointers. * Thus these typedef's. */ typedef struct sym_tcb *tcb_p; typedef struct sym_lcb *lcb_p; typedef struct sym_ccb *ccb_p; typedef struct sym_hcb *hcb_p; /* * Gather negotiable parameters value */ struct sym_trans { u8 scsi_version; u8 spi_version; u8 period; u8 offset; u8 width; u8 options; /* PPR options */ }; struct sym_tinfo { struct sym_trans current; struct sym_trans goal; struct sym_trans user; }; #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT /* * Global TCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the TCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_tcbh { /* * Scripts bus addresses of LUN table accessed from scripts. * LUN #0 is a special case, since multi-lun devices are rare, * and we we want to speed-up the general case and not waste * resources. */ u32 luntbl_sa; /* bus address of this table */ u32 lun0_sa; /* bus address of LCB #0 */ /* * Actual SYNC/WIDE IO registers value for this target. * 'sval', 'wval' and 'uval' are read from SCRIPTS and * so have alignment constraints. */ /*0*/ u_char uval; /* -> SCNTL4 register */ /*1*/ u_char sval; /* -> SXFER io register */ /*2*/ u_char filler1; /*3*/ u_char wval; /* -> SCNTL3 io register */ }; /* * Target Control Block */ struct sym_tcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_tcbh head; /* * LUN table used by the SCRIPTS processor. * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ /* * LUN table used by the C code. */ lcb_p lun0p; /* LCB of LUN #0 (usual case) */ #if SYM_CONF_MAX_LUN > 1 lcb_p *lunmp; /* Other LCBs [1..MAX_LUN] */ #endif /* * Bitmap that tells about LUNs that succeeded at least * 1 IO and therefore assumed to be a real device. * Avoid useless allocation of the LCB structure. */ u32 lun_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Bitmap that tells about LUNs that haven't yet an LCB * allocated (not discovered or LCB allocation failed). */ u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Transfer capabilities (SIP) */ struct sym_tinfo tinfo; /* * Keep track of the CCB used for the negotiation in order * to ensure that only 1 negotiation is queued at a time. */ ccb_p nego_cp; /* CCB used for the nego */ /* * Set when we want to reset the device. */ u_char to_reset; /* * Other user settable limits and options. * These limits are read from the NVRAM if present. */ u_char usrflags; u_short usrtags; }; /* * Assert some alignments required by the chip. 
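*
* The CTASSERT()s below encode that constraint: sval and wval are
* read by SCRIPTS together with the SXFER and SCNTL3 chip registers,
* so each field must sit on the same byte lane as its register, i.e.
* (offsetof(register) ^ offsetof(field)) & 3 must be zero.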
*/ CTASSERT(((offsetof(struct sym_reg, nc_sxfer) ^ offsetof(struct sym_tcb, head.sval)) &3) == 0); CTASSERT(((offsetof(struct sym_reg, nc_scntl3) ^ offsetof(struct sym_tcb, head.wval)) &3) == 0); /* * Global LCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the LCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_lcbh { /* * SCRIPTS address jumped by SCRIPTS on reselection. * For not probed logical units, this address points to * SCRIPTS that deal with bad LU handling (must be at * offset zero of the LCB for that reason). */ /*0*/ u32 resel_sa; /* * Task (bus address of a CCB) read from SCRIPTS that points * to the unique ITL nexus allowed to be disconnected. */ u32 itl_task_sa; /* * Task table bus address (read from SCRIPTS). */ u32 itlq_tbl_sa; }; /* * Logical Unit Control Block */ struct sym_lcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_lcbh head; /* * Task table read from SCRIPTS that contains pointers to * ITLQ nexuses. The bus address read from SCRIPTS is * inside the header. */ u32 *itlq_tbl; /* Kernel virtual address */ /* * Busy CCBs management. */ u_short busy_itlq; /* Number of busy tagged CCBs */ u_short busy_itl; /* Number of busy untagged CCBs */ /* * Circular tag allocation buffer. */ u_short ia_tag; /* Tag allocation index */ u_short if_tag; /* Tag release index */ u_char *cb_tags; /* Circular tags buffer */ /* * Set when we want to clear all tasks. */ u_char to_clear; /* * Capabilities. */ u_char user_flags; u_char current_flags; }; /* * Action from SCRIPTS on a task. * Is part of the CCB, but is also used separately to plug * error handling action to perform from SCRIPTS. */ struct sym_actscr { u32 start; /* Jumped by SCRIPTS after selection */ u32 restart; /* Jumped by SCRIPTS on relection */ }; /* * Phase mismatch context. * * It is part of the CCB and is used as parameters for the * DATA pointer. We need two contexts to handle correctly the * SAVED DATA POINTER. */ struct sym_pmc { struct sym_tblmove sg; /* Updated interrupted SG block */ u32 ret; /* SCRIPT return address */ }; /* * LUN control block lookup. * We use a direct pointer for LUN #0, and a table of * pointers which is only allocated for devices that support * LUN(s) > 0. */ #if SYM_CONF_MAX_LUN <= 1 #define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : 0 #else #define sym_lp(tp, lun) \ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0 #endif /* * Status are used by the host and the script processor. * * The last four bytes (status[4]) are copied to the * scratchb register (declared as scr0..scr3) just after the * select/reselect, and copied back just after disconnecting. * Inside the script the XX_REG are used. */ /* * Last four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define SS_PRT nc_scr2 #define HF_REG scr3 #define HF_PRT nc_scr3 /* * Last four bytes (host) */ #define actualquirks phys.head.status[0] #define host_status phys.head.status[1] #define ssss_status phys.head.status[2] #define host_flags phys.head.status[3] /* * Host flags */ #define HF_IN_PM0 1u #define HF_IN_PM1 (1u<<1) #define HF_ACT_PM (1u<<2) #define HF_DP_SAVED (1u<<3) #define HF_SENSE (1u<<4) #define HF_EXT_ERR (1u<<5) #define HF_DATA_IN (1u<<6) #ifdef SYM_CONF_IARB_SUPPORT #define HF_HINT_IARB (1u<<7) #endif /* * Global CCB HEADER. 
* * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the ccb to a global * address after selection (or reselection) and copied back * before disconnect. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_ccbh { /* * Start and restart SCRIPTS addresses (must be at 0). */ /*0*/ struct sym_actscr go; /* * SCRIPTS jump address that deal with data pointers. * 'savep' points to the position in the script responsible * for the actual transfer of data. * It's written on reception of a SAVE_DATA_POINTER message. */ u32 savep; /* Jump address to saved data pointer */ u32 lastp; /* SCRIPTS address at end of data */ u32 goalp; /* Not accessed for now from SCRIPTS */ /* * Status fields. */ u8 status[4]; }; /* * Data Structure Block * * During execution of a ccb by the script processor, the * DSA (data structure address) register points to this * substructure of the ccb. */ struct sym_dsb { /* * CCB header. * Also assumed at offset 0 of the sym_ccb structure. */ /*0*/ struct sym_ccbh head; /* * Phase mismatch contexts. * We need two to handle correctly the SAVED DATA POINTER. * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic * for address calculation from SCRIPTS. */ struct sym_pmc pm0; struct sym_pmc pm1; /* * Table data for Script */ struct sym_tblsel select; struct sym_tblmove smsg; struct sym_tblmove smsg_ext; struct sym_tblmove cmd; struct sym_tblmove sense; struct sym_tblmove wresid; struct sym_tblmove data [SYM_CONF_MAX_SG]; }; /* * Our Command Control Block */ struct sym_ccb { /* * This is the data structure which is pointed by the DSA * register when it is executed by the script processor. * It must be the first entry. */ struct sym_dsb phys; /* * Pointer to CAM ccb and related stuff. */ struct callout ch; /* callout handle */ union ccb *cam_ccb; /* CAM scsiio ccb */ u8 cdb_buf[16]; /* Copy of CDB */ u8 *sns_bbuf; /* Bounce buffer for sense data */ #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data) int data_len; /* Total data length */ int segments; /* Number of SG segments */ /* * Miscellaneous status'. */ u_char nego_status; /* Negotiation status */ u_char xerr_status; /* Extended error flags */ u32 extra_bytes; /* Extraneous bytes transferred */ /* * Message areas. * We prepare a message to be sent after selection. * We may use a second one if the command is rescheduled * due to CHECK_CONDITION or COMMAND TERMINATED. * Contents are IDENTIFY and SIMPLE_TAG. * While negotiating sync or wide transfer, * a SDTR or WDTR message is appended. */ u_char scsi_smsg [12]; u_char scsi_smsg2[12]; /* * Auto request sense related fields. */ u_char sensecmd[6]; /* Request Sense command */ u_char sv_scsi_status; /* Saved SCSI status */ u_char sv_xerr_status; /* Saved extended status */ int sv_resid; /* Saved residual */ /* * Map for the DMA of user data. */ void *arg; /* Argument for some callback */ bus_dmamap_t dmamap; /* DMA map for user data */ u_char dmamapped; #define SYM_DMA_NONE 0 #define SYM_DMA_READ 1 #define SYM_DMA_WRITE 2 /* * Other fields. */ u32 ccb_ba; /* BUS address of this CCB */ u_short tag; /* Tag for this transfer */ /* NO_TAG means no tag */ u_char target; u_char lun; ccb_p link_ccbh; /* Host adapter CCB hash chain */ SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */ u32 startp; /* Initial data pointer */ int ext_sg; /* Extreme data pointer, used */ int ext_ofs; /* to calculate the residual. 
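* The farthest position ever reached is kept here so
* that sym_compute_residual() can tell how much of
* the transfer actually completed.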
*/ u_char to_abort; /* Want this IO to be aborted */ }; #define CCB_BA(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl)) /* * Host Control Block */ struct sym_hcb { struct mtx mtx; /* * Global headers. * Due to their poor addressing capabilities, earlier * chips (810, 815, 825) copy part of the data structures * (CCB, TCB and LCB) into fixed areas. */ #ifdef SYM_CONF_GENERIC_SUPPORT struct sym_ccbh ccb_head; struct sym_tcbh tcb_head; struct sym_lcbh lcb_head; #endif /* * Idle task and invalid task actions and * their bus addresses. */ struct sym_actscr idletask, notask, bad_itl, bad_itlq; vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba; /* * Dummy lun table to protect us against a target * returning a bad lun number on reselection. */ u32 *badluntbl; /* Table physical address */ u32 badlun_sa; /* SCRIPT handler BUS address */ /* * Bus address of this host control block. */ u32 hcb_ba; /* * Bits 32-63 of the on-chip RAM bus address in LE format. * The START_RAM64 script loads the MMRS and MMWS from this * field. */ u32 scr_ram_seg; /* * Chip and controller identification. */ device_t device; /* * Initial value of some IO register bits. * These values are assumed to have been set by BIOS, and may * be used to probe adapter implementation differences. */ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4, sv_stest1; /* * Actual initial value of IO register bits used by the * driver. They are loaded at initialisation according to * features that are to be enabled/disabled. */ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; /* * Target data. */ #ifdef __amd64__ struct sym_tcb *target; #else struct sym_tcb target[SYM_CONF_MAX_TARGET]; #endif /* * Target control block bus address array used by the SCRIPT * on reselection. */ u32 *targtbl; u32 targtbl_ba; /* * CAM SIM information for this instance. */ struct cam_sim *sim; struct cam_path *path; /* * Allocated hardware resources. */ struct resource *irq_res; struct resource *io_res; struct resource *mmio_res; struct resource *ram_res; int ram_id; void *intr; /* * Bus stuff. * * My understanding of PCI is that all agents must share the * same addressing range and model. * But some hardware architectures provide complex and * brain-dead designs that break that model. * This driver only supports PCI compliant implementations and * deals with part of the BUS stuff complexity only to fit O/S * requirements. */ /* * DMA stuff. */ bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */ bus_dma_tag_t data_dmat; /* DMA tag for user data */ /* * BUS addresses of the chip */ vm_offset_t mmio_ba; /* MMIO BUS address */ int mmio_ws; /* MMIO Window size */ vm_offset_t ram_ba; /* RAM BUS address */ int ram_ws; /* RAM window size */ /* * SCRIPTS virtual and physical bus addresses. * 'script' is loaded in the on-chip RAM if present. * 'scripth' stays in main memory for all chips except the * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. */ u_char *scripta0; /* Copies of script and scripth */ u_char *scriptb0; /* Copies of script and scripth */ vm_offset_t scripta_ba; /* Actual script and scripth */ vm_offset_t scriptb_ba; /* bus addresses. */ vm_offset_t scriptb0_ba; u_short scripta_sz; /* Actual size of script A */ u_short scriptb_sz; /* Actual size of script B */ /* * Bus addresses, setup and patch methods for * the selected firmware.
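*
* fw_setup below fills the firmware's variable parts and fw_patch
* is re-applied by sym_init(); both hooks are selected per firmware
* by sym_find_firmware() further down.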
*/ struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */ struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ void (*fw_setup)(hcb_p np, const struct sym_fw *fw); void (*fw_patch)(hcb_p np); const char *fw_name; /* * General controller parameters and configuration. */ u_short device_id; /* PCI device id */ u_char revision_id; /* PCI device revision id */ u_int features; /* Chip features map */ u_char myaddr; /* SCSI id of the adapter */ u_char maxburst; /* log base 2 of dwords burst */ u_char maxwide; /* Maximum transfer width */ u_char minsync; /* Min sync period factor (ST) */ u_char maxsync; /* Max sync period factor (ST) */ u_char maxoffs; /* Max scsi offset (ST) */ u_char minsync_dt; /* Min sync period factor (DT) */ u_char maxsync_dt; /* Max sync period factor (DT) */ u_char maxoffs_dt; /* Max scsi offset (DT) */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char clock_divn; /* Number of clock divisors */ u32 clock_khz; /* SCSI clock frequency in KHz */ u32 pciclk_khz; /* Estimated PCI clock in KHz */ /* * Start queue management. * It is filled up by the host processor and accessed by the * SCRIPTS processor in order to start SCSI commands. */ volatile /* Prevent code optimizations */ u32 *squeue; /* Start queue virtual address */ u32 squeue_ba; /* Start queue BUS address */ u_short squeueput; /* Next free slot of the queue */ u_short actccbs; /* Number of allocated CCBs */ /* * Command completion queue. * It is the same size as the start queue to avoid overflow. */ u_short dqueueget; /* Next position to scan */ volatile /* Prevent code optimizations */ u32 *dqueue; /* Completion (done) queue */ u32 dqueue_ba; /* Done queue BUS address */ /* * Miscellaneous buffers accessed by the scripts-processor. * They shall be DWORD aligned, because they may be read or * written with a script command. */ u_char msgout[8]; /* Buffer for MESSAGE OUT */ u_char msgin [8]; /* Buffer for MESSAGE IN */ u32 lastmsg; /* Last SCSI message sent */ u_char scratch; /* Scratch for SCSI receive */ /* * Miscellaneous configuration and status parameters. */ u_char usrflags; /* Miscellaneous user flags */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char verbose; /* Verbosity for this controller*/ u32 cache; /* Used for cache test at init. */ /* * CCB lists and queue. */ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */ SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ /* * During error handling and/or recovery, * active CCBs that are to be completed with * error or requeued are moved from the busy_ccbq * to the comp_ccbq prior to completion. */ SYM_QUEHEAD comp_ccbq; /* * CAM CCB pending queue. */ SYM_QUEHEAD cam_ccbq; /* * IMMEDIATE ARBITRATION (IARB) control. * * We keep track in 'last_cp' of the last CCB that has been * queued to the SCRIPTS processor and clear 'last_cp' when * this CCB completes. If last_cp is not zero at the moment * we queue a new CCB, we set a flag in 'last_cp' that is * used by the SCRIPTS as a hint for setting IARB. * We donnot set more than 'iarb_max' consecutive hints for * IARB in order to leave devices a chance to reselect. * By the way, any non zero value of 'iarb_max' is unfair. :) */ #ifdef SYM_CONF_IARB_SUPPORT u_short iarb_max; /* Max. # consecutive IARB hints*/ u_short iarb_count; /* Actual # of these hints */ ccb_p last_cp; #endif /* * Command abort handling. * We need to synchronize tightly with the SCRIPTS * processor in order to handle things correctly. 
*/ u_char abrt_msg[4]; /* Message to send buffer */ struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ struct sym_tblsel abrt_sel; /* Sync params for selection */ u_char istat_sem; /* Tells the chip to stop (SEM) */ }; #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) /* * Return the name of the controller. */ static __inline const char *sym_name(hcb_p np) { return device_get_nameunit(np->device); } /*--------------------------------------------------------------------------*/ /*------------------------------ FIRMWARES ---------------------------------*/ /*--------------------------------------------------------------------------*/ /* * This stuff will be moved to a separate source file when * the driver is broken up into several source modules. */ /* * Macros used for all firmwares. */ #define SYM_GEN_A(s, label) ((short) offsetof(s, label)), #define SYM_GEN_B(s, label) ((short) offsetof(s, label)), #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) #ifdef SYM_CONF_GENERIC_SUPPORT /* * Allocate firmware #1 script area. */ #define SYM_FWA_SCR sym_fw1a_scr #define SYM_FWB_SCR sym_fw1b_scr #include <dev/sym/sym_fw1.h> static const struct sym_fwa_ofs sym_fw1a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw1b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Allocate firmware #2 script area. */ #define SYM_FWA_SCR sym_fw2a_scr #define SYM_FWB_SCR sym_fw2b_scr #include <dev/sym/sym_fw2.h> static const struct sym_fwa_ofs sym_fw2a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; static const struct sym_fwb_ofs sym_fw2b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) SYM_GEN_B(struct SYM_FWB_SCR, start64) SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_GEN_A #undef SYM_GEN_B #undef PADDR_A #undef PADDR_B #ifdef SYM_CONF_GENERIC_SUPPORT /* * Patch routine for firmware #1. */ static void sym_fw1_patch(hcb_p np) { struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some data in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Patch routine for firmware #2. */ static void sym_fw2_patch(hcb_p np) { struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Remove LED support if not needed.
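*
* The LED is driven from SCRIPTS by instructions planted at the
* idle, reselected and start labels; overwriting their first opcode
* with SCR_NO_OP below disables that support without relinking the
* firmware.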
*/ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some variable in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); /* * Remove the load of SCNTL4 on reselection if not a C10. */ if (!(np->features & FE_C10)) { scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); scripta0->resel_scntl4[1] = cpu_to_scr(0); } /* * Remove a couple of work-arounds specific to C1010 if * they are not desirable. See `sym_fw2.h' for more details. */ if (!(np->device_id == PCI_ID_LSI53C1010_2 && np->revision_id < 0x1 && np->pciclk_khz < 60000)) { scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); scripta0->datao_phase[1] = cpu_to_scr(0); } if (!(np->device_id == PCI_ID_LSI53C1010 && /* np->revision_id < 0xff */ 1)) { scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); scripta0->sel_done[1] = cpu_to_scr(0); } /* * Patch some other variables in SCRIPTS. * These ones are loaded by the SCRIPTS processor. */ scriptb0->pm0_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm0_data)); scriptb0->pm1_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm1_data)); } /* * Fill the data area in scripts. * To be done for all firmwares. */ static void sym_fw_fill_data (u32 *in, u32 *out) { int i; for (i = 0; i < SYM_CONF_MAX_SG; i++) { *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; *in++ = offsetof (struct sym_dsb, data[i]); *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; *out++ = offsetof (struct sym_dsb, data[i]); } } /* * Setup useful script bus addresses. * To be done for all firmwares. */ static void sym_fw_setup_bus_addresses(hcb_p np, const struct sym_fw *fw) { u32 *pa; const u_short *po; int i; /* * Build the bus address table for script A * from the script A offset table. */ po = (const u_short *) fw->a_ofs; pa = (u32 *) &np->fwa_bas; for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) pa[i] = np->scripta_ba + po[i]; /* * Same for script B. */ po = (const u_short *) fw->b_ofs; pa = (u32 *) &np->fwb_bas; for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) pa[i] = np->scriptb_ba + po[i]; } #ifdef SYM_CONF_GENERIC_SUPPORT /* * Setup routine for firmware #1. */ static void sym_fw1_setup(hcb_p np, const struct sym_fw *fw) { struct sym_fw1a_scr *scripta0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Setup routine for firmware #2. */ static void sym_fw2_setup(hcb_p np, const struct sym_fw *fw) { struct sym_fw2a_scr *scripta0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } /* * Allocate firmware descriptors. 
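*
* As a reminder of the mechanics above: sym_fw_fill_data() emits one
* table indirect CHMOV instruction per possible scatter/gather
* segment, each referencing offsetof(struct sym_dsb, data[i]), and
* sym_fw_setup_bus_addresses() turns the per-firmware offset tables
* into absolute script bus addresses.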
*/ #ifdef SYM_CONF_GENERIC_SUPPORT static const struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic"); #endif /* SYM_CONF_GENERIC_SUPPORT */ static const struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based"); /* * Find the most appropriate firmware for a chip. */ static const struct sym_fw * sym_find_firmware(const struct sym_pci_chip *chip) { if (chip->features & FE_LDSTR) return &sym_fw2; #ifdef SYM_CONF_GENERIC_SUPPORT else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) return &sym_fw1; #endif else return NULL; } /* * Bind a script to physical addresses. */ static void sym_fw_bind_script (hcb_p np, u32 *start, int len) { u32 opcode, new, old, tmp1, tmp2; u32 *end, *cur; int relocs; cur = start; end = start + len/4; while (cur < end) { opcode = *cur; /* * If we forget to change the length * in scripts, a field will be * padded with 0. This is an illegal * command. */ if (opcode == 0) { printf ("%s: ERROR0 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); ++cur; continue; } /* * We use the bogus value 0xf00ff00f ;-) * to reserve data area in SCRIPTS. */ if (opcode == SCR_DATA_ZERO) { *cur++ = 0; continue; } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%d: <%x>\n", (int) (cur-start), (unsigned)opcode); /* * We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xf: /* * LOAD / STORE DSA relative, don't relocate. */ relocs = 0; break; case 0xe: /* * LOAD / STORE absolute. */ relocs = 1; break; case 0xc: /* * COPY has TWO arguments. */ relocs = 2; tmp1 = cur[1]; tmp2 = cur[2]; if ((tmp1 ^ tmp2) & 3) { printf ("%s: ERROR1 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); } /* * If PREFETCH feature not enabled, remove * the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) { opcode = (opcode & ~SCR_NO_FLUSH); } break; case 0x0: /* * MOVE/CHMOV (absolute address) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 1; break; case 0x1: /* * MOVE/CHMOV (table indirect) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 0; break; case 0x8: /* * JUMP / CALL * dont't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ relocs = 2; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; } /* * Scriptify:) the opcode. */ *cur++ = cpu_to_scr(opcode); /* * If no relocation, assume 1 argument * and just scriptize:) it. */ if (!relocs) { *cur = cpu_to_scr(*cur); ++cur; continue; } /* * Otherwise performs all needed relocations. */ while (relocs--) { old = *cur; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->mmio_ba; break; case RELOC_LABEL_A: new = (old & ~RELOC_MASK) + np->scripta_ba; break; case RELOC_LABEL_B: new = (old & ~RELOC_MASK) + np->scriptb_ba; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->hcb_ba; break; case 0: /* * Don't relocate a 0 address. * They are mostly used for patched or * script self-modified areas. */ if (old == 0) { new = old; break; } /* fall through */ default: new = 0; panic("sym_fw_bind_script: " "weird relocation %x\n", old); break; } *cur++ = cpu_to_scr(new); } } } /*---------------------------------------------------------------------------*/ /*--------------------------- END OF FIRMWARES -----------------------------*/ /*---------------------------------------------------------------------------*/ /* * Function prototypes. 
*/ static void sym_save_initial_setting (hcb_p np); static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); static void sym_put_start_queue (hcb_p np, ccb_p cp); static void sym_chip_reset (hcb_p np); static void sym_soft_reset (hcb_p np); static void sym_start_reset (hcb_p np); static int sym_reset_scsi_bus (hcb_p np, int enab_int); static int sym_wakeup_done (hcb_p np); static void sym_flush_busy_queue (hcb_p np, int cam_status); static void sym_flush_comp_queue (hcb_p np, int cam_status); static void sym_init (hcb_p np, int reason); static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp); static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak); static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); static void sym_intr (void *arg); static void sym_poll (struct cam_sim *sim); static void sym_recover_scsi_int (hcb_p np, u_char hsts); static void sym_int_sto (hcb_p np); static void sym_int_udc (hcb_p np); static void sym_int_sbmc (hcb_p np); static void sym_int_par (hcb_p np, u_short sist); static void sym_int_ma (hcb_p np); static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task); static void sym_sir_bad_scsi_status (hcb_p np, ccb_p cp); static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); static void sym_sir_task_recovery (hcb_p np, int num); static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs); static int sym_compute_residual (hcb_p np, ccb_p cp); static int sym_show_msg (u_char * msg); static void sym_print_msg (ccb_p cp, char *label, u_char *msg); static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); static void sym_int_sir (hcb_p np); static void sym_free_ccb (hcb_p np, ccb_p cp); static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); static ccb_p sym_alloc_ccb (hcb_p np); static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa); static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); static int sym_snooptest (hcb_p np); static void sym_selectclock(hcb_p np, u_char scntl3); static void sym_getclock (hcb_p np, int mult); static int sym_getpciclock (hcb_p np); static void sym_complete_ok (hcb_p np, ccb_p cp); static void sym_complete_error (hcb_p np, ccb_p cp); static void sym_callout (void *arg); static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out); static void sym_reset_dev (hcb_p np, union ccb *ccb); static void sym_action (struct cam_sim *sim, union ccb *ccb); static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static int sym_scatter_sg_physical (hcb_p np, ccb_p cp, 
bus_dma_segment_t *psegs, int nsegs); static void sym_action2 (struct cam_sim *sim, union ccb *ccb); static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts); static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts); static const struct sym_pci_chip *sym_find_pci_chip (device_t dev); static int sym_pci_probe (device_t dev); static int sym_pci_attach (device_t dev); static void sym_pci_free (hcb_p np); static int sym_cam_attach (hcb_p np); static void sym_cam_free (hcb_p np); static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); /* * Print something which allows to retrieve the controller type, * unit, target, lun concerned by a kernel message. */ static void PRINT_TARGET (hcb_p np, int target) { printf ("%s:%d:", sym_name(np), target); } static void PRINT_LUN(hcb_p np, int target, int lun) { printf ("%s:%d:%d:", sym_name(np), target, lun); } static void PRINT_ADDR (ccb_p cp) { if (cp && cp->cam_ccb) xpt_print_path(cp->cam_ccb->ccb_h.path); } /* * Take into account this ccb in the freeze count. */ static void sym_freeze_cam_ccb(union ccb *ccb) { if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } } /* * Set the status field of a CAM CCB. */ static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } /* * Get the status field of a CAM CCB. */ static __inline int sym_get_cam_status(union ccb *ccb) { return ccb->ccb_h.status & CAM_STATUS_MASK; } /* * Enqueue a CAM CCB. */ static void sym_enqueue_cam_ccb(ccb_p cp) { hcb_p np; union ccb *ccb; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); ccb->ccb_h.status = CAM_REQ_INPROG; callout_reset_sbt(&cp->ch, SBT_1MS * ccb->ccb_h.timeout, 0, sym_callout, (caddr_t)ccb, 0); ccb->ccb_h.status |= CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = np; sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); } /* * Complete a pending CAM CCB. */ static void sym_xpt_done(hcb_p np, union ccb *ccb, ccb_p cp) { SYM_LOCK_ASSERT(MA_OWNED); if (ccb->ccb_h.status & CAM_SIM_QUEUED) { callout_stop(&cp->ch); sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = NULL; } xpt_done(ccb); } static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) { SYM_LOCK_ASSERT(MA_OWNED); sym_set_cam_status(ccb, cam_status); xpt_done(ccb); } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 
0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static __inline void sym_init_burst(hcb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Print out the list of targets that have some flag disabled by user. */ static void sym_print_targets_flag(hcb_p np, int mask, char *msg) { int cnt; int i; for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { if (i == np->myaddr) continue; if (np->target[i].usrflags & mask) { if (!cnt++) printf("%s: %s disabled for targets", sym_name(np), msg); printf(" %d", i); } } if (cnt) printf(".\n"); } /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (hcb_p np) { np->sv_scntl0 = INB(nc_scntl0) & 0x0a; np->sv_scntl3 = INB(nc_scntl3) & 0x07; np->sv_dmode = INB(nc_dmode) & 0xce; np->sv_dcntl = INB(nc_dcntl) & 0xa8; np->sv_ctest3 = INB(nc_ctest3) & 0x01; np->sv_ctest4 = INB(nc_ctest4) & 0x80; np->sv_gpcntl = INB(nc_gpcntl); np->sv_stest1 = INB(nc_stest1); np->sv_stest2 = INB(nc_stest2) & 0x20; np->sv_stest4 = INB(nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(nc_scntl4); np->sv_ctest5 = INB(nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(nc_ctest5) & 0x24; } /* * Prepare io register values used by sym_init() according * to selected and supported features. */ static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) { u_char burst_max; u32 period; int i; /* * Wide ? */ np->maxwide = (np->features & FE_WIDE)? 1 : 0; /* * Get the frequency of the chip's clock. */ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000; np->clock_khz *= np->multiplier; if (np->clock_khz != 40000) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = howmany(4 * div_10M[0], np->clock_khz); if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = howmany(period, 40); /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. 
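*
* These figures follow the period encoding used by sym_getsync()
* further down: a DT factor of 9 stands for a 12.5 ns period, i.e.
* 80 mega-transfers per second, which is Ultra-160 on a 16 bit bus.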
*/ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = 62; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) #ifdef __LP64__ np->rv_ccntl1 |= (XTIMOD | EXTIBMV); #else np->rv_ccntl1 |= (DDAC); #endif /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010 Errata. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x2) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((np->device_id == PCI_ID_SYM53C810 && np->revision_id >= 0x10 && np->revision_id <= 0x11) || (np->device_id == PCI_ID_SYM53C860 && np->revision_id <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. */ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ if (SYM_SETUP_PCI_PARITY) np->rv_ctest4 |= MPEE; /* Master parity checking */ if (SYM_SETUP_SCSI_PARITY) np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; sym_nvram_setup_host (np, nvram); #ifdef __sparc64__ np->myaddr = OF_getscsinitid(np->device); #endif /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the * current BUS mode through the STEST4 IO register. * - For previous generation chips (825/825A/875), * user has to tell us how to check against HVD, * since a 100% safe algorithm is not possible. 
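* The heuristic below (SYM_SETUP_SCSI_DIFF == 1) trusts the
* differential bit saved from STEST2 when the BIOS left SCNTL3
* initialized, and otherwise, on boards with Symbios NVRAM, samples
* the GPIO line that such boards conventionally wire to the HVD
* sensing logic.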
*/ np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && np->device_id == PCI_ID_SYM53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tcb_p tp = &np->target[i]; tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2; tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2; tp->tinfo.user.period = np->minsync; if (np->features & FE_ULTRA3) tp->tinfo.user.period = np->minsync_dt; tp->tinfo.user.offset = np->maxoffs; tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; sym_nvram_setup_target (np, i, nvram); /* * For now, guess PPR/DT support from the period * and BUS width. */ if (np->features & FE_ULTRA3) { if (tp->tinfo.user.period <= 9 && tp->tinfo.user.width == BUS_16_BIT) { tp->tinfo.user.options |= PPR_OPT_DT; tp->tinfo.user.offset = np->maxoffs_dt; tp->tinfo.user.spi_version = 3; } } if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. */ i = nvram->type; printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), i == SYM_SYMBIOS_NVRAM ? "Symbios" : (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose > 1) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* * Let user be aware of targets that have some disable flags set. */ sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); if (sym_verbose) sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, "SCAN FOR LUNS"); return 0; } /* * Prepare the next negotiation message if needed. 
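*
* For example, the synchronous request built below is the five byte
* extended message { M_EXTENDED, 3, M_X_SYNC_REQ, period, offset },
* while a PPR request is eight bytes long and additionally carries
* the transfer width and the DT option bits.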
* * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes. */ static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) { tcb_p tp = &np->target[cp->target]; int msglen = 0; /* * Early C1010 chips need a work-around for DT * data transfer to work. */ if (!(np->features & FE_U3EN)) tp->tinfo.goal.options = 0; /* * negotiate using PPR ? */ if (tp->tinfo.goal.options & PPR_OPT_MASK) nego = NS_PPR; /* * negotiate wide transfers ? */ else if (tp->tinfo.current.width != tp->tinfo.goal.width) nego = NS_WIDE; /* * negotiate synchronous transfers? */ else if (tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset) nego = NS_SYNC; switch (nego) { case NS_SYNC: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 3; msgptr[msglen++] = M_X_SYNC_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; break; case NS_WIDE: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 2; msgptr[msglen++] = M_X_WIDE_REQ; msgptr[msglen++] = tp->tinfo.goal.width; break; case NS_PPR: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 6; msgptr[msglen++] = M_X_PPR_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = 0; msgptr[msglen++] = tp->tinfo.goal.offset; msgptr[msglen++] = tp->tinfo.goal.width; msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; break; } cp->nego_status = nego; if (nego) { tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr); } } return msglen; } /* * Insert a job into the start queue. */ static void sym_put_start_queue(hcb_p np, ccb_p cp) { u_short qidx; #ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. * We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations. */ if (np->last_cp && np->iarb_count < np->iarb_max) { np->last_cp->host_flags |= HF_HINT_IARB; ++np->iarb_count; } else np->iarb_count = 0; np->last_cp = cp; #endif /* * Insert first the idle task and then our job. * The MB should ensure proper ordering. */ qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0; np->squeue [qidx] = cpu_to_scr(np->idletask_ba); MEMORY_BARRIER(); np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); np->squeueput = qidx; if (DEBUG_FLAGS & DEBUG_QUEUE) printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); /* * Script processor may be waiting for reselect. * Wake it up. */ MEMORY_BARRIER(); OUTB (nc_istat, SIGP|np->istat_sem); } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). * On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (hcb_p np) { OUTB (nc_istat, SRST); UDELAY (10); OUTB (nc_istat, 0); UDELAY(2000); /* For BUS MODE to settle */ } /* * Soft reset the chip. * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. 
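* The routine below therefore sets CABRT in ISTAT, drains pending
* SCSI interrupt conditions by reading SIST, waits for the resulting
* DMA interrupt and acknowledges it by reading DSTAT, and only then
* pulses SRST through sym_chip_reset().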
*/ static void sym_soft_reset (hcb_p np) { u_char istat; int i; OUTB (nc_istat, CABRT); for (i = 1000000 ; i ; --i) { istat = INB (nc_istat); if (istat & SIP) { INW (nc_sist); continue; } if (istat & DIP) { OUTB (nc_istat, 0); INB (nc_dstat); break; } } if (!i) printf("%s: unable to abort current chip operation.\n", sym_name(np)); sym_chip_reset (np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(hcb_p np) { (void) sym_reset_scsi_bus(np, 1); } static int sym_reset_scsi_bus(hcb_p np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW (nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB (nc_stest3, TE); OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); OUTB (nc_scntl1, CRST); UDELAY (200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. */ term = INB(nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!(np->features & FE_WIDE)) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB (nc_scntl1, 0); /* MDELAY(100); */ return retv; } /* * The chip may have completed jobs. Look at the DONE QUEUE. * * On architectures that may reorder LOAD/STORE operations, * a memory barrier may be needed after the reading of the * so-called `flag' and prior to dealing with the data. */ static int sym_wakeup_done (hcb_p np) { ccb_p cp; int i, n; u32 dsa; SYM_LOCK_ASSERT(MA_OWNED); n = 0; i = np->dqueueget; while (1) { dsa = scr_to_cpu(np->dqueue[i]); if (!dsa) break; np->dqueue[i] = 0; if ((i = i+2) >= MAX_QUEUE*2) i = 0; cp = sym_ccb_from_dsa(np, dsa); if (cp) { MEMORY_BARRIER(); sym_complete_ok (np, cp); ++n; } else printf ("%s: bad DSA (%x) in done queue.\n", sym_name(np), (u_int) dsa); } np->dqueueget = i; return n; } /* * Complete all active CCBs with error. * Used on CHIP/SCSI RESET. */ static void sym_flush_busy_queue (hcb_p np, int cam_status) { /* * Move all active CCBs to the COMP queue * and flush this queue. */ sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); sym_que_init(&np->busy_ccbq); sym_flush_comp_queue(np, cam_status); } /* * Start chip. * * 'reason' means: * 0: initialisation. * 1: SCSI BUS RESET delivered or received. * 2: SCSI BUS MODE changed. */ static void sym_init (hcb_p np, int reason) { int i; u32 phys; SYM_LOCK_ASSERT(MA_OWNED); /* * Reset chip if asked, otherwise just clear fifos. */ if (reason == 1) sym_soft_reset(np); else { OUTB (nc_stest3, TE|CSF); OUTONB (nc_ctest3, CLF); } /* * Clear Start Queue */ phys = np->squeue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) { np->squeue[i] = cpu_to_scr(np->idletask_ba); np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); } np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); /* * Start at first entry. 
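* Each slot is a pair of 32 bit words: the job entry (the idle task
* for a free slot) followed by a SCRIPTS jump to the next pair, the
* last jump wrapping to the queue origin, so the SCRIPTS processor
* walks the queue as a circular list.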
*/ np->squeueput = 0; /* * Clear Done Queue */ phys = np->dqueue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) { np->dqueue[i] = 0; np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); } np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); /* * Start at first entry. */ np->dqueueget = 0; /* * Install patches in scripts. * This also points the start and done queue pointers * used from SCRIPTS at their initial positions. */ np->fw_patch(np); /* * Wakeup all pending jobs. */ sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); /* * Init chip. */ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */ UDELAY (2000); /* The 895 needs time for the bus mode to settle */ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); /* full arb., ena parity, par->ATN */ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */ sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */ OUTB (nc_istat , SIGP ); /* Signal Process */ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */ /* Extended Sreq/Sack filtering not supported on the C10 */ if (np->features & FE_C10) OUTB (nc_stest2, np->rv_stest2); else OUTB (nc_stest2, EXT|np->rv_stest2); OUTB (nc_stest3, TE); /* TolerANT enable */ OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ /* * For now, disable AIP generation on C1010-66. */ if (np->device_id == PCI_ID_LSI53C1010_2) OUTB (nc_aipcntl1, DISAIP); /* * C10101 Errata. * Errant SGE's when in narrow. Write bits 4 & 5 of * STEST1 register to disable SGE. We probably should do * that from SCRIPTS for each selection/reselection, but * I just don't want. :) */ if (np->device_id == PCI_ID_LSI53C1010 && /* np->revision_id < 0xff */ 1) OUTB (nc_stest1, INB(nc_stest1) | 0x30); /* * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. * Disable overlapped arbitration for some dual function devices, * regardless of revision id (kind of post-chip-design feature. ;-)) */ if (np->device_id == PCI_ID_SYM53C875) OUTB (nc_ctest0, (1<<5)); else if (np->device_id == PCI_ID_SYM53C896) np->rv_ccntl0 |= DPR; /* * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing * and/or hardware phase mismatch, since only such chips * seem to support those IO registers. */ if (np->features & (FE_DAC|FE_NOPM)) { OUTB (nc_ccntl0, np->rv_ccntl0); OUTB (nc_ccntl1, np->rv_ccntl1); } /* * If phase mismatch handled by scripts (895A/896/1010), * set PM jump addresses. */ if (np->features & FE_NOPM) { OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle)); OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle)); } /* * Enable GPIO0 pin for writing if LED support from SCRIPTS. * Also set GPIO5 and clear GPIO6 if hardware LED control. */ if (np->features & FE_LED0) OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01); else if (np->features & FE_LEDC) OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20); /* * enable ints */ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); OUTB (nc_dien , MDPE|BF|SSI|SIR|IID); /* * For 895/6 enable SBMC interrupt and save current SCSI bus mode. * Try to eat the spurious SBMC interrupt that may occur when * we reset the chip but not the SCSI BUS (at initialization).
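* Reading SIST after the 100 ms settle delay below acknowledges and
* discards that spurious SBMC condition before it can be mistaken
* for a real bus mode change.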
*/ if (np->features & (FE_ULTRA2|FE_ULTRA3)) { OUTONW (nc_sien, SBMC); if (reason == 0) { MDELAY(100); INW (nc_sist); } np->scsi_mode = INB (nc_stest4) & SMODE; } /* * Fill in target structure. * Reinitialize usrsync. * Reinitialize usrwide. * Prepare sync negotiation according to actual SCSI bus mode. */ for (i=0;i<SYM_CONF_MAX_TARGET;i++) { tcb_p tp = &np->target[i]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = BUS_8_BIT; tp->tinfo.current.options = 0; } /* * Download SCSI SCRIPTS to on-chip RAM if present, * and start script processor. */ if (np->ram_ba) { if (sym_verbose > 1) printf ("%s: Downloading SCSI SCRIPTS.\n", sym_name(np)); if (np->ram_ws == 8192) { OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz); OUTL (nc_mmws, np->scr_ram_seg); OUTL (nc_mmrs, np->scr_ram_seg); OUTL (nc_sfs, np->scr_ram_seg); phys = SCRIPTB_BA (np, start64); } else phys = SCRIPTA_BA (np, init); OUTRAM_OFF(0, np->scripta0, np->scripta_sz); } else phys = SCRIPTA_BA (np, init); np->istat_sem = 0; OUTL (nc_dsa, np->hcb_ba); OUTL_DSP (phys); /* * Notify the XPT about the RESET condition. */ if (reason != 0) xpt_async(AC_BUS_RESET, np->path, NULL); } /* * Get clock factor and sync divisor for a given * synchronous factor period. */ static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) { u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */ u32 fak; /* Sync factor in sxfer */ u32 per; /* Period in tenths of ns */ u32 kpc; /* (per * clk) */ int ret; /* * Compute the synchronous period in tenths of nanoseconds */ if (dt && sfac <= 9) per = 125; else if (sfac <= 10) per = 250; else if (sfac == 11) per = 303; else if (sfac == 12) per = 500; else per = 40 * sfac; ret = per; kpc = per * clk; if (dt) kpc <<= 1; /* * For the earliest C10 revision 0, we cannot use extra * clocks for the setting of the SCSI clocking. * Note that this limits the lowest sync data transfer * to 5 Mega-transfers per second and may result in * using higher clock divisors. */ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period. */ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak > 2) {fak = 2; ret = -1;} /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * Tell the SCSI layer about the new transfer parameters.
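*
* (Worked example for sym_getsync() above, assuming a non-C10 chip
* with an 80000 kHz SCSI clock and 7 divisors: sfac = 12 gives
* per = 500 and kpc = 40000000; the divisor scan stops at div = 0
* since 40000000 >= div_10M[0] << 2, and the ST formula yields
* fak = (40000000 - 1) / 10000000 + 1 - 4 = 0, i.e. a 50 ns transfer
* period, matching the requested factor exactly.)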
*/ static void sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid) { struct ccb_trans_settings cts; struct cam_path *path; int sts; tcb_p tp = &np->target[target]; sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target, CAM_LUN_WILDCARD); if (sts != CAM_REQ_CMP) return; bzero(&cts, sizeof(cts)); #define cts__scsi (cts.proto_specific.scsi) #define cts__spi (cts.xport_specific.spi) cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; cts.protocol_version = tp->tinfo.current.scsi_version; cts.transport_version = tp->tinfo.current.spi_version; cts__spi.valid = spi_valid; if (spi_valid & CTS_SPI_VALID_SYNC_RATE) cts__spi.sync_period = tp->tinfo.current.period; if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET) cts__spi.sync_offset = tp->tinfo.current.offset; if (spi_valid & CTS_SPI_VALID_BUS_WIDTH) cts__spi.bus_width = tp->tinfo.current.width; if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS) cts__spi.ppr_options = tp->tinfo.current.options; #undef cts__spi #undef cts__scsi xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, path, &cts); xpt_free_path(path); } #define SYM_SPI_VALID_WDTR \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_SDTR \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_PPR \ CTS_SPI_VALID_PPR_OPTIONS | \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET /* * We received a WDTR. * Let everything be aware of the changes. */ static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.current.offset = 0; tp->tinfo.current.period = 0; tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR); } /* * We received a SDTR. * Let everything be aware of the changes. */ static void sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0; sym_settrans(np, cp, 0, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR); } /* * We received a PPR. * Let everything be aware of the changes. */ static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, dt, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = dt; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR); } /* * Switch trans mode for current job and it's target. 
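 *
 *  (Note on the register packing done below: sval is written to SXFER
 *  and carries the sync offset plus, on pre-C10 chips, the extra clock
 *  factor; wval goes to SCNTL3 and carries the clock divisor, the EWS
 *  wide bit and the ULTRA bit; uval is only written on the C10 family,
 *  to SCNTL4, and carries the XCLKH/XCLKS extra clock and U3EN DT bits.)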
*/ static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { SYM_QUEHEAD *qp; union ccb *ccb; tcb_p tp; u_char target = INB (nc_sdid) & 0x0f; u_char sval, wval, uval; assert (cp); if (!cp) return; ccb = cp->cam_ccb; assert (ccb); if (!ccb) return; assert (target == (cp->target & 0xf)); tp = &np->target[target]; sval = tp->head.sval; wval = tp->head.wval; uval = tp->head.uval; #if 0 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", sval, wval, uval, np->rv_scntl3); #endif /* * Set the offset. */ if (!(np->features & FE_C10)) sval = (sval & ~0x1f) | ofs; else sval = (sval & ~0x3f) | ofs; /* * Set the sync divisor and extra clock factor. */ if (ofs != 0) { wval = (wval & ~0x70) | ((div+1) << 4); if (!(np->features & FE_C10)) sval = (sval & ~0xe0) | (fak << 5); else { uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); } } /* * Set the bus width. */ wval = wval & ~EWS; if (wide != 0) wval |= EWS; /* * Set misc. ultra enable bits. */ if (np->features & FE_C10) { uval = uval & ~(U3EN|AIPCKEN); if (dt) { assert(np->features & FE_U3EN); uval |= U3EN; } } else { wval = wval & ~ULTRA; if (per <= 12) wval |= ULTRA; } /* * Stop there if sync parameters are unchanged. */ if (tp->head.sval == sval && tp->head.wval == wval && tp->head.uval == uval) return; tp->head.sval = sval; tp->head.wval = wval; tp->head.uval = uval; /* * Disable extended Sreq/Sack filtering if per < 50. * Not supported on the C1010. */ if (per < 50 && !(np->features & FE_C10)) OUTOFFB (nc_stest2, EXT); /* * set actual value and sync_status */ OUTB (nc_sxfer, tp->head.sval); OUTB (nc_scntl3, tp->head.wval); if (np->features & FE_C10) { OUTB (nc_scntl4, tp->head.uval); } /* * patch ALL busy ccbs of this target. */ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->target != target) continue; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; if (np->features & FE_C10) { cp->phys.select.sel_scntl4 = tp->head.uval; } } } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sxfer: (see the manual) * scntl3: (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. 
 *
 *  First 24 registers of the chip:
 *	r0..rf
 */
static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL (nc_dsp);

	if	(dsp > np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	}
	else {
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= 0;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	printf ("%s: regdump:", sym_name(np));
	for (i = 0; i < 24; i++)
		printf (" %02x", (unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 *  PCI BUS error, read the PCI status register.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
		if (pci_sts & 0xf900) {
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
			printf("%s: PCI STATUS = 0x%04x\n",
				sym_name(np), pci_sts & 0xf900);
		}
	}
}

/*
 *  chip interrupt handler
 *
 *  In normal situations, interrupt conditions occur one at
 *  a time. But when something bad happens on the SCSI BUS,
 *  the chip may raise several interrupt flags before
 *  stopping and interrupting the CPU. The additional
 *  interrupt flags are stacked in some extra registers
 *  after the SIP and/or DIP flag has been raised in the
 *  ISTAT. After the CPU has read the interrupt condition
 *  flag from SIST or DSTAT, the chip unstacks the other
 *  interrupt flags and sets the corresponding bits in
 *  SIST or DSTAT. Since the chip starts stacking once the
 *  SIP or DIP flag is set, there is a small window of time
 *  where the stacking does not occur.
 *
 *  Typically, multiple interrupt conditions may happen in
 *  the following situations:
 *
 *  - SCSI parity error + Phase mismatch  (PAR|MA)
 *    When a parity error is detected in input phase
 *    and the device switches to msg-in phase inside a
 *    block MOV.
 *  - SCSI parity error + Unexpected disconnect  (PAR|UDC)
 *    When a stupid device does not want to handle the
 *    recovery of an SCSI parity error.
 *  - Some combinations of STO, PAR, UDC, ...
 *    When using non compliant SCSI stuff, when user is
 *    doing non compliant hot tampering on the BUS, when
 *    something really bad happens to a device, etc ...
 *
 *  The heuristic suggested by SYMBIOS to handle
 *  multiple interrupts is to try unstacking all
 *  interrupt conditions and to handle them on some
 *  priority based on error severity.
 *  This will work when the unstacking has been
 *  successful, but we cannot be 100% sure of that,
 *  since the CPU may have been faster to unstack than
 *  the chip is able to stack. Hmmm ... But it seems that
 *  such a situation is very unlikely to happen.
* * If this happen, for example STO caught by the CPU * then UDC happenning before the CPU have restarted * the SCRIPTS, the driver may wrongly complete the * same command on UDC, since the SCRIPTS didn't restart * and the DSA still points to the same command. * We avoid this situation by setting the DSA to an * invalid value when the CCB is completed and before * restarting the SCRIPTS. * * Another issue is that we need some section of our * recovery procedures to be somehow uninterruptible but * the SCRIPTS processor does not provides such a * feature. For this reason, we handle recovery preferently * from the C code and check against some SCRIPTS critical * sections from the C code. * * Hopefully, the interrupt handling of the driver is now * able to resist to weird BUS error conditions, but donnot * ask me for any guarantee that it will never fail. :-) * Use at your own decision and risk. */ static void sym_intr1 (hcb_p np) { u_char istat, istatc; u_char dstat; u_short sist; SYM_LOCK_ASSERT(MA_OWNED); /* * interrupt on the fly ? * * A `dummy read' is needed to ensure that the * clear of the INTF flag reaches the device * before the scanning of the DONE queue. */ istat = INB (nc_istat); if (istat & INTF) { OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem); istat = INB (nc_istat); /* DUMMY READ */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); (void)sym_wakeup_done (np); } if (!(istat & (SIP|DIP))) return; #if 0 /* We should never get this one */ if (istat & CABRT) OUTB (nc_istat, CABRT); #endif /* * PAR and MA interrupts may occur at the same time, * and we need to know of both in order to handle * this situation properly. We try to unstack SCSI * interrupts for that reason. BTW, I dislike a LOT * such a loop inside the interrupt routine. * Even if DMA interrupt stacking is very unlikely to * happen, we also try unstacking these ones, since * this has no performance impact. */ sist = 0; dstat = 0; istatc = istat; do { if (istatc & SIP) sist |= INW (nc_sist); if (istatc & DIP) dstat |= INB (nc_dstat); istatc = INB (nc_istat); istat |= istatc; } while (istatc & (SIP|DIP)); if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", (int)INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); /* * On paper, a memory barrier may be needed here. * And since we are paranoid ... :) */ MEMORY_BARRIER(); /* * First, interrupts we want to service cleanly. * * Phase mismatch (MA) is the most frequent interrupt * for chip earlier than the 896 and so we have to service * it as quickly as possible. * A SCSI parity error (PAR) may be combined with a phase * mismatch condition (MA). * Programmed interrupts (SIR) are used to call the C code * from SCRIPTS. * The single step interrupt (SSI) is not used in this * driver. */ if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & PAR) sym_int_par (np, sist); else if (sist & MA) sym_int_ma (np); else if (dstat & SIR) sym_int_sir (np); else if (dstat & SSI) OUTONB_STD (); else goto unknown_int; return; } /* * Now, interrupts that donnot happen in normal * situations and that we may need to recover from. * * On SCSI RESET (RST), we reset everything. * On SCSI BUS MODE CHANGE (SBMC), we complete all * active CCBs with RESET status, prepare all devices * for negotiating again and restart the SCRIPTS. * On STO and UDC, we complete the CCB with the corres- * ponding status and restart the SCRIPTS. 
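 *
 *  (Note: SCSI RESET is checked first below, before the fifos are
 *  cleared; SBMC, STO and UDC are only considered once both the DMA
 *  and SCSI fifos have been flushed.)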
*/ if (sist & RST) { xpt_print_path(np->path); printf("SCSI BUS reset detected.\n"); sym_init (np, 1); return; } OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ if (!(sist & (GEN|HTH|SGE)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & SBMC) sym_int_sbmc (np); else if (sist & STO) sym_int_sto (np); else if (sist & UDC) sym_int_udc (np); else goto unknown_int; return; } /* * Now, interrupts we are not able to recover cleanly. * * Log message for hard errors. * Reset everything. */ sym_log_hard_error(np, sist, dstat); if ((sist & (GEN|HTH|SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { sym_start_reset(np); return; } unknown_int: /* * We just miss the cause of the interrupt. :( * Print a message. The timeout will do the real work. */ printf( "%s: unknown interrupt(s) ignored, " "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", sym_name(np), istat, dstat, sist); } static void sym_intr(void *arg) { hcb_p np = arg; SYM_LOCK(); if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); sym_intr1((hcb_p) arg); if (DEBUG_FLAGS & DEBUG_TINY) printf ("]"); SYM_UNLOCK(); } static void sym_poll(struct cam_sim *sim) { sym_intr1(cam_sim_softc(sim)); } /* * generic recovery from scsi interrupt * * The doc says that when the chip gets an SCSI interrupt, * it tries to stop in an orderly fashion, by completing * an instruction fetch that had started or by flushing * the DMA fifo for a write to memory that was executing. * Such a fashion is not enough to know if the instruction * that was just before the current DSP value has been * executed or not. * * There are some small SCRIPTS sections that deal with * the start queue and the done queue that may break any * assomption from the C code if we are interrupted * inside, so we reset if this happens. Btw, since these * SCRIPTS sections are executed while the SCRIPTS hasn't * started SCSI operations, it is very unlikely to happen. * * All the driver data structures are supposed to be * allocated from the same 4 GB memory window, so there * is a 1 to 1 relationship between DSA and driver data * structures. Since we are careful :) to invalidate the * DSA when we complete a command or when the SCRIPTS * pushes a DSA into a queue, we can trust it when it * points to a CCB. */ static void sym_recover_scsi_int (hcb_p np, u_char hsts) { u32 dsp = INL (nc_dsp); u32 dsa = INL (nc_dsa); ccb_p cp = sym_ccb_from_dsa(np, dsa); /* * If we haven't been interrupted inside the SCRIPTS * critical paths, we can safely restart the SCRIPTS * and trust the DSA value if it matches a CCB. */ if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && dsp < SCRIPTA_BA (np, getjob_end) + 1)) && (!(dsp > SCRIPTA_BA (np, ungetjob) && dsp < SCRIPTA_BA (np, reselect) + 1)) && (!(dsp > SCRIPTB_BA (np, sel_for_abort) && dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && (!(dsp > SCRIPTA_BA (np, done) && dsp < SCRIPTA_BA (np, done_end) + 1))) { OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* * If we have a CCB, let the SCRIPTS call us back for * the handling of the error with SCRATCHA filled with * STARTPOS. This way, we will be able to freeze the * device queue and requeue awaiting IOs. */ if (cp) { cp->host_status = hsts; OUTL_DSP (SCRIPTA_BA (np, complete_error)); } /* * Otherwise just restart the SCRIPTS. 
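 *  The DSA is first loaded with an invalid value, so that a stale
 *  DSA cannot later be mistaken for a valid CCB pointer (see the
 *  discussion above sym_intr1()).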
*/ else { OUTL (nc_dsa, 0xffffff); OUTL_DSP (SCRIPTA_BA (np, start)); } } else goto reset_all; return; reset_all: sym_start_reset(np); } /* * chip exception handler for selection timeout */ static void sym_int_sto (hcb_p np) { u32 dsp = INL (nc_dsp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) sym_recover_scsi_int(np, HS_SEL_TIMEOUT); else sym_start_reset(np); } /* * chip exception handler for unexpected disconnect */ static void sym_int_udc (hcb_p np) { printf ("%s: unexpected disconnect\n", sym_name(np)); sym_recover_scsi_int(np, HS_UNEXPECTED); } /* * chip exception handler for SCSI bus mode change * * spi2-r12 11.2.3 says a transceiver mode change must * generate a reset event and a device that detects a reset * event shall initiate a hard reset. It says also that a * device that detects a mode change shall set data transfer * mode to eight bit asynchronous, etc... * So, just reinitializing all except chip should be enough. */ static void sym_int_sbmc (hcb_p np) { u_char scsi_mode = INB (nc_stest4) & SMODE; /* * Notify user. */ xpt_print_path(np->path); printf("SCSI BUS mode change from %s to %s.\n", sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); /* * Should suspend command processing for a few seconds and * reinitialize all except the chip. */ sym_init (np, 2); } /* * chip exception handler for SCSI parity error. * * When the chip detects a SCSI parity error and is * currently executing a (CH)MOV instruction, it does * not interrupt immediately, but tries to finish the * transfer of the current scatter entry before * interrupting. The following situations may occur: * * - The complete scatter entry has been transferred * without the device having changed phase. * The chip will then interrupt with the DSP pointing * to the instruction that follows the MOV. * * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (hcb_p np, u_short sist) { u_char hsts = INB (HS_PRT); u32 dsp = INL (nc_dsp); u32 dbc = INL (nc_dbc); u32 dsa = INL (nc_dsa); u_char sbcl = INB (nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; ccb_p cp = sym_ccb_from_dsa(np, dsa); printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. */ if (!(INB (nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. 
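 *
 *  (Phase 1 is DATA IN and phase 5 its DT counterpart on ULTRA3
 *  chips, which is why both are tested below.)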
*/ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA (np, pm_handle)) OUTL_DSP (dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { OUTL (nc_temp, dsp); OUTL_DSP (SCRIPTA_BA (np, dispatch)); } } else OUTL_DSP (SCRIPTA_BA (np, clrack)); return; reset_all: sym_start_reset(np); } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (hcb_p np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; ccb_p cp; dsp = INL (nc_dsp); dbc = INL (nc_dbc); dsa = INL (nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. */ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW (nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(nc_dfifo); /* * Calculate remaining bytes in DMA fifo. * (CTEST5 = dfifo >> 16) */ if (dfifo & (DFS << 16)) delta = ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest) & 0x3ff; else delta = ((dfifo & 0xff) - rest) & 0x7f; } /* * The data in the dma fifo has not been transferred to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB (nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) { ss2 = INB (nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++; } /* * Clear fifos. */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ OUTB (nc_stest3, TE|CSF); /* scsi fifo */ } /* * log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7, (unsigned) rest, (unsigned) delta); /* * try to find the interrupted script command, * and the address at which to continue. */ vdsp = 0; nxtdsp = 0; if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); nxtdsp = dsp; } else if (dsp > np->scriptb_ba && dsp <= np->scriptb_ba + np->scriptb_sz) { vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); nxtdsp = dsp; } /* * log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); } if (!vdsp) { printf ("%s: interrupted SCRIPT address not found.\n", sym_name (np)); goto reset_all; } if (!cp) { printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", sym_name (np)); goto reset_all; } /* * get old startaddress and old length. 
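 *  vdsp[0] holds the opcode and byte count of the interrupted MOVE
 *  and vdsp[1] its data address; for a table indirect MOVE, the
 *  address is an offset into the CCB phys area instead.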
*/ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; } if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); } /* * check cmd against assumed interrupted script command. * If dt data phase, the MOVE instruction hasn't bit 4 of * the phase. */ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { PRINT_ADDR(cp); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24); goto reset_all; } /* * if old phase not dataphase, leave here. */ if (cmd & 2) { PRINT_ADDR(cp); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, INB(nc_sbcl)&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; } /* * Choose the correct PM save area. * * Look at the PM_SAVE SCRIPT if you want to understand * this stuff. The equivalent code is implemented in * SCRIPTS for the 895A, 896 and 1010 that are able to * handle PM from the SCRIPTS processor. */ hflags0 = INB (HF_PRT); hflags = hflags0; if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { if (hflags & HF_IN_PM0) nxtdsp = scr_to_cpu(cp->phys.pm0.ret); else if (hflags & HF_IN_PM1) nxtdsp = scr_to_cpu(cp->phys.pm1.ret); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; } if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; newcmd = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; newcmd = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); if (hflags != hflags0) OUTB (HF_PRT, hflags); /* * fillin the phase mismatch context */ pm->sg.addr = cpu_to_scr(oadr + olen - rest); pm->sg.size = cpu_to_scr(rest); pm->ret = cpu_to_scr(nxtdsp); /* * If we have a SWIDE, * - prepare the address to write the SWIDE from SCRIPTS, * - compute the SCRIPTS address to restart from, * - move current data pointer context by one byte. */ nxtdsp = SCRIPTA_BA (np, dispatch); if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && (INB (nc_scntl2) & WSR)) { u32 tmp; /* * Set up the table indirect for the MOVE * of the residual byte and adjust the data * pointer context. */ tmp = scr_to_cpu(pm->sg.addr); cp->phys.wresid.addr = cpu_to_scr(tmp); pm->sg.addr = cpu_to_scr(tmp + 1); tmp = scr_to_cpu(pm->sg.size); cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); pm->sg.size = cpu_to_scr(tmp - 1); /* * If only the residual byte is to be moved, * no PM context is needed. */ if ((tmp&0xffffff) == 1) newcmd = pm->ret; /* * Prepare the address of SCRIPTS that will * move the residual byte to memory. */ nxtdsp = SCRIPTB_BA (np, wsr_ma_helper); } if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp); printf ("PM %x %x %x / %x %x %x.\n", hflags0, hflags, newcmd, (unsigned)scr_to_cpu(pm->sg.addr), (unsigned)scr_to_cpu(pm->sg.size), (unsigned)scr_to_cpu(pm->ret)); } /* * Restart the SCRIPTS processor. */ OUTL (nc_temp, newcmd); OUTL_DSP (nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. 
* MSG OUT --> COMMAND Bogus target that discards extended * negotiation messages. * * The code below does not care of the new phase and so * trusts the target. Why to annoy it ? * If the interrupted phase is COMMAND phase, we restart at * dispatcher. * If a target does not get all the messages after selection, * the code assumes blindly that the target discards extended * messages and clears the negotiation status. * If the target does not want all our response to negotiation, * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. * Are these assumptions reasonnable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #endif case 6: /* MSG OUT phase */ /* * If the device may want to use untagged when we want * tagged, we prepare an IDENTIFY without disc. granted, * since we will not be able to handle reselect. * Otherwise, we just don't care. */ if (dsp == SCRIPTA_BA (np, send_ident)) { if (cp->tag != NO_TAG && olen - rest <= 3) { cp->host_status = HS_BUSY; np->msgout[0] = M_IDENTIFY | cp->lun; nxtdsp = SCRIPTB_BA (np, ident_break_atn); } else nxtdsp = SCRIPTB_BA (np, ident_break); } else if (dsp == SCRIPTB_BA (np, send_wdtr) || dsp == SCRIPTB_BA (np, send_sdtr) || dsp == SCRIPTB_BA (np, send_ppr)) { nxtdsp = SCRIPTB_BA (np, nego_bad_phase); } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = SCRIPTA_BA (np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP (nxtdsp); return; } reset_all: sym_start_reset(np); } /* * Dequeue from the START queue all CCBs that match * a given target/lun/task condition (-1 means all), * and move them from the BUSY queue to the COMP queue * with CAM_REQUEUE_REQ status condition. * This function is used during error handling/recovery. * It is called with SCRIPTS not running. */ static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task) { int j; ccb_p cp; /* * Make sure the starting index is within range. */ assert((i >= 0) && (i < 2*MAX_QUEUE)); /* * Walk until end of START queue and dequeue every job * that matches the target/lun/task condition. */ j = i; while (i != np->squeueput) { cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); assert(cp); #ifdef SYM_CONF_IARB_SUPPORT /* Forget hints for IARB, they may be no longer relevant */ cp->host_flags &= ~HF_HINT_IARB; #endif if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ); sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } else { if (i != j) np->squeue[j] = np->squeue[i]; if ((j += 2) >= MAX_QUEUE*2) j = 0; } if ((i += 2) >= MAX_QUEUE*2) i = 0; } if (i != j) /* Copy back the idle task if needed */ np->squeue[j] = np->squeue[i]; np->squeueput = j; /* Update our current start queue pointer */ return (i - j) / 2; } /* * Complete all CCBs queued to the COMP queue. * * These CCBs are assumed: * - Not to be referenced either by devices or * SCRIPTS-related queues and datas. * - To have to be completed with an error condition * or requeued. * * The device queue freeze count is incremented * for each CCB that does not prevent this. * This function is called when all CCBs involved * in error handling/recovery have been reaped. 
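 *  CCBs found in HS_WAIT state are quietly put back on the BUSY
 *  queue instead, since they are only waiting for resources.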
*/ static void sym_flush_comp_queue(hcb_p np, int cam_status) { SYM_QUEHEAD *qp; ccb_p cp; while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* Leave quiet CCBs waiting for resources */ if (cp->host_status == HS_WAIT) continue; ccb = cp->cam_ccb; if (cam_status) sym_set_cam_status(ccb, cam_status); sym_freeze_cam_ccb(ccb); sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); } } /* * chip handler for bad SCSI status condition * * In case of bad SCSI status, we unqueue all the tasks * currently queued to the controller but not yet started * and then restart the SCRIPTS processor immediately. * * QUEUE FULL and BUSY conditions are handled the same way. * Basically all the not yet started tasks are requeued in * device queue and the queue is frozen until a completion. * * For CHECK CONDITION and COMMAND TERMINATED status, we use * the CCB of the failed command to prepare a REQUEST SENSE * SCSI command and queue it to the controller queue. * * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_sir_bad_scsi_status(hcb_p np, ccb_p cp) { tcb_p tp = &np->target[cp->target]; u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int nego; int i; SYM_LOCK_ASSERT(MA_OWNED); /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = NULL; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { PRINT_ADDR(cp); printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP (SCRIPTA_BA (np, start)); /* * Save some info of the actual IO. * Compute the data residual. */ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ /* * identify message */ cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; nego = 0; if (tp->tinfo.current.options & PPR_OPT_MASK) nego = NS_PPR; else if (tp->tinfo.current.width != BUS_8_BIT) nego = NS_WIDE; else if (tp->tinfo.current.offset != 0) nego = NS_SYNC; if (nego) msglen += sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. 
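 *  (scsi_smsg2 now carries the IDENTIFY and, possibly, the fresh
 *  negotiation message prepared above for the REQUEST SENSE nexus.)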
*/ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = cp->lun << 5; if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7) cp->sensecmd[1] = 0; cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA (np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.goalp = cpu_to_scr(startp + 16); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ static int sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; ccb_p cp; /* * Move the entire BUSY queue to our temporary queue. */ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. */ while ((qp = sym_remque_head(&qtmp)) != NULL) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); ccb = cp->cam_ccb; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) sym_set_cam_status(ccb, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... 
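 *
 *  (The SEM bit is remembered in np->istat_sem and OR-ed into the
 *  ISTAT writes done from the interrupt code, which is what keeps
 *  this handshake alive until all aborts have been processed.)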
* * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. * - If a tagged task is to be aborted, we send the * IDENTIFY(lun) + task attributes + M_ABORT_TAG. * * Once our 'kiss of death' :) message has been accepted * by the target, the SCRIPTS interrupts again * (SIR_ABORT_SENT). On this interrupt, we complete * all the CCBs that should have been aborted by the * target according to our message. */ static void sym_sir_task_recovery(hcb_p np, int num) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; int target=-1, lun=-1, task; int i, k; switch(num) { /* * The SCRIPTS processor stopped before starting * the next command in order to allow us to perform * some task recovery. */ case SIR_SCRIPT_STOPPED: /* * Do we have any target to reset or unit to clear ? */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tp = &np->target[i]; if (tp->to_reset || (tp->lun0p && tp->lun0p->to_clear)) { target = i; break; } if (!tp->lunmp) continue; for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { target = i; break; } } if (target != -1) break; } /* * If not, walk the busy queue for any * disconnected CCB to be aborted. */ if (target == -1) { FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->to_abort) { target = cp->target; break; } } } /* * If some target is to be selected, * prepare and start the selection. */ if (target != -1) { tp = &np->target[target]; np->abrt_sel.sel_id = target; np->abrt_sel.sel_scntl3 = tp->head.wval; np->abrt_sel.sel_sxfer = tp->head.sval; OUTL(nc_dsa, np->hcb_ba); OUTL_DSP (SCRIPTB_BA (np, sel_for_abort)); return; } /* * Now look for a CCB to abort that haven't started yet. * Btw, the SCRIPTS processor is still stopped, so * we are not in race. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_BUSY && cp->host_status != HS_NEGOTIATE) continue; if (!cp->to_abort) continue; #ifdef SYM_CONF_IARB_SUPPORT /* * If we are using IMMEDIATE ARBITRATION, we donnot * want to cancel the last queued CCB, since the * SCRIPTS may have anticipated the selection. */ if (cp == np->last_cp) { cp->to_abort = 0; continue; } #endif i = 1; /* Means we have found some */ break; } if (!i) { /* * We are done, so we donnot need * to synchronize with the SCRIPTS anylonger. * Remove the SEM flag from the ISTAT. */ np->istat_sem = 0; OUTB (nc_istat, SIGP); break; } /* * Compute index of next position in the start * queue the SCRIPTS intends to start and dequeue * all CCBs for that device that haven't been started. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Make sure at least our IO to abort has been dequeued. */ assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ); /* * Keep track in cam status of the reason of the abort. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); else sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); /* * Complete with error everything that we have dequeued. 
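 *  (Their CAM status has been set just above, so sym_flush_comp_queue()
 *  is called with a cam_status of 0 and leaves it untouched.)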
*/ sym_flush_comp_queue(np, 0); break; /* * The SCRIPTS processor has selected a target * we may have some manual recovery to perform for. */ case SIR_TARGET_SELECTED: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); /* * If the target is to be reset, prepare a * M_RESET message and clear the to_reset flag * since we donnot expect this operation to fail. */ if (tp->to_reset) { np->abrt_msg[0] = M_RESET; np->abrt_tbl.size = 1; tp->to_reset = 0; break; } /* * Otherwise, look for some logical unit to be cleared. */ if (tp->lun0p && tp->lun0p->to_clear) lun = 0; else if (tp->lunmp) { for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { lun = k; break; } } } /* * If a logical unit is to be cleared, prepare * an IDENTIFY(lun) + ABORT MESSAGE. */ if (lun != -1) { lcb_p lp = sym_lp(tp, lun); lp->to_clear = 0; /* We donnot expect to fail here */ np->abrt_msg[0] = M_IDENTIFY | lun; np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; break; } /* * Otherwise, look for some disconnected job to * abort for this target. */ i = 0; cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->target != target) continue; if (!cp->to_abort) continue; i = 1; /* Means we have some */ break; } /* * If we have none, probably since the device has * completed the command before we won abitration, * send a M_ABORT message without IDENTIFY. * According to the specs, the device must just * disconnect the BUS and not abort any task. */ if (!i) { np->abrt_msg[0] = M_ABORT; np->abrt_tbl.size = 1; break; } /* * We have some task to abort. * Set the IDENTIFY(lun) */ np->abrt_msg[0] = M_IDENTIFY | cp->lun; /* * If we want to abort an untagged command, we * will send an IDENTIFY + M_ABORT. * Otherwise (tagged command), we will send * an IDENTIFY + task attributes + ABORT TAG. */ if (cp->tag == NO_TAG) { np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; } else { np->abrt_msg[1] = cp->scsi_smsg[1]; np->abrt_msg[2] = cp->scsi_smsg[2]; np->abrt_msg[3] = M_ABORT_TAG; np->abrt_tbl.size = 4; } /* * Keep track of software timeout condition, since the * peripheral driver may not count retries on abort * conditions not due to timeout. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); cp->to_abort = 0; /* We donnot expect to fail here */ break; /* * The target has accepted our message and switched * to BUS FREE phase as we expected. */ case SIR_ABORT_SENT: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; /* ** If we didn't abort anything, leave here. */ if (np->abrt_msg[0] == M_ABORT) break; /* * If we sent a M_RESET, then a hardware reset has * been performed by the target. * - Reset everything to async 8 bit * - Tell ourself to negotiate next time :-) * - Prepare to clear all disconnected CCBs for * this target from our task list (lun=task=-1) */ lun = -1; task = -1; if (np->abrt_msg[0] == M_RESET) { tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = BUS_8_BIT; tp->tinfo.current.options = 0; } /* * Otherwise, check for the LUN and TASK(s) * concerned by the cancellation. 
* If it is not ABORT_TAG then it is CLEAR_QUEUE * or an ABORT message :-) */ else { lun = np->abrt_msg[0] & 0x3f; if (np->abrt_msg[1] == M_ABORT_TAG) task = np->abrt_msg[2]; } /* * Complete all the CCBs the device should have * aborted due to our 'kiss of death' message. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; (void) sym_dequeue_from_squeue(np, i, target, lun, -1); (void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task); sym_flush_comp_queue(np, 0); /* * If we sent a BDR, make uper layer aware of that. */ if (np->abrt_msg[0] == M_RESET) xpt_async(AC_SENT_BDR, np->path, NULL); break; } /* * Print to the log the message we intend to send. */ if (num == SIR_TARGET_SELECTED) { PRINT_TARGET(np, target); sym_printl_hex("control msgout:", np->abrt_msg, np->abrt_tbl.size); np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); } /* * Let the SCRIPTS processor continue. */ OUTONB_STD (); } /* * Gerard's alchemy:) that deals with with the data * pointer for both MDP and the residual calculation. * * I didn't want to bloat the code by more than 200 * lignes for the handling of both MDP and the residual. * This has been achieved by using a data pointer * representation consisting in an index in the data * array (dp_sg) and a negative offset (dp_ofs) that * have the following meaning: * * - dp_sg = SYM_CONF_MAX_SG * we are at the end of the data script. * - dp_sg < SYM_CONF_MAX_SG * dp_sg points to the next entry of the scatter array * we want to transfer. * - dp_ofs < 0 * dp_ofs represents the residual of bytes of the * previous entry scatter entry we will send first. * - dp_ofs = 0 * no residual to send first. * * The function sym_evaluate_dp() accepts an arbitray * offset (basically from the MDP message) and returns * the corresponding values of dp_sg and dp_ofs. */ static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs) { u32 dp_scr; int dp_ofs, dp_sg, dp_sgmin; int tmp; struct sym_pmc *pm; /* * Compute the resulted data pointer in term of a script * address within some DATA script and a signed byte offset. */ dp_scr = scr; dp_ofs = *ofs; if (dp_scr == SCRIPTA_BA (np, pm0_data)) pm = &cp->phys.pm0; else if (dp_scr == SCRIPTA_BA (np, pm1_data)) pm = &cp->phys.pm1; else pm = NULL; if (pm) { dp_scr = scr_to_cpu(pm->ret); dp_ofs -= scr_to_cpu(pm->sg.size); } /* * If we are auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { *ofs = dp_ofs; return 0; } /* * Deduce the index of the sg entry. * Keep track of the index of the first valid entry. * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the * end of the data. */ tmp = scr_to_cpu(cp->phys.head.goalp); dp_sg = SYM_CONF_MAX_SG; if (dp_scr != tmp) dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); dp_sgmin = SYM_CONF_MAX_SG - cp->segments; /* * Move to the sg entry the data pointer belongs to. * * If we are inside the data area, we expect result to be: * * Either, * dp_ofs = 0 and dp_sg is the index of the sg entry * the data pointer belongs to (or the end of the data) * Or, * dp_ofs < 0 and dp_sg is the index of the sg entry * the data pointer belongs to + 1. */ if (dp_ofs < 0) { int n; while (dp_sg > dp_sgmin) { --dp_sg; tmp = scr_to_cpu(cp->phys.data[dp_sg].size); n = dp_ofs + (tmp & 0xffffff); if (n > 0) { ++dp_sg; break; } dp_ofs = n; } } else if (dp_ofs > 0) { while (dp_sg < SYM_CONF_MAX_SG) { tmp = scr_to_cpu(cp->phys.data[dp_sg].size); dp_ofs -= (tmp & 0xffffff); ++dp_sg; if (dp_ofs <= 0) break; } } /* * Make sure the data pointer is inside the data area. * If not, return some error. 
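 *  Valid positions run from dp_sgmin up to SYM_CONF_MAX_SG; a
 *  negative dp_ofs is rejected at the lower bound and a positive
 *  one at the upper bound.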
*/ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) goto out_err; else if (dp_sg > SYM_CONF_MAX_SG || (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) goto out_err; /* * Save the extreme pointer if needed. */ if (dp_sg > cp->ext_sg || (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { cp->ext_sg = dp_sg; cp->ext_ofs = dp_ofs; } /* * Return data. */ *ofs = dp_ofs; return dp_sg; out_err: return -1; } /* * chip handler for MODIFY DATA POINTER MESSAGE * * We also call this function on IGNORE WIDE RESIDUE * messages that do not match a SWIDE full condition. * Btw, we assume in that situation that such a message * is equivalent to a MODIFY DATA POINTER (offset=-1). */ static void sym_modify_dp(hcb_p np, ccb_p cp, int ofs) { int dp_ofs = ofs; u32 dp_scr = INL (nc_temp); u32 dp_ret; u32 tmp; u_char hflags; int dp_sg; struct sym_pmc *pm; /* * Not supported for auto-sense. */ if (cp->host_flags & HF_SENSE) goto out_reject; /* * Apply our alchemy:) (see comments in sym_evaluate_dp()), * to the resulted data pointer. */ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); if (dp_sg < 0) goto out_reject; /* * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ dp_ret = cpu_to_scr(cp->phys.head.goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* * If offset / scatter entry is zero we donnot need * a context for the new current data pointer. */ if (dp_ofs == 0) { dp_scr = dp_ret; goto out_ok; } /* * Get a context for the new current data pointer. */ hflags = INB (HF_PRT); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; dp_scr = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; dp_scr = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_DP_SAVED); OUTB (HF_PRT, hflags); /* * Set up the new current data pointer. * ofs < 0 there, and for the next data phase, we * want to transfer part of the data of the sg entry * corresponding to index dp_sg-1 prior to returning * to the main data script. */ pm->ret = cpu_to_scr(dp_ret); tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; pm->sg.addr = cpu_to_scr(tmp); pm->sg.size = cpu_to_scr(-dp_ofs); out_ok: OUTL (nc_temp, dp_scr); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip calculation of the data residual. * * As I used to say, the requirement of data residual * in SCSI is broken, useless and cannot be achieved * without huge complexity. * But most OSes and even the official CAM require it. * When stupidity happens to be so widely spread inside * a community, it gets hard to convince. * * Anyway, I don't care, since I am not going to use * any software that considers this data residual as * a relevant information. :) */ static int sym_compute_residual(hcb_p np, ccb_p cp) { int dp_sg, dp_sgmin, resid = 0; int dp_ofs = 0; /* * Check for some data lost or just thrown away. * We are not required to be quite accurate in this * situation. Btw, if we are odd for output and the * device claims some more data, it may well happen * than our residual be zero. :-) */ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { if (cp->xerr_status & XE_EXTRA_DATA) resid -= cp->extra_bytes; if (cp->xerr_status & XE_SODL_UNRUN) ++resid; if (cp->xerr_status & XE_SWIDE_OVRUN) --resid; } /* * If all data has been transferred, * there is no residual. 
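 *  (lastp == goalp; only the overrun/underrun adjustments computed
 *  above may then remain in 'resid'.)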
*/ if (cp->phys.head.lastp == cp->phys.head.goalp) return resid; /* * If no data transfer occurs, or if the data * pointer is weird, return full residual. */ if (cp->startp == cp->phys.head.lastp || sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), &dp_ofs) < 0) { return cp->data_len; } /* * If we were auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { return -dp_ofs; } /* * We are now full comfortable in the computation * of the data residual (2's complement). */ dp_sgmin = SYM_CONF_MAX_SG - cp->segments; resid = -cp->ext_ofs; for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); resid += (tmp & 0xffffff); } /* * Hopefully, the result is not too wrong. */ return resid; } /* * Print out the content of a SCSI message. */ static int sym_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==M_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); } return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); } return (1); } static void sym_print_msg (ccb_p cp, char *label, u_char *msg) { PRINT_ADDR(cp); if (label) printf ("%s: ", label); (void) sym_show_msg (msg); printf (".\n"); } /* * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. * * When we try to negotiate, we append the negotiation message * to the identify and (maybe) simple tag message. * The host status field is set to HS_NEGOTIATE to mark this * situation. * * If the target doesn't answer this message immediately * (as required by the standard), the SIR_NEGO_FAILED interrupt * will be raised eventually. * The handler removes the HS_NEGOTIATE status, and sets the * negotiated value to the default (async / nowide). * * If we receive a matching answer immediately, we check it * for validity, and set the values. * * If we receive a Reject message immediately, we assume the * negotiation has failed, and fall back to standard values. * * If we receive a negotiation message while not in HS_NEGOTIATE * state, it's a target initiated negotiation. We prepare a * (hopefully) valid answer, set our parameters, and send back * this answer to the target. * * If the target doesn't fetch the answer (no message out phase), * we assume the negotiation has failed, and fall back to default * settings (SIR_NEGO_PROTO interrupt). * * When we set the values, we adjust them in all ccbs belonging * to this target, in the controller's register, and in the "phys" * field of the controller's struct sym_hcb. */ /* * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. */ static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, div; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgin", np->msgin); } /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_SYNC) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; /* * check values against our limits. 
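 *  (If this is the answer to our own request, any value we still
 *  have to trim below means the target replied with something we
 *  cannot do, and the negotiation is rejected.)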
*/ if (ofs) { if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", ofs, per, div, fak, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setsync (np, cp, ofs, per, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. Set value and * prepare an answer message */ sym_setsync (np, cp, ofs, per, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; reject_it: sym_setsync (np, cp, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. */ static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, dt, div, wide; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgin", np->msgin); } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[5]; wide = np->msgin[6]; dt = np->msgin[7] & PPR_OPT_DT; /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_PPR) goto reject_it; req = 0; } /* * check values against our limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (!wide || !(np->features & FE_ULTRA3)) dt &= ~PPR_OPT_DT; if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */ dt &= ~PPR_OPT_DT; if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; if (ofs) { if (dt) { if (ofs > np->maxoffs_dt) {chg = 1; ofs = np->maxoffs_dt;} } else if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (dt) { if (per < np->minsync_dt) {chg = 1; per = np->minsync_dt;} } else if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("ppr: " "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n", dt, ofs, per, wide, div, fak, chg); } /* * It was an answer. */ if (req == 0) { if (chg) /* Answer wasn't acceptable */ goto reject_it; sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. 
Set value and * prepare an answer message */ sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 6; np->msgout[2] = M_X_PPR_REQ; np->msgout[3] = per; np->msgout[4] = 0; np->msgout[5] = ofs; np->msgout[6] = wide; np->msgout[7] = dt; cp->nego_status = NS_PPR; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, ppr_resp)); return; reject_it: sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); /* * If it was a device response that should result in * ST, we may want to try a legacy negotiation later. */ if (!req && !dt) { tp->tinfo.goal.options = 0; tp->tinfo.goal.width = wide; tp->tinfo.goal.period = per; tp->tinfo.goal.offset = ofs; } } /* * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. */ static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, wide; int req = 1; /* * Wide request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgin", np->msgin); } /* * Is it a request from the device? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_WIDE) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; wide = np->msgin[3]; /* * check values against driver limits. */ if (wide > np->maxwide) {chg = 1; wide = np->maxwide;} if (req) { if (wide > tp->tinfo.user.width) {chg = 1; wide = tp->tinfo.user.width;} } if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("wdtr: wide=%d chg=%d.\n", wide, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setwide (np, cp, wide); /* * Negotiate for SYNC immediately after WIDE response. * This allows to negotiate for both WIDE and SYNC on * a single SCSI command (Suggested by Justin Gibbs). */ if (tp->tinfo.goal.offset) { np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = tp->tinfo.goal.period; np->msgout[4] = tp->tinfo.goal.offset; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } cp->nego_status = NS_SYNC; OUTB (HS_PRT, HS_NEGOTIATE); OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; } OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request, set value and * prepare an answer message */ sym_setwide (np, cp, wide); np->msgout[0] = M_EXTENDED; np->msgout[1] = 2; np->msgout[2] = M_X_WIDE_REQ; np->msgout[3] = wide; np->msgin [0] = M_NOOP; cp->nego_status = NS_WIDE; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "wide msgout", np->msgout); } OUTL_DSP (SCRIPTB_BA (np, wdtr_resp)); return; reject_it: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * Reset SYNC or WIDE to default settings. * * Called when a negotiation does not succeed either * on rejection or on protocol error. * * If it was a PPR that made problems, we may want to * try a legacy negotiation later. */ static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp) { /* * any error in negotiation: * fall back to default mode. 
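 *  (For a failed PPR, only the goal values are clamped below, so
 *  that a legacy WDTR/SDTR negotiation may still be tried later.)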
 */
    switch (cp->nego_status) {
    case NS_PPR:
#if 0
        sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
        tp->tinfo.goal.options = 0;
        if (tp->tinfo.goal.period < np->minsync)
            tp->tinfo.goal.period = np->minsync;
        if (tp->tinfo.goal.offset > np->maxoffs)
            tp->tinfo.goal.offset = np->maxoffs;
#endif
        break;
    case NS_SYNC:
        sym_setsync (np, cp, 0, 0, 0, 0);
        break;
    case NS_WIDE:
        sym_setwide (np, cp, 0);
        break;
    }
    np->msgin [0] = M_NOOP;
    np->msgout[0] = M_NOOP;
    cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * a WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
    sym_nego_default(np, tp, cp);
    OUTB (HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 */
static void sym_int_sir (hcb_p np)
{
    u_char  num    = INB (nc_dsps);
    u32     dsa    = INL (nc_dsa);
    ccb_p   cp     = sym_ccb_from_dsa(np, dsa);
    u_char  target = INB (nc_sdid) & 0x0f;
    tcb_p   tp     = &np->target[target];
    int     tmp;

    SYM_LOCK_ASSERT(MA_OWNED);

    if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

    switch (num) {
    /*
     * Command has been completed with error condition
     * or has been auto-sensed.
     */
    case SIR_COMPLETE_ERROR:
        sym_complete_error(np, cp);
        return;
    /*
     * The C code is currently trying to recover from something.
     * Typically, the user wants to abort some command.
     */
    case SIR_SCRIPT_STOPPED:
    case SIR_TARGET_SELECTED:
    case SIR_ABORT_SENT:
        sym_sir_task_recovery(np, num);
        return;
    /*
     * The device didn't go to MSG OUT phase after having
     * been selected with ATN.  We do not want to handle that.
     */
    case SIR_SEL_ATN_NO_MSG_OUT:
        printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
            sym_name (np), target);
        goto out_stuck;
    /*
     * The device didn't switch to MSG IN phase after
     * having reselected the initiator.
     */
    case SIR_RESEL_NO_MSG_IN:
        printf ("%s:%d: No MSG IN phase after reselection.\n",
            sym_name (np), target);
        goto out_stuck;
    /*
     * After reselection, the device sent a message that wasn't
     * an IDENTIFY.
     */
    case SIR_RESEL_NO_IDENTIFY:
        printf ("%s:%d: No IDENTIFY after reselection.\n",
            sym_name (np), target);
        goto out_stuck;
    /*
     * The device reselected a LUN we do not know about.
     */
    case SIR_RESEL_BAD_LUN:
        np->msgout[0] = M_RESET;
        goto out;
    /*
     * The device reselected for an untagged nexus and we
     * haven't any.
     */
    case SIR_RESEL_BAD_I_T_L:
        np->msgout[0] = M_ABORT;
        goto out;
    /*
     * The device reselected for a tagged nexus that we do not
     * have.
     */
    case SIR_RESEL_BAD_I_T_L_Q:
        np->msgout[0] = M_ABORT_TAG;
        goto out;
    /*
     * The SCRIPTS let us know that the device has grabbed
     * our message and will abort the job.
     */
    case SIR_RESEL_ABORTED:
        np->lastmsg = np->msgout[0];
        np->msgout[0] = M_NOOP;
        printf ("%s:%d: message %x sent on bad reselection.\n",
            sym_name (np), target, np->lastmsg);
        goto out;
    /*
     * The SCRIPTS let us know that a message has been
     * successfully sent to the device.
     */
    case SIR_MSG_OUT_DONE:
        np->lastmsg = np->msgout[0];
        np->msgout[0] = M_NOOP;
        /* Should we really care about that? */
        if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
            if (cp) {
                cp->xerr_status &= ~XE_PARITY_ERR;
                if (!cp->xerr_status)
                    OUTOFFB (HF_PRT, HF_EXT_ERR);
            }
        }
        goto out;
    /*
     * The device didn't send a GOOD SCSI status.
     * We may have some work to do before allowing
     * the SCRIPTS processor to continue.
     */
    case SIR_BAD_SCSI_STATUS:
        if (!cp)
            goto out;
        sym_sir_bad_scsi_status(np, cp);
        return;
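
    /*
     * (The single-byte answers chosen for the bad-reselection cases
     * above are summarized in this editorial sketch; it is not part
     * of the driver.)
     */
#if 0
    static const struct {
        u_char  sir;    /* programmed interrupt number */
        u_char  msg;    /* answer placed in np->msgout[0] */
    } bad_resel_msg[] = {
        { SIR_RESEL_BAD_LUN,     M_RESET },     /* unknown LUN */
        { SIR_RESEL_BAD_I_T_L,   M_ABORT },     /* no untagged nexus */
        { SIR_RESEL_BAD_I_T_L_Q, M_ABORT_TAG }, /* no such tagged task */
    };
#endif
    /*
     * We are asked by the SCRIPTS to prepare a
     * REJECT message.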
*/ case SIR_REJECT_TO_SEND: sym_print_msg(cp, "M_REJECT to send for ", np->msgin); np->msgout[0] = M_REJECT; goto out; /* * We have been ODD at the end of a DATA IN * transfer and the device didn't send a * IGNORE WIDE RESIDUE message. * It is a data overrun condition. */ case SIR_SWIDE_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SWIDE_OVRUN; } goto out; /* * We have been ODD at the end of a DATA OUT * transfer. * It is a data underrun condition. */ case SIR_SODL_UNDERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_SODL_UNRUN; } goto out; /* * The device wants us to transfer more data than * expected or in the wrong direction. * The number of extra bytes is in scratcha. * It is a data overrun condition. */ case SIR_DATA_OVERRUN: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_EXTRA_DATA; cp->extra_bytes += INL (nc_scratcha); } goto out; /* * The device switched to an illegal phase (4/5). */ case SIR_BAD_PHASE: if (cp) { OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_BAD_PHASE; } goto out; /* * We received a message. */ case SIR_MSG_RECEIVED: if (!cp) goto out_stuck; switch (np->msgin [0]) { /* * We received an extended message. * We handle MODIFY DATA POINTER, SDTR, WDTR * and reject all other extended messages. */ case M_EXTENDED: switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"modify DP",np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, cp, tmp); return; case M_X_SYNC_REQ: sym_sync_nego(np, tp, cp); return; case M_X_PPR_REQ: sym_ppr_nego(np, tp, cp); return; case M_X_WIDE_REQ: sym_wide_nego(np, tp, cp); return; default: goto out_reject; } break; /* * We received a 1/2 byte message not handled from SCRIPTS. * We are only expecting MESSAGE REJECT and IGNORE WIDE * RESIDUE messages that haven't been anticipated by * SCRIPTS on SWIDE full condition. Unanticipated IGNORE * WIDE RESIDUE messages are aliased as MODIFY DP (-1). */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) sym_print_msg(cp,"ign wide residue", np->msgin); sym_modify_dp(np, cp, -1); return; case M_REJECT: if (INB (HS_PRT) == HS_NEGOTIATE) sym_nego_rejected(np, tp, cp); else { PRINT_ADDR(cp); printf ("M_REJECT received (%x:%x).\n", scr_to_cpu(np->lastmsg), np->msgout[0]); } goto out_clrack; break; default: goto out_reject; } break; /* * We received an unknown message. * Ignore all MSG IN phases and reject it. */ case SIR_MSG_WEIRD: sym_print_msg(cp, "WEIRD message received", np->msgin); OUTL_DSP (SCRIPTB_BA (np, msg_weird)); return; /* * Negotiation failed. * Target does not send us the reply. * Remove the HS_NEGOTIATE status. */ case SIR_NEGO_FAILED: OUTB (HS_PRT, HS_BUSY); /* * Negotiation failed. * Target does not want answer message. 
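 * (An illustrative sketch of the extended-message layout handled
 * above follows.)
 */
#if 0
    /*
     * [Editorial sketch, not part of the driver.]  An extended message
     * is laid out as msgin[0] = 0x01 (M_EXTENDED), msgin[1] = length,
     * msgin[2] = code, then the arguments.  MODIFY DATA POINTER carries
     * a 32-bit big-endian signed displacement, assembled exactly as in
     * the M_X_MODIFY_DP case above:
     */
    static int
    ext_msg_modify_dp_arg(const u_char *msgin)
    {
        return ((msgin[3] << 24) + (msgin[4] << 16) +
                (msgin[5] <<  8) + (msgin[6]));
    }
#endif
/*
 * (the SIR_NEGO_PROTO case, described by the comment above)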
*/ case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; } out: OUTONB_STD (); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); return; out_clrack: OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_stuck: return; } /* * Acquire a control block */ static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order) { tcb_p tp = &np->target[tn]; lcb_p lp = sym_lp(tp, ln); u_short tag = NO_TAG; SYM_QUEHEAD *qp; ccb_p cp = (ccb_p) NULL; /* * Look for a free CCB */ if (sym_que_empty(&np->free_ccbq)) goto out; qp = sym_remque_head(&np->free_ccbq); if (!qp) goto out; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); /* * If the LCB is not yet available and the LUN * has been probed ok, try to allocate the LCB. */ if (!lp && sym_is_bit(tp->lun_map, ln)) { lp = sym_alloc_lcb(np, tn, ln); if (!lp) goto out_free; } /* * If the LCB is not available here, then the * logical unit is not yet discovered. For those * ones only accept 1 SCSI IO per logical unit, * since we cannot allow disconnections. */ if (!lp) { if (!sym_is_bit(tp->busy0_map, ln)) sym_set_bit(tp->busy0_map, ln); else goto out_free; } else { /* * If we have been asked for a tagged command. */ if (tag_order) { /* * Debugging purpose. */ assert(lp->busy_itl == 0); /* * Allocate resources for tags if not yet. */ if (!lp->cb_tags) { sym_alloc_lcb_tags(np, tn, ln); if (!lp->cb_tags) goto out_free; } /* * Get a tag for this SCSI IO and set up * the CCB bus address for reselection, * and count it for this LUN. * Toggle reselect path to tagged. */ if (lp->busy_itlq < SYM_CONF_MAX_TASK) { tag = lp->cb_tags[lp->ia_tag]; if (++lp->ia_tag == SYM_CONF_MAX_TASK) lp->ia_tag = 0; lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); ++lp->busy_itlq; lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_tag)); } else goto out_free; } /* * This command will not be tagged. * If we already have either a tagged or untagged * one, refuse to overlap this untagged one. */ else { /* * Debugging purpose. */ assert(lp->busy_itl == 0 && lp->busy_itlq == 0); /* * Count this nexus for this LUN. * Set up the CCB bus address for reselection. * Toggle reselect path to untagged. */ if (++lp->busy_itl == 1) { lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); lp->head.resel_sa = cpu_to_scr(SCRIPTA_BA (np, resel_no_tag)); } else goto out_free; } } /* * Put the CCB into the busy queue. */ sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* * Remember all informations needed to free this CCB. */ cp->to_abort = 0; cp->tag = tag; cp->target = tn; cp->lun = ln; if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, tn, ln); printf ("ccb @%p using tag %d.\n", cp, tag); } out: return cp; out_free: sym_insque_head(&cp->link_ccbq, &np->free_ccbq); return NULL; } /* * Release one control block */ static void sym_free_ccb(hcb_p np, ccb_p cp) { tcb_p tp = &np->target[cp->target]; lcb_p lp = sym_lp(tp, cp->lun); if (DEBUG_FLAGS & DEBUG_TAGS) { PRINT_LUN(np, cp->target, cp->lun); printf ("ccb @%p freeing tag %d.\n", cp, cp->tag); } /* * If LCB available, */ if (lp) { /* * If tagged, release the tag, set the relect path */ if (cp->tag != NO_TAG) { /* * Free the tag value. */ lp->cb_tags[lp->if_tag] = cp->tag; if (++lp->if_tag == SYM_CONF_MAX_TASK) lp->if_tag = 0; /* * Make the reselect path invalid, * and uncount this CCB. */ lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); --lp->busy_itlq; } else { /* Untagged */ /* * Make the reselect path invalid, * and uncount this CCB. 
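 * (The tag ring used by the tagged branch above is sketched in the
 * editorial block below.)
 */
#if 0
    /*
     * [Editorial sketch, not part of the driver.]  cb_tags[] works as
     * a circular buffer of free tag numbers: ia_tag is the allocation
     * index (used in sym_get_ccb above), if_tag the release index
     * (used in this function).  Both wrap at SYM_CONF_MAX_TASK, so
     * each operation is O(1):
     */
    tag = lp->cb_tags[lp->ia_tag];          /* allocate a tag */
    if (++lp->ia_tag == SYM_CONF_MAX_TASK)
        lp->ia_tag = 0;

    lp->cb_tags[lp->if_tag] = cp->tag;      /* release a tag */
    if (++lp->if_tag == SYM_CONF_MAX_TASK)
        lp->if_tag = 0;
#endif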
        lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
        --lp->busy_itl;
        }
        /*
         * If no JOB active, make the LUN reselect path invalid.
         */
        if (lp->busy_itlq == 0 && lp->busy_itl == 0)
            lp->head.resel_sa =
                cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
    }
    /*
     * Otherwise, we only accept 1 IO per LUN.
     * Clear the bit that keeps track of this IO.
     */
    else
        sym_clr_bit(tp->busy0_map, cp->lun);

    /*
     * We do not queue more than 1 ccb per target
     * with negotiation at any time.  If this ccb was
     * used for negotiation, clear this info in the tcb.
     */
    if (cp == tp->nego_cp)
        tp->nego_cp = NULL;

#ifdef SYM_CONF_IARB_SUPPORT
    /*
     * If we just completed the last queued CCB,
     * clear this info that is no longer relevant.
     */
    if (cp == np->last_cp)
        np->last_cp = NULL;
#endif

    /*
     * Unmap user data from DMA map if needed.
     */
    if (cp->dmamapped) {
        bus_dmamap_unload(np->data_dmat, cp->dmamap);
        cp->dmamapped = 0;
    }

    /*
     * Make this CCB available.
     */
    cp->cam_ccb = NULL;
    cp->host_status = HS_IDLE;
    sym_remque(&cp->link_ccbq);
    sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
}

/*
 * Allocate a CCB from memory and initialize its fixed part.
 */
static ccb_p sym_alloc_ccb(hcb_p np)
{
    ccb_p cp = NULL;
    int hcode;

    SYM_LOCK_ASSERT(MA_NOTOWNED);

    /*
     * Prevent from allocating more CCBs than we can
     * queue to the controller.
     */
    if (np->actccbs >= SYM_CONF_MAX_START)
        return NULL;

    /*
     * Allocate memory for this CCB.
     */
    cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
    if (!cp)
        return NULL;

    /*
     * Allocate a bounce buffer for sense data.
     */
    cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
    if (!cp->sns_bbuf)
        goto out_free;

    /*
     * Allocate a map for the DMA of user data.
     */
    if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
        goto out_free;

    /*
     * Count it.
     */
    np->actccbs++;

    /*
     * Initialize the callout.
     */
    callout_init(&cp->ch, 1);

    /*
     * Compute the bus address of this ccb.
     */
    cp->ccb_ba = vtobus(cp);

    /*
     * Insert this ccb into the hashed list.
     */
    hcode = CCB_HASH_CODE(cp->ccb_ba);
    cp->link_ccbh = np->ccbh[hcode];
    np->ccbh[hcode] = cp;

    /*
     * Initialize the start and restart actions.
     */
    cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, idle));
    cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));

    /*
     * Initialize some other fields.
     */
    cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

    /*
     * Chain into free ccb queue.
     */
    sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
    return cp;
out_free:
    if (cp->sns_bbuf)
        sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF");
    sym_mfree_dma(cp, sizeof(*cp), "CCB");
    return NULL;
}

/*
 * Look up a CCB from a DSA value.
 */
static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
{
    int hcode;
    ccb_p cp;

    hcode = CCB_HASH_CODE(dsa);
    cp = np->ccbh[hcode];
    while (cp) {
        if (cp->ccb_ba == dsa)
            break;
        cp = cp->link_ccbh;
    }
    return cp;
}
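
#if 0
/*
 * [Editorial usage sketch, not part of the driver; hypothetical helper
 * name.]  The hashed list gives the interrupt path an O(1)-ish way to
 * recover the active job from the only thing the chip reports, its DSA
 * register -- exactly what sym_int_sir() does above:
 */
static ccb_p example_lookup(hcb_p np)
{
    u32 dsa = INL (nc_dsa);
    ccb_p cp = sym_ccb_from_dsa(np, dsa);

    /* cp == NULL means no established nexus for this DSA value. */
    return cp;
}
#endif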
/*
 * Lun control block allocation and initialization.
 */
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
{
    tcb_p tp = &np->target[tn];
    lcb_p lp = sym_lp(tp, ln);

    /*
     * Already done, just return.
     */
    if (lp)
        return lp;
    /*
     * Check against some race.
     */
    assert(!sym_is_bit(tp->busy0_map, ln));

    /*
     * Allocate the LCB bus address array.
     * Compute the bus address of this table.
     */
    if (ln && !tp->luntbl) {
        int i;

        tp->luntbl = sym_calloc_dma(256, "LUNTBL");
        if (!tp->luntbl)
            goto fail;
        for (i = 0 ; i < 64 ; i++)
            tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
        tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
    }

    /*
     * Allocate the table of pointers for LUN(s) > 0, if needed.
     */
    if (ln && !tp->lunmp) {
        tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
                   "LUNMP");
        if (!tp->lunmp)
            goto fail;
    }

    /*
     * Allocate the lcb.
     * Make it available to the chip.
     */
    lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
    if (!lp)
        goto fail;
    if (ln) {
        tp->lunmp[ln] = lp;
        tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
    } else {
        tp->lun0p = lp;
        tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
    }

    /*
     * Let the itl task point to error handling.
     */
    lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

    /*
     * Set the reselect pattern to our default. :)
     */
    lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

    /*
     * Set user capabilities.
     */
    lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
    return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
    tcb_p tp = &np->target[tn];
    lcb_p lp = sym_lp(tp, ln);
    int i;

    /*
     * If LCB not available, try to allocate it.
     */
    if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
        return;

    /*
     * Allocate the task table and the tag allocation
     * circular buffer.  We want both or none.
     */
    lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
    if (!lp->itlq_tbl)
        return;
    lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
    if (!lp->cb_tags) {
        sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
        lp->itlq_tbl = 0;
        return;
    }

    /*
     * Initialize the task table with invalid entries.
     */
    for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
        lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

    /*
     * Fill up the tag buffer with tag numbers.
     */
    for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
        lp->cb_tags[i] = i;

    /*
     * Make the task table available to SCRIPTS,
     * and accept tagged commands now.
     */
    lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
    register volatile u32 data;
    /*
     * chip registers may NOT be cached.
     * write 0xffffffff to a read only register area,
     * and try to read it back.
     */
    data = 0xffffffff;
    OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
    data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
    if (data == 0xffffffff) {
#else
    if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
        printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
            (unsigned) data);
        return (0x10);
    }
    return (0);
}
#endif

static int sym_snooptest (hcb_p np)
{
    u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
    int i, err=0;
#ifndef SYM_CONF_IOMAPPED
    err |= sym_regtest (np);
    if (err)
        return (err);
#endif
restart_test:
    /*
     * Enable Master Parity Checking as we intend
     * to enable it for normal operations.
     */
    OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
    /*
     * init
     */
    pc = SCRIPTB0_BA (np, snooptest);
    host_wr = 1;
    sym_wr  = 2;
    /*
     * Set memory and register.
     */
    np->cache = cpu_to_scr(host_wr);
    OUTL (nc_temp, sym_wr);
    /*
     * Start script (exchange values)
     */
    OUTL (nc_dsa, np->hcb_ba);
    OUTL_DSP (pc);
    /*
     * Wait 'til done (with timeout)
     */
    for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
        if (INB(nc_istat) & (INTF|SIP|DIP))
            break;
    if (i>=SYM_SNOOP_TIMEOUT) {
        printf ("CACHE TEST FAILED: timeout.\n");
        return (0x20);
    }
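
#if 0
    /*
     * [Editorial sketch, not part of the driver; hypothetical helper.]
     * What the exchange verifies, assuming the SCRIPTS snoop test
     * copies np->cache into SCRATCHA and TEMP into np->cache; these
     * are the same three checks performed below:
     */
    static int
    snoop_verdict(u32 host_wr, u32 sym_wr, u32 host_rd, u32 sym_rd,
        u32 sym_bk)
    {
        int err = 0;

        if (sym_rd != host_wr)  /* chip did not see the host's write */
            err |= 1;
        if (host_rd != sym_wr)  /* host did not see the chip's write */
            err |= 2;
        if (sym_bk != sym_wr)   /* chip register was corrupted */
            err |= 4;
        return (err);
    }
#endif
    /*
     * Check for fatal DMA errors.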
*/ dstat = INB (nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) { printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n", sym_name(np)); np->rv_ctest4 &= ~MPEE; goto restart_test; } #endif if (dstat & (MDPE|BF|IID)) { printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80); } /* * Save termination position. */ pc = INL (nc_dsp); /* * Read memory and register. */ host_rd = scr_to_cpu(np->cache); sym_rd = INL (nc_scratcha); sym_bk = INL (nc_temp); /* * Check termination position. */ if (pc != SCRIPTB0_BA (np, snoopend)+8) { printf ("CACHE TEST FAILED: script execution failed.\n"); printf ("start=%08lx, pc=%08lx, end=%08lx\n", (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc, (u_long) SCRIPTB0_BA (np, snoopend) +8); return (0x40); } /* * Show results. */ if (host_wr != sym_rd) { printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", (int) host_wr, (int) sym_rd); err |= 1; } if (host_rd != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", (int) sym_wr, (int) host_rd); err |= 2; } if (sym_bk != sym_wr) { printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", (int) sym_wr, (int) sym_bk); err |= 4; } return (err); } /* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz). */ /* * Select SCSI clock frequency */ static void sym_selectclock(hcb_p np, u_char scntl3) { /* * If multiplier not present or not selected, leave here. */ if (np->multiplier <= 1) { OUTB(nc_scntl3, scntl3); return; } if (sym_verbose >= 2) printf ("%s: enabling clock multiplier\n", sym_name(np)); OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */ /* * Wait for the LCKFRQ bit to be set if supported by the chip. * Otherwise wait 20 micro-seconds. */ if (np->features & FE_LCKFRQ) { int i = 20; while (!(INB(nc_stest4) & LCKFRQ) && --i > 0) UDELAY (20); if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); } else UDELAY (20); OUTB(nc_stest3, HSC); /* Halt the scsi clock */ OUTB(nc_scntl3, scntl3); OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ OUTB(nc_stest3, 0x00); /* Restart scsi clock */ } /* * calculate SCSI clock frequency (in KHz) */ static unsigned getfreq (hcb_p np, int gen) { unsigned int ms = 0; unsigned int f; /* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned). 
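 *
 * [Editorial worked example, not part of the driver.]  The code
 * below converts the measured delay into KHz as
 *
 *	f = (1 << gen) * 4340 / ms
 *
 * so with the default gen = 11, a measured delay of ms = 222
 * gives f = 2048 * 4340 / 222 ~= 40037 KHz, i.e. a 40 MHz
 * SCSI clock.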
 */
    OUTW (nc_sien , 0);     /* mask all scsi interrupts */
    (void) INW (nc_sist);   /* clear pending scsi interrupt */
    OUTB (nc_dien , 0);     /* mask all dma interrupts */
    (void) INW (nc_sist);   /* another one, just to be sure :) */
    OUTB (nc_scntl3, 4);    /* set pre-scaler to divide by 3 */
    OUTB (nc_stime1, 0);    /* disable general purpose timer */
    OUTB (nc_stime1, gen);  /* set to nominal delay of 1<<gen * 125us */
    while (!(INW(nc_sist) & GEN) && ms++ < 100000)
        UDELAY (1000);      /* count ms */
    OUTB (nc_stime1, 0);    /* disable general purpose timer */
    /*
     * set prescaler to divide by whatever 0 means
     * 0 ought to choose divide by 2, but appears
     * to set divide by 3.5 mode in my 53c810 ...
     */
    OUTB (nc_scntl3, 0);

    /*
     * adjust for prescaler, and convert into KHz
     */
    f = ms ? ((1 << gen) * 4340) / ms : 0;

    if (sym_verbose >= 2)
        printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
            sym_name(np), gen, ms, f);

    return f;
}

static unsigned sym_getfreq (hcb_p np)
{
    u_int f1, f2;
    int gen = 11;

    (void) getfreq (np, gen);   /* throw away first result */
    f1 = getfreq (np, gen);
    f2 = getfreq (np, gen);
    if (f1 > f2) f1 = f2;       /* trust lower result */
    return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 */
static void sym_getclock (hcb_p np, int mult)
{
    unsigned char scntl3 = np->sv_scntl3;
    unsigned char stest1 = np->sv_stest1;
    unsigned f1;

    /*
     * For the C10 core, assume 40 MHz.
     */
    if (np->features & FE_C10) {
        np->multiplier = mult;
        np->clock_khz = 40000 * mult;
        return;
    }

    np->multiplier = 1;
    f1 = 40000;
    /*
     * True with 875/895/896/895A with clock multiplier selected
     */
    if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
        if (sym_verbose >= 2)
            printf ("%s: clock multiplier found\n", sym_name(np));
        np->multiplier = mult;
    }

    /*
     * If multiplier not found or scntl3 not 7,5,3,
     * reset chip and get frequency from general purpose timer.
     * Otherwise trust scntl3 BIOS setting.
     */
    if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
        OUTB (nc_stest1, 0);        /* make sure doubler is OFF */
        f1 = sym_getfreq (np);

        if (sym_verbose)
            printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

        if      (f1 < 45000)    f1 =  40000;
        else if (f1 < 55000)    f1 =  50000;
        else                    f1 =  80000;

        if (f1 < 80000 && mult > 1) {
            if (sym_verbose >= 2)
                printf ("%s: clock multiplier assumed\n",
                    sym_name(np));
            np->multiplier = mult;
        }
    } else {
        if      ((scntl3 & 7) == 3) f1 =  40000;
        else if ((scntl3 & 7) == 5) f1 =  80000;
        else                        f1 = 160000;

        f1 /= np->multiplier;
    }

    /*
     * Compute controller synchronous parameters.
     */
    f1 *= np->multiplier;
    np->clock_khz = f1;
}

/*
 * Get/probe PCI clock frequency
 */
static int sym_getpciclock (hcb_p np)
{
    int f = 0;

    /*
     * For the C1010-33, this doesn't work.
     * For the C1010-66, this will be tested when I'll have
     * such a beast to play with.
     */
    if (!(np->features & FE_C10)) {
        OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
        f = (int) sym_getfreq (np);
        OUTB (nc_stest1, 0);
    }
    np->pciclk_khz = f;

    return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
    if (x_status & XE_PARITY_ERR) {
        PRINT_ADDR(cp);
        printf ("unrecovered SCSI parity error.\n");
    }
    if (x_status & XE_EXTRA_DATA) {
        PRINT_ADDR(cp);
        printf ("extraneous data discarded.\n");
    }
    if (x_status & XE_BAD_PHASE) {
        PRINT_ADDR(cp);
        printf ("illegal scsi phase (4/5).\n");
    }
    if (x_status & XE_SODL_UNRUN) {
        PRINT_ADDR(cp);
        printf ("ODD transfer in DATA OUT phase.\n");
    }
    if (x_status & XE_SWIDE_OVRUN) {
        PRINT_ADDR(cp);
        printf ("ODD transfer in DATA IN phase.\n");
    }
}
/*
 * Choose the most appropriate CAM status if
 * the IO encountered an extended error.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
    if (x_status) {
        if      (x_status & XE_PARITY_ERR)
            cam_status = CAM_UNCOR_PARITY;
        else if (x_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
            cam_status = CAM_DATA_RUN_ERR;
        else if (x_status & XE_BAD_PHASE)
            cam_status = CAM_REQ_CMP_ERR;
        else
            cam_status = CAM_REQ_CMP_ERR;
    }
    return cam_status;
}

/*
 * Complete execution of a SCSI command with extended
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running there, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
    struct ccb_scsiio *csio;
    u_int cam_status;
    int i, sense_returned;

    SYM_LOCK_ASSERT(MA_OWNED);

    /*
     * Paranoid check. :)
     */
    if (!cp || !cp->cam_ccb)
        return;

    if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
        printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n",
            (unsigned long)cp, cp->host_status, cp->ssss_status,
            cp->host_flags, cp->target, cp->lun);
        MDELAY(100);
    }

    /*
     * Get CAM command pointer.
     */
    csio = &cp->cam_ccb->csio;

    /*
     * Check for extended errors.
     */
    if (cp->xerr_status) {
        if (sym_verbose)
            sym_print_xerr(cp, cp->xerr_status);
        if (cp->host_status == HS_COMPLETE)
            cp->host_status = HS_COMP_ERR;
    }

    /*
     * Calculate the residual.
     */
    csio->sense_resid = 0;
    csio->resid = sym_compute_residual(np, cp);
    if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
        csio->resid  = 0;       /* throw them away. :) */
        cp->sv_resid = 0;
    }

    if (cp->host_flags & HF_SENSE) {            /* Auto sense */
        csio->scsi_status = cp->sv_scsi_status; /* Restore status */
        csio->sense_resid = csio->resid;        /* Swap residuals */
        csio->resid  = cp->sv_resid;
        cp->sv_resid = 0;
        if (sym_verbose && cp->sv_xerr_status)
            sym_print_xerr(cp, cp->sv_xerr_status);
        if (cp->host_status == HS_COMPLETE &&
            cp->ssss_status == S_GOOD &&
            cp->xerr_status == 0) {
            cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
                             cp->sv_xerr_status);
            cam_status |= CAM_AUTOSNS_VALID;
            /*
             * Bounce back the sense data to user and
             * fix the residual.
             */
            bzero(&csio->sense_data, sizeof(csio->sense_data));
            sense_returned = SYM_SNS_BBUF_LEN - csio->sense_resid;
            if (sense_returned < csio->sense_len)
                csio->sense_resid = csio->sense_len -
                    sense_returned;
            else
                csio->sense_resid = 0;
            bcopy(cp->sns_bbuf, &csio->sense_data,
                MIN(csio->sense_len, sense_returned));
#if 0
            /*
             * If the device reports a UNIT ATTENTION condition
             * due to a RESET condition, we should consider all
             * disconnect CCBs for this unit as aborted.
             */
            if (1) {
                u_char *p;
                p = (u_char *) csio->sense_data;
                if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
                    sym_clear_tasks(np, CAM_REQ_ABORTED,
                            cp->target,cp->lun, -1);
            }
#endif
        }
        else
            cam_status = CAM_AUTOSENSE_FAIL;
    }
    else if (cp->host_status == HS_COMPLETE) {  /* Bad SCSI status */
        csio->scsi_status = cp->ssss_status;
        cam_status = CAM_SCSI_STATUS_ERROR;
    }
    else if (cp->host_status == HS_SEL_TIMEOUT) /* Selection timeout */
        cam_status = CAM_SEL_TIMEOUT;
    else if (cp->host_status == HS_UNEXPECTED)  /* Unexpected BUS FREE*/
        cam_status = CAM_UNEXP_BUSFREE;
    else {                                      /* Extended error */
        if (sym_verbose) {
            PRINT_ADDR(cp);
            printf ("COMMAND FAILED (%x %x %x).\n",
                cp->host_status, cp->ssss_status,
                cp->xerr_status);
        }
        csio->scsi_status = cp->ssss_status;
        /*
         * Set the most appropriate value for CAM status.
         */
        cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
                         cp->xerr_status);
    }

    /*
     * Dequeue all queued CCBs for that device
     * not yet started by SCRIPTS.
*/ i = (INL (nc_scratcha) - np->squeue_ba) / 4; (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Restart the SCRIPTS processor. */ OUTL_DSP (SCRIPTA_BA (np, start)); /* * Synchronize DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)); } /* * Add this one to the COMP queue. * Complete all those commands with either error * or requeue condition. */ sym_set_cam_status((union ccb *) csio, cam_status); sym_remque(&cp->link_ccbq); sym_insque_head(&cp->link_ccbq, &np->comp_ccbq); sym_flush_comp_queue(np, 0); } /* * Complete execution of a successful SCSI command. * * Only successful commands go to the DONE queue, * since we need to have the SCRIPTS processor * stopped on any error condition. * The SCRIPTS processor is running while we are * completing successful commands. */ static void sym_complete_ok (hcb_p np, ccb_p cp) { struct ccb_scsiio *csio; tcb_p tp; lcb_p lp; SYM_LOCK_ASSERT(MA_OWNED); /* * Paranoid check. :) */ if (!cp || !cp->cam_ccb) return; assert (cp->host_status == HS_COMPLETE); /* * Get command, target and lun pointers. */ csio = &cp->cam_ccb->csio; tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); /* * Assume device discovered on first success. */ if (!lp) sym_set_bit(tp->lun_map, cp->lun); /* * If all data have been transferred, given than no * extended error did occur, there is no residual. */ csio->resid = 0; if (cp->phys.head.lastp != cp->phys.head.goalp) csio->resid = sym_compute_residual(np, cp); /* * Wrong transfer residuals may be worse than just always * returning zero. User can disable this feature from * sym_conf.h. Residual support is enabled by default. */ if (!SYM_CONF_RESIDUAL_SUPPORT) csio->resid = 0; /* * Synchronize DMA map if needed. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)); } /* * Set status and complete the command. */ csio->scsi_status = cp->ssss_status; sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP); sym_xpt_done(np, (union ccb *) csio, cp); sym_free_ccb(np, cp); } /* * Our callout handler */ static void sym_callout(void *arg) { union ccb *ccb = (union ccb *) arg; hcb_p np = ccb->ccb_h.sym_hcb_ptr; /* * Check that the CAM CCB is still queued. */ if (!np) return; SYM_LOCK(); switch(ccb->ccb_h.func_code) { case XPT_SCSI_IO: (void) sym_abort_scsiio(np, ccb, 1); break; default: break; } SYM_UNLOCK(); } /* * Abort an SCSI IO. */ static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out) { ccb_p cp; SYM_QUEHEAD *qp; SYM_LOCK_ASSERT(MA_OWNED); /* * Look up our CCB control block. */ cp = NULL; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp2->cam_ccb == ccb) { cp = cp2; break; } } if (!cp || cp->host_status == HS_WAIT) return -1; /* * If a previous abort didn't succeed in time, * perform a BUS reset. */ if (cp->to_abort) { sym_reset_scsi_bus(np, 1); return 0; } /* * Mark the CCB for abort and allow time for. */ cp->to_abort = timed_out ? 2 : 1; callout_reset(&cp->ch, 10 * hz, sym_callout, (caddr_t) ccb); /* * Tell the SCRIPTS processor to stop and synchronize with us. */ np->istat_sem = SEM; OUTB (nc_istat, SIGP|SEM); return 0; } /* * Reset a SCSI device (all LUNs of a target). 
*/ static void sym_reset_dev(hcb_p np, union ccb *ccb) { tcb_p tp; struct ccb_hdr *ccb_h = &ccb->ccb_h; SYM_LOCK_ASSERT(MA_OWNED); if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } tp = &np->target[ccb_h->target_id]; tp->to_reset = 1; sym_xpt_done2(np, ccb, CAM_REQ_CMP); np->istat_sem = SEM; OUTB (nc_istat, SIGP|SEM); } /* * SIM action entry point. */ static void sym_action(struct cam_sim *sim, union ccb *ccb) { hcb_p np; tcb_p tp; lcb_p lp; ccb_p cp; int tmp; u_char idmsg, *msgptr; u_int msglen; struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n")); /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); /* * The common case is SCSI IO. * We deal with other ones elsewhere. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO) { sym_action2(sim, ccb); return; } csio = &ccb->csio; ccb_h = &csio->ccb_h; /* * Work around races. */ if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(ccb); return; } /* * Minimal checkings, so that we will not * go outside our tables. */ if (ccb_h->target_id == np->myaddr || ccb_h->target_id >= SYM_CONF_MAX_TARGET || ccb_h->target_lun >= SYM_CONF_MAX_LUN) { sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } /* * Retrieve the target and lun descriptors. */ tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); /* * Complete the 1st INQUIRY command with error * condition if the device is flagged NOSCAN * at BOOT in the NVRAM. This may speed up * the boot and maintain coherency with BIOS * device numbering. Clearing the flag allows * user to rescan skipped devices later. * We also return error for devices not flagged * for SCAN LUNS in the NVRAM since some mono-lun * devices behave badly when asked for some non * zero LUN. Btw, this is an absolute hack.:-) */ if (!(ccb_h->flags & CAM_CDB_PHYS) && (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ? csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) { if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) || ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && ccb_h->target_lun != 0)) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE); return; } } /* * Get a control block for this IO. */ tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0); cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp); if (!cp) { sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL); return; } /* * Keep track of the IO in our CCB. */ cp->cam_ccb = ccb; /* * Build the IDENTIFY message. */ idmsg = M_IDENTIFY | cp->lun; if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED))) idmsg |= 0x40; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = csio->tag_action; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. 
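 * (whether negotiation is needed is tested as sketched in the
 * editorial block below)
 */
#if 0
    /*
     * [Editorial sketch, not part of the driver; hypothetical helper.]
     * A new negotiation is started whenever any current transfer
     * parameter differs from the goal, exactly the test coded below:
     */
    static int nego_needed(tcb_p tp)
    {
        return (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
                tp->tinfo.current.period  != tp->tinfo.goal.period ||
                tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
                tp->tinfo.current.options != tp->tinfo.goal.options);
    }
#endif
    /*
     * Build a negotiation message if needed.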
* (nego_status is filled by sym_prepare_nego()) */ cp->nego_status = 0; if (tp->tinfo.current.width != tp->tinfo.goal.width || tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset || tp->tinfo.current.options != tp->tinfo.goal.options) { if (!tp->nego_cp && lp) msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen); } /* * Fill in our ccb */ /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * command */ if (sym_setup_cdb(np, csio, cp) < 0) { sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); return; } /* * status */ #if 0 /* Provision */ cp->actualquirks = tp->quirks; #endif cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the data descriptor block * and start the IO. */ sym_setup_data_and_start(np, csio, cp); } /* * Setup buffers and pointers that address the CDB. * I bet, physical CDBs will never be used on the planet, * since they can be bounced without significant overhead. */ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; u32 cmd_ba; int cmd_len; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * CDB is 16 bytes max. */ if (csio->cdb_len > sizeof(cp->cdb_buf)) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; } cmd_len = csio->cdb_len; if (ccb_h->flags & CAM_CDB_POINTER) { /* CDB is a pointer */ if (!(ccb_h->flags & CAM_CDB_PHYS)) { /* CDB pointer is virtual */ bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } else { /* CDB pointer is physical */ #if 0 cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff; #else sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; #endif } } else { /* CDB is in the CAM ccb (buffer) */ bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } cp->phys.cmd.addr = cpu_to_scr(cmd_ba); cp->phys.cmd.size = cpu_to_scr(cmd_len); return 0; } /* * Set up data pointers used by SCRIPTS. */ static void __inline sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir) { u32 lastp, goalp; SYM_LOCK_ASSERT(MA_OWNED); /* * No segments means no data. */ if (!cp->segments) dir = CAM_DIR_NONE; /* * Set the data pointer. */ switch(dir) { case CAM_DIR_OUT: goalp = SCRIPTA_BA (np, data_out2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_IN: cp->host_flags |= HF_DATA_IN; goalp = SCRIPTA_BA (np, data_in2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_NONE: default: lastp = goalp = SCRIPTB_BA (np, no_data); break; } cp->phys.head.lastp = cpu_to_scr(lastp); cp->phys.head.goalp = cpu_to_scr(goalp); cp->phys.head.savep = cpu_to_scr(lastp); cp->startp = cp->phys.head.savep; } /* * Call back routine for the DMA map service. * If bounce buffers are used (why ?), we may sleep and then * be called there in another context. 
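 * (see the editorial note just below)
 */

/*
 * [Editorial note, not part of the driver.]  In sym_setup_data_pointers()
 * above, each scatter segment is described to SCRIPTS by two 32-bit
 * words (address, length), i.e. 8 bytes, and goalp points just past the
 * end of the data-transfer fragment, so the starting pointer is
 *
 *	lastp = goalp - 8 - (cp->segments * (2*4));
 *
 * e.g. cp->segments = 3 with goalp = data_in2 + 8 yields
 * lastp = data_in2 - 24.
 */

/*
 * (sym_execute_ccb, continued from the comment above)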
*/ static void sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error) { ccb_p cp; hcb_p np; union ccb *ccb; cp = (ccb_p) arg; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; SYM_LOCK_ASSERT(MA_OWNED); /* * Deal with weird races. */ if (sym_get_cam_status(ccb) != CAM_REQ_INPROG) goto out_abort; /* * Deal with weird errors. */ if (error) { cp->dmamapped = 0; sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); goto out_abort; } /* * Build the data descriptor for the chip. */ if (nsegs) { int retv; /* 896 rev 1 requires to be careful about boundaries */ if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1) retv = sym_scatter_sg_physical(np, cp, psegs, nsegs); else retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs); if (retv < 0) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG); goto out_abort; } } /* * Synchronize the DMA map only if we have * actually mapped the data. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } /* * Set host status to busy state. * May have been set back to HS_WAIT to avoid a race. */ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; /* * Set data pointers. */ sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK)); /* * Enqueue this IO in our pending queue. */ sym_enqueue_cam_ccb(cp); /* * When `#ifed 1', the code below makes the driver * panic on the first attempt to write to a SCSI device. * It is the first test we want to do after a driver * change that does not seem obviously safe. :) */ #if 0 switch (cp->cdb_buf[0]) { case 0x0A: case 0x2A: case 0xAA: panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); MDELAY(10000); break; default: break; } #endif /* * Activate this job. */ sym_put_start_queue(np, cp); return; out_abort: sym_xpt_done(np, ccb, cp); sym_free_ccb(np, cp); } /* * How complex it gets to deal with the data in CAM. * The Bus Dma stuff makes things still more complex. */ static void sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; int dir, retv; SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &csio->ccb_h; /* * Now deal with the data. */ cp->data_len = csio->dxfer_len; cp->arg = np; /* * No direction means no data. */ dir = (ccb_h->flags & CAM_DIR_MASK); if (dir == CAM_DIR_NONE) { sym_execute_ccb(cp, NULL, 0, 0); return; } cp->dmamapped = (dir == CAM_DIR_IN) ? SYM_DMA_READ : SYM_DMA_WRITE; retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap, (union ccb *)csio, sym_execute_ccb, cp, 0); if (retv == EINPROGRESS) { cp->host_status = HS_WAIT; xpt_freeze_simq(np->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } } /* * Move the scatter list to our data block. */ static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { struct sym_tblmove *data; bus_dma_segment_t *psegs2; SYM_LOCK_ASSERT(MA_OWNED); if (nsegs > SYM_CONF_MAX_SG) return -1; data = &cp->phys.data[SYM_CONF_MAX_SG-1]; psegs2 = &psegs[nsegs-1]; cp->segments = nsegs; while (1) { data->addr = cpu_to_scr(psegs2->ds_addr); data->size = cpu_to_scr(psegs2->ds_len); if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), (long) psegs2->ds_addr, (long) psegs2->ds_len); } if (psegs2 != psegs) { --data; --psegs2; continue; } break; } return 0; } /* * Scatter a SG list with physical addresses into bus addressable chunks. 
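 * (a worked example follows)
 */

/*
 * [Editorial worked example, not part of the driver; the boundary value
 * is hypothetical.]  Assume a DMA boundary of 0x1000 and one segment
 * [ps = 0x1f00, pe = 0x2300).  Working backwards through the loop below:
 *
 *	pn = rounddown2(0x22ff, 0x1000) = 0x2000  -> chunk [0x2000, 0x2300)
 *	pe = 0x2000; pn = rounddown2(0x1fff, 0x1000) = 0x1000 <= ps,
 *	so pn = ps = 0x1f00                       -> chunk [0x1f00, 0x2000)
 *
 * i.e. the segment is emitted (from the tail of phys.data[] backwards)
 * as two chunks, neither crossing the boundary.
 */

/*
 * (sym_scatter_sg_physical, described by the comment above)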
*/ static int sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { u_long ps, pe, pn; u_long k; int s, t; SYM_LOCK_ASSERT(MA_OWNED); s = SYM_CONF_MAX_SG - 1; t = nsegs - 1; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; while (s >= 0) { pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY); if (pn <= ps) pn = ps; k = pe - pn; if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), pn, k); } cp->phys.data[s].addr = cpu_to_scr(pn); cp->phys.data[s].size = cpu_to_scr(k); --s; if (pn == ps) { if (--t < 0) break; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; } else pe = pn; } cp->segments = SYM_CONF_MAX_SG - 1 - s; return t >= 0 ? -1 : 0; } /* * SIM action for non performance critical stuff. */ static void sym_action2(struct cam_sim *sim, union ccb *ccb) { union ccb *abort_ccb; struct ccb_hdr *ccb_h; struct ccb_pathinq *cpi; struct ccb_trans_settings *cts; struct sym_trans *tip; hcb_p np; tcb_p tp; lcb_p lp; u_char dflags; /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; /* * Update SPI transport settings in TARGET control block. * Update SCSI device settings in LUN control block. */ lp = sym_lp(tp, ccb_h->target_lun); if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { sym_update_trans(np, &tp->tinfo.goal, cts); if (lp) sym_update_dflags(np, &lp->current_flags, cts); } if (cts->type == CTS_TYPE_USER_SETTINGS) { sym_update_trans(np, &tp->tinfo.user, cts); if (lp) sym_update_dflags(np, &lp->user_flags, cts); } sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; lp = sym_lp(tp, ccb_h->target_lun); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tip = &tp->tinfo.current; dflags = lp ? lp->current_flags : 0; } else { tip = &tp->tinfo.user; dflags = lp ? lp->user_flags : tp->usrflags; } cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; cts->protocol_version = tip->scsi_version; cts->transport_version = tip->spi_version; cts__spi->sync_period = tip->period; cts__spi->sync_offset = tip->offset; cts__spi->bus_width = tip->width; cts__spi->ppr_options = tip->options; cts__spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (dflags & SYM_DISC_ENABLED) cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB; cts__spi->valid |= CTS_SPI_VALID_DISC; cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; if (dflags & SYM_TAGS_ENABLED) cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; cts__scsi->valid |= CTS_SCSI_VALID_TQ; #undef cts__spi #undef cts__scsi sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_PATH_INQ: cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_UNMAPPED; if (np->usrflags & SYM_SCAN_TARGETS_HILO) cpi->hba_misc |= PIM_SCANHILO; if (np->usrflags & SYM_AVOID_BUS_RESET) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 
15 : 7; /* Semantic problem:)LUN number max = max number of LUNs - 1 */ cpi->max_lun = SYM_CONF_MAX_LUN-1; if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN) cpi->max_lun = SYM_SETUP_MAX_LUN-1; cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = np->myaddr; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; if (np->features & FE_ULTRA3) { cpi->transport_version = 3; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; } cpi->maxio = SYM_CONF_MAX_SG * PAGE_SIZE; sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; case XPT_ABORT: abort_ccb = ccb->cab.abort_ccb; switch(abort_ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (sym_abort_scsiio(np, abort_ccb, 0) == 0) { sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } default: sym_xpt_done2(np, ccb, CAM_UA_ABORT); break; } break; case XPT_RESET_DEV: sym_reset_dev(np, ccb); break; case XPT_RESET_BUS: sym_reset_scsi_bus(np, 0); if (sym_verbose) { xpt_print_path(np->path); printf("SCSI BUS reset delivered.\n"); } sym_init (np, 1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; - case XPT_ACCEPT_TARGET_IO: - case XPT_CONT_TARGET_IO: - case XPT_EN_LUN: - case XPT_NOTIFY_ACK: - case XPT_IMMED_NOTIFY: case XPT_TERM_IO: default: sym_xpt_done2(np, ccb, CAM_REQ_INVALID); break; } } /* * Asynchronous notification handler. */ static void sym_async(void *cb_arg, u32 code, struct cam_path *path, void *args __unused) { hcb_p np; struct cam_sim *sim; u_int tn; tcb_p tp; sim = (struct cam_sim *) cb_arg; np = (hcb_p) cam_sim_softc(sim); SYM_LOCK_ASSERT(MA_OWNED); switch (code) { case AC_LOST_DEVICE: tn = xpt_path_target_id(path); if (tn >= SYM_CONF_MAX_TARGET) break; tp = &np->target[tn]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = tp->tinfo.goal.period = 0; tp->tinfo.current.offset = tp->tinfo.goal.offset = 0; tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT; tp->tinfo.current.options = tp->tinfo.goal.options = 0; break; default: break; } } /* * Update transfer settings of a target. */ static void sym_update_trans(hcb_p np, struct sym_trans *tip, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); /* * Update the infos. */ #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tip->width = cts__spi->bus_width; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tip->offset = cts__spi->sync_offset; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tip->period = cts__spi->sync_period; if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0) tip->options = (cts__spi->ppr_options & PPR_OPT_DT); if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED && cts->protocol_version != PROTO_VERSION_UNKNOWN) tip->scsi_version = cts->protocol_version; if (cts->transport_version != XPORT_VERSION_UNSPECIFIED && cts->transport_version != XPORT_VERSION_UNKNOWN) tip->spi_version = cts->transport_version; #undef cts__spi /* * Scale against driver configuration limits. 
*/ if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE; if (tip->period && tip->offset) { if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS; if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC; } else { tip->offset = 0; tip->period = 0; } /* * Scale against actual controller BUS width. */ if (tip->width > np->maxwide) tip->width = np->maxwide; /* * Only accept DT if controller supports and SYNC/WIDE asked. */ if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) || !(tip->width == BUS_16_BIT && tip->offset)) { tip->options &= ~PPR_OPT_DT; } /* * Scale period factor and offset against controller limits. */ if (tip->offset && tip->period) { if (tip->options & PPR_OPT_DT) { if (tip->period < np->minsync_dt) tip->period = np->minsync_dt; if (tip->period > np->maxsync_dt) tip->period = np->maxsync_dt; if (tip->offset > np->maxoffs_dt) tip->offset = np->maxoffs_dt; } else { if (tip->period < np->minsync) tip->period = np->minsync; if (tip->period > np->maxsync) tip->period = np->maxsync; if (tip->offset > np->maxoffs) tip->offset = np->maxoffs; } } } /* * Update flags for a device (logical unit). */ static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts) { SYM_LOCK_ASSERT(MA_OWNED); #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *flags |= SYM_DISC_ENABLED; else *flags &= ~SYM_DISC_ENABLED; } if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *flags |= SYM_TAGS_ENABLED; else *flags &= ~SYM_TAGS_ENABLED; } #undef cts__spi #undef cts__scsi } /*============= DRIVER INITIALISATION ==================*/ static device_method_t sym_pci_methods[] = { DEVMETHOD(device_probe, sym_pci_probe), DEVMETHOD(device_attach, sym_pci_attach), DEVMETHOD_END }; static driver_t sym_pci_driver = { "sym", sym_pci_methods, 1 /* no softc */ }; static devclass_t sym_devclass; DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, NULL, NULL); MODULE_DEPEND(sym, cam, 1, 1, 1); MODULE_DEPEND(sym, pci, 1, 1, 1); static const struct sym_pci_chip sym_pci_dev_table[] = { {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64, FE_ERL} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_BOF} , #else {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} , #endif {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64, FE_BOF|FE_ERL} , {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64, FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} , {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2, FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} , {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} , {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| FE_RAM|FE_LCKFRQ} , 
#else {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_LCKFRQ} , #endif {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10} , {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_IO256|FE_LEDC} }; /* * Look up the chip table. * * Return a pointer to the chip entry if found, * zero otherwise. */ static const struct sym_pci_chip * sym_find_pci_chip(device_t dev) { const struct sym_pci_chip *chip; int i; u_short device_id; u_char revision; if (pci_get_vendor(dev) != PCI_VENDOR_NCR) return NULL; device_id = pci_get_device(dev); revision = pci_get_revid(dev); for (i = 0; i < nitems(sym_pci_dev_table); i++) { chip = &sym_pci_dev_table[i]; if (device_id != chip->device_id) continue; if (revision > chip->revision_id) continue; return chip; } return NULL; } /* * Tell upper layer if the chip is supported. */ static int sym_pci_probe(device_t dev) { const struct sym_pci_chip *chip; chip = sym_find_pci_chip(dev); if (chip && sym_find_firmware(chip)) { device_set_desc(dev, chip->name); return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? BUS_PROBE_LOW_PRIORITY : BUS_PROBE_DEFAULT; } return ENXIO; } /* * Attach a sym53c8xx device. */ static int sym_pci_attach(device_t dev) { const struct sym_pci_chip *chip; u_short command; u_char cachelnsz; struct sym_hcb *np = NULL; struct sym_nvram nvram; const struct sym_fw *fw = NULL; int i; bus_dma_tag_t bus_dmat; bus_dmat = bus_get_dma_tag(dev); /* * Only probed devices should be attached. * We just enjoy being paranoid. :) */ chip = sym_find_pci_chip(dev); if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL) return (ENXIO); /* * Allocate immediately the host control block, * since we are only expecting to succeed. :) * We keep track in the HCB of all the resources that * are to be released on error. */ np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB"); if (np) np->bus_dmat = bus_dmat; else return (ENXIO); device_set_softc(dev, np); SYM_LOCK_INIT(); /* * Copy some useful infos to the HCB. 
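 * (How the chip table above selects an entry is sketched here.)
 *
 * [Editorial sketch, not part of the driver.]  Entries for one device
 * ID are ordered by ascending revision_id ceiling, and sym_find_pci_chip()
 * skips entries whose ceiling is below the chip's revision, so the first
 * match wins.  E.g. for PCI_ID_SYM53C810:
 *
 *	revision 0x00..0x0f -> first entry  ("810",  ceiling 0x0f)
 *	revision 0x10..0xff -> second entry ("810a", ceiling 0xff)
 */

/*
 * (sym_pci_attach continues: copy some useful infos to the HCB)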
*/ np->hcb_ba = vtobus(np); np->verbose = bootverbose; np->device = dev; np->device_id = pci_get_device(dev); np->revision_id = pci_get_revid(dev); np->features = chip->features; np->clock_divn = chip->nr_divisor; np->maxoffs = chip->offset_max; np->maxburst = chip->burst_max; np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; #ifdef __amd64__ np->target = sym_calloc_dma(SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET"); if (!np->target) goto attach_failed; #endif /* * Initialize the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); sym_que_init(&np->cam_ccbq); /* * Allocate a tag for the DMA of user data. */ if (bus_dma_tag_create(np->bus_dmat, 1, SYM_CONF_DMA_BOUNDARY, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, SYM_CONF_MAX_SG, SYM_CONF_DMA_BOUNDARY, 0, busdma_lock_mutex, &np->mtx, &np->data_dmat)) { device_printf(dev, "failed to create DMA tag.\n"); goto attach_failed; } /* * Read and apply some fix-ups to the PCI COMMAND * register. We want the chip to be enabled for: * - BUS mastering * - PCI parity checking (reporting would also be fine) * - Write And Invalidate. */ command = pci_read_config(dev, PCIR_COMMAND, 2); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, command, 2); /* * Let the device know about the cache line size, * if it doesn't yet. */ cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (!cachelnsz) { cachelnsz = 8; pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); } /* * Alloc/get/map/retrieve everything that deals with MMIO. */ i = SYM_PCI_MMIO; np->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (!np->mmio_res) { device_printf(dev, "failed to allocate MMIO resources\n"); goto attach_failed; } np->mmio_ba = rman_get_start(np->mmio_res); /* * Allocate the IRQ. */ i = 0; np->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (!np->irq_res) { device_printf(dev, "failed to allocate IRQ resource\n"); goto attach_failed; } #ifdef SYM_CONF_IOMAPPED /* * User want us to use normal IO with PCI. * Alloc/get/map/retrieve everything that deals with IO. */ i = SYM_PCI_IO; np->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE); if (!np->io_res) { device_printf(dev, "failed to allocate IO resources\n"); goto attach_failed; } #endif /* SYM_CONF_IOMAPPED */ /* * If the chip has RAM. * Alloc/get/map/retrieve the corresponding resources. */ if (np->features & (FE_RAM|FE_RAM8K)) { int regs_id = SYM_PCI_RAM; if (np->features & FE_64BIT) regs_id = SYM_PCI_RAM64; np->ram_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, ®s_id, RF_ACTIVE); if (!np->ram_res) { device_printf(dev,"failed to allocate RAM resources\n"); goto attach_failed; } np->ram_id = regs_id; np->ram_ba = rman_get_start(np->ram_res); } /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset (np); /* * Try to read the user set-up. */ (void) sym_read_nvram(np, &nvram); /* * Prepare controller and devices settings, according * to chip features, user set-up and driver set-up. */ (void) sym_prepare_setting(np, &nvram); /* * Check the PCI clock frequency. 
* Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. */ i = sym_getpciclock(np); if (i > 37000) device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); /* * Allocate the start queue. */ np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas. */ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); if (!np->scripta0 || !np->scriptb0) goto attach_failed; /* * Allocate the CCBs. We need at least ONE. */ for (i = 0; sym_alloc_ccb(np) != NULL; i++) ; if (i < 1) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptb0_ba = np->scriptb_ba; if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->ram_ws = 8192; np->scriptb_ba = np->scripta_ba + 4096; #ifdef __LP64__ np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } else np->ram_ws = 4096; } /* * Copy scripts to controller instance. */ bcopy(fw->a_base, np->scripta0, np->scripta_sz); bcopy(fw->b_base, np->scriptb0, np->scriptb_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. 
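 *
 * Example: each slot of that table is a SCRIPTS bus address, not a
 * kernel pointer, and all 64 slots initially route to the same
 * resel_bad_lun handler.  The idiom boils down to (a sketch of the
 * loop that follows):
 *
 *	for (i = 0; i < 64; i++)
 *		tbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
 *
 * so a reselection on any not-yet-probed lun funnels into the bad-lun
 * recovery code until a real lun table replaces the slot.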
*/ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the pci chipset. */ if (sym_snooptest (np)) { device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); goto attach_failed; } /* * Now deal with CAM. * Hopefully, we will succeed with that one.:) */ if (!sym_cam_attach(np)) goto attach_failed; /* * Sigh! we are done. */ return 0; /* * We have failed. * We will try to free all the resources we have * allocated, but if we are a boot device, this * will not help that much.;) */ attach_failed: if (np) sym_pci_free(np); return ENXIO; } /* * Free everything that have been allocated for this device. */ static void sym_pci_free(hcb_p np) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; lcb_p lp; int target, lun; /* * First free CAM resources. */ sym_cam_free(np); /* * Now every should be quiet for us to * free other resources. */ if (np->ram_res) bus_release_resource(np->device, SYS_RES_MEMORY, np->ram_id, np->ram_res); if (np->mmio_res) bus_release_resource(np->device, SYS_RES_MEMORY, SYM_PCI_MMIO, np->mmio_res); if (np->io_res) bus_release_resource(np->device, SYS_RES_IOPORT, SYM_PCI_IO, np->io_res); if (np->irq_res) bus_release_resource(np->device, SYS_RES_IRQ, 0, np->irq_res); if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); bus_dmamap_destroy(np->data_dmat, cp->dmamap); sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { lp = sym_lp(tp, lun); if (!lp) continue; if (lp->itlq_tbl) sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); if (lp->cb_tags) sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK, "CB_TAGS"); sym_mfree_dma(lp, sizeof(*lp), "LCB"); } #if SYM_CONF_MAX_LUN > 1 if (tp->lunmp) sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p), "LUNMP"); #endif } #ifdef __amd64__ if (np->target) sym_mfree_dma(np->target, SYM_CONF_MAX_TARGET * sizeof(*(np->target)), "TARGET"); #endif if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); if (np->data_dmat) bus_dma_tag_destroy(np->data_dmat); if (SYM_LOCK_INITIALIZED() != 0) SYM_LOCK_DESTROY(); device_set_softc(np->device, NULL); sym_mfree_dma(np, sizeof(*np), "HCB"); } /* * Allocate CAM resources and register a bus to CAM. */ static int sym_cam_attach(hcb_p np) { struct cam_devq *devq = NULL; struct cam_sim *sim = NULL; struct cam_path *path = NULL; int err; /* * Establish our interrupt handler. 
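 *
 * Example: the bus_setup_intr() call that follows, with each argument
 * spelled out (this restates the standard bus_setup_intr(9) contract,
 * nothing driver specific):
 *
 *	bus_setup_intr(np->device, np->irq_res,
 *	    INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM,
 *	    NULL,		(no filter: handler runs in an ithread)
 *	    sym_intr,		(the interrupt handler itself)
 *	    np,			(softc passed back to the handler)
 *	    &np->intr);		(cookie used later by bus_teardown_intr())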
*/ err = bus_setup_intr(np->device, np->irq_res, INTR_ENTROPY | INTR_MPSAFE | INTR_TYPE_CAM, NULL, sym_intr, np, &np->intr); if (err) { device_printf(np->device, "bus_setup_intr() failed: %d\n", err); goto fail; } /* * Create the device queue for our sym SIM. */ devq = cam_simq_alloc(SYM_CONF_MAX_START); if (!devq) goto fail; /* * Construct our SIM entry. */ sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, device_get_unit(np->device), &np->mtx, 1, SYM_SETUP_MAX_TAG, devq); if (!sim) goto fail; SYM_LOCK(); if (xpt_bus_register(sim, np->device, 0) != CAM_SUCCESS) goto fail; np->sim = sim; sim = NULL; if (xpt_create_path(&path, NULL, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { goto fail; } np->path = path; /* * Establish our async notification handler. */ if (xpt_register_async(AC_LOST_DEVICE, sym_async, np->sim, path) != CAM_REQ_CMP) goto fail; /* * Start the chip now, without resetting the BUS, since * it seems that this must stay under control of CAM. * With LVD/SE capable chips and BUS in SE mode, we may * get a spurious SMBC interrupt. */ sym_init (np, 0); SYM_UNLOCK(); return 1; fail: if (sim) cam_sim_free(sim, FALSE); if (devq) cam_simq_free(devq); SYM_UNLOCK(); sym_cam_free(np); return 0; } /* * Free everything that deals with CAM. */ static void sym_cam_free(hcb_p np) { SYM_LOCK_ASSERT(MA_NOTOWNED); if (np->intr) { bus_teardown_intr(np->device, np->irq_res, np->intr); np->intr = NULL; } SYM_LOCK(); if (np->sim) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/ TRUE); np->sim = NULL; } if (np->path) { xpt_free_path(np->path); np->path = NULL; } SYM_UNLOCK(); } /*============ OPTIONNAL NVRAM SUPPORT =================*/ /* * Get host setup from NVRAM. */ static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get parity checking, host ID, verbose mode * and miscellaneous host flags from NVRAM. */ switch(nvram->type) { case SYM_SYMBIOS_NVRAM: if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) np->rv_scntl0 &= ~0x0a; np->myaddr = nvram->data.Symbios.host_id & 0x0f; if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) np->verbose += 1; if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) np->usrflags |= SYM_SCAN_TARGETS_HILO; if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) np->usrflags |= SYM_AVOID_BUS_RESET; break; case SYM_TEKRAM_NVRAM: np->myaddr = nvram->data.Tekram.host_id & 0x0f; break; default: break; } #endif } /* * Get target setup from NVRAM. */ #ifdef SYM_CONF_NVRAM_SUPPORT static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram); static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram); #endif static void sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT switch(nvp->type) { case SYM_SYMBIOS_NVRAM: sym_Symbios_setup_target (np, target, &nvp->data.Symbios); break; case SYM_TEKRAM_NVRAM: sym_Tekram_setup_target (np, target, &nvp->data.Tekram); break; default: break; } #endif } #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get target set-up from Symbios format NVRAM. */ static void sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram) { tcb_p tp = &np->target[target]; Symbios_target *tn = &nvram->target[target]; tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0; tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT; tp->usrtags = (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? 
SYM_SETUP_MAX_TAG : 0; if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE)) tp->usrflags &= ~SYM_DISC_ENABLED; if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)) tp->usrflags |= SYM_SCAN_BOOT_DISABLED; if (!(tn->flags & SYMBIOS_SCAN_LUNS)) tp->usrflags |= SYM_SCAN_LUNS_DISABLED; } /* * Get target set-up from Tekram format NVRAM. */ static void sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram) { tcb_p tp = &np->target[target]; struct Tekram_target *tn = &nvram->target[target]; int i; if (tn->flags & TEKRAM_SYNC_NEGO) { i = tn->sync_index & 0xf; tp->tinfo.user.period = Tekram_sync[i]; } tp->tinfo.user.width = (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT; if (tn->flags & TEKRAM_TAGGED_COMMANDS) { tp->usrtags = 2 << nvram->max_tags_index; } if (tn->flags & TEKRAM_DISCONNECT_ENABLE) tp->usrflags |= SYM_DISC_ENABLED; /* If any device does not support parity, we will not use this option */ if (!(tn->flags & TEKRAM_PARITY_CHECK)) np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */ } #ifdef SYM_CONF_DEBUG_NVRAM /* * Dump Symbios format NVRAM for debugging purpose. */ static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram) { int i; /* display Symbios nvram host data */ printf("%s: HOST ID=%d%s%s%s%s%s%s\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :""); /* display Symbios nvram drive data */ for (i = 0 ; i < 15 ; i++) { struct Symbios_target *tn = &nvram->target[i]; printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", sym_name(np), i, (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", tn->bus_width, tn->sync_period / 4, tn->timeout); } } /* * Dump TEKRAM format NVRAM for debugging purpose. */ static const u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram) { int i, tags, boot_delay; char *rem; /* display Tekram nvram host data */ tags = 2 << nvram->max_tags_index; boot_delay = 0; if (nvram->boot_delay_index < 6) boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { default: case 0: rem = ""; break; case 1: rem = " REMOVABLE=boot device"; break; case 2: rem = " REMOVABLE=all"; break; } printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"", (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", rem, boot_delay, tags); /* display Tekram nvram drive data */ for (i = 0; i <= 15; i++) { int sync, j; struct Tekram_target *tn = &nvram->target[i]; j = tn->sync_index & 0xf; sync = Tekram_sync[j]; printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", sym_name(np), i, (tn->flags & TEKRAM_PARITY_CHECK) ? 
" PARITY" : "", (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & TEKRAM_START_CMD) ? " START" : "", (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "", sync); } } #endif /* SYM_CONF_DEBUG_NVRAM */ #endif /* SYM_CONF_NVRAM_SUPPORT */ /* * Try reading Symbios or Tekram NVRAM */ #ifdef SYM_CONF_NVRAM_SUPPORT static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram); static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram); #endif static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Try to read SYMBIOS nvram. * Try to read TEKRAM nvram if Symbios nvram not found. */ if (SYM_SETUP_SYMBIOS_NVRAM && !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) { nvp->type = SYM_SYMBIOS_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Symbios_nvram(np, &nvp->data.Symbios); #endif } else if (SYM_SETUP_TEKRAM_NVRAM && !sym_read_Tekram_nvram (np, &nvp->data.Tekram)) { nvp->type = SYM_TEKRAM_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Tekram_nvram(np, &nvp->data.Tekram); #endif } else nvp->type = 0; #else nvp->type = 0; #endif return nvp->type; } #ifdef SYM_CONF_NVRAM_SUPPORT /* * 24C16 EEPROM reading. * * GPOI0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. */ #define SET_BIT 0 #define CLR_BIT 1 #define SET_CLK 2 #define CLR_CLK 3 /* * Set/clear data/clock bit in GPIO0 */ static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg, int bit_mode) { UDELAY (5); switch (bit_mode){ case SET_BIT: *gpreg |= write_bit; break; case CLR_BIT: *gpreg &= 0xfe; break; case SET_CLK: *gpreg |= 0x02; break; case CLR_CLK: *gpreg &= 0xfd; break; } OUTB (nc_gpreg, *gpreg); UDELAY (5); } /* * Send START condition to NVRAM to wake it up. */ static void S24C16_start(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 1, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); S24C16_set_bit(np, 0, gpreg, CLR_CLK); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! 
*/ static void S24C16_stop(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 1, gpreg, SET_BIT); } /* * Read or write a bit to the NVRAM, * read if GPIO0 input else write if GPIO0 output */ static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit, u_char *gpreg) { S24C16_set_bit(np, write_bit, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); if (read_bit) *read_bit = INB (nc_gpreg); S24C16_set_bit(np, 0, gpreg, CLR_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); } /* * Output an ACK to the NVRAM after reading, * change GPIO0 to output and when done back to an input */ static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl & 0xfe); S24C16_do_bit(np, 0, write_bit, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * Input an ACK from NVRAM after writing, * change GPIO0 to input and when done back to an output */ static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl | 0x01); S24C16_do_bit(np, read_bit, 1, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, * GPIO0 must already be set as an output */ static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl) { int x; for (x = 0; x < 8; x++) S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg); S24C16_read_ack(np, ack_data, gpreg, gpcntl); } /* * READ a byte from the NVRAM and then send an ACK to say we have got it, * GPIO0 must already be set as an input */ static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl) { int x; u_char read_bit; *read_data = 0; for (x = 0; x < 8; x++) { S24C16_do_bit(np, &read_bit, 1, gpreg); *read_data |= ((read_bit & 0x01) << (7 - x)); } S24C16_write_ack(np, ack_data, gpreg, gpcntl); } /* * Read 'len' bytes starting at 'offset'. 
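 *
 * Example: the routine below is a standard 24C16 "random read": a dummy
 * write that only latches the address, a repeated START, then the read
 * itself.  On the wire the byte sequence is (A = ACK, N = NAK):
 *
 *	START, 0xA0 | ((offset >> 7) & 0x0E), A,	(select, address MSBs)
 *	offset & 0xFF, A,				(address LSB)
 *	START, 0xA1 | ((offset >> 7) & 0x0E), A,	(re-select for read)
 *	data[0], A, ... data[len-1], N, STOP
 *
 * The three high address bits ride inside the device-select byte, which
 * is how a 2 KB part makes do with an 8-bit word address.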
*/
static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
{
	u_char	gpcntl, gpreg;
	u_char	old_gpcntl, old_gpreg;
	u_char	ack_data;
	int	retv = 1;
	int	x;

	/* save current state of GPCNTL and GPREG */
	old_gpreg	= INB (nc_gpreg);
	old_gpcntl	= INB (nc_gpcntl);
	gpcntl		= old_gpcntl & 0x1c;

	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
	OUTB (nc_gpreg,  old_gpreg);
	OUTB (nc_gpcntl, gpcntl);

	/* this is to set NVRAM into a known state with GPIO0/1 both low */
	gpreg = old_gpreg;
	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);

	/* now set NVRAM inactive with GPIO0/1 both high */
	S24C16_stop(np, &gpreg);

	/* activate NVRAM */
	S24C16_start(np, &gpreg);

	/* write device code and random address MSB */
	S24C16_write_byte(np, &ack_data,
		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* write random address LSB */
	S24C16_write_byte(np, &ack_data,
		offset & 0xff, &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* regenerate START state to set up for reading */
	S24C16_start(np, &gpreg);

	/* rewrite device code and address MSB with read bit set (lsb = 0x01) */
	S24C16_write_byte(np, &ack_data,
		0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* now set up GPIO0 for inputting data */
	gpcntl |= 0x01;
	OUTB (nc_gpcntl, gpcntl);

	/* input all requested data - only part of total NVRAM */
	for (x = 0; x < len; x++)
		S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);

	/* finally put NVRAM back in inactive mode */
	gpcntl &= 0xfe;
	OUTB (nc_gpcntl, gpcntl);
	S24C16_stop(np, &gpreg);
	retv = 0;
out:
	/* return GPIO0/1 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg,  old_gpreg);

	return retv;
}

#undef SET_BIT	/* 0 */
#undef CLR_BIT	/* 1 */
#undef SET_CLK	/* 2 */
#undef CLR_CLK	/* 3 */

/*
 * Try reading Symbios NVRAM.
 * Return 0 if OK.
 */
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram)
{
	static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
	u_char *data = (u_char *) nvram;
	int len  = sizeof(*nvram);
	u_short	csum;
	int x;

	/* probe the 24c16 and read the SYMBIOS 24c16 area */
	if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
		return 1;

	/* check valid NVRAM signature, verify byte count and checksum */
	if (nvram->type != 0 ||
	    bcmp(nvram->trailer, Symbios_trailer, 6) ||
	    nvram->byte_count != len - 12)
		return 1;

	/* verify checksum */
	for (x = 6, csum = 0; x < len - 6; x++)
		csum += data[x];
	if (csum != nvram->checksum)
		return 1;

	return 0;
}

/*
 * 93C46 EEPROM reading.
 *
 * GPIO0 - data in
 * GPIO1 - data out
 * GPIO2 - clock
 * GPIO4 - chip select
 *
 * Used by Tekram.
 */

/*
 * Pulse clock bit in GPIO2
 */
static void T93C46_Clk(hcb_p np, u_char *gpreg)
{
	OUTB (nc_gpreg, *gpreg | 0x04);
	UDELAY (2);
	OUTB (nc_gpreg, *gpreg);
}

/*
 * Read bit from NVRAM
 */
static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
{
	UDELAY (2);
	T93C46_Clk(np, gpreg);
	*read_bit = INB (nc_gpreg);
}

/*
 * Write bit to GPIO1
 */
static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
{
	if (write_bit & 0x01)
		*gpreg |= 0x02;
	else
		*gpreg &= 0xfd;

	*gpreg |= 0x10;

	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
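 *
 * Example: the 93C46 is Microwire, not I2C; every read issued by
 * T93C46_Read_Data() below begins with the 9-bit command 0x180 | x,
 * shifted out MSB first, which decodes as:
 *
 *	bit 8		start bit		(0x100)
 *	bits 7..6	READ opcode, 10b	(0x080)
 *	bits 5..0	word address x
 *
 * after which the part clocks back one 16-bit data word.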
*/ static void T93C46_Stop(hcb_p np, u_char *gpreg) { *gpreg &= 0xef; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send read command and address to NVRAM */ static void T93C46_Send_Command(hcb_p np, u_short write_data, u_char *read_bit, u_char *gpreg) { int x; /* send 9 bits, start bit (1), command (2), address (6) */ for (x = 0; x < 9; x++) T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); *read_bit = INB (nc_gpreg); } /* * READ 2 bytes from the NVRAM */ static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg) { int x; u_char read_bit; *nvram_data = 0; for (x = 0; x < 16; x++) { T93C46_Read_Bit(np, &read_bit, gpreg); if (read_bit & 0x01) *nvram_data |= (0x01 << (15 - x)); else *nvram_data &= ~(0x01 << (15 - x)); } } /* * Read Tekram NvRAM data. */ static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg) { u_char read_bit; int x; for (x = 0; x < len; x++) { /* output read command and address */ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); if (read_bit & 0x01) return 1; /* Bad */ T93C46_Read_Word(np, &data[x], gpreg); T93C46_Stop(np, gpreg); } return 0; } /* * Try reading 93C46 Tekram NVRAM. */ static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; int retv = 1; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 1/2/4 out */ gpreg = old_gpreg & 0xe9; OUTB (nc_gpreg, gpreg); gpcntl = (old_gpcntl & 0xe9) | 0x09; OUTB (nc_gpcntl, gpcntl); /* input all of NVRAM, 64 words */ retv = T93C46_Read_Data(np, (u_short *) nvram, sizeof(*nvram) / sizeof(short), &gpreg); /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } /* * Try reading Tekram NVRAM. * Return 0 if OK. */ static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) { u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; switch (np->device_id) { case PCI_ID_SYM53C885: case PCI_ID_SYM53C895: case PCI_ID_SYM53C896: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); break; case PCI_ID_SYM53C875: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); if (!x) break; default: x = sym_read_T93C46_nvram(np, nvram); break; } if (x) return 1; /* verify checksum */ for (x = 0, csum = 0; x < len - 1; x += 2) csum += data[x] + (data[x+1] << 8); if (csum != 0x1234) return 1; return 0; } #endif /* SYM_CONF_NVRAM_SUPPORT */ Index: stable/11/sys/dev/trm/trm.c =================================================================== --- stable/11/sys/dev/trm/trm.c (revision 314380) +++ stable/11/sys/dev/trm/trm.c (revision 314381) @@ -1,3703 +1,3559 @@ /* * O.S : FreeBSD CAM * FILE NAME : trm.c * BY : C.L. 
Huang (ching@tekram.com.tw) * Erich Chen (erich@tekram.com.tw) * Description: Device Driver for Tekram SCSI adapters * DC395U/UW/F ,DC315/U(TRM-S1040) * DC395U2D/U2W(TRM-S2080) * PCI SCSI Bus Master Host Adapter * (SCSI chip set used Tekram ASIC TRM-S1040,TRM-S2080) */ #include __FBSDID("$FreeBSD$"); /* * HISTORY: * * REV# DATE NAME DESCRIPTION * 1.05 05/01/1999 ERICH CHEN First released for 3.x.x (CAM) * 1.06 07/29/1999 ERICH CHEN Modify for NEW PCI * 1.07 12/12/1999 ERICH CHEN Modify for 3.3.x ,DCB no free * 1.08 06/12/2000 ERICH CHEN Modify for 4.x.x * 1.09 11/03/2000 ERICH CHEN Modify for 4.1.R ,new sim * 1.10 10/10/2001 Oscar Feng Fixed CAM rescan hang up bug. * 1.11 10/13/2001 Oscar Feng Fixed wrong Async speed display bug. */ /*- * (C)Copyright 1995-2001 Tekram Technology Co.,Ltd. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * Imported into FreeBSD source repository, and updated to compile under * FreeBSD-3.0-DEVELOPMENT, by Stefan Esser , 1996-12-17 */ /* * Updated to compile under FreeBSD 5.0-CURRENT by Olivier Houchard * , 2002-03-04 */ #include #include #include #include #if __FreeBSD_version >= 500000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define trm_reg_read8(reg) bus_space_read_1(pACB->tag, pACB->bsh, reg) #define trm_reg_read16(reg) bus_space_read_2(pACB->tag, pACB->bsh, reg) #define trm_reg_read32(reg) bus_space_read_4(pACB->tag, pACB->bsh, reg) #define trm_reg_write8(value,reg) bus_space_write_1(pACB->tag, pACB->bsh,\ reg, value) #define trm_reg_write16(value,reg) bus_space_write_2(pACB->tag, pACB->bsh,\ reg, value) #define trm_reg_write32(value,reg) bus_space_write_4(pACB->tag, pACB->bsh,\ reg, value) #define PCI_Vendor_ID_TEKRAM 0x1DE1 #define PCI_Device_ID_TRM_S1040 0x0391 #define PCI_DEVICEID_TRMS1040 0x03911DE1 #define PCI_DEVICEID_TRMS2080 0x03921DE1 #ifdef trm_DEBUG1 #define TRM_DPRINTF(fmt, arg...) printf("trm: " fmt, ##arg) #else #define TRM_DPRINTF(fmt, arg...) 
{} #endif /* TRM_DEBUG */ static void trm_check_eeprom(PNVRAMTYPE pEEpromBuf,PACB pACB); static void NVRAM_trm_read_all(PNVRAMTYPE pEEpromBuf,PACB pACB); static u_int8_t NVRAM_trm_get_data(PACB pACB, u_int8_t bAddr); static void NVRAM_trm_write_all(PNVRAMTYPE pEEpromBuf,PACB pACB); static void NVRAM_trm_set_data(PACB pACB, u_int8_t bAddr, u_int8_t bData); static void NVRAM_trm_write_cmd(PACB pACB, u_int8_t bCmd, u_int8_t bAddr); static void NVRAM_trm_wait_30us(PACB pACB); static void trm_Interrupt(void *vpACB); static void trm_DataOutPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataInPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_CommandPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_StatusPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgOutPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgInPhase0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataOutPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_DataInPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_CommandPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_StatusPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgOutPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_MsgInPhase1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_Nop0(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_Nop1(PACB pACB, PSRB pSRB, u_int16_t * pscsi_status); static void trm_SetXferRate(PACB pACB, PSRB pSRB,PDCB pDCB); static void trm_DataIO_transfer(PACB pACB, PSRB pSRB, u_int16_t ioDir); static void trm_Disconnect(PACB pACB); static void trm_Reselect(PACB pACB); static void trm_SRBdone(PACB pACB, PDCB pDCB, PSRB pSRB); static void trm_DoingSRB_Done(PACB pACB); static void trm_ScsiRstDetect(PACB pACB); static void trm_ResetSCSIBus(PACB pACB); static void trm_RequestSense(PACB pACB, PDCB pDCB, PSRB pSRB); static void trm_EnableMsgOutAbort2(PACB pACB, PSRB pSRB); static void trm_EnableMsgOutAbort1(PACB pACB, PSRB pSRB); static void trm_SendSRB(PACB pACB, PSRB pSRB); static int trm_probe(device_t tag); static int trm_attach(device_t tag); static void trm_reset(PACB pACB); static u_int16_t trm_StartSCSI(PACB pACB, PDCB pDCB, PSRB pSRB); static int trm_initAdapter(PACB pACB, u_int16_t unit); static void trm_initDCB(PACB pACB, PDCB pDCB, u_int16_t unit, u_int32_t i, u_int32_t j); static int trm_initSRB(PACB pACB); static void trm_initACB(PACB pACB, u_int8_t adaptType, u_int16_t unit); /* CAM SIM entry points */ #define ccb_trmsrb_ptr spriv_ptr0 #define ccb_trmacb_ptr spriv_ptr1 static void trm_action(struct cam_sim *psim, union ccb *pccb); static void trm_poll(struct cam_sim *psim); static void * trm_SCSI_phase0[] = { trm_DataOutPhase0, /* phase:0 */ trm_DataInPhase0, /* phase:1 */ trm_CommandPhase0, /* phase:2 */ trm_StatusPhase0, /* phase:3 */ trm_Nop0, /* phase:4 */ trm_Nop1, /* phase:5 */ trm_MsgOutPhase0, /* phase:6 */ trm_MsgInPhase0, /* phase:7 */ }; /* * * stateV = (void *) trm_SCSI_phase1[phase] * */ static void * trm_SCSI_phase1[] = { trm_DataOutPhase1, /* phase:0 */ trm_DataInPhase1, /* phase:1 */ trm_CommandPhase1, /* phase:2 */ trm_StatusPhase1, /* phase:3 */ trm_Nop0, /* phase:4 */ trm_Nop1, /* phase:5 */ trm_MsgOutPhase1, /* phase:6 */ trm_MsgInPhase1, /* phase:7 */ }; NVRAMTYPE trm_eepromBuf[TRM_MAX_ADAPTER_NUM]; /* *Fast20: 000 50ns, 20.0 Mbytes/s * 001 75ns, 13.3 
Mbytes/s * 010 100ns, 10.0 Mbytes/s * 011 125ns, 8.0 Mbytes/s * 100 150ns, 6.6 Mbytes/s * 101 175ns, 5.7 Mbytes/s * 110 200ns, 5.0 Mbytes/s * 111 250ns, 4.0 Mbytes/s * *Fast40: 000 25ns, 40.0 Mbytes/s * 001 50ns, 20.0 Mbytes/s * 010 75ns, 13.3 Mbytes/s * 011 100ns, 10.0 Mbytes/s * 100 125ns, 8.0 Mbytes/s * 101 150ns, 6.6 Mbytes/s * 110 175ns, 5.7 Mbytes/s * 111 200ns, 5.0 Mbytes/s */ /* real period: */ u_int8_t dc395x_clock_period[] = { 12,/* 48 ns 20 MB/sec */ 18,/* 72 ns 13.3 MB/sec */ 25,/* 100 ns 10.0 MB/sec */ 31,/* 124 ns 8.0 MB/sec */ 37,/* 148 ns 6.6 MB/sec */ 43,/* 172 ns 5.7 MB/sec */ 50,/* 200 ns 5.0 MB/sec */ 62 /* 248 ns 4.0 MB/sec */ }; u_int8_t dc395u2x_clock_period[]={ 10,/* 25 ns 40.0 MB/sec */ 12,/* 48 ns 20.0 MB/sec */ 18,/* 72 ns 13.3 MB/sec */ 25,/* 100 ns 10.0 MB/sec */ 31,/* 124 ns 8.0 MB/sec */ 37,/* 148 ns 6.6 MB/sec */ 43,/* 172 ns 5.7 MB/sec */ 50,/* 200 ns 5.0 MB/sec */ }; #define dc395x_tinfo_period dc395x_clock_period #define dc395u2x_tinfo_period dc395u2x_clock_period static PSRB trm_GetSRB(PACB pACB) { int intflag; PSRB pSRB; intflag = splcam(); pSRB = pACB->pFreeSRB; if (pSRB) { pACB->pFreeSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; } splx(intflag); return (pSRB); } static void trm_RewaitSRB0(PDCB pDCB, PSRB pSRB) { PSRB psrb1; int intflag; intflag = splcam(); if ((psrb1 = pDCB->pWaitingSRB)) { pSRB->pNextSRB = psrb1; pDCB->pWaitingSRB = pSRB; } else { pSRB->pNextSRB = NULL; pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } splx(intflag); } static void trm_RewaitSRB(PDCB pDCB, PSRB pSRB) { PSRB psrb1; int intflag; intflag = splcam(); pDCB->GoingSRBCnt--; psrb1 = pDCB->pGoingSRB; if (pSRB == psrb1) /* * if this SRB is GoingSRB * remove this SRB from GoingSRB Q */ pDCB->pGoingSRB = psrb1->pNextSRB; else { /* * if this SRB is not current GoingSRB * remove this SRB from GoingSRB Q */ while (pSRB != psrb1->pNextSRB) psrb1 = psrb1->pNextSRB; psrb1->pNextSRB = pSRB->pNextSRB; if (pSRB == pDCB->pGoingLastSRB) pDCB->pGoingLastSRB = psrb1; } if ((psrb1 = pDCB->pWaitingSRB)) { /* * if WaitingSRB Q is not NULL * Q back this SRB into WaitingSRB */ pSRB->pNextSRB = psrb1; pDCB->pWaitingSRB = pSRB; } else { pSRB->pNextSRB = NULL; pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } splx(intflag); } static void trm_DoWaitingSRB(PACB pACB) { int intflag; PDCB ptr, ptr1; PSRB pSRB; intflag = splcam(); if (!(pACB->pActiveDCB) && !(pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV))) { ptr = pACB->pDCBRunRobin; if (!ptr) { ptr = pACB->pLinkDCB; pACB->pDCBRunRobin = ptr; } ptr1 = ptr; for (;ptr1 ;) { pACB->pDCBRunRobin = ptr1->pNextDCB; if (!(ptr1->MaxActiveCommandCnt > ptr1->GoingSRBCnt) || !(pSRB = ptr1->pWaitingSRB)) { if (pACB->pDCBRunRobin == ptr) break; ptr1 = ptr1->pNextDCB; } else { if (!trm_StartSCSI(pACB, ptr1, pSRB)) { /* * If trm_StartSCSI return 0 : * current interrupt status is interrupt enable * It's said that SCSI processor is unoccupied */ ptr1->GoingSRBCnt++; if (ptr1->pWaitingLastSRB == pSRB) { ptr1->pWaitingSRB = NULL; ptr1->pWaitingLastSRB = NULL; } else ptr1->pWaitingSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; if (ptr1->pGoingSRB) ptr1->pGoingLastSRB->pNextSRB = pSRB; else ptr1->pGoingSRB = pSRB; ptr1->pGoingLastSRB = pSRB; } break; } } } splx(intflag); return; } static void trm_SRBwaiting(PDCB pDCB, PSRB pSRB) { if (pDCB->pWaitingSRB) { pDCB->pWaitingLastSRB->pNextSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; pSRB->pNextSRB = NULL; } else { pDCB->pWaitingSRB = pSRB; pDCB->pWaitingLastSRB = pSRB; } } static u_int32_t trm_get_sense_bufaddr(PACB 
pACB, PSRB pSRB) { int offset; offset = pSRB->TagNumber; return (pACB->sense_busaddr + (offset * sizeof(struct scsi_sense_data))); } static struct scsi_sense_data * trm_get_sense_buf(PACB pACB, PSRB pSRB) { int offset; offset = pSRB->TagNumber; return (&pACB->sense_buffers[offset]); } static void trm_ExecuteSRB(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { int flags; PACB pACB; PSRB pSRB; union ccb *ccb; u_long totalxferlen=0; flags = splcam(); pSRB = (PSRB)arg; ccb = pSRB->pccb; pACB = (PACB)ccb->ccb_h.ccb_trmacb_ptr; TRM_DPRINTF("trm_ExecuteSRB..........\n"); if (nseg != 0) { PSEG psg; bus_dma_segment_t *end_seg; int op; /* Copy the segments into our SG list */ end_seg = dm_segs + nseg; psg = pSRB->pSRBSGL; while (dm_segs < end_seg) { psg->address = dm_segs->ds_addr; psg->length = (u_long)dm_segs->ds_len; totalxferlen += dm_segs->ds_len; psg++; dm_segs++; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } bus_dmamap_sync(pACB->buffer_dmat, pSRB->dmamap, op); } pSRB->RetryCnt = 0; pSRB->SRBTotalXferLength = totalxferlen; pSRB->SRBSGCount = nseg; pSRB->SRBSGIndex = 0; pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; pSRB->MsgCnt = 0; pSRB->SRBStatus = 0; pSRB->SRBFlag = 0; pSRB->SRBState = 0; pSRB->ScsiPhase = PH_BUS_FREE; /* SCSI bus free Phase */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(pACB->buffer_dmat, pSRB->dmamap); pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = pSRB; xpt_done(ccb); splx(flags); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; trm_SendSRB(pACB, pSRB); splx(flags); return; } static void trm_SendSRB(PACB pACB, PSRB pSRB) { PDCB pDCB; pDCB = pSRB->pSRBDCB; if (!(pDCB->MaxActiveCommandCnt > pDCB->GoingSRBCnt) || (pACB->pActiveDCB) || (pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV))) { TRM_DPRINTF("pDCB->MaxCommand=%d \n",pDCB->MaxActiveCommandCnt); TRM_DPRINTF("pDCB->GoingSRBCnt=%d \n",pDCB->GoingSRBCnt); TRM_DPRINTF("pACB->pActiveDCB=%8x \n",(u_int)pACB->pActiveDCB); TRM_DPRINTF("pACB->ACBFlag=%x \n",pACB->ACBFlag); trm_SRBwaiting(pDCB, pSRB); goto SND_EXIT; } if (pDCB->pWaitingSRB) { trm_SRBwaiting(pDCB, pSRB); pSRB = pDCB->pWaitingSRB; pDCB->pWaitingSRB = pSRB->pNextSRB; pSRB->pNextSRB = NULL; } if (!trm_StartSCSI(pACB, pDCB, pSRB)) { /* * If trm_StartSCSI return 0 : * current interrupt status is interrupt enable * It's said that SCSI processor is unoccupied */ pDCB->GoingSRBCnt++; /* stack waiting SRB*/ if (pDCB->pGoingSRB) { pDCB->pGoingLastSRB->pNextSRB = pSRB; pDCB->pGoingLastSRB = pSRB; } else { pDCB->pGoingSRB = pSRB; pDCB->pGoingLastSRB = pSRB; } } else { /* * If trm_StartSCSI return 1 : * current interrupt status is interrupt disreenable * It's said that SCSI processor has more one SRB need to do */ trm_RewaitSRB0(pDCB, pSRB); } SND_EXIT: return; } static void trm_action(struct cam_sim *psim, union ccb *pccb) { PACB pACB; int actionflags; u_int target_id,target_lun; CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("trm_action\n")); actionflags = splcam(); pACB = (PACB) cam_sim_softc(psim); target_id = pccb->ccb_h.target_id; target_lun = pccb->ccb_h.target_lun; switch (pccb->ccb_h.func_code) { - case XPT_NOOP: - TRM_DPRINTF(" XPT_NOOP \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; /* * Execute the requested I/O operation */ case XPT_SCSI_IO: { PDCB pDCB = NULL; PSRB pSRB; struct ccb_scsiio *pcsio; int error; pcsio = &pccb->csio; TRM_DPRINTF(" XPT_SCSI_IO \n"); TRM_DPRINTF("trm: target_id= %d target_lun= %d \n" 
,target_id, target_lun); TRM_DPRINTF( "pACB->scan_devices[target_id][target_lun]= %d \n" ,pACB->scan_devices[target_id][target_lun]); if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { xpt_done(pccb); splx(actionflags); return; } pDCB = &pACB->DCBarray[target_id][target_lun]; if (!(pDCB->DCBstatus & DS_IN_QUEUE)) { pACB->scan_devices[target_id][target_lun] = 1; trm_initDCB(pACB, pDCB, pACB->AdapterUnit, target_id, target_lun); } /* * Assign an SRB and connect it with this ccb. */ pSRB = trm_GetSRB(pACB); if (!pSRB) { /* Freeze SIMQ */ pccb->ccb_h.status = CAM_RESRC_UNAVAIL; xpt_done(pccb); splx(actionflags); return; } pSRB->pSRBDCB = pDCB; pccb->ccb_h.ccb_trmsrb_ptr = pSRB; pccb->ccb_h.ccb_trmacb_ptr = pACB; pSRB->pccb = pccb; pSRB->ScsiCmdLen = pcsio->cdb_len; /* * move layer of CAM command block to layer of SCSI * Request Block for SCSI processor command doing */ if ((pccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((pccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(pcsio->cdb_io.cdb_ptr,pSRB->CmdBlock ,pcsio->cdb_len); } else { pccb->ccb_h.status = CAM_REQ_INVALID; pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB= pSRB; xpt_done(pccb); splx(actionflags); return; } } else bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len); error = bus_dmamap_load_ccb(pACB->buffer_dmat, pSRB->dmamap, pccb, trm_ExecuteSRB, pSRB, 0); if (error == EINPROGRESS) { xpt_freeze_simq(pACB->psim, 1); pccb->ccb_h.status |= CAM_RELEASE_SIMQ; } break; } - case XPT_GDEV_TYPE: - TRM_DPRINTF(" XPT_GDEV_TYPE \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - case XPT_GDEVLIST: - TRM_DPRINTF(" XPT_GDEVLIST \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; /* * Path routing inquiry * Path Inquiry CCB */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &pccb->cpi; TRM_DPRINTF(" XPT_PATH_INQ \n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 15 ; cpi->max_lun = pACB->max_lun; /* 7 or 0 */ cpi->initiator_id = pACB->AdaptSCSIID; cpi->bus_id = cam_sim_bus(psim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Tekram_TRM", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(psim); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); - } break; + } /* - * Release a frozen SIM queue - * Release SIM Queue + * XPT_ABORT = 0x10, Abort the specified CCB + * Abort XPT request CCB */ - case XPT_REL_SIMQ: - TRM_DPRINTF(" XPT_REL_SIMQ \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Set Asynchronous Callback Parameters - * Set Asynchronous Callback CCB - */ - case XPT_SASYNC_CB: - TRM_DPRINTF(" XPT_SASYNC_CB \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Set device type information - * Set Device Type CCB - */ - case XPT_SDEV_TYPE: - TRM_DPRINTF(" XPT_SDEV_TYPE \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Get EDT entries matching the given pattern - */ - case XPT_DEV_MATCH: - TRM_DPRINTF(" XPT_DEV_MATCH \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Turn on debugging for a bus, target or lun - */ - case XPT_DEBUG: - TRM_DPRINTF(" XPT_DEBUG \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - 
/* - * XPT_ABORT = 0x10, Abort the specified CCB - * Abort XPT request CCB - */ case XPT_ABORT: TRM_DPRINTF(" XPT_ABORT \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* - * Reset the specified SCSI bus - * Reset SCSI Bus CCB - */ - case XPT_RESET_BUS: { + * Reset the specified SCSI bus + * Reset SCSI Bus CCB + */ + case XPT_RESET_BUS: { int i; TRM_DPRINTF(" XPT_RESET_BUS \n"); - trm_reset(pACB); + trm_reset(pACB); pACB->ACBFlag=0; for (i=0; i<500; i++) DELAY(1000); pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); - } break; + } /* * Bus Device Reset the specified SCSI device * Reset SCSI Device CCB */ case XPT_RESET_DEV: /* * Don't (yet?) support vendor * specific commands. */ TRM_DPRINTF(" XPT_RESET_DEV \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Terminate the I/O process * Terminate I/O Process Request CCB */ case XPT_TERM_IO: TRM_DPRINTF(" XPT_TERM_IO \n"); pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; /* * Get/Set transfer rate/width/disconnection/tag queueing * settings * (GET) default/user transfer settings for the target */ case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &pccb->cts; int intflag; struct trm_transinfo *tinfo; PDCB pDCB; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; TRM_DPRINTF(" XPT_GET_TRAN_SETTINGS \n"); pDCB = &pACB->DCBarray[target_id][target_lun]; intflag = splcam(); /* * disable interrupt */ if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { /* current transfer settings */ if (pDCB->tinfo.disc_tag & TRM_CUR_DISCENB) spi->flags = CTS_SPI_FLAGS_DISC_ENB; else spi->flags = 0;/* no tag & disconnect */ if (pDCB->tinfo.disc_tag & TRM_CUR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; tinfo = &pDCB->tinfo.current; TRM_DPRINTF("CURRENT: cts->flags= %2x \n", cts->flags); } else { /* default(user) transfer settings */ if (pDCB->tinfo.disc_tag & TRM_USR_DISCENB) spi->flags = CTS_SPI_FLAGS_DISC_ENB; else spi->flags = 0; if (pDCB->tinfo.disc_tag & TRM_USR_TAGENB) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; tinfo = &pDCB->tinfo.user; TRM_DPRINTF("USER: cts->flags= %2x \n", cts->flags); } spi->sync_period = tinfo->period; spi->sync_offset = tinfo->offset; spi->bus_width = tinfo->width; TRM_DPRINTF("pDCB->SyncPeriod: %d \n", pDCB->SyncPeriod); TRM_DPRINTF("period: %d \n", tinfo->period); TRM_DPRINTF("offset: %d \n", tinfo->offset); TRM_DPRINTF("width: %d \n", tinfo->width); splx(intflag); spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); } break; /* * Get/Set transfer rate/width/disconnection/tag queueing * settings * (Set) transfer rate/width negotiation settings */ case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &pccb->cts; u_int update_type; int intflag; PDCB pDCB; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; TRM_DPRINTF(" XPT_SET_TRAN_SETTINGS \n"); update_type = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) update_type |= TRM_TRANS_GOAL; if (cts->type == CTS_TYPE_USER_SETTINGS) update_type |= TRM_TRANS_USER; intflag = splcam(); pDCB = &pACB->DCBarray[target_id][target_lun]; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { /*ccb disc enables */ if (update_type & 
TRM_TRANS_GOAL) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) pDCB->tinfo.disc_tag |= TRM_CUR_DISCENB; else pDCB->tinfo.disc_tag &= ~TRM_CUR_DISCENB; } if (update_type & TRM_TRANS_USER) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) pDCB->tinfo.disc_tag |= TRM_USR_DISCENB; else pDCB->tinfo.disc_tag &= ~TRM_USR_DISCENB; } } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { /* if ccb tag q active */ if (update_type & TRM_TRANS_GOAL) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) pDCB->tinfo.disc_tag |= TRM_CUR_TAGENB; else pDCB->tinfo.disc_tag &= ~TRM_CUR_TAGENB; } if (update_type & TRM_TRANS_USER) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) pDCB->tinfo.disc_tag |= TRM_USR_TAGENB; else pDCB->tinfo.disc_tag &= ~TRM_USR_TAGENB; } } /* Minimum sync period factor */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { /* if ccb sync active */ /* TRM-S1040 MinSyncPeriod = 4 clocks/byte */ if ((spi->sync_period != 0) && (spi->sync_period < 125)) spi->sync_period = 125; /* 1/(125*4) minsync 2 MByte/sec */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { if (spi->sync_offset == 0) spi->sync_period = 0; /* TRM-S1040 MaxSyncOffset = 15 bytes*/ if (spi->sync_offset > 15) spi->sync_offset = 15; } } if ((update_type & TRM_TRANS_USER) != 0) { pDCB->tinfo.user.period = spi->sync_period; pDCB->tinfo.user.offset = spi->sync_offset; pDCB->tinfo.user.width = spi->bus_width; } if ((update_type & TRM_TRANS_GOAL) != 0) { pDCB->tinfo.goal.period = spi->sync_period; pDCB->tinfo.goal.offset = spi->sync_offset; pDCB->tinfo.goal.width = spi->bus_width; } splx(intflag); pccb->ccb_h.status = CAM_REQ_CMP; xpt_done(pccb); break; } /* * Calculate the geometry parameters for a device give * the sector size and volume size. */ case XPT_CALC_GEOMETRY: TRM_DPRINTF(" XPT_CALC_GEOMETRY \n"); cam_calc_geometry(&pccb->ccg, /*extended*/1); - xpt_done(pccb); - break; - case XPT_ENG_INQ: - TRM_DPRINTF(" XPT_ENG_INQ \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * HBA execute engine request - * This structure must match SCSIIO size - */ - case XPT_ENG_EXEC: - TRM_DPRINTF(" XPT_ENG_EXEC \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * XPT_EN_LUN = 0x30, Enable LUN as a target - * Target mode structures. - */ - case XPT_EN_LUN: - /* - * Don't (yet?) support vendor - * specific commands. - */ - TRM_DPRINTF(" XPT_EN_LUN \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Execute target I/O request - */ - case XPT_TARGET_IO: - /* - * Don't (yet?) support vendor - * specific commands. - */ - TRM_DPRINTF(" XPT_TARGET_IO \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Accept Host Target Mode CDB - */ - case XPT_ACCEPT_TARGET_IO: - /* - * Don't (yet?) support vendor - * specific commands. - */ - TRM_DPRINTF(" XPT_ACCEPT_TARGET_IO \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Continue Host Target I/O Connection - */ - case XPT_CONT_TARGET_IO: - /* - * Don't (yet?) support vendor - * specific commands. 
- */ - TRM_DPRINTF(" XPT_CONT_TARGET_IO \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Notify Host Target driver of event - */ - case XPT_IMMED_NOTIFY: - TRM_DPRINTF(" XPT_IMMED_NOTIFY \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * Acknowledgement of event - */ - case XPT_NOTIFY_ACK: - TRM_DPRINTF(" XPT_NOTIFY_ACK \n"); - pccb->ccb_h.status = CAM_REQ_INVALID; - xpt_done(pccb); - break; - /* - * XPT_VUNIQUE = 0x80 - */ - case XPT_VUNIQUE: - pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; default: pccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(pccb); break; } splx(actionflags); } static void trm_poll(struct cam_sim *psim) { trm_Interrupt(cam_sim_softc(psim)); } static void trm_ResetDevParam(PACB pACB) { PDCB pDCB, pdcb; PNVRAMTYPE pEEpromBuf; u_int8_t PeriodIndex; pDCB = pACB->pLinkDCB; if (pDCB == NULL) return; pdcb = pDCB; do { pDCB->SyncMode &= ~(SYNC_NEGO_DONE+ WIDE_NEGO_DONE); pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; pEEpromBuf = &trm_eepromBuf[pACB->AdapterUnit]; pDCB->DevMode = pEEpromBuf->NvramTarget[pDCB->TargetID].NvmTarCfg0; pDCB->AdpMode = pEEpromBuf->NvramChannelCfg; PeriodIndex = pEEpromBuf->NvramTarget[pDCB->TargetID].NvmTarPeriod & 0x07; if (pACB->AdaptType == 1) /* is U2? */ pDCB->MaxNegoPeriod = dc395u2x_clock_period[PeriodIndex]; else pDCB->MaxNegoPeriod = dc395x_clock_period[PeriodIndex]; if ((pDCB->DevMode & NTC_DO_WIDE_NEGO) && (pACB->Config & HCC_WIDE_CARD)) pDCB->SyncMode |= WIDE_NEGO_ENABLE; pDCB = pDCB->pNextDCB; } while (pdcb != pDCB); } static void trm_RecoverSRB(PACB pACB) { PDCB pDCB, pdcb; PSRB psrb, psrb2; u_int16_t cnt, i; pDCB = pACB->pLinkDCB; if (pDCB == NULL) return; pdcb = pDCB; do { cnt = pdcb->GoingSRBCnt; psrb = pdcb->pGoingSRB; for (i = 0; i < cnt; i++) { psrb2 = psrb; psrb = psrb->pNextSRB; if (pdcb->pWaitingSRB) { psrb2->pNextSRB = pdcb->pWaitingSRB; pdcb->pWaitingSRB = psrb2; } else { pdcb->pWaitingSRB = psrb2; pdcb->pWaitingLastSRB = psrb2; psrb2->pNextSRB = NULL; } } pdcb->GoingSRBCnt = 0; pdcb->pGoingSRB = NULL; pdcb = pdcb->pNextDCB; } while (pdcb != pDCB); } static void trm_reset(PACB pACB) { int intflag; u_int16_t i; TRM_DPRINTF("trm: RESET"); intflag = splcam(); trm_reg_write8(0x00, TRMREG_DMA_INTEN); trm_reg_write8(0x00, TRMREG_SCSI_INTEN); trm_ResetSCSIBus(pACB); for (i = 0; i < 500; i++) DELAY(1000); trm_reg_write8(0x7F, TRMREG_SCSI_INTEN); /* Enable DMA interrupt */ trm_reg_write8(EN_SCSIINTR, TRMREG_DMA_INTEN); /* Clear DMA FIFO */ trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL); /* Clear SCSI FIFO */ trm_reg_write16(DO_CLRFIFO,TRMREG_SCSI_CONTROL); trm_ResetDevParam(pACB); trm_DoingSRB_Done(pACB); pACB->pActiveDCB = NULL; pACB->ACBFlag = 0;/* RESET_DETECT, RESET_DONE ,RESET_DEV */ trm_DoWaitingSRB(pACB); /* Tell the XPT layer that a bus reset occurred */ if (pACB->ppath != NULL) xpt_async(AC_BUS_RESET, pACB->ppath, NULL); splx(intflag); return; } static u_int16_t trm_StartSCSI(PACB pACB, PDCB pDCB, PSRB pSRB) { u_int16_t return_code; u_int8_t scsicommand, i,command,identify_message; u_int8_t * ptr; union ccb *pccb; struct ccb_scsiio *pcsio; pccb = pSRB->pccb; pcsio = &pccb->csio; trm_reg_write8(pACB->AdaptSCSIID, TRMREG_SCSI_HOSTID); trm_reg_write8(pDCB->TargetID, TRMREG_SCSI_TARGETID); trm_reg_write8(pDCB->SyncPeriod, TRMREG_SCSI_SYNC); trm_reg_write8(pDCB->SyncOffset, TRMREG_SCSI_OFFSET); pSRB->ScsiPhase = PH_BUS_FREE;/* initial phase */ /* Flush FIFO */ trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL); identify_message = pDCB->IdentifyMsg; if 
((pSRB->CmdBlock[0] == INQUIRY) ||
	    (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
	    (pSRB->SRBFlag & AUTO_REQSENSE)) {
		if (((pDCB->SyncMode & WIDE_NEGO_ENABLE) &&
		     !(pDCB->SyncMode & WIDE_NEGO_DONE)) ||
		    ((pDCB->SyncMode & SYNC_NEGO_ENABLE) &&
		     !(pDCB->SyncMode & SYNC_NEGO_DONE))) {
			if (!(pDCB->IdentifyMsg & 7) ||
			    (pSRB->CmdBlock[0] != INQUIRY)) {
				scsicommand = SCMD_SEL_ATNSTOP;
				pSRB->SRBState = SRB_MSGOUT;
				goto polling;
			}
		}
		/*
		 * Send identify message
		 */
		trm_reg_write8((identify_message & 0xBF), TRMREG_SCSI_FIFO);
		scsicommand = SCMD_SEL_ATN;
		pSRB->SRBState = SRB_START_;
	} else {
		/* not INQUIRY, REQUEST SENSE, or auto request sense */
		/*
		 * Send identify message
		 */
		trm_reg_write8(identify_message, TRMREG_SCSI_FIFO);
		scsicommand = SCMD_SEL_ATN;
		pSRB->SRBState = SRB_START_;
		if (pDCB->SyncMode & EN_TAG_QUEUING) {
			/* Send Tag message */
			trm_reg_write8(MSG_SIMPLE_QTAG, TRMREG_SCSI_FIFO);
			trm_reg_write8(pSRB->TagNumber, TRMREG_SCSI_FIFO);
			scsicommand = SCMD_SEL_ATN3;
		}
	}
polling:
	/*
	 * Send the CDB (command block).
	 */
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		trm_reg_write8(REQUEST_SENSE, TRMREG_SCSI_FIFO);
		trm_reg_write8((pDCB->IdentifyMsg << 5), TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
		trm_reg_write8(pcsio->sense_len, TRMREG_SCSI_FIFO);
		trm_reg_write8(0, TRMREG_SCSI_FIFO);
	} else {
		ptr = (u_int8_t *) pSRB->CmdBlock;
		for (i = 0; i < pSRB->ScsiCmdLen; i++) {
			command = *ptr++;
			trm_reg_write8(command, TRMREG_SCSI_FIFO);
		}
	}
	if (trm_reg_read16(TRMREG_SCSI_STATUS) & SCSIINTERRUPT) {
		/*
		 * If trm_StartSCSI returns 1:
		 * an interrupt is still pending, i.e. the SCSI processor
		 * is already occupied by another SRB, so this one must be
		 * queued again.
		 */
		pSRB->SRBState = SRB_READY;
		return_code = 1;
	} else {
		/*
		 * If trm_StartSCSI returns 0:
		 * interrupts are enabled and the SCSI processor is
		 * unoccupied, so the command starts now.
		 */
		pSRB->ScsiPhase = SCSI_NOP1; /* SCSI bus free phase */
		pACB->pActiveDCB = pDCB;
		pDCB->pActiveSRB = pSRB;
		return_code = 0;
		trm_reg_write16(DO_DATALATCH | DO_HWRESELECT,
		    TRMREG_SCSI_CONTROL); /* important for ATN stop */
		/*
		 * SCSI command
		 */
		trm_reg_write8(scsicommand, TRMREG_SCSI_COMMAND);
	}
	return (return_code);
}

static void
trm_Interrupt(vpACB)
	void *vpACB;
{
	PACB pACB;
	PDCB pDCB;
	PSRB pSRB;
	u_int16_t phase;
	void (*stateV)(PACB, PSRB, u_int16_t *);
	u_int16_t scsi_status = 0;
	u_int8_t scsi_intstatus;

	pACB = vpACB;
	scsi_status = trm_reg_read16(TRMREG_SCSI_STATUS);
	if (!(scsi_status & SCSIINTERRUPT)) {
		TRM_DPRINTF("trm_Interrupt: TRMREG_SCSI_STATUS scsi_status = NULL ,return......");
		return;
	}
	TRM_DPRINTF("scsi_status=%2x,", scsi_status);
	scsi_intstatus = trm_reg_read8(TRMREG_SCSI_INTSTATUS);
	TRM_DPRINTF("scsi_intstatus=%2x,", scsi_intstatus);
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		trm_Disconnect(pACB);
		return;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		trm_Reselect(pACB);
		return;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		trm_ScsiRstDetect(pACB);
		return;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		pDCB = pACB->pActiveDCB;
		KASSERT(pDCB != NULL, ("no active DCB"));
		pSRB = pDCB->pActiveSRB;
		if (pDCB->DCBFlag & ABORT_DEV_)
			trm_EnableMsgOutAbort1(pACB, pSRB);
		phase = (u_int16_t) pSRB->ScsiPhase; /* phase on entry */
		stateV = (void *) trm_SCSI_phase0[phase];
		stateV(pACB, pSRB, &scsi_status);
		pSRB->ScsiPhase = scsi_status & PHASEMASK; /* phase: 0..7 */
		phase = (u_int16_t) scsi_status & PHASEMASK;
		stateV = (void *) trm_SCSI_phase1[phase];
		stateV(pACB, pSRB, &scsi_status);
	}
}
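/*
 * Example: the interrupt handler above dispatches through an 8-entry
 * function-pointer table indexed by the 3-bit SCSI bus phase.  The
 * driver's tables are declared void * and need casts; a typed sketch
 * of the same idiom would be:
 *
 *	typedef void trm_phase_t(PACB, PSRB, u_int16_t *);
 *	static trm_phase_t * const phase1_tbl[8] = {
 *		trm_DataOutPhase1, trm_DataInPhase1, trm_CommandPhase1,
 *		trm_StatusPhase1,  trm_Nop0,         trm_Nop1,
 *		trm_MsgOutPhase1,  trm_MsgInPhase1,
 *	};
 *
 *	phase1_tbl[scsi_status & PHASEMASK](pACB, pSRB, &scsi_status);
 */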
static void
trm_MsgOutPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE; /* initial phase */
}

static void
trm_MsgOutPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
	u_int8_t bval;
	u_int16_t i, cnt;
	u_int8_t *ptr;
	PDCB pDCB;

	trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
	pDCB = pACB->pActiveDCB;
	if (!(pSRB->SRBState & SRB_MSGOUT)) {
		cnt = pSRB->MsgCnt;
		if (cnt) {
			ptr = (u_int8_t *) pSRB->MsgOutBuf;
			for (i = 0; i < cnt; i++) {
				trm_reg_write8(*ptr, TRMREG_SCSI_FIFO);
				ptr++;
			}
			pSRB->MsgCnt = 0;
			if ((pDCB->DCBFlag & ABORT_DEV_) &&
			    (pSRB->MsgOutBuf[0] == MSG_ABORT)) {
				pSRB->SRBState = SRB_ABORT_SENT;
			}
		} else {
			bval = MSG_ABORT;
			if ((pSRB->CmdBlock[0] == INQUIRY) ||
			    (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
			    (pSRB->SRBFlag & AUTO_REQSENSE)) {
				if (pDCB->SyncMode & SYNC_NEGO_ENABLE) {
					goto mop1;
				}
			}
			trm_reg_write8(bval, TRMREG_SCSI_FIFO);
		}
	} else {
mop1:
		/* message out phase */
		if (!(pSRB->SRBState & SRB_DO_WIDE_NEGO) &&
		    (pDCB->SyncMode & WIDE_NEGO_ENABLE)) {
			/*
			 * WIDE DATA TRANSFER REQUEST code (03h)
			 */
			pDCB->SyncMode &= ~(SYNC_NEGO_DONE | EN_ATN_STOP);
			trm_reg_write8((pDCB->IdentifyMsg & 0xBF),
			    TRMREG_SCSI_FIFO);
			trm_reg_write8(MSG_EXTENDED, TRMREG_SCSI_FIFO); /* (01h) */
			trm_reg_write8(2, TRMREG_SCSI_FIFO); /* Message length (02h) */
			trm_reg_write8(3, TRMREG_SCSI_FIFO); /* wide data xfer (03h) */
			trm_reg_write8(1, TRMREG_SCSI_FIFO); /* width: 0(8bit),1(16bit),2(32bit) */
			pSRB->SRBState |= SRB_DO_WIDE_NEGO;
		} else if (!(pSRB->SRBState & SRB_DO_SYNC_NEGO) &&
		    (pDCB->SyncMode & SYNC_NEGO_ENABLE)) {
			/*
			 * SYNCHRONOUS DATA TRANSFER REQUEST code (01h)
			 */
			if (!(pDCB->SyncMode & WIDE_NEGO_DONE))
				trm_reg_write8((pDCB->IdentifyMsg & 0xBF),
				    TRMREG_SCSI_FIFO);
			trm_reg_write8(MSG_EXTENDED, TRMREG_SCSI_FIFO); /* (01h) */
			trm_reg_write8(3, TRMREG_SCSI_FIFO); /* Message length (03h) */
			trm_reg_write8(1, TRMREG_SCSI_FIFO); /* sync xfer request (01h) */
			trm_reg_write8(pDCB->MaxNegoPeriod, TRMREG_SCSI_FIFO); /* Transfer period factor */
			trm_reg_write8((pACB->AdaptType == 1) ? 31 : 15,
			    TRMREG_SCSI_FIFO); /* REQ/ACK offset */
			pSRB->SRBState |= SRB_DO_SYNC_NEGO;
		}
	}
	trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* important for ATN stop */
	/*
	 * SCSI command
	 */
	trm_reg_write8(SCMD_FIFO_OUT, TRMREG_SCSI_COMMAND);
}
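/*
 * Example: the negotiation bytes queued by trm_MsgOutPhase1() above
 * follow the SCSI extended message format (0x01, length, code,
 * arguments).  As this driver sends them, the two messages are:
 *
 *	u_int8_t wdtr[4] = { 0x01, 0x02, 0x03, 0x01 };
 *				(WDTR, width exponent 1 = 16 bit)
 *	u_int8_t sdtr[5] = { 0x01, 0x03, 0x01,
 *	    pDCB->MaxNegoPeriod,		(period factor, 4 ns units)
 *	    (pACB->AdaptType == 1) ? 31 : 15 };	(REQ/ACK offset)
 */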
                31 : 15, TRMREG_SCSI_FIFO);     /* REQ/ACK offset */
            pSRB->SRBState |= SRB_DO_SYNC_NEGO;
        }
    }
    trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
    /* it's important for atn stop */
    /*
     * SCSI command
     */
    trm_reg_write8(SCMD_FIFO_OUT, TRMREG_SCSI_COMMAND);
}

static void
trm_CommandPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{

}

static void
trm_CommandPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    PDCB pDCB;
    u_int8_t *ptr;
    u_int16_t i, cnt;
    union ccb *pccb;
    struct ccb_scsiio *pcsio;

    pccb = pSRB->pccb;
    pcsio = &pccb->csio;

    trm_reg_write16(DO_CLRATN | DO_CLRFIFO, TRMREG_SCSI_CONTROL);
    if (!(pSRB->SRBFlag & AUTO_REQSENSE)) {
        cnt = (u_int16_t) pSRB->ScsiCmdLen;
        ptr = (u_int8_t *) pSRB->CmdBlock;
        for (i = 0; i < cnt; i++) {
            trm_reg_write8(*ptr, TRMREG_SCSI_FIFO);
            ptr++;
        }
    } else {
        trm_reg_write8(REQUEST_SENSE, TRMREG_SCSI_FIFO);
        pDCB = pACB->pActiveDCB;
        /* target id */
        trm_reg_write8((pDCB->IdentifyMsg << 5), TRMREG_SCSI_FIFO);
        trm_reg_write8(0, TRMREG_SCSI_FIFO);
        trm_reg_write8(0, TRMREG_SCSI_FIFO);
        /* sizeof(struct scsi_sense_data) */
        trm_reg_write8(pcsio->sense_len, TRMREG_SCSI_FIFO);
        trm_reg_write8(0, TRMREG_SCSI_FIFO);
    }
    pSRB->SRBState = SRB_COMMAND;
    trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
    /* it's important for atn stop */
    /*
     * SCSI command
     */
    trm_reg_write8(SCMD_FIFO_OUT, TRMREG_SCSI_COMMAND);
}

static void
trm_DataOutPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    PDCB pDCB;
    u_int8_t TempDMAstatus, SGIndexTemp;
    u_int16_t scsi_status;
    PSEG pseg;
    u_long TempSRBXferredLength, dLeftCounter = 0;

    pDCB = pSRB->pSRBDCB;
    scsi_status = *pscsi_status;

    if (!(pSRB->SRBState & SRB_XFERPAD)) {
        if (scsi_status & PARITYERROR)
            pSRB->SRBStatus |= PARITY_ERROR;
        if (!(scsi_status & SCSIXFERDONE)) {
            /*
             * data was still moving from the DMA FIFO to the SCSI
             * FIFO, so account for what is left in the SCSI FIFO
             */
            dLeftCounter = (u_long)
                (trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x3F);
            if (pDCB->SyncPeriod & WIDE_SYNC) {
                /*
                 * in wide SCSI mode the SCSI FIFO count is in
                 * words, so double it
                 */
                dLeftCounter <<= 1;
            }
        }
        /*
         * calculate the residue that has not yet been transferred:
         * SCSI transfer counter + data left in the SCSI FIFO
         *
         * TRM_SCSI_COUNTER (24 bits)
         *      decrements by one for every SCSI byte transferred
         * TRM_SCSI_FIFOCNT (5 bits)
         *      the SCSI FIFO offset counter
         */
        dLeftCounter += trm_reg_read32(TRMREG_SCSI_COUNTER);
        if (dLeftCounter == 1) {
            dLeftCounter = 0;
            trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL);
        }
        if ((dLeftCounter == 0) ||
            (scsi_status & SCSIXFERCNT_2_ZERO)) {
            TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
            while (!(TempDMAstatus & DMAXFERCOMP)) {
                TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
            }
            pSRB->SRBTotalXferLength = 0;
        } else {
            /*
             * Update the SG list: the transfer is not yet complete
             * and was disconnected with data still in the SCSI FIFO
             * or a non-zero transfer counter
             */
            if (pSRB->SRBTotalXferLength != dLeftCounter) {
                /* length transferred so far */
                TempSRBXferredLength =
                    pSRB->SRBTotalXferLength - dLeftCounter;
                /* length still to be transferred */
                pSRB->SRBTotalXferLength = dLeftCounter;
                /* walk the SG list from the last SRBSGIndex */
                pseg = pSRB->pSRBSGL + pSRB->SRBSGIndex;
                for (SGIndexTemp = pSRB->SRBSGIndex;
                     SGIndexTemp < pSRB->SRBSGCount;
                     SGIndexTemp++) {
                    /*
                     * find the SG entry in which the transfer
                     * was disconnected
                     */
                    if (TempSRBXferredLength >= pseg->length)
                        TempSRBXferredLength -= pseg->length;
                    else {
                        /*
                         * update the partially consumed SG
                         * entry
                         */
                        /* residue data length */
                        pseg->length -= TempSRBXferredLength;
                        /* residue data pointer */
                        pseg->address += TempSRBXferredLength;
                        pSRB->SRBSGIndex = SGIndexTemp;
                        break;
                    }
                    pseg++;
                }
            }
        }
    }
    trm_reg_write8(STOPDMAXFER, TRMREG_DMA_CONTROL);
}

static void
trm_DataOutPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    u_int16_t ioDir;

    /*
     * prepare the transfer before entering the data out phase
     */
    ioDir = XFERDATAOUT;
    trm_DataIO_transfer(pACB, pSRB, ioDir);
}

static void
trm_DataInPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    u_int8_t TempDMAstatus, SGIndexTemp;
    u_int16_t scsi_status;
    PSEG pseg;
    u_long TempSRBXferredLength, dLeftCounter = 0;

    scsi_status = *pscsi_status;
    if (!(pSRB->SRBState & SRB_XFERPAD)) {
        if (scsi_status & PARITYERROR)
            pSRB->SRBStatus |= PARITY_ERROR;
        dLeftCounter += trm_reg_read32(TRMREG_SCSI_COUNTER);
        if ((dLeftCounter == 0) ||
            (scsi_status & SCSIXFERCNT_2_ZERO)) {
            TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
            while (!(TempDMAstatus & DMAXFERCOMP))
                TempDMAstatus = trm_reg_read8(TRMREG_DMA_STATUS);
            pSRB->SRBTotalXferLength = 0;
        } else {
            /*
             * the transfer is not yet complete but was
             * disconnected by the upper layer: data remains in
             * the SCSI FIFO or the transfer counter is non-zero
             */
            if (pSRB->SRBTotalXferLength != dLeftCounter) {
                /* length transferred so far */
                TempSRBXferredLength =
                    pSRB->SRBTotalXferLength - dLeftCounter;
                /* length still to be transferred */
                pSRB->SRBTotalXferLength = dLeftCounter;
                /* walk the SG list from the last SRBSGIndex */
                pseg = pSRB->pSRBSGL + pSRB->SRBSGIndex;
                for (SGIndexTemp = pSRB->SRBSGIndex;
                     SGIndexTemp < pSRB->SRBSGCount;
                     SGIndexTemp++) {
                    /*
                     * find the SG entry in which the transfer
                     * was disconnected
                     */
                    if (TempSRBXferredLength >= pseg->length)
                        TempSRBXferredLength -= pseg->length;
                    else {
                        /* residue data length */
                        pseg->length -= TempSRBXferredLength;
                        /* residue data pointer */
                        pseg->address += TempSRBXferredLength;
                        pSRB->SRBSGIndex = SGIndexTemp;
                        break;
                    }
                    pseg++;
                }
            }
        }
    }
}

static void
trm_DataInPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    u_int16_t ioDir;

    /*
     * prepare the transfer before entering the data in phase
     */
    ioDir = XFERDATAIN;
    trm_DataIO_transfer(pACB, pSRB, ioDir);
}

static void
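/*
 * Common data-phase setup for both directions: point the DMA engine at
 * the remaining scatter/gather entries (8 bytes each, hence the << 3)
 * and start either a real DMA transfer or, when no bytes remain, a
 * one-byte (two bytes for wide) FIFO pad transfer.
 */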
trm_DataIO_transfer(PACB pACB, PSRB pSRB, u_int16_t ioDir)
{
    u_int8_t bval;
    PDCB pDCB;

    pDCB = pSRB->pSRBDCB;
    if (pSRB->SRBSGIndex < pSRB->SRBSGCount) {
        if (pSRB->SRBTotalXferLength != 0) {
            /*
             * load the physical address of the scatter/gather
             * list entry to be transferred
             */
            TRM_DPRINTF(" SG->address=%8x \n", pSRB->pSRBSGL->address);
            TRM_DPRINTF(" SG->length=%8x \n", pSRB->pSRBSGL->length);
            TRM_DPRINTF(" pDCB->SyncPeriod=%x \n", pDCB->SyncPeriod);
            TRM_DPRINTF(" pSRB->pSRBSGL=%8x \n", (unsigned int)pSRB->pSRBSGL);
            TRM_DPRINTF(" pSRB->SRBSGPhyAddr=%8x \n", pSRB->SRBSGPhyAddr);
            TRM_DPRINTF(" pSRB->SRBSGIndex=%d \n", pSRB->SRBSGIndex);
            TRM_DPRINTF(" pSRB->SRBSGCount=%d \n", pSRB->SRBSGCount);
            TRM_DPRINTF(" pSRB->SRBTotalXferLength=%d \n", pSRB->SRBTotalXferLength);
            pSRB->SRBState = SRB_DATA_XFER;
            trm_reg_write32(0, TRMREG_DMA_XHIGHADDR);
            trm_reg_write32((pSRB->SRBSGPhyAddr +
                ((u_long)pSRB->SRBSGIndex << 3)),
                TRMREG_DMA_XLOWADDR);
            /*
             * load the byte count of the remaining scatter/gather
             * list entries
             */
            trm_reg_write32(
                ((u_long)(pSRB->SRBSGCount - pSRB->SRBSGIndex) << 3),
                TRMREG_DMA_XCNT);
            /*
             * load the total transfer length (24 bits),
             * max value 16 Mbyte
             */
            trm_reg_write32(pSRB->SRBTotalXferLength,
                TRMREG_SCSI_COUNTER);
            /* Start DMA transfer */
            trm_reg_write16(ioDir, TRMREG_DMA_COMMAND);
            /* Start SCSI transfer */
            trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
            /* it's important for atn stop */
            /*
             * SCSI command
             */
            bval = (ioDir == XFERDATAOUT) ?
                SCMD_DMA_OUT : SCMD_DMA_IN;
            trm_reg_write8(bval, TRMREG_SCSI_COMMAND);
        } else {
            /* xfer pad */
            if (pSRB->SRBSGCount) {
                pSRB->AdaptStatus = H_OVER_UNDER_RUN;
                pSRB->SRBStatus |= OVER_RUN;
            }
            if (pDCB->SyncPeriod & WIDE_SYNC)
                trm_reg_write32(2, TRMREG_SCSI_COUNTER);
            else
                trm_reg_write32(1, TRMREG_SCSI_COUNTER);
            if (ioDir == XFERDATAOUT)
                trm_reg_write16(0, TRMREG_SCSI_FIFO);
            else
                trm_reg_read16(TRMREG_SCSI_FIFO);
            pSRB->SRBState |= SRB_XFERPAD;
            trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL);
            /* it's important for atn stop */
            /*
             * SCSI command
             */
            bval = (ioDir == XFERDATAOUT) ?
                SCMD_FIFO_OUT : SCMD_FIFO_IN;
            trm_reg_write8(bval, TRMREG_SCSI_COMMAND);
        }
    }
}

static void
trm_StatusPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status)
{
    pSRB->TargetStatus = trm_reg_read8(TRMREG_SCSI_FIFO);
    pSRB->SRBState = SRB_COMPLETED;
    *pscsi_status = PH_BUS_FREE;        /* ..
initial phase*/ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI cammand */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); } static void trm_StatusPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status) { if (trm_reg_read16(TRMREG_DMA_COMMAND) & 0x0001) { if (!(trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x40)) trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL); if (!(trm_reg_read16(TRMREG_DMA_FIFOCNT) & 0x8000)) trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL); } else { if (!(trm_reg_read16(TRMREG_DMA_FIFOCNT) & 0x8000)) trm_reg_write8(CLRXFIFO, TRMREG_DMA_CONTROL); if (!(trm_reg_read8(TRMREG_SCSI_FIFOCNT) & 0x40)) trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL); } pSRB->SRBState = SRB_STATUS; trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI cammand */ trm_reg_write8(SCMD_COMP, TRMREG_SCSI_COMMAND); } /* *scsiiom * trm_MsgInPhase0: one of trm_SCSI_phase0[] vectors * stateV = (void *) trm_SCSI_phase0[phase] * if phase =7 * extended message codes: * * code description * * 02h Reserved * 00h MODIFY DATA POINTER * 01h SYNCHRONOUS DATA TRANSFER REQUEST * 03h WIDE DATA TRANSFER REQUEST * 04h - 7Fh Reserved * 80h - FFh Vendor specific * */ static void trm_MsgInPhase0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status) { u_int8_t message_in_code,bIndex,message_in_tag_id; PDCB pDCB; PSRB pSRBTemp; pDCB = pACB->pActiveDCB; message_in_code = trm_reg_read8(TRMREG_SCSI_FIFO); if (!(pSRB->SRBState & SRB_EXTEND_MSGIN)) { if (message_in_code == MSG_DISCONNECT) { pSRB->SRBState = SRB_DISCONNECT; *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if (message_in_code == MSG_SAVE_PTR) { *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if ((message_in_code == MSG_EXTENDED) || ((message_in_code >= MSG_SIMPLE_QTAG) && (message_in_code <= MSG_ORDER_QTAG))) { pSRB->SRBState |= SRB_EXTEND_MSGIN; pSRB->MsgInBuf[0] = message_in_code; /* extended message (01h) */ pSRB->MsgCnt = 1; pSRB->pMsgPtr = &pSRB->MsgInBuf[1]; /* extended message length (n) */ *pscsi_status = PH_BUS_FREE; /* .. 
initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if (message_in_code == MSG_REJECT_) { /* Reject message */ if (pDCB->SyncMode & WIDE_NEGO_ENABLE) { /* do wide nego reject */ pDCB = pSRB->pSRBDCB; pDCB->SyncMode |= WIDE_NEGO_DONE; pDCB->SyncMode &= ~(SYNC_NEGO_DONE | EN_ATN_STOP | WIDE_NEGO_ENABLE); pSRB->SRBState &= ~(SRB_DO_WIDE_NEGO+SRB_MSGIN); if ((pDCB->SyncMode & SYNC_NEGO_ENABLE) && !(pDCB->SyncMode & SYNC_NEGO_DONE)) { /* Set ATN, in case ATN was clear */ pSRB->SRBState |= SRB_MSGOUT; trm_reg_write16( DO_SETATN, TRMREG_SCSI_CONTROL); } else { /* Clear ATN */ trm_reg_write16( DO_CLRATN, TRMREG_SCSI_CONTROL); } } else if (pDCB->SyncMode & SYNC_NEGO_ENABLE) { /* do sync nego reject */ trm_reg_write16(DO_CLRATN,TRMREG_SCSI_CONTROL); if (pSRB->SRBState & SRB_DO_SYNC_NEGO) { pDCB = pSRB->pSRBDCB; pDCB->SyncMode &= ~(SYNC_NEGO_ENABLE+SYNC_NEGO_DONE); pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; /* * * program SCSI control register * */ trm_reg_write8(pDCB->SyncPeriod, TRMREG_SCSI_SYNC); trm_reg_write8(pDCB->SyncOffset, TRMREG_SCSI_OFFSET); trm_SetXferRate(pACB,pSRB,pDCB); } } *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if (message_in_code == MSG_IGNOREWIDE) { trm_reg_write32(1, TRMREG_SCSI_COUNTER); trm_reg_read8(TRMREG_SCSI_FIFO); *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else { /* Restore data pointer message */ /* Save data pointer message */ /* Completion message */ /* NOP message */ *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } } else { /* * Parsing incoming extented messages */ *pSRB->pMsgPtr = message_in_code; pSRB->MsgCnt++; pSRB->pMsgPtr++; TRM_DPRINTF("pSRB->MsgInBuf[0]=%2x \n ",pSRB->MsgInBuf[0]); TRM_DPRINTF("pSRB->MsgInBuf[1]=%2x \n ",pSRB->MsgInBuf[1]); TRM_DPRINTF("pSRB->MsgInBuf[2]=%2x \n ",pSRB->MsgInBuf[2]); TRM_DPRINTF("pSRB->MsgInBuf[3]=%2x \n ",pSRB->MsgInBuf[3]); TRM_DPRINTF("pSRB->MsgInBuf[4]=%2x \n ",pSRB->MsgInBuf[4]); if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_QTAG) && (pSRB->MsgInBuf[0] <= MSG_ORDER_QTAG)) { /* * is QUEUE tag message : * * byte 0: * HEAD QUEUE TAG (20h) * ORDERED QUEUE TAG (21h) * SIMPLE QUEUE TAG (22h) * byte 1: * Queue tag (00h - FFh) */ if (pSRB->MsgCnt == 2) { pSRB->SRBState = 0; message_in_tag_id = pSRB->MsgInBuf[1]; pSRB = pDCB->pGoingSRB; pSRBTemp = pDCB->pGoingLastSRB; if (pSRB) { for (;;) { if (pSRB->TagNumber != message_in_tag_id) { if (pSRB == pSRBTemp) { goto mingx0; } pSRB = pSRB->pNextSRB; } else break; } if (pDCB->DCBFlag & ABORT_DEV_) { pSRB->SRBState = SRB_ABORT_SENT; trm_EnableMsgOutAbort1( pACB, pSRB); } if (!(pSRB->SRBState & SRB_DISCONNECT)) { TRM_DPRINTF("SRB not yet disconnect........ 
\n "); goto mingx0; } pDCB->pActiveSRB = pSRB; pSRB->SRBState = SRB_DATA_XFER; } else { mingx0: pSRB = &pACB->TmpSRB; pSRB->SRBState = SRB_UNEXPECT_RESEL; pDCB->pActiveSRB = pSRB; pSRB->MsgOutBuf[0] = MSG_ABORT_TAG; trm_EnableMsgOutAbort2( pACB, pSRB); } } *pscsi_status = PH_BUS_FREE; /* .. initial phase */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) && (pSRB->MsgInBuf[2] == 3) && (pSRB->MsgCnt == 4)) { /* * is Wide data xfer Extended message : * ====================================== * WIDE DATA TRANSFER REQUEST * ====================================== * byte 0 : Extended message (01h) * byte 1 : Extended message length (02h) * byte 2 : WIDE DATA TRANSFER code (03h) * byte 3 : Transfer width exponent */ pDCB = pSRB->pSRBDCB; pSRB->SRBState &= ~(SRB_EXTEND_MSGIN+SRB_DO_WIDE_NEGO); if ((pSRB->MsgInBuf[1] != 2)) { /* Length is wrong, reject it */ pDCB->SyncMode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); pSRB->MsgCnt = 1; pSRB->MsgInBuf[0] = MSG_REJECT_; trm_reg_write16(DO_SETATN, TRMREG_SCSI_CONTROL); *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } if (pDCB->SyncMode & WIDE_NEGO_ENABLE) { /* Do wide negoniation */ if (pSRB->MsgInBuf[3] > 2) { /* > 32 bit */ /* reject_msg: */ pDCB->SyncMode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); pSRB->MsgCnt = 1; pSRB->MsgInBuf[0] = MSG_REJECT_; trm_reg_write16(DO_SETATN, TRMREG_SCSI_CONTROL); *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } if (pSRB->MsgInBuf[3] == 2) { pSRB->MsgInBuf[3] = 1; /* do 16 bits */ } else { if (!(pDCB->SyncMode & WIDE_NEGO_DONE)) { pSRB->SRBState &= ~(SRB_DO_WIDE_NEGO+SRB_MSGIN); pDCB->SyncMode |= WIDE_NEGO_DONE; pDCB->SyncMode &= ~(SYNC_NEGO_DONE | EN_ATN_STOP | WIDE_NEGO_ENABLE); if (pSRB->MsgInBuf[3] != 0) { /* is Wide data xfer */ pDCB->SyncPeriod |= WIDE_SYNC; pDCB->tinfo.current.width = MSG_EXT_WDTR_BUS_16_BIT; pDCB->tinfo.goal.width = MSG_EXT_WDTR_BUS_16_BIT; } } } } else pSRB->MsgInBuf[3] = 0; pSRB->SRBState |= SRB_MSGOUT; trm_reg_write16(DO_SETATN,TRMREG_SCSI_CONTROL); *pscsi_status = PH_BUS_FREE; /* .. initial phase */ /* it's important for atn stop */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* * SCSI command */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED) && (pSRB->MsgInBuf[2] == 1) && (pSRB->MsgCnt == 5)) { /* * is 8bit transfer Extended message : * ================================= * SYNCHRONOUS DATA TRANSFER REQUEST * ================================= * byte 0 : Extended message (01h) * byte 1 : Extended message length (03) * byte 2 : SYNCHRONOUS DATA TRANSFER code (01h) * byte 3 : Transfer period factor * byte 4 : REQ/ACK offset */ pSRB->SRBState &= ~(SRB_EXTEND_MSGIN+SRB_DO_SYNC_NEGO); if ((pSRB->MsgInBuf[1] != 3) || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */ pSRB->MsgCnt = 1; pSRB->MsgInBuf[0] = MSG_REJECT_; trm_reg_write16(DO_SETATN, TRMREG_SCSI_CONTROL); *pscsi_status = PH_BUS_FREE; /* .. 
initial phase */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI cammand */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else if (!(pSRB->MsgInBuf[3]) || !(pSRB->MsgInBuf[4])) { /* set async */ pDCB = pSRB->pSRBDCB; /* disable sync & sync nego */ pDCB->SyncMode &= ~(SYNC_NEGO_ENABLE+SYNC_NEGO_DONE); pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; pDCB->tinfo.goal.period = 0; pDCB->tinfo.goal.offset = 0; pDCB->tinfo.current.period = 0; pDCB->tinfo.current.offset = 0; pDCB->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; /* * * program SCSI control register * */ trm_reg_write8(pDCB->SyncPeriod,TRMREG_SCSI_SYNC); trm_reg_write8(pDCB->SyncOffset,TRMREG_SCSI_OFFSET); trm_SetXferRate(pACB,pSRB,pDCB); *pscsi_status = PH_BUS_FREE; /* .. initial phase */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI cammand */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); return; } else { /* set sync */ pDCB = pSRB->pSRBDCB; pDCB->SyncMode |= SYNC_NEGO_ENABLE+SYNC_NEGO_DONE; pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3]; /* Transfer period factor */ pDCB->SyncOffset = pSRB->MsgInBuf[4]; /* REQ/ACK offset */ if (pACB->AdaptType == 1) { for(bIndex = 0; bIndex < 7; bIndex++) { if (pSRB->MsgInBuf[3] <= dc395u2x_clock_period[bIndex]) { pDCB->tinfo.goal.period = dc395u2x_tinfo_period[bIndex]; pDCB->tinfo.current.period = dc395u2x_tinfo_period[bIndex]; pDCB->tinfo.goal.offset = pDCB->SyncOffset; pDCB->tinfo.current.offset = pDCB->SyncOffset; pDCB->SyncPeriod |= (bIndex|LVDS_SYNC); break; } } } else { for(bIndex = 0; bIndex < 7; bIndex++) { if (pSRB->MsgInBuf[3] <= dc395x_clock_period[bIndex]) { pDCB->tinfo.goal.period = dc395x_tinfo_period[bIndex]; pDCB->tinfo.current.period = dc395x_tinfo_period[bIndex]; pDCB->tinfo.goal.offset = pDCB->SyncOffset; pDCB->tinfo.current.offset = pDCB->SyncOffset; pDCB->SyncPeriod |= (bIndex|ALT_SYNC); break; } } } /* * * program SCSI control register * */ trm_reg_write8(pDCB->SyncPeriod, TRMREG_SCSI_SYNC); trm_reg_write8(pDCB->SyncOffset, TRMREG_SCSI_OFFSET); trm_SetXferRate(pACB,pSRB,pDCB); *pscsi_status=PH_BUS_FREE;/*.. initial phase*/ trm_reg_write16(DO_DATALATCH,TRMREG_SCSI_CONTROL);/* it's important for atn stop*/ /* ** SCSI command */ trm_reg_write8(SCMD_MSGACCEPT,TRMREG_SCSI_COMMAND); return; } } *pscsi_status = PH_BUS_FREE; /* .. 
initial phase */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop */ /* * SCSI cammand */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); } } static void trm_MsgInPhase1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status) { trm_reg_write16(DO_CLRFIFO, TRMREG_SCSI_CONTROL); trm_reg_write32(1,TRMREG_SCSI_COUNTER); if (!(pSRB->SRBState & SRB_MSGIN)) { pSRB->SRBState &= SRB_DISCONNECT; pSRB->SRBState |= SRB_MSGIN; } trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop*/ /* * SCSI cammand */ trm_reg_write8(SCMD_FIFO_IN, TRMREG_SCSI_COMMAND); } static void trm_Nop0(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status) { } static void trm_Nop1(PACB pACB, PSRB pSRB, u_int16_t *pscsi_status) { } static void trm_SetXferRate(PACB pACB,PSRB pSRB, PDCB pDCB) { union ccb *pccb; struct ccb_trans_settings neg; u_int16_t cnt, i; u_int8_t bval; PDCB pDCBTemp; /* * set all lun device's period , offset */ TRM_DPRINTF("trm_SetXferRate\n"); pccb = pSRB->pccb; memset(&neg, 0, sizeof (neg)); neg.xport_specific.spi.sync_period = pDCB->tinfo.goal.period; neg.xport_specific.spi.sync_offset = pDCB->tinfo.goal.offset; neg.xport_specific.spi.valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; xpt_setup_ccb(&neg.ccb_h, pccb->ccb_h.path, /* priority */1); xpt_async(AC_TRANSFER_NEG, pccb->ccb_h.path, &neg); if (!(pDCB->IdentifyMsg & 0x07)) { pDCBTemp = pACB->pLinkDCB; cnt = pACB->DeviceCnt; bval = pDCB->TargetID; for (i = 0; i < cnt; i++) { if (pDCBTemp->TargetID == bval) { pDCBTemp->SyncPeriod = pDCB->SyncPeriod; pDCBTemp->SyncOffset = pDCB->SyncOffset; pDCBTemp->SyncMode = pDCB->SyncMode; } pDCBTemp = pDCBTemp->pNextDCB; } } return; } /* * scsiiom * trm_Interrupt * * * ---SCSI bus phase * * PH_DATA_OUT 0x00 Data out phase * PH_DATA_IN 0x01 Data in phase * PH_COMMAND 0x02 Command phase * PH_STATUS 0x03 Status phase * PH_BUS_FREE 0x04 Invalid phase used as bus free * PH_BUS_FREE 0x05 Invalid phase used as bus free * PH_MSG_OUT 0x06 Message out phase * PH_MSG_IN 0x07 Message in phase * */ static void trm_Disconnect(PACB pACB) { PDCB pDCB; PSRB pSRB, psrb; u_int16_t i,j, cnt; u_int target_id,target_lun; TRM_DPRINTF("trm_Disconnect...............\n "); pDCB = pACB->pActiveDCB; if (!pDCB) { TRM_DPRINTF(" Exception Disconnect DCB=NULL..............\n "); j = 400; while (--j) DELAY(1); /* 1 msec */ trm_reg_write16((DO_CLRFIFO | DO_HWRESELECT), TRMREG_SCSI_CONTROL); return; } pSRB = pDCB->pActiveSRB; /* bug pSRB=0 */ target_id = pSRB->pccb->ccb_h.target_id; target_lun = pSRB->pccb->ccb_h.target_lun; TRM_DPRINTF(":pDCB->pActiveSRB= %8x \n ",(u_int) pDCB->pActiveSRB); pACB->pActiveDCB = 0; pSRB->ScsiPhase = PH_BUS_FREE; /* SCSI bus free Phase */ trm_reg_write16((DO_CLRFIFO | DO_HWRESELECT), TRMREG_SCSI_CONTROL); if (pSRB->SRBState & SRB_UNEXPECT_RESEL) { pSRB->SRBState = 0; trm_DoWaitingSRB(pACB); } else if (pSRB->SRBState & SRB_ABORT_SENT) { pDCB->DCBFlag = 0; cnt = pDCB->GoingSRBCnt; pDCB->GoingSRBCnt = 0; pSRB = pDCB->pGoingSRB; for (i = 0; i < cnt; i++) { psrb = pSRB->pNextSRB; pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = pSRB; pSRB = psrb; } pDCB->pGoingSRB = 0; trm_DoWaitingSRB(pACB); } else { if ((pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) || !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED))) { /* Selection time out */ if (!(pACB->scan_devices[target_id][target_lun]) && pSRB->CmdBlock[0] != 0x00 && /* TEST UNIT READY */ pSRB->CmdBlock[0] != INQUIRY) { pSRB->SRBState = SRB_READY; trm_RewaitSRB(pDCB, pSRB); } else { pSRB->TargetStatus = 
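/* fake a selection timeout so trm_SRBdone() maps it to CAM_SEL_TIMEOUT */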
SCSI_STAT_SEL_TIMEOUT; goto disc1; } } else if (pSRB->SRBState & SRB_DISCONNECT) { /* * SRB_DISCONNECT */ trm_DoWaitingSRB(pACB); } else if (pSRB->SRBState & SRB_COMPLETED) { disc1: /* * SRB_COMPLETED */ pDCB->pActiveSRB = 0; pSRB->SRBState = SRB_FREE; trm_SRBdone(pACB, pDCB, pSRB); } } return; } static void trm_Reselect(PACB pACB) { PDCB pDCB; PSRB pSRB; u_int16_t RselTarLunId; TRM_DPRINTF("trm_Reselect................. \n"); pDCB = pACB->pActiveDCB; if (pDCB) { /* Arbitration lost but Reselection win */ pSRB = pDCB->pActiveSRB; pSRB->SRBState = SRB_READY; trm_RewaitSRB(pDCB, pSRB); } /* Read Reselected Target Id and LUN */ RselTarLunId = trm_reg_read16(TRMREG_SCSI_TARGETID) & 0x1FFF; pDCB = pACB->pLinkDCB; while (RselTarLunId != *((u_int16_t *) &pDCB->TargetID)) { /* get pDCB of the reselect id */ pDCB = pDCB->pNextDCB; } pACB->pActiveDCB = pDCB; if (pDCB->SyncMode & EN_TAG_QUEUING) { pSRB = &pACB->TmpSRB; pDCB->pActiveSRB = pSRB; } else { pSRB = pDCB->pActiveSRB; if (!pSRB || !(pSRB->SRBState & SRB_DISCONNECT)) { /* * abort command */ pSRB = &pACB->TmpSRB; pSRB->SRBState = SRB_UNEXPECT_RESEL; pDCB->pActiveSRB = pSRB; trm_EnableMsgOutAbort1(pACB, pSRB); } else { if (pDCB->DCBFlag & ABORT_DEV_) { pSRB->SRBState = SRB_ABORT_SENT; trm_EnableMsgOutAbort1(pACB, pSRB); } else pSRB->SRBState = SRB_DATA_XFER; } } pSRB->ScsiPhase = PH_BUS_FREE; /* SCSI bus free Phase */ /* * Program HA ID, target ID, period and offset */ trm_reg_write8((u_int8_t) RselTarLunId,TRMREG_SCSI_TARGETID); /* target ID */ trm_reg_write8(pACB->AdaptSCSIID,TRMREG_SCSI_HOSTID); /* host ID */ trm_reg_write8(pDCB->SyncPeriod,TRMREG_SCSI_SYNC); /* period */ trm_reg_write8(pDCB->SyncOffset,TRMREG_SCSI_OFFSET); /* offset */ trm_reg_write16(DO_DATALATCH, TRMREG_SCSI_CONTROL); /* it's important for atn stop*/ /* * SCSI cammand */ trm_reg_write8(SCMD_MSGACCEPT, TRMREG_SCSI_COMMAND); /* to rls the /ACK signal */ } static void trm_SRBdone(PACB pACB, PDCB pDCB, PSRB pSRB) { PSRB psrb; u_int8_t bval, bval1,status; union ccb *pccb; struct ccb_scsiio *pcsio; PSCSI_INQDATA ptr; int intflag; u_int target_id,target_lun; PDCB pTempDCB; pccb = pSRB->pccb; if (pccb == NULL) return; pcsio = &pccb->csio; target_id = pSRB->pccb->ccb_h.target_id; target_lun = pSRB->pccb->ccb_h.target_lun; if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(pACB->buffer_dmat, pSRB->dmamap, op); bus_dmamap_unload(pACB->buffer_dmat, pSRB->dmamap); } /* * * target status * */ status = pSRB->TargetStatus; pcsio->scsi_status=SCSI_STAT_GOOD; pccb->ccb_h.status = CAM_REQ_CMP; if (pSRB->SRBFlag & AUTO_REQSENSE) { /* * status of auto request sense */ pSRB->SRBFlag &= ~AUTO_REQSENSE; pSRB->AdaptStatus = 0; pSRB->TargetStatus = SCSI_STATUS_CHECK_COND; if (status == SCSI_STATUS_CHECK_COND) { pccb->ccb_h.status = CAM_SEL_TIMEOUT; goto ckc_e; } *((u_long *) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0]; *((u_long *) &(pSRB->CmdBlock[4])) = pSRB->Segment0[1]; pSRB->SRBTotalXferLength = pSRB->Segment1[1]; pSRB->pSRBSGL->address = pSRB->SgSenseTemp.address; pSRB->pSRBSGL->length = pSRB->SgSenseTemp.length; pcsio->scsi_status = SCSI_STATUS_CHECK_COND; bcopy(trm_get_sense_buf(pACB, pSRB), &pcsio->sense_data, pcsio->sense_len); pcsio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; goto ckc_e; } /* * target status */ if (status) { if (status == SCSI_STATUS_CHECK_COND) { if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { 
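                /*
                 * autosense was not disabled by CAM, so run REQUEST
                 * SENSE ourselves and finish the CCB once the sense
                 * data has been fetched
                 */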
                TRM_DPRINTF("trm_RequestSense..................\n");
                trm_RequestSense(pACB, pDCB, pSRB);
                return;
            }
            pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
            pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
            goto ckc_e;
        } else if (status == SCSI_STAT_QUEUEFULL) {
            bval = (u_int8_t) pDCB->GoingSRBCnt;
            bval--;
            pDCB->MaxActiveCommandCnt = bval;
            trm_RewaitSRB(pDCB, pSRB);
            pSRB->AdaptStatus = 0;
            pSRB->TargetStatus = 0;
            return;
        } else if (status == SCSI_STAT_SEL_TIMEOUT) {
            pSRB->AdaptStatus = H_SEL_TIMEOUT;
            pSRB->TargetStatus = 0;
            pcsio->scsi_status = SCSI_STAT_SEL_TIMEOUT;
            pccb->ccb_h.status = CAM_SEL_TIMEOUT;
        } else if (status == SCSI_STAT_BUSY) {
            TRM_DPRINTF("trm: target busy at %s %d\n",
                __FILE__, __LINE__);
            pcsio->scsi_status = SCSI_STAT_BUSY;
            pccb->ccb_h.status = CAM_SCSI_BUSY;
            /* the device is busy; try again later? */
            return;
        } else if (status == SCSI_STAT_RESCONFLICT) {
            TRM_DPRINTF("trm: target reserved at %s %d\n",
                __FILE__, __LINE__);
            pcsio->scsi_status = SCSI_STAT_RESCONFLICT;
            pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /*XXX*/
            return;
        } else {
            pSRB->AdaptStatus = 0;
            if (pSRB->RetryCnt) {
                pSRB->RetryCnt--;
                pSRB->TargetStatus = 0;
                pSRB->SRBSGIndex = 0;
                if (trm_StartSCSI(pACB, pDCB, pSRB)) {
                    /*
                     * trm_StartSCSI returned 1: an interrupt
                     * is pending and the SCSI processor is
                     * still occupied by another SRB
                     */
                    trm_RewaitSRB(pDCB, pSRB);
                }
                return;
            } else {
                TRM_DPRINTF("trm: driver stuffup at %s %d\n",
                    __FILE__, __LINE__);
                pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
            }
        }
    } else {
        /*
         * process the initiator (adapter) status
         */
        status = pSRB->AdaptStatus;
        if (status & H_OVER_UNDER_RUN) {
            pSRB->TargetStatus = 0;
            /* illegal length (over/under run) */
            pccb->ccb_h.status = CAM_DATA_RUN_ERR;
        } else if (pSRB->SRBStatus & PARITY_ERROR) {
            TRM_DPRINTF("trm: driver stuffup %s %d\n",
                __FILE__, __LINE__);
            pDCB->tinfo.goal.period = 0;
            pDCB->tinfo.goal.offset = 0;
            /* driver failed to perform operation */
            pccb->ccb_h.status = CAM_UNCOR_PARITY;
        } else {
            /* no error */
            pSRB->AdaptStatus = 0;
            pSRB->TargetStatus = 0;
            /* there is no error, (sense is invalid) */
            pccb->ccb_h.status = CAM_REQ_CMP;
        }
    }
ckc_e:
    if (pACB->scan_devices[target_id][target_lun]) {
        /*
         * this SCSI command was issued as part of "scan devices" duty
         */
        if (pSRB->CmdBlock[0] == TEST_UNIT_READY)
            /* SCSI command phase: test unit ready */
            pACB->scan_devices[target_id][target_lun] = 0;
        else if (pSRB->CmdBlock[0] == INQUIRY) {
            /*
             * SCSI command phase: inquiry scsi device data
             * (type, capacity, manufacturer....
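             * A device answering with the SCSI_NODEV peripheral
             * qualifier is unlinked from the DCB list below.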
*/ if (pccb->ccb_h.status == CAM_SEL_TIMEOUT) goto NO_DEV; ptr = (PSCSI_INQDATA) pcsio->data_ptr; /* page fault */ TRM_DPRINTF("trm_SRBdone..PSCSI_INQDATA:%2x \n", ptr->DevType); bval1 = ptr->DevType & SCSI_DEVTYPE; if (bval1 == SCSI_NODEV) { NO_DEV: TRM_DPRINTF("trm_SRBdone NO Device:target_id= %d ,target_lun= %d \n", target_id, target_lun); intflag = splcam(); pACB->scan_devices[target_id][target_lun] = 0; /* no device set scan device flag =0*/ /* pDCB Q link */ /* move the head of DCB to tempDCB*/ pTempDCB=pACB->pLinkDCB; /* search current DCB for pass link */ while (pTempDCB->pNextDCB != pDCB) { pTempDCB = pTempDCB->pNextDCB; } /* * when the current DCB found than connect * current DCB tail */ /* to the DCB tail that before current DCB */ pTempDCB->pNextDCB = pDCB->pNextDCB; /* * if there was only one DCB ,connect his tail * to his head */ if (pACB->pLinkDCB == pDCB) pACB->pLinkDCB = pTempDCB->pNextDCB; if (pACB->pDCBRunRobin == pDCB) pACB->pDCBRunRobin = pTempDCB->pNextDCB; pDCB->DCBstatus &= ~DS_IN_QUEUE; pACB->DeviceCnt--; if (pACB->DeviceCnt == 0) { pACB->pLinkDCB = NULL; pACB->pDCBRunRobin = NULL; } splx(intflag); } else { #ifdef trm_DEBUG1 int j; for (j = 0; j < 28; j++) { TRM_DPRINTF("ptr=%2x ", ((u_int8_t *)ptr)[j]); } #endif pDCB->DevType = bval1; if (bval1 == SCSI_DASD || bval1 == SCSI_OPTICAL) { if ((((ptr->Vers & 0x07) >= 2) || ((ptr->RDF & 0x0F) == 2)) && (ptr->Flags & SCSI_INQ_CMDQUEUE) && (pDCB->DevMode & TAG_QUEUING_) && (pDCB->DevMode & EN_DISCONNECT_)) { if (pDCB->DevMode & TAG_QUEUING_) { pDCB-> MaxActiveCommandCnt = pACB->TagMaxNum; pDCB->SyncMode |= EN_TAG_QUEUING; pDCB->tinfo.disc_tag |= TRM_CUR_TAGENB; } else { pDCB->SyncMode |= EN_ATN_STOP; pDCB->tinfo.disc_tag &= ~TRM_CUR_TAGENB; } } } } /* pSRB->CmdBlock[0] == INQUIRY */ } /* pACB->scan_devices[target_id][target_lun] */ } intflag = splcam(); /* ReleaseSRB(pDCB, pSRB); */ if (pSRB == pDCB->pGoingSRB) pDCB->pGoingSRB = pSRB->pNextSRB; else { psrb = pDCB->pGoingSRB; while (psrb->pNextSRB != pSRB) { psrb = psrb->pNextSRB; } psrb->pNextSRB = pSRB->pNextSRB; if (pSRB == pDCB->pGoingLastSRB) { pDCB->pGoingLastSRB = psrb; } } pSRB->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = pSRB; pDCB->GoingSRBCnt--; trm_DoWaitingSRB(pACB); splx(intflag); /* Notify cmd done */ xpt_done (pccb); } static void trm_DoingSRB_Done(PACB pACB) { PDCB pDCB, pdcb; PSRB psrb, psrb2; u_int16_t cnt, i; union ccb *pccb; pDCB = pACB->pLinkDCB; if (pDCB == NULL) return; pdcb = pDCB; do { cnt = pdcb->GoingSRBCnt; psrb = pdcb->pGoingSRB; for (i = 0; i < cnt; i++) { psrb2 = psrb->pNextSRB; pccb = psrb->pccb; pccb->ccb_h.status = CAM_SEL_TIMEOUT; /* ReleaseSRB(pDCB, pSRB); */ psrb->pNextSRB = pACB->pFreeSRB; pACB->pFreeSRB = psrb; xpt_done(pccb); psrb = psrb2; } pdcb->GoingSRBCnt = 0; pdcb->pGoingSRB = NULL; pdcb = pdcb->pNextDCB; } while (pdcb != pDCB); } static void trm_ResetSCSIBus(PACB pACB) { int intflag; intflag = splcam(); pACB->ACBFlag |= RESET_DEV; trm_reg_write16(DO_RSTSCSI,TRMREG_SCSI_CONTROL); while (!(trm_reg_read16(TRMREG_SCSI_INTSTATUS) & INT_SCSIRESET)); splx(intflag); return; } static void trm_ScsiRstDetect(PACB pACB) { int intflag; u_long wlval; TRM_DPRINTF("trm_ScsiRstDetect \n"); wlval = 1000; while (--wlval) DELAY(1000); intflag = splcam(); trm_reg_write8(STOPDMAXFER,TRMREG_DMA_CONTROL); trm_reg_write16(DO_CLRFIFO,TRMREG_SCSI_CONTROL); if (pACB->ACBFlag & RESET_DEV) pACB->ACBFlag |= RESET_DONE; else { pACB->ACBFlag |= RESET_DETECT; trm_ResetDevParam(pACB); /* trm_DoingSRB_Done(pACB); ???? 
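             * (left disabled: on an externally detected reset the
             * outstanding SRBs are requeued via trm_RecoverSRB()
             * below rather than failed back to CAM)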
*/ trm_RecoverSRB(pACB); pACB->pActiveDCB = NULL; pACB->ACBFlag = 0; trm_DoWaitingSRB(pACB); } splx(intflag); return; } static void trm_RequestSense(PACB pACB, PDCB pDCB, PSRB pSRB) { union ccb *pccb; struct ccb_scsiio *pcsio; pccb = pSRB->pccb; pcsio = &pccb->csio; pSRB->SRBFlag |= AUTO_REQSENSE; pSRB->Segment0[0] = *((u_long *) &(pSRB->CmdBlock[0])); pSRB->Segment0[1] = *((u_long *) &(pSRB->CmdBlock[4])); pSRB->Segment1[0] = (u_long) ((pSRB->ScsiCmdLen << 8) + pSRB->SRBSGCount); pSRB->Segment1[1] = pSRB->SRBTotalXferLength; /* ?????????? */ /* $$$$$$ Status of initiator/target $$$$$$$$ */ pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; /* $$$$$$ Status of initiator/target $$$$$$$$ */ pSRB->SRBTotalXferLength = sizeof(pcsio->sense_data); pSRB->SgSenseTemp.address = pSRB->pSRBSGL->address; pSRB->SgSenseTemp.length = pSRB->pSRBSGL->length; pSRB->pSRBSGL->address = trm_get_sense_bufaddr(pACB, pSRB); pSRB->pSRBSGL->length = (u_long) sizeof(struct scsi_sense_data); pSRB->SRBSGCount = 1; pSRB->SRBSGIndex = 0; *((u_long *) &(pSRB->CmdBlock[0])) = 0x00000003; pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5; *((u_int16_t *) &(pSRB->CmdBlock[4])) = pcsio->sense_len; pSRB->ScsiCmdLen = 6; if (trm_StartSCSI(pACB, pDCB, pSRB)) /* * If trm_StartSCSI return 1 : * current interrupt status is interrupt disreenable * It's said that SCSI processor has more one SRB need to do */ trm_RewaitSRB(pDCB, pSRB); } static void trm_EnableMsgOutAbort2(PACB pACB, PSRB pSRB) { pSRB->MsgCnt = 1; trm_reg_write16(DO_SETATN, TRMREG_SCSI_CONTROL); } static void trm_EnableMsgOutAbort1(PACB pACB, PSRB pSRB) { pSRB->MsgOutBuf[0] = MSG_ABORT; trm_EnableMsgOutAbort2(pACB, pSRB); } static void trm_initDCB(PACB pACB, PDCB pDCB, u_int16_t unit,u_int32_t i,u_int32_t j) { PNVRAMTYPE pEEpromBuf; u_int8_t bval,PeriodIndex; u_int target_id,target_lun; PDCB pTempDCB; int intflag; target_id = i; target_lun = j; /* * Using the lun 0 device to init other DCB first, if the device * has been initialized. * I don't want init sync arguments one by one, it is the same. */ if (target_lun != 0 && (pACB->DCBarray[target_id][0].DCBstatus & DS_IN_QUEUE)) bcopy(&pACB->DCBarray[target_id][0], pDCB, sizeof(TRM_DCB)); intflag = splcam(); if (pACB->pLinkDCB == 0) { pACB->pLinkDCB = pDCB; /* * RunRobin impersonate the role * that let each device had good proportion * about SCSI command proceeding */ pACB->pDCBRunRobin = pDCB; pDCB->pNextDCB = pDCB; } else { pTempDCB=pACB->pLinkDCB; /* search the last nod of DCB link */ while (pTempDCB->pNextDCB != pACB->pLinkDCB) pTempDCB = pTempDCB->pNextDCB; /* connect current DCB with last DCB tail */ pTempDCB->pNextDCB = pDCB; /* connect current DCB tail to this DCB Q head */ pDCB->pNextDCB=pACB->pLinkDCB; } splx(intflag); pACB->DeviceCnt++; pDCB->TargetID = target_id; pDCB->TargetLUN = target_lun; pDCB->pWaitingSRB = NULL; pDCB->pGoingSRB = NULL; pDCB->GoingSRBCnt = 0; pDCB->pActiveSRB = NULL; pDCB->MaxActiveCommandCnt = 1; pDCB->DCBFlag = 0; pDCB->DCBstatus |= DS_IN_QUEUE; /* $$$$$$$ */ pEEpromBuf = &trm_eepromBuf[unit]; pDCB->DevMode = pEEpromBuf->NvramTarget[target_id].NvmTarCfg0; pDCB->AdpMode = pEEpromBuf->NvramChannelCfg; /* $$$$$$$ */ /* * disconnect enable ? */ if (pDCB->DevMode & NTC_DO_DISCONNECT) { bval = 0xC0; pDCB->tinfo.disc_tag |= TRM_USR_DISCENB ; } else { bval = 0x80; pDCB->tinfo.disc_tag &= ~(TRM_USR_DISCENB); } bval |= target_lun; pDCB->IdentifyMsg = bval; if (target_lun != 0 && (pACB->DCBarray[target_id][0].DCBstatus & DS_IN_QUEUE)) return; /* $$$$$$$ */ /* * tag Qing enable ? 
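     * (a per-target NVRAM option; mirrored into tinfo.disc_tag as
     * TRM_USR_TAGENB so CAM sees the user setting)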
*/ if (pDCB->DevMode & TAG_QUEUING_) { pDCB->tinfo.disc_tag |= TRM_USR_TAGENB ; } else pDCB->tinfo.disc_tag &= ~(TRM_USR_TAGENB); /* $$$$$$$ */ /* * wide nego ,sync nego enable ? */ pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; PeriodIndex = pEEpromBuf->NvramTarget[target_id].NvmTarPeriod & 0x07; if (pACB->AdaptType==1) {/* is U2? */ pDCB->MaxNegoPeriod=dc395u2x_clock_period[ PeriodIndex ]; pDCB->tinfo.user.period=pDCB->MaxNegoPeriod; pDCB->tinfo.user.offset=(pDCB->SyncMode & SYNC_NEGO_ENABLE) ? 31 : 0; } else { pDCB->MaxNegoPeriod=dc395x_clock_period[ PeriodIndex ]; pDCB->tinfo.user.period=pDCB->MaxNegoPeriod; pDCB->tinfo.user.offset=(pDCB->SyncMode & SYNC_NEGO_ENABLE) ? 15 : 0; } pDCB->SyncMode = 0; if ((pDCB->DevMode & NTC_DO_WIDE_NEGO) && (pACB->Config & HCC_WIDE_CARD)) pDCB->SyncMode |= WIDE_NEGO_ENABLE; /* enable wide nego */ if (pDCB->DevMode & NTC_DO_SYNC_NEGO) pDCB->SyncMode |= SYNC_NEGO_ENABLE; /* enable sync nego */ /* $$$$$$$ */ /* * Fill in tinfo structure. */ pDCB->tinfo.user.width = (pDCB->SyncMode & WIDE_NEGO_ENABLE) ? MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT; pDCB->tinfo.current.period = 0; pDCB->tinfo.current.offset = 0; pDCB->tinfo.current.width = MSG_EXT_WDTR_BUS_8_BIT; } static void trm_srbmapSG(void *arg, bus_dma_segment_t *segs, int nseg, int error) { PSRB pSRB; pSRB=(PSRB) arg; pSRB->SRBSGPhyAddr=segs->ds_addr; return; } static void trm_destroySRB(PACB pACB) { PSRB pSRB; pSRB = pACB->pFreeSRB; while (pSRB) { if (pSRB->SRBSGPhyAddr) bus_dmamap_unload(pACB->sg_dmat, pSRB->sg_dmamap); if (pSRB->pSRBSGL) bus_dmamem_free(pACB->sg_dmat, pSRB->pSRBSGL, pSRB->sg_dmamap); if (pSRB->dmamap) bus_dmamap_destroy(pACB->buffer_dmat, pSRB->dmamap); pSRB = pSRB->pNextSRB; } } static int trm_initSRB(PACB pACB) { u_int16_t i; PSRB pSRB; int error; for (i = 0; i < TRM_MAX_SRB_CNT; i++) { pSRB = (PSRB)&pACB->pFreeSRB[i]; if (bus_dmamem_alloc(pACB->sg_dmat, (void **)&pSRB->pSRBSGL, BUS_DMA_NOWAIT, &pSRB->sg_dmamap) !=0 ) { return ENXIO; } bus_dmamap_load(pACB->sg_dmat, pSRB->sg_dmamap, pSRB->pSRBSGL, TRM_MAX_SG_LISTENTRY * sizeof(SGentry), trm_srbmapSG, pSRB, /*flags*/0); if (i != TRM_MAX_SRB_CNT - 1) { /* * link all SRB */ pSRB->pNextSRB = &pACB->pFreeSRB[i+1]; } else { /* * load NULL to NextSRB of the last SRB */ pSRB->pNextSRB = NULL; } pSRB->TagNumber = i; /* * Create the dmamap. This is no longer optional! 
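         * (each SRB gets its own map on buffer_dmat so that per-I/O
         * data buffers can be loaded and unloaded independently)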
*/ if ((error = bus_dmamap_create(pACB->buffer_dmat, 0, &pSRB->dmamap)) != 0) return (error); } return (0); } static void trm_initACB(PACB pACB, u_int8_t adaptType, u_int16_t unit) { PNVRAMTYPE pEEpromBuf; pEEpromBuf = &trm_eepromBuf[unit]; pACB->max_id = 15; if (pEEpromBuf->NvramChannelCfg & NAC_SCANLUN) pACB->max_lun = 7; else pACB->max_lun = 0; TRM_DPRINTF("trm: pACB->max_id= %d pACB->max_lun= %d \n", pACB->max_id, pACB->max_lun); pACB->pLinkDCB = NULL; pACB->pDCBRunRobin = NULL; pACB->pActiveDCB = NULL; pACB->AdapterUnit = (u_int8_t)unit; pACB->AdaptSCSIID = pEEpromBuf->NvramScsiId; pACB->AdaptSCSILUN = 0; pACB->DeviceCnt = 0; pACB->AdaptType = adaptType; pACB->TagMaxNum = 2 << pEEpromBuf->NvramMaxTag; pACB->ACBFlag = 0; return; } static void NVRAM_trm_write_all(PNVRAMTYPE pEEpromBuf,PACB pACB) { u_int8_t *bpEeprom = (u_int8_t *) pEEpromBuf; u_int8_t bAddr; /* Enable SEEPROM */ trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) | EN_EEPROM), TRMREG_GEN_CONTROL); /* * Write enable */ NVRAM_trm_write_cmd(pACB, 0x04, 0xFF); trm_reg_write8(0, TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); for (bAddr = 0; bAddr < 128; bAddr++, bpEeprom++) { NVRAM_trm_set_data(pACB, bAddr, *bpEeprom); } /* * Write disable */ NVRAM_trm_write_cmd(pACB, 0x04, 0x00); trm_reg_write8(0 , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); /* Disable SEEPROM */ trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) & ~EN_EEPROM), TRMREG_GEN_CONTROL); return; } static void NVRAM_trm_set_data(PACB pACB, u_int8_t bAddr, u_int8_t bData) { int i; u_int8_t bSendData; /* * Send write command & address */ NVRAM_trm_write_cmd(pACB, 0x05, bAddr); /* * Write data */ for (i = 0; i < 8; i++, bData <<= 1) { bSendData = NVR_SELECT; if (bData & 0x80) /* Start from bit 7 */ bSendData |= NVR_BITOUT; trm_reg_write8(bSendData , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); } trm_reg_write8(NVR_SELECT , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); /* * Disable chip select */ trm_reg_write8(0 , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8(NVR_SELECT ,TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); /* * Wait for write ready */ while (1) { trm_reg_write8((NVR_SELECT | NVR_CLOCK), TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8(NVR_SELECT, TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); if (trm_reg_read8(TRMREG_GEN_NVRAM) & NVR_BITIN) { break; } } /* * Disable chip select */ trm_reg_write8(0, TRMREG_GEN_NVRAM); return; } static void NVRAM_trm_read_all(PNVRAMTYPE pEEpromBuf, PACB pACB) { u_int8_t *bpEeprom = (u_int8_t*) pEEpromBuf; u_int8_t bAddr; /* * Enable SEEPROM */ trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) | EN_EEPROM), TRMREG_GEN_CONTROL); for (bAddr = 0; bAddr < 128; bAddr++, bpEeprom++) *bpEeprom = NVRAM_trm_get_data(pACB, bAddr); /* * Disable SEEPROM */ trm_reg_write8((trm_reg_read8(TRMREG_GEN_CONTROL) & ~EN_EEPROM), TRMREG_GEN_CONTROL); return; } static u_int8_t NVRAM_trm_get_data(PACB pACB, u_int8_t bAddr) { int i; u_int8_t bReadData, bData = 0; /* * Send read command & address */ NVRAM_trm_write_cmd(pACB, 0x06, bAddr); for (i = 0; i < 8; i++) { /* * Read data */ trm_reg_write8((NVR_SELECT | NVR_CLOCK) , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8(NVR_SELECT , TRMREG_GEN_NVRAM); /* * Get data bit while falling edge */ bReadData = trm_reg_read8(TRMREG_GEN_NVRAM); bData <<= 1; if (bReadData & NVR_BITIN) { bData |= 1; } NVRAM_trm_wait_30us(pACB); } /* * Disable chip select */ trm_reg_write8(0, 
TRMREG_GEN_NVRAM); return (bData); } static void NVRAM_trm_wait_30us(PACB pACB) { /* ScsiPortStallExecution(30); wait 30 us */ trm_reg_write8(5, TRMREG_GEN_TIMER); while (!(trm_reg_read8(TRMREG_GEN_STATUS) & GTIMEOUT)); return; } static void NVRAM_trm_write_cmd(PACB pACB, u_int8_t bCmd, u_int8_t bAddr) { int i; u_int8_t bSendData; for (i = 0; i < 3; i++, bCmd <<= 1) { /* * Program SB+OP code */ bSendData = NVR_SELECT; if (bCmd & 0x04) bSendData |= NVR_BITOUT; /* start from bit 2 */ trm_reg_write8(bSendData, TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); } for (i = 0; i < 7; i++, bAddr <<= 1) { /* * Program address */ bSendData = NVR_SELECT; if (bAddr & 0x40) /* Start from bit 6 */ bSendData |= NVR_BITOUT; trm_reg_write8(bSendData , TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); trm_reg_write8((bSendData | NVR_CLOCK), TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); } trm_reg_write8(NVR_SELECT, TRMREG_GEN_NVRAM); NVRAM_trm_wait_30us(pACB); } static void trm_check_eeprom(PNVRAMTYPE pEEpromBuf, PACB pACB) { u_int16_t *wpEeprom = (u_int16_t *) pEEpromBuf; u_int16_t wAddr, wCheckSum; u_long dAddr, *dpEeprom; NVRAM_trm_read_all(pEEpromBuf,pACB); wCheckSum = 0; for (wAddr = 0, wpEeprom = (u_int16_t *) pEEpromBuf; wAddr < 64; wAddr++, wpEeprom++) { wCheckSum += *wpEeprom; } if (wCheckSum != 0x1234) { /* * Checksum error, load default */ pEEpromBuf->NvramSubVendorID[0] = (u_int8_t) PCI_Vendor_ID_TEKRAM; pEEpromBuf->NvramSubVendorID[1] = (u_int8_t) (PCI_Vendor_ID_TEKRAM >> 8); pEEpromBuf->NvramSubSysID[0] = (u_int8_t) PCI_Device_ID_TRM_S1040; pEEpromBuf->NvramSubSysID[1] = (u_int8_t) (PCI_Device_ID_TRM_S1040 >> 8); pEEpromBuf->NvramSubClass = 0x00; pEEpromBuf->NvramVendorID[0] = (u_int8_t) PCI_Vendor_ID_TEKRAM; pEEpromBuf->NvramVendorID[1] = (u_int8_t) (PCI_Vendor_ID_TEKRAM >> 8); pEEpromBuf->NvramDeviceID[0] = (u_int8_t) PCI_Device_ID_TRM_S1040; pEEpromBuf->NvramDeviceID[1] = (u_int8_t) (PCI_Device_ID_TRM_S1040 >> 8); pEEpromBuf->NvramReserved = 0x00; for (dAddr = 0, dpEeprom = (u_long *) pEEpromBuf->NvramTarget; dAddr < 16; dAddr++, dpEeprom++) { *dpEeprom = 0x00000077; /* NvmTarCfg3,NvmTarCfg2,NvmTarPeriod,NvmTarCfg0 */ } *dpEeprom++ = 0x04000F07; /* NvramMaxTag,NvramDelayTime,NvramChannelCfg,NvramScsiId */ *dpEeprom++ = 0x00000015; /* NvramReserved1,NvramBootLun,NvramBootTarget,NvramReserved0 */ for (dAddr = 0; dAddr < 12; dAddr++, dpEeprom++) *dpEeprom = 0x00; pEEpromBuf->NvramCheckSum = 0x00; for (wAddr = 0, wCheckSum = 0, wpEeprom = (u_int16_t *) pEEpromBuf; wAddr < 63; wAddr++, wpEeprom++) wCheckSum += *wpEeprom; *wpEeprom = 0x1234 - wCheckSum; NVRAM_trm_write_all(pEEpromBuf,pACB); } return; } static int trm_initAdapter(PACB pACB, u_int16_t unit) { PNVRAMTYPE pEEpromBuf; u_int16_t wval; u_int8_t bval; pEEpromBuf = &trm_eepromBuf[unit]; /* 250ms selection timeout */ trm_reg_write8(SEL_TIMEOUT, TRMREG_SCSI_TIMEOUT); /* Mask all the interrupt */ trm_reg_write8(0x00, TRMREG_DMA_INTEN); trm_reg_write8(0x00, TRMREG_SCSI_INTEN); /* Reset SCSI module */ trm_reg_write16(DO_RSTMODULE, TRMREG_SCSI_CONTROL); /* program configuration 0 */ pACB->Config = HCC_AUTOTERM | HCC_PARITY; if (trm_reg_read8(TRMREG_GEN_STATUS) & WIDESCSI) pACB->Config |= HCC_WIDE_CARD; if (pEEpromBuf->NvramChannelCfg & NAC_POWERON_SCSI_RESET) pACB->Config |= HCC_SCSI_RESET; if (pACB->Config & HCC_PARITY) bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK; else bval = PHASELATCH | INITIATOR | BLOCKRST ; trm_reg_write8(bval,TRMREG_SCSI_CONFIG0); /* 
program configuration 1 */
    trm_reg_write8(0x13, TRMREG_SCSI_CONFIG1);
    /* program host ID */
    bval = pEEpromBuf->NvramScsiId;
    trm_reg_write8(bval, TRMREG_SCSI_HOSTID);
    /* set asynchronous transfer */
    trm_reg_write8(0x00, TRMREG_SCSI_OFFSET);
    /* turn LED control off */
    wval = trm_reg_read16(TRMREG_GEN_CONTROL) & 0x7F;
    trm_reg_write16(wval, TRMREG_GEN_CONTROL);
    /* DMA config */
    wval = trm_reg_read16(TRMREG_DMA_CONFIG) | DMA_ENHANCE;
    trm_reg_write16(wval, TRMREG_DMA_CONFIG);
    /* clear pending interrupt status */
    trm_reg_read8(TRMREG_SCSI_INTSTATUS);
    /* enable SCSI interrupts */
    trm_reg_write8(0x7F, TRMREG_SCSI_INTEN);
    trm_reg_write8(EN_SCSIINTR, TRMREG_DMA_INTEN);
    return (0);
}

static void
trm_mapSRB(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    PACB pACB;

    pACB = (PACB)arg;
    pACB->srb_physbase = segs->ds_addr;
}

static void
trm_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *baddr;

    baddr = (bus_addr_t *)arg;
    *baddr = segs->ds_addr;
}

static PACB
trm_init(u_int16_t unit, device_t dev)
{
    PACB pACB;
    int rid = PCIR_BAR(0), i = 0, j = 0;
    u_int16_t adaptType = 0;

    pACB = (PACB) device_get_softc(dev);
    if (!pACB) {
        printf("trm%d: cannot allocate ACB !\n", unit);
        return (NULL);
    }
    pACB->iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
        &rid, RF_ACTIVE);
    if (pACB->iores == NULL) {
        printf("trm_init: bus_alloc_resource failed!\n");
        return (NULL);
    }
    switch (pci_get_devid(dev)) {
    case PCI_DEVICEID_TRMS1040:
        adaptType = 0;
        break;
    case PCI_DEVICEID_TRMS2080:
        adaptType = 1;
        break;
    default:
        printf("trm_init %d: unknown adapter type!\n", unit);
        goto bad;
    }
    pACB->dev = dev;
    pACB->tag = rman_get_bustag(pACB->iores);
    pACB->bsh = rman_get_bushandle(pACB->iores);
    if (bus_dma_tag_create(
        /*parent_dmat*/ bus_get_dma_tag(dev),
        /*alignment*/ 1, /*boundary*/ 0,
        /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR,
        /*filter*/ NULL, /*filterarg*/ NULL,
        /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
        /*nsegments*/ BUS_SPACE_UNRESTRICTED,
        /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
        /*flags*/ 0, /*lockfunc*/ NULL, /*lockarg*/ NULL,
        /*dmat*/ &pACB->parent_dmat) != 0)
        goto bad;
    if (bus_dma_tag_create(
        /*parent_dmat*/ pACB->parent_dmat,
        /*alignment*/ 1, /*boundary*/ 0,
        /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR,
        /*filter*/ NULL, /*filterarg*/ NULL,
        /*maxsize*/ TRM_MAXPHYS, /*nsegments*/ TRM_NSEG,
        /*maxsegsz*/ TRM_MAXTRANSFER_SIZE,
        /*flags*/ BUS_DMA_ALLOCNOW,
        /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &Giant,
        /*dmat*/ &pACB->buffer_dmat) != 0)
        goto bad;
    /* DMA tag for our ccb structures */
    if (bus_dma_tag_create(
        /*parent_dmat*/ pACB->parent_dmat,
        /*alignment*/ 1, /*boundary*/ 0,
        /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR,
        /*filter*/ NULL, /*filterarg*/ NULL,
        /*maxsize*/ TRM_MAX_SRB_CNT * sizeof(TRM_SRB),
        /*nsegments*/ 1, /*maxsegsz*/ TRM_MAXTRANSFER_SIZE,
        /*flags*/ 0, /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &Giant,
        /*dmat*/ &pACB->srb_dmat) != 0) {
        printf("trm_init %d: bus_dma_tag_create SRB failure\n", unit);
        goto bad;
    }
    if (bus_dmamem_alloc(pACB->srb_dmat, (void **)&pACB->pFreeSRB,
        BUS_DMA_NOWAIT, &pACB->srb_dmamap) != 0) {
        printf("trm_init %d: bus_dmamem_alloc SRB failure\n", unit);
        goto bad;
    }
    bus_dmamap_load(pACB->srb_dmat, pACB->srb_dmamap, pACB->pFreeSRB,
        TRM_MAX_SRB_CNT * sizeof(TRM_SRB), trm_mapSRB, pACB, /*flags*/ 0);
    /* Create, allocate, and map DMA buffers for autosense data */
    if (bus_dma_tag_create(
        /*parent_dmat*/ pACB->parent_dmat,
        /*alignment*/ 1, /*boundary*/ 0,
        /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
        /*highaddr*/ BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL, sizeof(struct scsi_sense_data) * TRM_MAX_SRB_CNT, /*nsegments*/1, /*maxsegsz*/TRM_MAXTRANSFER_SIZE, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &pACB->sense_dmat) != 0) { if (bootverbose) device_printf(dev, "cannot create sense buffer dmat\n"); goto bad; } if (bus_dmamem_alloc(pACB->sense_dmat, (void **)&pACB->sense_buffers, BUS_DMA_NOWAIT, &pACB->sense_dmamap) != 0) goto bad; bus_dmamap_load(pACB->sense_dmat, pACB->sense_dmamap, pACB->sense_buffers, sizeof(struct scsi_sense_data) * TRM_MAX_SRB_CNT, trm_dmamap_cb, &pACB->sense_busaddr, /*flags*/0); trm_check_eeprom(&trm_eepromBuf[unit],pACB); trm_initACB(pACB, adaptType, unit); for (i = 0; i < (pACB->max_id + 1); i++) { if (pACB->AdaptSCSIID == i) continue; for(j = 0; j < (pACB->max_lun + 1); j++) { pACB->scan_devices[i][j] = 1; /* we assume we need to scan all devices */ trm_initDCB(pACB, &pACB->DCBarray[i][j], unit, i, j); } } bzero(pACB->pFreeSRB, TRM_MAX_SRB_CNT * sizeof(TRM_SRB)); if (bus_dma_tag_create( /*parent_dmat*/pACB->parent_dmat, /*alignment*/ 1, /*boundary*/ 0, /*lowaddr*/ BUS_SPACE_MAXADDR, /*highaddr*/ BUS_SPACE_MAXADDR, /*filter*/ NULL, /*filterarg*/ NULL, /*maxsize*/ TRM_MAX_SG_LISTENTRY * sizeof(SGentry), /*nsegments*/ 1, /*maxsegsz*/ TRM_MAXTRANSFER_SIZE, /*flags*/ 0, /*lockfunc*/ busdma_lock_mutex, /*lockarg*/ &Giant, /*dmat*/ &pACB->sg_dmat) != 0) goto bad; if (trm_initSRB(pACB)) { printf("trm_initSRB: error\n"); goto bad; } if (trm_initAdapter(pACB, unit)) { printf("trm_initAdapter: initial ERROR\n"); goto bad; } return (pACB); bad: if (pACB->iores) bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), pACB->iores); if (pACB->sense_dmamap) { bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap); bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers, pACB->sense_dmamap); } if (pACB->sense_dmat) bus_dma_tag_destroy(pACB->sense_dmat); if (pACB->sg_dmat) { trm_destroySRB(pACB); bus_dma_tag_destroy(pACB->sg_dmat); } if (pACB->pFreeSRB) { bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap); bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB, pACB->srb_dmamap); } if (pACB->srb_dmat) bus_dma_tag_destroy(pACB->srb_dmat); if (pACB->buffer_dmat) bus_dma_tag_destroy(pACB->buffer_dmat); if (pACB->parent_dmat) bus_dma_tag_destroy(pACB->parent_dmat); return (NULL); } static int trm_attach(device_t dev) { struct cam_devq *device_Q; u_long device_id; PACB pACB = 0; int rid = 0; int unit = device_get_unit(dev); device_id = pci_get_devid(dev); /* * These cards do not allow memory mapped accesses */ if ((pACB = trm_init((u_int16_t) unit, dev)) == NULL) { printf("trm%d: trm_init error!\n",unit); return (ENXIO); } /* After setting up the adapter, map our interrupt */ /* * Now let the CAM generic SCSI layer find the SCSI devices on the bus * start queue to reset to the idle loop. * Create device queue of SIM(s) * (MAX_START_JOB - 1) : max_sim_transactions */ pACB->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (pACB->irq == NULL || bus_setup_intr(dev, pACB->irq, INTR_TYPE_CAM, NULL, trm_Interrupt, pACB, &pACB->ih)) { printf("trm%d: register Interrupt handler error!\n", unit); goto bad; } device_Q = cam_simq_alloc(TRM_MAX_START_JOB); if (device_Q == NULL){ printf("trm%d: device_Q == NULL !\n",unit); goto bad; } /* * Now tell the generic SCSI layer * about our bus. * If this is the xpt layer creating a sim, then it's OK * to wait for an allocation. * XXX Should we pass in a flag to indicate that wait is OK? 
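 * Here one untagged transaction per device is allowed, with up to
 * TRM_MAX_TAGS_CMD_QUEUE tagged transactions per device.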
* * SIM allocation * * SCSI Interface Modules * The sim driver creates a sim for each controller. The sim device * queue is separately created in order to allow resource sharing betwee * sims. For instance, a driver may create one sim for each channel of * a multi-channel controller and use the same queue for each channel. * In this way, the queue resources are shared across all the channels * of the multi-channel controller. * trm_action : sim_action_func * trm_poll : sim_poll_func * "trm" : sim_name ,if sim_name = "xpt" ..M_DEVBUF,M_WAITOK * pACB : *softc if sim_name <> "xpt" ..M_DEVBUF,M_NOWAIT * pACB->unit : unit * 1 : max_dev_transactions * MAX_TAGS : max_tagged_dev_transactions * * *******Construct our first channel SIM entry */ pACB->psim = cam_sim_alloc(trm_action, trm_poll, "trm", pACB, unit, &Giant, 1, TRM_MAX_TAGS_CMD_QUEUE, device_Q); if (pACB->psim == NULL) { printf("trm%d: SIM allocate fault !\n",unit); cam_simq_free(device_Q); /* SIM allocate fault*/ goto bad; } if (xpt_bus_register(pACB->psim, dev, 0) != CAM_SUCCESS) { printf("trm%d: xpt_bus_register fault !\n",unit); goto bad; } if (xpt_create_path(&pACB->ppath, NULL, cam_sim_path(pACB->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("trm%d: xpt_create_path fault !\n",unit); xpt_bus_deregister(cam_sim_path(pACB->psim)); goto bad; } return (0); bad: if (pACB->iores) bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), pACB->iores); if (pACB->sg_dmat) { trm_destroySRB(pACB); bus_dma_tag_destroy(pACB->sg_dmat); } if (pACB->pFreeSRB) { bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap); bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB, pACB->srb_dmamap); } if (pACB->srb_dmat) bus_dma_tag_destroy(pACB->srb_dmat); if (pACB->sense_buffers) { bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap); bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers, pACB->sense_dmamap); } if (pACB->sense_dmat) bus_dma_tag_destroy(pACB->sense_dmat); if (pACB->buffer_dmat) bus_dma_tag_destroy(pACB->buffer_dmat); if (pACB->ih) bus_teardown_intr(dev, pACB->irq, pACB->ih); if (pACB->irq) bus_release_resource(dev, SYS_RES_IRQ, 0, pACB->irq); if (pACB->psim) cam_sim_free(pACB->psim, TRUE); return (ENXIO); } /* * pci_device * trm_probe (device_t tag, pcidi_t type) * */ static int trm_probe(device_t dev) { switch (pci_get_devid(dev)) { case PCI_DEVICEID_TRMS1040: device_set_desc(dev, "Tekram DC395U/UW/F DC315/U Fast20 Wide SCSI Adapter"); return (BUS_PROBE_DEFAULT); case PCI_DEVICEID_TRMS2080: device_set_desc(dev, "Tekram DC395U2D/U2W Fast40 Wide SCSI Adapter"); return (BUS_PROBE_DEFAULT); default: return (ENXIO); } } static int trm_detach(device_t dev) { PACB pACB = device_get_softc(dev); bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), pACB->iores); trm_destroySRB(pACB); bus_dma_tag_destroy(pACB->sg_dmat); bus_dmamap_unload(pACB->srb_dmat, pACB->srb_dmamap); bus_dmamem_free(pACB->srb_dmat, pACB->pFreeSRB, pACB->srb_dmamap); bus_dma_tag_destroy(pACB->srb_dmat); bus_dmamap_unload(pACB->sense_dmat, pACB->sense_dmamap); bus_dmamem_free(pACB->sense_dmat, pACB->sense_buffers, pACB->sense_dmamap); bus_dma_tag_destroy(pACB->sense_dmat); bus_dma_tag_destroy(pACB->buffer_dmat); bus_teardown_intr(dev, pACB->irq, pACB->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, pACB->irq); xpt_async(AC_LOST_DEVICE, pACB->ppath, NULL); xpt_free_path(pACB->ppath); xpt_bus_deregister(cam_sim_path(pACB->psim)); cam_sim_free(pACB->psim, TRUE); return (0); } static device_method_t trm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, 
trm_probe), DEVMETHOD(device_attach, trm_attach), DEVMETHOD(device_detach, trm_detach), { 0, 0 } }; static driver_t trm_driver = { "trm", trm_methods, sizeof(struct _ACB) }; static devclass_t trm_devclass; DRIVER_MODULE(trm, pci, trm_driver, trm_devclass, 0, 0); MODULE_DEPEND(trm, pci, 1, 1, 1); MODULE_DEPEND(trm, cam, 1, 1, 1); Index: stable/11 =================================================================== --- stable/11 (revision 314380) +++ stable/11 (revision 314381) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r313949