Index: head/sys/dev/mpt/mpt.c =================================================================== --- head/sys/dev/mpt/mpt.c (revision 159918) +++ head/sys/dev/mpt/mpt.c (revision 159919) @@ -1,2545 +1,2547 @@ /*- * Generic routines for LSI Fusion adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. 
and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include /* XXX For static handler registration */ #include /* XXX For static handler registration */ #include #include #include #include #include #define MPT_MAX_TRYS 3 #define MPT_MAX_WAIT 300000 static int maxwait_ack = 0; static int maxwait_int = 0; static int maxwait_state = 0; TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; static mpt_reply_handler_t mpt_default_reply_handler; static mpt_reply_handler_t mpt_config_reply_handler; static mpt_reply_handler_t mpt_handshake_reply_handler; static mpt_reply_handler_t mpt_event_reply_handler; static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); static int mpt_soft_reset(struct mpt_softc *mpt); static void mpt_hard_reset(struct mpt_softc *mpt); static int mpt_configure_ioc(struct mpt_softc *mpt); static int mpt_enable_ioc(struct mpt_softc *mpt, int); /************************* Personality Module Support *************************/ /* * We include one extra entry that is guaranteed to be NULL * to simplify our itterator. 
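 * That way mpt_pers_find() can return mpt_personalities[MPT_MAX_PERSONALITIES]
 * directly when no further personality is attached, and MPT_PERS_FOREACH()
 * terminates on the NULL entry without needing an explicit bounds check.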
*/ static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; static __inline struct mpt_personality* mpt_pers_find(struct mpt_softc *, u_int); static __inline struct mpt_personality* mpt_pers_find_reverse(struct mpt_softc *, u_int); static __inline struct mpt_personality * mpt_pers_find(struct mpt_softc *mpt, u_int start_at) { KASSERT(start_at <= MPT_MAX_PERSONALITIES, ("mpt_pers_find: starting position out of range\n")); while (start_at < MPT_MAX_PERSONALITIES && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { start_at++; } return (mpt_personalities[start_at]); } /* * Used infrequently, so no need to optimize like a forward * traversal where we use the MAX+1 is guaranteed to be NULL * trick. */ static __inline struct mpt_personality * mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) { while (start_at < MPT_MAX_PERSONALITIES && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { start_at--; } if (start_at < MPT_MAX_PERSONALITIES) return (mpt_personalities[start_at]); return (NULL); } #define MPT_PERS_FOREACH(mpt, pers) \ for (pers = mpt_pers_find(mpt, /*start_at*/0); \ pers != NULL; \ pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ pers != NULL; \ pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) static mpt_load_handler_t mpt_stdload; static mpt_probe_handler_t mpt_stdprobe; static mpt_attach_handler_t mpt_stdattach; static mpt_enable_handler_t mpt_stdenable; static mpt_event_handler_t mpt_stdevent; static mpt_reset_handler_t mpt_stdreset; static mpt_shutdown_handler_t mpt_stdshutdown; static mpt_detach_handler_t mpt_stddetach; static mpt_unload_handler_t mpt_stdunload; static struct mpt_personality mpt_default_personality = { .load = mpt_stdload, .probe = mpt_stdprobe, .attach = mpt_stdattach, .enable = mpt_stdenable, .event = mpt_stdevent, .reset = mpt_stdreset, .shutdown = mpt_stdshutdown, .detach = mpt_stddetach, .unload = mpt_stdunload }; static mpt_load_handler_t mpt_core_load; static mpt_attach_handler_t mpt_core_attach; static mpt_enable_handler_t mpt_core_enable; static mpt_reset_handler_t mpt_core_ioc_reset; static mpt_event_handler_t mpt_core_event; static mpt_shutdown_handler_t mpt_core_shutdown; static mpt_shutdown_handler_t mpt_core_detach; static mpt_unload_handler_t mpt_core_unload; static struct mpt_personality mpt_core_personality = { .name = "mpt_core", .load = mpt_core_load, .attach = mpt_core_attach, .enable = mpt_core_enable, .event = mpt_core_event, .reset = mpt_core_ioc_reset, .shutdown = mpt_core_shutdown, .detach = mpt_core_detach, .unload = mpt_core_unload, }; /* * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need * ordering information. We want the core to always register FIRST. * other modules are set to SI_ORDER_SECOND. 
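 * Loading mpt_core first means mpt_core_load() has already filled
 * mpt_reply_handlers[] with mpt_default_reply_handler before any other
 * personality runs; mpt_register_handler() depends on that, since it
 * finds a free slot by looking for the default entry.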
*/ static moduledata_t mpt_core_mod = { "mpt_core", mpt_modevent, &mpt_core_personality }; DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(mpt_core, 1); #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) int mpt_modevent(module_t mod, int type, void *data) { struct mpt_personality *pers; int error; pers = (struct mpt_personality *)data; error = 0; switch (type) { case MOD_LOAD: { mpt_load_handler_t **def_handler; mpt_load_handler_t **pers_handler; int i; for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { if (mpt_personalities[i] == NULL) break; } if (i >= MPT_MAX_PERSONALITIES) { error = ENOMEM; break; } pers->id = i; mpt_personalities[i] = pers; /* Install standard/noop handlers for any NULL entries. */ def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); pers_handler = MPT_PERS_FIRST_HANDLER(pers); while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { if (*pers_handler == NULL) *pers_handler = *def_handler; pers_handler++; def_handler++; } error = (pers->load(pers)); if (error != 0) mpt_personalities[i] = NULL; break; } case MOD_SHUTDOWN: break; #if __FreeBSD_version >= 500000 case MOD_QUIESCE: break; #endif case MOD_UNLOAD: error = pers->unload(pers); mpt_personalities[pers->id] = NULL; break; default: error = EINVAL; break; } return (error); } int mpt_stdload(struct mpt_personality *pers) { /* Load is always successfull. */ return (0); } int mpt_stdprobe(struct mpt_softc *mpt) { /* Probe is always successfull. */ return (0); } int mpt_stdattach(struct mpt_softc *mpt) { /* Attach is always successfull. */ return (0); } int mpt_stdenable(struct mpt_softc *mpt) { /* Enable is always successfull. */ return (0); } int mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); /* Event was not for us. */ return (0); } void mpt_stdreset(struct mpt_softc *mpt, int type) { } void mpt_stdshutdown(struct mpt_softc *mpt) { } void mpt_stddetach(struct mpt_softc *mpt) { } int mpt_stdunload(struct mpt_personality *pers) { /* Unload is always successfull. */ return (0); } /******************************* Bus DMA Support ******************************/ void mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mpt_map_info *map_info; map_info = (struct mpt_map_info *)arg; map_info->error = error; map_info->phys = segs->ds_addr; } /**************************** Reply/Event Handling ****************************/ int mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, mpt_handler_t handler, uint32_t *phandler_id) { switch (type) { case MPT_HANDLER_REPLY: { u_int cbi; u_int free_cbi; if (phandler_id == NULL) return (EINVAL); free_cbi = MPT_HANDLER_ID_NONE; for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { /* * If the same handler is registered multiple * times, don't error out. Just return the * index of the original registration. */ if (mpt_reply_handlers[cbi] == handler.reply_handler) { *phandler_id = MPT_CBI_TO_HID(cbi); return (0); } /* * Fill from the front in the hope that * all registered handlers consume only a * single cache line. * * We don't break on the first empty slot so * that the full table is checked to see if * this handler was previously registered. 
*/ if (free_cbi == MPT_HANDLER_ID_NONE && (mpt_reply_handlers[cbi] == mpt_default_reply_handler)) free_cbi = cbi; } if (free_cbi == MPT_HANDLER_ID_NONE) { return (ENOMEM); } mpt_reply_handlers[free_cbi] = handler.reply_handler; *phandler_id = MPT_CBI_TO_HID(free_cbi); break; } default: mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); return (EINVAL); } return (0); } int mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, mpt_handler_t handler, uint32_t handler_id) { switch (type) { case MPT_HANDLER_REPLY: { u_int cbi; cbi = MPT_CBI(handler_id); if (cbi >= MPT_NUM_REPLY_HANDLERS || mpt_reply_handlers[cbi] != handler.reply_handler) return (ENOENT); mpt_reply_handlers[cbi] = mpt_default_reply_handler; break; } default: mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); return (EINVAL); } return (0); } static int mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { mpt_prt(mpt, "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", req, req->serno, reply_desc, reply_frame); if (reply_frame != NULL) mpt_dump_reply_frame(mpt, reply_frame); mpt_prt(mpt, "Reply Frame Ignored\n"); return (/*free_reply*/TRUE); } static int mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { if (req != NULL) { if (reply_frame != NULL) { MSG_CONFIG *cfgp; MSG_CONFIG_REPLY *reply; cfgp = (MSG_CONFIG *)req->req_vbuf; reply = (MSG_CONFIG_REPLY *)reply_frame; req->IOCStatus = le16toh(reply_frame->IOCStatus); bcopy(&reply->Header, &cfgp->Header, sizeof(cfgp->Header)); } req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } } return (TRUE); } static int mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { /* Nothing to be done. */ return (TRUE); } static int mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int free_reply; KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); free_reply = TRUE; switch (reply_frame->Function) { case MPI_FUNCTION_EVENT_NOTIFICATION: { MSG_EVENT_NOTIFY_REPLY *msg; struct mpt_personality *pers; u_int handled; handled = 0; msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; MPT_PERS_FOREACH(mpt, pers) handled += pers->event(mpt, req, msg); if (handled == 0 && mpt->mpt_pers_mask == 0) { mpt_lprt(mpt, MPT_PRT_INFO, "No Handlers For Any Event Notify Frames. " "Event %#x (ACK %sequired).\n", msg->Event, msg->AckRequired? "r" : "not r"); } else if (handled == 0) { mpt_lprt(mpt, MPT_PRT_WARN, "Unhandled Event Notify Frame. Event %#x " "(ACK %sequired).\n", msg->Event, msg->AckRequired? 
"r" : "not r"); } if (msg->AckRequired) { request_t *ack_req; uint32_t context; context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); ack_req = mpt_get_request(mpt, FALSE); if (ack_req == NULL) { struct mpt_evtf_record *evtf; evtf = (struct mpt_evtf_record *)reply_frame; evtf->context = context; LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); free_reply = FALSE; break; } mpt_send_event_ack(mpt, ack_req, msg, context); /* * Don't check for CONTINUATION_REPLY here */ return (free_reply); } break; } case MPI_FUNCTION_PORT_ENABLE: mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); break; case MPI_FUNCTION_EVENT_ACK: break; default: mpt_prt(mpt, "unknown event function: %x\n", reply_frame->Function); break; } /* * I'm not sure that this continuation stuff works as it should. * * I've had FC async events occur that free the frame up because * the continuation bit isn't set, and then additional async events * then occur using the same context. As you might imagine, this * leads to Very Bad Thing. * * Let's just be safe for now and not free them up until we figure * out what's actually happening here. */ #if 0 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", reply_frame->Function, req, req->serno); if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { MSG_EVENT_NOTIFY_REPLY *msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; mpt_prtc(mpt, " Event=0x%x AckReq=%d", msg->Event, msg->AckRequired); } } else { mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", reply_frame->Function, req, req->serno); if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { MSG_EVENT_NOTIFY_REPLY *msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; mpt_prtc(mpt, " Event=0x%x AckReq=%d", msg->Event, msg->AckRequired); } mpt_prtc(mpt, "\n"); } #endif return (free_reply); } /* * Process an asynchronous event from the IOC. */ static int mpt_core_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", msg->Event & 0xFF); switch(msg->Event & 0xFF) { case MPI_EVENT_NONE: break; case MPI_EVENT_LOG_DATA: { int i; /* Some error occured that LSI wants logged */ mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", msg->IOCLogInfo); mpt_prt(mpt, "\tEvtLogData: Event Data:"); for (i = 0; i < msg->EventDataLength; i++) mpt_prtc(mpt, " %08x", msg->Data[i]); mpt_prtc(mpt, "\n"); break; } case MPI_EVENT_EVENT_CHANGE: /* * This is just an acknowledgement * of our mpt_send_event_request. 
*/ break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: break; default: return (0); break; } return (1); } static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) { MSG_EVENT_ACK *ackp; ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; memset(ackp, 0, sizeof (*ackp)); ackp->Function = MPI_FUNCTION_EVENT_ACK; ackp->Event = msg->Event; ackp->EventContext = msg->EventContext; ackp->MsgContext = context; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, ack_req); } /***************************** Interrupt Handling *****************************/ void mpt_intr(void *arg) { struct mpt_softc *mpt; uint32_t reply_desc; int ntrips = 0; mpt = (struct mpt_softc *)arg; mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n"); while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { request_t *req; MSG_DEFAULT_REPLY *reply_frame; uint32_t reply_baddr; uint32_t ctxt_idx; u_int cb_index; u_int req_index; int free_rf; req = NULL; reply_frame = NULL; reply_baddr = 0; if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { u_int offset; /* * Insure that the reply frame is coherent. */ reply_baddr = MPT_REPLY_BADDR(reply_desc); offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset, MPT_REPLY_SIZE, BUS_DMASYNC_POSTREAD); reply_frame = MPT_REPLY_OTOV(mpt, offset); ctxt_idx = le32toh(reply_frame->MsgContext); } else { uint32_t type; type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc); ctxt_idx = reply_desc; mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n", reply_desc); switch (type) { case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT: ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK; break; case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET: ctxt_idx = GET_IO_INDEX(reply_desc); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_intr: no target cmd ptrs\n"); reply_desc = MPT_REPLY_EMPTY; break; } if (ctxt_idx >= mpt->tgt_cmds_allocated) { mpt_prt(mpt, "mpt_intr: bad tgt cmd ctxt %u\n", ctxt_idx); reply_desc = MPT_REPLY_EMPTY; ntrips = 1000; break; } req = mpt->tgt_cmd_ptrs[ctxt_idx]; if (req == NULL) { mpt_prt(mpt, "no request backpointer " "at index %u", ctxt_idx); reply_desc = MPT_REPLY_EMPTY; ntrips = 1000; break; } /* * Reformulate ctxt_idx to be just as if * it were another type of context reply * so the code below will find the request * via indexing into the pool. 
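/*
 * The rebuilt value is req->index OR'd with the registered SCSI target
 * handler id, which mirrors the MsgContext layout used for requests we
 * initiate ourselves.
 */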
*/ ctxt_idx = req->index | mpt->scsi_tgt_handler_id; req = NULL; break; case MPI_CONTEXT_REPLY_TYPE_LAN: mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n", reply_desc); reply_desc = MPT_REPLY_EMPTY; break; default: mpt_prt(mpt, "Context Reply 0x%08x?\n", type); reply_desc = MPT_REPLY_EMPTY; break; } if (reply_desc == MPT_REPLY_EMPTY) { if (ntrips++ > 1000) { break; } continue; } } cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx); req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); if (req_index < MPT_MAX_REQUESTS(mpt)) { req = &mpt->request_pool[req_index]; } else { mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc ==" " 0x%x)\n", req_index, reply_desc); } free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_desc, reply_frame); if (reply_frame != NULL && free_rf) { mpt_free_reply(mpt, reply_baddr); } /* * If we got ourselves disabled, don't get stuck in a loop */ if (mpt->disabled) { mpt_disable_ints(mpt); break; } if (ntrips++ > 1000) { break; } } mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n"); } /******************************* Error Recovery *******************************/ void mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, u_int iocstatus) { MSG_DEFAULT_REPLY ioc_status_frame; request_t *req; memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); ioc_status_frame.IOCStatus = iocstatus; while((req = TAILQ_FIRST(chain)) != NULL) { MSG_REQUEST_HEADER *msg_hdr; u_int cb_index; TAILQ_REMOVE(chain, req, links); msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; ioc_status_frame.Function = msg_hdr->Function; ioc_status_frame.MsgContext = msg_hdr->MsgContext; cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, &ioc_status_frame); } } /********************************* Diagnostics ********************************/ /* * Perform a diagnostic dump of a reply frame. */ void mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) { mpt_prt(mpt, "Address Reply:\n"); mpt_print_reply(reply_frame); } /******************************* Doorbell Access ******************************/ static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt) { return mpt_read(mpt, MPT_OFFSET_DOORBELL); } static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt) { return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); } /* Busy wait for a door bell to be read by IOC */ static int mpt_wait_db_ack(struct mpt_softc *mpt) { int i; for (i=0; i < MPT_MAX_WAIT; i++) { if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { maxwait_ack = i > maxwait_ack ? i : maxwait_ack; return (MPT_OK); } DELAY(200); } return (MPT_FAIL); } /* Busy wait for a door bell interrupt */ static int mpt_wait_db_int(struct mpt_softc *mpt) { int i; for (i=0; i < MPT_MAX_WAIT; i++) { if (MPT_DB_INTR(mpt_rd_intr(mpt))) { maxwait_int = i > maxwait_int ? 
i : maxwait_int; return MPT_OK; } DELAY(100); } return (MPT_FAIL); } /* Wait for IOC to transition to a give state */ void mpt_check_doorbell(struct mpt_softc *mpt) { uint32_t db = mpt_rd_db(mpt); if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { mpt_prt(mpt, "Device not running\n"); mpt_print_db(db); } } /* Wait for IOC to transition to a give state */ static int mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) { int i; for (i = 0; i < MPT_MAX_WAIT; i++) { uint32_t db = mpt_rd_db(mpt); if (MPT_STATE(db) == state) { maxwait_state = i > maxwait_state ? i : maxwait_state; return (MPT_OK); } DELAY(100); } return (MPT_FAIL); } /************************* Intialization/Configuration ************************/ static int mpt_download_fw(struct mpt_softc *mpt); /* Issue the reset COMMAND to the IOC */ static int mpt_soft_reset(struct mpt_softc *mpt) { mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); /* Have to use hard reset if we are not in Running state */ if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { mpt_prt(mpt, "soft reset failed: device not running\n"); return (MPT_FAIL); } /* If door bell is in use we don't have a chance of getting * a word in since the IOC probably crashed in message * processing. So don't waste our time. */ if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); return (MPT_FAIL); } /* Send the reset request to the IOC */ mpt_write(mpt, MPT_OFFSET_DOORBELL, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); if (mpt_wait_db_ack(mpt) != MPT_OK) { mpt_prt(mpt, "soft reset failed: ack timeout\n"); return (MPT_FAIL); } /* Wait for the IOC to reload and come out of reset state */ if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { mpt_prt(mpt, "soft reset failed: device did not restart\n"); return (MPT_FAIL); } return MPT_OK; } static int mpt_enable_diag_mode(struct mpt_softc *mpt) { int try; try = 20; while (--try) { if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) break; /* Enable diagnostic registers */ mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); DELAY(100000); } if (try == 0) return (EIO); return (0); } static void mpt_disable_diag_mode(struct mpt_softc *mpt) { mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); } /* This is a magic diagnostic reset that resets all the ARM * processors in the chip. */ static void mpt_hard_reset(struct mpt_softc *mpt) { int error; int wait; uint32_t diagreg; mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); error = mpt_enable_diag_mode(mpt); if (error) { mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); mpt_prt(mpt, "Trying to reset anyway.\n"); } diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); /* * This appears to be a workaround required for some * firmware or hardware revs. */ mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); DELAY(1000); /* Diag. port is now active so we can now hit the reset bit */ mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); /* * Ensure that the reset has finished. We delay 1ms * prior to reading the register to make sure the chip * has sufficiently completed its reset to handle register * accesses. 
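 * Up to five seconds (5000 polls of 1ms each) are allowed for the
 * diagnostic register to reflect the reset before we warn and press on.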
*/ wait = 5000; do { DELAY(1000); diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); if (wait == 0) { mpt_prt(mpt, "WARNING - Failed hard reset! " "Trying to initialize anyway.\n"); } /* * If we have firmware to download, it must be loaded before * the controller will become operational. Do so now. */ if (mpt->fw_image != NULL) { error = mpt_download_fw(mpt); if (error) { mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); mpt_prt(mpt, "Trying to initialize anyway.\n"); } } /* * Reseting the controller should have disabled write * access to the diagnostic registers, but disable * manually to be sure. */ mpt_disable_diag_mode(mpt); } static void mpt_core_ioc_reset(struct mpt_softc *mpt, int type) { /* * Complete all pending requests with a status * appropriate for an IOC reset. */ mpt_complete_request_chain(mpt, &mpt->request_pending_list, MPI_IOCSTATUS_INVALID_STATE); } /* * Reset the IOC when needed. Try software command first then if needed * poke at the magic diagnostic reset. Note that a hard reset resets * *both* IOCs on dual function chips (FC929 && LSI1030) as well as * fouls up the PCI configuration registers. */ int mpt_reset(struct mpt_softc *mpt, int reinit) { struct mpt_personality *pers; int ret; int retry_cnt = 0; /* * Try a soft reset. If that fails, get out the big hammer. */ again: if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { int cnt; for (cnt = 0; cnt < 5; cnt++) { /* Failed; do a hard reset */ mpt_hard_reset(mpt); /* * Wait for the IOC to reload * and come out of reset state */ ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); if (ret == MPT_OK) { break; } /* * Okay- try to check again... */ ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); if (ret == MPT_OK) { break; } mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", retry_cnt, cnt); } } if (retry_cnt == 0) { /* * Invoke reset handlers. We bump the reset count so * that mpt_wait_req() understands that regardless of * the specified wait condition, it should stop its wait. 
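/*
 * mpt_wait_req() samples reset_cnt before it starts waiting and returns
 * EIO if the value has changed by the time it stops, so requests that
 * were pending across the reset are failed rather than treated as done.
 */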
*/ mpt->reset_cnt++; MPT_PERS_FOREACH(mpt, pers) pers->reset(mpt, ret); } if (reinit) { ret = mpt_enable_ioc(mpt, 1); if (ret == MPT_OK) { mpt_enable_ints(mpt); } } if (ret != MPT_OK && retry_cnt++ < 2) { goto again; } return ret; } /* Return a command buffer to the free queue */ void mpt_free_request(struct mpt_softc *mpt, request_t *req) { request_t *nxt; struct mpt_evtf_record *record; uint32_t reply_baddr; if (req == NULL || req != &mpt->request_pool[req->index]) { panic("mpt_free_request bad req ptr\n"); return; } if ((nxt = req->chain) != NULL) { req->chain = NULL; mpt_free_request(mpt, nxt); /* NB: recursion */ } KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n")); KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("mpt_free_request: req %p:%u func %x already on freelist", req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); KASSERT(mpt_req_on_pending_list(mpt, req) == 0, ("mpt_free_request: req %p:%u func %x on pending list", req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__); #endif req->ccb = NULL; if (LIST_EMPTY(&mpt->ack_frames)) { /* * Insert free ones at the tail */ req->serno = 0; req->state = REQ_STATE_FREE; #ifdef INVARIANTS memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); #endif TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); if (mpt->getreqwaiter != 0) { mpt->getreqwaiter = 0; wakeup(&mpt->request_free_list); } return; } /* * Process an ack frame deferred due to resource shortage. */ record = LIST_FIRST(&mpt->ack_frames); LIST_REMOVE(record, links); req->state = REQ_STATE_ALLOCATED; mpt_assign_serno(mpt, req); mpt_send_event_ack(mpt, req, &record->reply, record->context); reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply) + (mpt->reply_phys & 0xFFFFFFFF); mpt_free_reply(mpt, reply_baddr); } /* Get a command buffer from the free queue */ request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok) { request_t *req; retry: KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n")); req = TAILQ_FIRST(&mpt->request_free_list); if (req != NULL) { KASSERT(req == &mpt->request_pool[req->index], ("mpt_get_request: corrupted request free list\n")); KASSERT(req->state == REQ_STATE_FREE, ("req %p:%u not free on free list %x index %d function %x", req, req->serno, req->state, req->index, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); TAILQ_REMOVE(&mpt->request_free_list, req, links); req->state = REQ_STATE_ALLOCATED; req->chain = NULL; mpt_assign_serno(mpt, req); } else if (sleep_ok != 0) { mpt->getreqwaiter = 1; mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); goto retry; } return (req); } /* Pass the command to the IOC */ void mpt_send_cmd(struct mpt_softc *mpt, request_t *req) { if (mpt->verbose > MPT_PRT_DEBUG2) { mpt_dump_request(mpt, req); } bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, BUS_DMASYNC_PREWRITE); req->state |= REQ_STATE_QUEUED; KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("req %p:%u func %x on freelist list in mpt_send_cmd", req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); KASSERT(mpt_req_on_pending_list(mpt, req) == 0, ("req %p:%u func %x already on pending list in mpt_send_cmd", req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); mpt_write(mpt, MPT_OFFSET_REQUEST_Q, 
(uint32_t) req->req_pbuf); } /* * Wait for a request to complete. * * Inputs: * mpt softc of controller executing request * req request to wait for * sleep_ok nonzero implies may sleep in this context * time_ms timeout in ms. 0 implies no timeout. * * Return Values: * 0 Request completed * non-0 Timeout fired before request completion. */ int mpt_wait_req(struct mpt_softc *mpt, request_t *req, mpt_req_state_t state, mpt_req_state_t mask, int sleep_ok, int time_ms) { int error; int timeout; u_int saved_cnt; /* * timeout is in ms. 0 indicates infinite wait. * Convert to ticks or 500us units depending on * our sleep mode. */ if (sleep_ok != 0) { timeout = (time_ms * hz) / 1000; } else { timeout = time_ms * 2; } req->state |= REQ_STATE_NEED_WAKEUP; mask &= ~REQ_STATE_NEED_WAKEUP; saved_cnt = mpt->reset_cnt; while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { if (sleep_ok != 0) { error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); if (error == EWOULDBLOCK) { timeout = 0; break; } } else { if (time_ms != 0 && --timeout == 0) { break; } DELAY(500); mpt_intr(mpt); } } req->state &= ~REQ_STATE_NEED_WAKEUP; if (mpt->reset_cnt != saved_cnt) { return (EIO); } if (time_ms && timeout <= 0) { MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); return (ETIMEDOUT); } return (0); } /* * Send a command to the IOC via the handshake register. * * Only done at initialization time and for certain unusual * commands such as device/bus reset as specified by LSI. */ int mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) { int i; uint32_t data, *data32; /* Check condition of the IOC */ data = mpt_rd_db(mpt); if ((MPT_STATE(data) != MPT_DB_STATE_READY && MPT_STATE(data) != MPT_DB_STATE_RUNNING && MPT_STATE(data) != MPT_DB_STATE_FAULT) || MPT_DB_IS_IN_USE(data)) { mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); mpt_print_db(data); return (EBUSY); } /* We move things in 32 bit chunks */ len = (len + 3) >> 2; data32 = cmd; /* Clear any left over pending doorbell interupts */ if (MPT_DB_INTR(mpt_rd_intr(mpt))) mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); /* * Tell the handshake reg. we are going to send a command * and how long it is going to be. */ data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); mpt_write(mpt, MPT_OFFSET_DOORBELL, data); /* Wait for the chip to notice */ if (mpt_wait_db_int(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); return (ETIMEDOUT); } /* Clear the interrupt */ mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); if (mpt_wait_db_ack(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); return (ETIMEDOUT); } /* Send the command */ for (i = 0; i < len; i++) { mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); if (mpt_wait_db_ack(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_send_handshake_cmd timeout! 
index = %d\n", i); return (ETIMEDOUT); } } return MPT_OK; } /* Get the response from the handshake register */ int mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) { int left, reply_left; u_int16_t *data16; MSG_DEFAULT_REPLY *hdr; /* We move things out in 16 bit chunks */ reply_len >>= 1; data16 = (u_int16_t *)reply; hdr = (MSG_DEFAULT_REPLY *)reply; /* Get first word */ if (mpt_wait_db_int(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); return ETIMEDOUT; } *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); /* Get Second Word */ if (mpt_wait_db_int(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); return ETIMEDOUT; } *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); /* * With the second word, we can now look at the length. * Warn about a reply that's too short (except for IOC FACTS REPLY) */ if ((reply_len >> 1) != hdr->MsgLength && (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ #if __FreeBSD_version >= 500000 mpt_prt(mpt, "reply length does not match message length: " "got %x; expected %zx for function %x\n", hdr->MsgLength << 2, reply_len << 1, hdr->Function); #else mpt_prt(mpt, "reply length does not match message length: " "got %x; expected %x for function %x\n", hdr->MsgLength << 2, reply_len << 1, hdr->Function); #endif } /* Get rest of the reply; but don't overflow the provided buffer */ left = (hdr->MsgLength << 1) - 2; reply_left = reply_len - 2; while (left--) { u_int16_t datum; if (mpt_wait_db_int(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); return ETIMEDOUT; } datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); if (reply_left-- > 0) *data16++ = datum & MPT_DB_DATA_MASK; mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); } /* One more wait & clear at the end */ if (mpt_wait_db_int(mpt) != MPT_OK) { mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); return ETIMEDOUT; } mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { if (mpt->verbose >= MPT_PRT_TRACE) mpt_print_reply(hdr); return (MPT_FAIL | hdr->IOCStatus); } return (0); } static int mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) { MSG_IOC_FACTS f_req; int error; memset(&f_req, 0, sizeof f_req); f_req.Function = MPI_FUNCTION_IOC_FACTS; f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); if (error) return(error); error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); return (error); } static int mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) { MSG_PORT_FACTS f_req; int error; /* XXX: Only getting PORT FACTS for Port 0 */ memset(&f_req, 0, sizeof f_req); f_req.Function = MPI_FUNCTION_PORT_FACTS; f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); if (error) return(error); error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); return (error); } /* * Send the initialization request. This is where we specify how many * SCSI busses and how many devices per bus we wish to emulate. * This is also the command that specifies the max size of the reply * frames from the IOC that we will be allocating. 
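 * MaxDevices is taken from mpt->mpt_max_devices, which mpt_configure_ioc()
 * captured from the PORT FACTS reply, rather than being hard-coded per
 * controller type.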
*/ static int mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) { int error = 0; MSG_IOC_INIT init; MSG_IOC_INIT_REPLY reply; memset(&init, 0, sizeof init); init.WhoInit = who; init.Function = MPI_FUNCTION_IOC_INIT; - if (mpt->is_fc) { - init.MaxDevices = 255; - } else if (mpt->is_sas) { - init.MaxDevices = mpt->mpt_max_devices; - } else { - init.MaxDevices = 16; - } + init.MaxDevices = mpt->mpt_max_devices; init.MaxBuses = 1; init.MsgVersion = htole16(MPI_VERSION); init.HeaderVersion = htole16(MPI_HEADER_VERSION); init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { return(error); } error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); return (error); } /* * Utiltity routine to read configuration headers and pages */ int mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, u_int PageVersion, u_int PageLength, u_int PageNumber, u_int PageType, uint32_t PageAddress, bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms) { MSG_CONFIG *cfgp; SGE_SIMPLE32 *se; cfgp = req->req_vbuf; memset(cfgp, 0, sizeof *cfgp); cfgp->Action = Action; cfgp->Function = MPI_FUNCTION_CONFIG; cfgp->Header.PageVersion = PageVersion; cfgp->Header.PageLength = PageLength; cfgp->Header.PageNumber = PageNumber; cfgp->Header.PageType = PageType; cfgp->PageAddress = PageAddress; se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; se->Address = addr; MPI_pSGE_SET_LENGTH(se, len); MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_END_OF_LIST | ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, timeout_ms)); } int mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, int sleep_ok, int timeout_ms) { request_t *req; MSG_CONFIG *cfgp; int error; req = mpt_get_request(mpt, sleep_ok); if (req == NULL) { mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); return (ENOMEM); } error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, /*PageVersion*/0, /*PageLength*/0, PageNumber, PageType, PageAddress, /*addr*/0, /*len*/0, sleep_ok, timeout_ms); if (error != 0) { mpt_free_request(mpt, req); mpt_prt(mpt, "read_cfg_header timed out\n"); return (ETIMEDOUT); } switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { case MPI_IOCSTATUS_SUCCESS: cfgp = req->req_vbuf; bcopy(&cfgp->Header, rslt, sizeof(*rslt)); error = 0; break; case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: mpt_lprt(mpt, MPT_PRT_DEBUG, "Invalid Page Type %d Number %d Addr 0x%0x\n", PageType, PageNumber, PageAddress); error = EINVAL; break; default: mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", req->IOCStatus); error = EIO; break; } mpt_free_request(mpt, req); return (error); } int mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { request_t *req; int error; req = mpt_get_request(mpt, sleep_ok); if (req == NULL) { mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); return (-1); } error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, hdr->PageLength, hdr->PageNumber, 
hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, PageAddress, req->req_pbuf + MPT_RQSL(mpt), len, sleep_ok, timeout_ms); if (error != 0) { mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); return (-1); } if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", req->IOCStatus); mpt_free_request(mpt, req); return (-1); } bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, BUS_DMASYNC_POSTREAD); memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); mpt_free_request(mpt, req); return (0); } int mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { request_t *req; u_int hdr_attr; int error; hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { mpt_prt(mpt, "page type 0x%x not changeable\n", hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); return (-1); } hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, req = mpt_get_request(mpt, sleep_ok); if (req == NULL) return (-1); memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); /* Restore stripped out attributes */ hdr->PageType |= hdr_attr; error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, hdr->PageLength, hdr->PageNumber, hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, PageAddress, req->req_pbuf + MPT_RQSL(mpt), len, sleep_ok, timeout_ms); if (error != 0) { mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); return (-1); } if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", req->IOCStatus); mpt_free_request(mpt, req); return (-1); } mpt_free_request(mpt, req); return (0); } /* * Read IOC configuration information */ static int mpt_read_config_info_ioc(struct mpt_softc *mpt) { CONFIG_PAGE_HEADER hdr; struct mpt_raid_volume *mpt_raid; int rv; int i; size_t len; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr, FALSE, 5000); /* * If it's an invalid page, so what? Not a supported function.... 
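 * (Typically this just means the controller has no Integrated RAID
 * support, so carry on without setting up any RAID state.)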
*/ if (rv == EINVAL) { return (0); } if (rv) { return (rv); } #if __FreeBSD_version >= 500000 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " "num %x, type %x\n", hdr.PageVersion, hdr.PageLength * sizeof(uint32_t), hdr.PageNumber, hdr.PageType); #else mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " "num %x, type %x\n", hdr.PageVersion, hdr.PageLength * sizeof(uint32_t), hdr.PageNumber, hdr.PageType); #endif len = hdr.PageLength * sizeof(uint32_t); mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->ioc_page2 == NULL) { mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); mpt_raid_free_mem(mpt); return (ENOMEM); } memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->ioc_page2->Header, len, FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read IOC Page 2\n"); mpt_raid_free_mem(mpt); return (EIO); } if (mpt->ioc_page2->CapabilitiesFlags != 0) { uint32_t mask; mpt_prt(mpt, "Capabilities: ("); for (mask = 1; mask != 0; mask <<= 1) { if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { continue; } switch (mask) { case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: mpt_prtc(mpt, " RAID-0"); break; case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: mpt_prtc(mpt, " RAID-1E"); break; case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: mpt_prtc(mpt, " RAID-1"); break; case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: mpt_prtc(mpt, " SES"); break; case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: mpt_prtc(mpt, " SAFTE"); break; case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: mpt_prtc(mpt, " Multi-Channel-Arrays"); default: break; } } mpt_prtc(mpt, " )\n"); if ((mpt->ioc_page2->CapabilitiesFlags & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", mpt->ioc_page2->NumActiveVolumes, mpt->ioc_page2->NumActiveVolumes != 1 ? "s " : " ", mpt->ioc_page2->MaxVolumes); mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", mpt->ioc_page2->NumActivePhysDisks, mpt->ioc_page2->NumActivePhysDisks != 1 ? "s " : " ", mpt->ioc_page2->MaxPhysDisks); } } len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->raid_volumes == NULL) { mpt_prt(mpt, "Could not allocate RAID volume data\n"); mpt_raid_free_mem(mpt); return (ENOMEM); } /* * Copy critical data out of ioc_page2 so that we can * safely refresh the page without windows of unreliable * data. */ mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; len = sizeof(*mpt->raid_volumes->config_page) + (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt_raid = &mpt->raid_volumes[i]; mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt_raid->config_page == NULL) { mpt_prt(mpt, "Could not allocate RAID page data\n"); mpt_raid_free_mem(mpt); return (ENOMEM); } } mpt->raid_page0_len = len; len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->raid_disks == NULL) { mpt_prt(mpt, "Could not allocate RAID disk data\n"); mpt_raid_free_mem(mpt); return (ENOMEM); } mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; /* * Load page 3. 
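 * IOC page 3 describes the physical disks behind any RAID volumes; it is
 * fetched with the same header-then-current-page sequence used for page 2.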
*/ rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr, FALSE, 5000); if (rv) { mpt_raid_free_mem(mpt); return (EIO); } mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); len = hdr.PageLength * sizeof(uint32_t); mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->ioc_page3 == NULL) { mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); mpt_raid_free_mem(mpt); return (ENOMEM); } memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->ioc_page3->Header, len, FALSE, 5000); if (rv) { mpt_raid_free_mem(mpt); return (EIO); } mpt_raid_wakeup(mpt); return (0); } /* * Enable IOC port */ static int mpt_send_port_enable(struct mpt_softc *mpt, int port) { request_t *req; MSG_PORT_ENABLE *enable_req; int error; req = mpt_get_request(mpt, /*sleep_ok*/FALSE); if (req == NULL) return (-1); enable_req = req->req_vbuf; memset(enable_req, 0, MPT_RQSL(mpt)); enable_req->Function = MPI_FUNCTION_PORT_ENABLE; enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); enable_req->PortNumber = port; mpt_check_doorbell(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); mpt_send_cmd(mpt, req); error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000); if (error != 0) { mpt_prt(mpt, "port %d enable timed out\n", port); return (-1); } mpt_free_request(mpt, req); mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); return (0); } /* * Enable/Disable asynchronous event reporting. */ static int mpt_send_event_request(struct mpt_softc *mpt, int onoff) { request_t *req; MSG_EVENT_NOTIFY *enable_req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } enable_req = req->req_vbuf; memset(enable_req, 0, sizeof *enable_req); enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); enable_req->Switch = onoff; mpt_check_doorbell(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", onoff ? "en" : "dis"); /* * Send the command off, but don't wait for it. */ mpt_send_cmd(mpt, req); return (0); } /* * Un-mask the interupts on the chip. */ void mpt_enable_ints(struct mpt_softc *mpt) { /* Unmask every thing except door bell int */ mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); } /* * Mask the interupts on the chip. */ void mpt_disable_ints(struct mpt_softc *mpt) { /* Mask all interrupts */ mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); } static void mpt_sysctl_attach(struct mpt_softc *mpt) { #if __FreeBSD_version >= 500000 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &mpt->verbose, 0, "Debugging/Verbose level"); #endif } int mpt_attach(struct mpt_softc *mpt) { struct mpt_personality *pers; int i; int error; for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { pers = mpt_personalities[i]; if (pers == NULL) { continue; } if (pers->probe(mpt) == 0) { error = pers->attach(mpt); if (error != 0) { mpt_detach(mpt); return (error); } mpt->mpt_pers_mask |= (0x1 << pers->id); pers->use_count++; } } /* * Now that we've attached everything, do the enable function * for all of the personalities. This allows the personalities * to do setups that are appropriate for them prior to enabling * any ports. 
*/ for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { pers = mpt_personalities[i]; if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { error = pers->enable(mpt); if (error != 0) { mpt_prt(mpt, "personality %s attached but would" " not enable (%d)\n", pers->name, error); mpt_detach(mpt); return (error); } } } return (0); } int mpt_shutdown(struct mpt_softc *mpt) { struct mpt_personality *pers; MPT_PERS_FOREACH_REVERSE(mpt, pers) { pers->shutdown(mpt); } return (0); } int mpt_detach(struct mpt_softc *mpt) { struct mpt_personality *pers; MPT_PERS_FOREACH_REVERSE(mpt, pers) { pers->detach(mpt); mpt->mpt_pers_mask &= ~(0x1 << pers->id); pers->use_count--; } return (0); } int mpt_core_load(struct mpt_personality *pers) { int i; /* * Setup core handlers and insert the default handler * into all "empty slots". */ for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { mpt_reply_handlers[i] = mpt_default_reply_handler; } mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = mpt_event_reply_handler; mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = mpt_config_reply_handler; mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = mpt_handshake_reply_handler; return (0); } /* * Initialize per-instance driver data and perform * initial controller configuration. */ int mpt_core_attach(struct mpt_softc *mpt) { int val; int error; LIST_INIT(&mpt->ack_frames); /* Put all request buffers on the free list */ TAILQ_INIT(&mpt->request_pending_list); TAILQ_INIT(&mpt->request_free_list); TAILQ_INIT(&mpt->request_timeout_list); for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { request_t *req = &mpt->request_pool[val]; req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, req); } for (val = 0; val < MPT_MAX_LUNS; val++) { STAILQ_INIT(&mpt->trt[val].atios); STAILQ_INIT(&mpt->trt[val].inots); } STAILQ_INIT(&mpt->trt_wildcard.atios); STAILQ_INIT(&mpt->trt_wildcard.inots); mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; mpt_sysctl_attach(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); error = mpt_configure_ioc(mpt); return (error); } int mpt_core_enable(struct mpt_softc *mpt) { /* * We enter with the IOC enabled, but async events * not enabled, ports not enabled and interrupts * not enabled. */ /* * Enable asynchronous event reporting- all personalities * have attached so that they should be able to now field * async events. */ mpt_send_event_request(mpt, 1); /* * Catch any pending interrupts * * This seems to be crucial- otherwise * the portenable below times out. */ mpt_intr(mpt); /* * Enable Interrupts */ mpt_enable_ints(mpt); /* * Catch any pending interrupts * * This seems to be crucial- otherwise * the portenable below times out. */ mpt_intr(mpt); /* * Enable the port. */ if (mpt_send_port_enable(mpt, 0) != MPT_OK) { mpt_prt(mpt, "failed to enable port 0\n"); return (ENXIO); } return (0); } void mpt_core_shutdown(struct mpt_softc *mpt) { mpt_disable_ints(mpt); } void mpt_core_detach(struct mpt_softc *mpt) { mpt_disable_ints(mpt); } int mpt_core_unload(struct mpt_personality *pers) { /* Unload is always successfull. 
*/ return (0); } #define FW_UPLOAD_REQ_SIZE \ (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) static int mpt_upload_fw(struct mpt_softc *mpt) { uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; MSG_FW_UPLOAD_REPLY fw_reply; MSG_FW_UPLOAD *fw_req; FW_UPLOAD_TCSGE *tsge; SGE_SIMPLE32 *sge; uint32_t flags; int error; memset(&fw_req_buf, 0, sizeof(fw_req_buf)); fw_req = (MSG_FW_UPLOAD *)fw_req_buf; fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; fw_req->Function = MPI_FUNCTION_FW_UPLOAD; fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; tsge->DetailsLength = 12; tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; tsge->ImageSize = htole32(mpt->fw_image_size); sge = (SGE_SIMPLE32 *)(tsge + 1); flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); flags <<= MPI_SGE_FLAGS_SHIFT; sge->FlagsLength = htole32(flags | mpt->fw_image_size); sge->Address = htole32(mpt->fw_phys); error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); if (error) return(error); error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); return (error); } static void mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, uint32_t *data, bus_size_t len) { uint32_t *data_end; data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); - pci_enable_io(mpt->dev, SYS_RES_IOPORT); + if (mpt->is_sas) { + pci_enable_io(mpt->dev, SYS_RES_IOPORT); + } mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); while (data != data_end) { mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); data++; } - pci_disable_io(mpt->dev, SYS_RES_IOPORT); + if (mpt->is_sas) { + pci_disable_io(mpt->dev, SYS_RES_IOPORT); + } } static int mpt_download_fw(struct mpt_softc *mpt) { MpiFwHeader_t *fw_hdr; int error; uint32_t ext_offset; uint32_t data; mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", mpt->fw_image_size); error = mpt_enable_diag_mode(mpt); if (error != 0) { mpt_prt(mpt, "Could not enter diagnostic mode!\n"); return (EIO); } mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); fw_hdr = (MpiFwHeader_t *)mpt->fw_image; mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, fw_hdr->ImageSize); ext_offset = fw_hdr->NextImageHeaderOffset; while (ext_offset != 0) { MpiExtImageHeader_t *ext; ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); ext_offset = ext->NextImageHeaderOffset; mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, ext->ImageSize); } - pci_enable_io(mpt->dev, SYS_RES_IOPORT); + if (mpt->is_sas) { + pci_enable_io(mpt->dev, SYS_RES_IOPORT); + } /* Setup the address to jump to on reset. */ mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); /* * The controller sets the "flash bad" status after attempting * to auto-boot from flash. Clear the status so that the controller * will continue the boot process with our newly installed firmware. */ mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); - pci_disable_io(mpt->dev, SYS_RES_IOPORT); + if (mpt->is_sas) { + pci_disable_io(mpt->dev, SYS_RES_IOPORT); + } /* * Re-enable the processor and clear the boot halt flag. 
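 * MPI_DIAG_DISABLE_ARM was set at the top of mpt_download_fw() while the
 * image was written through the diagnostic window; clearing it (along
 * with MPI_DIAG_PREVENT_IOC_BOOT) lets the IOC boot the new firmware.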
*/ data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); mpt_disable_diag_mode(mpt); return (0); } /* * Allocate/Initialize data structures for the controller. Called * once at instance startup. */ static int mpt_configure_ioc(struct mpt_softc *mpt) { MSG_PORT_FACTS_REPLY pfp; MSG_IOC_FACTS_REPLY facts; int try; int needreset; uint32_t max_chain_depth; needreset = 0; for (try = 0; try < MPT_MAX_TRYS; try++) { /* * No need to reset if the IOC is already in the READY state. * * Force reset if initialization failed previously. * Note that a hard_reset of the second channel of a '929 * will stop operation of the first channel. Hopefully, if the * first channel is ok, the second will not require a hard * reset. */ if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) { if (mpt_reset(mpt, FALSE) != MPT_OK) { continue; } } needreset = 0; if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { mpt_prt(mpt, "mpt_get_iocfacts failed\n"); needreset = 1; continue; } mpt->mpt_global_credits = le16toh(facts.GlobalCredits); mpt->request_frame_size = le16toh(facts.RequestFrameSize); mpt->ioc_facts_flags = facts.Flags; mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", le16toh(facts.MsgVersion) >> 8, le16toh(facts.MsgVersion) & 0xFF, le16toh(facts.HeaderVersion) >> 8, le16toh(facts.HeaderVersion) & 0xFF); /* * Now that we know request frame size, we can calculate * the actual (reasonable) segment limit for read/write I/O. * * This limit is constrained by: * * + The size of each area we allocate per command (and how * many chain segments we can fit into it). * + The total number of areas we've set up. * + The actual chain depth the card will allow. * * The first area's segment count is limited by the I/O request * at the head of it. We cannot allocate realistically more * than MPT_MAX_REQUESTS areas. Therefore, to account for both * conditions, we'll just start out with MPT_MAX_REQUESTS-2. * */ max_chain_depth = facts.MaxChainDepth; /* total number of request areas we (can) allocate */ mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; /* converted to the number of chain areas possible */ mpt->max_seg_cnt *= MPT_NRFM(mpt); /* limited by the number of chain areas the card will support */ if (mpt->max_seg_cnt > max_chain_depth) { mpt_lprt(mpt, MPT_PRT_DEBUG, "chain depth limited to %u (from %u)\n", max_chain_depth, mpt->max_seg_cnt); mpt->max_seg_cnt = max_chain_depth; } /* converted to the number of simple sges in chain segments. */ mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n", mpt->max_seg_cnt); mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n", facts.MsgLength, facts.IOCNumber); mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " "Request Frame Size %u bytes Max Chain Depth %u\n", mpt->mpt_global_credits, facts.BlockSize, mpt->request_frame_size << 2, max_chain_depth); mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, " "Flags=%#x\n", facts.NumberOfPorts, le32toh(facts.FWImageSize), facts.Flags); if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { struct mpt_map_info mi; int error; /* * In some configurations, the IOC's firmware is * stored in a shared piece of system NVRAM that * is only accessable via the BIOS. In this * case, the firmware keeps a copy of firmware in * RAM until the OS driver retrieves it. Once * retrieved, we are responsible for re-downloading * the firmware after any hard-reset. 
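 * A minimal sketch of that recovery step (illustrative; the reset path
 * itself is not part of this hunk) would be, once a hard reset completes:
 *
 *	if (mpt->fw_image != NULL)
 *		mpt_download_fw(mpt);
 *
 * reusing the copy fetched below via mpt_upload_fw().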
*/ mpt->fw_image_size = le32toh(facts.FWImageSize); error = mpt_dma_tag_create(mpt, mpt->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, mpt->fw_image_size, /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, /*flags*/0, &mpt->fw_dmat); if (error != 0) { mpt_prt(mpt, "cannot create fw dma tag\n"); return (ENOMEM); } error = bus_dmamem_alloc(mpt->fw_dmat, (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap); if (error != 0) { mpt_prt(mpt, "cannot allocate fw mem.\n"); bus_dma_tag_destroy(mpt->fw_dmat); return (ENOMEM); } mi.mpt = mpt; mi.error = 0; bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0); mpt->fw_phys = mi.phys; error = mpt_upload_fw(mpt); if (error != 0) { mpt_prt(mpt, "fw upload failed.\n"); bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, mpt->fw_dmap); bus_dma_tag_destroy(mpt->fw_dmat); mpt->fw_image = NULL; return (EIO); } } if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { mpt_prt(mpt, "mpt_get_portfacts failed\n"); needreset = 1; continue; } mpt_lprt(mpt, MPT_PRT_DEBUG, "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, pfp.MaxDevices); mpt->mpt_port_type = pfp.PortType; mpt->mpt_proto_flags = pfp.ProtocolFlags; if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { mpt_prt(mpt, "Unsupported Port Type (%x)\n", pfp.PortType); return (ENXIO); } mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { mpt->is_fc = 1; mpt->is_sas = 0; mpt->is_spi = 0; } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { mpt->is_fc = 0; mpt->is_sas = 1; mpt->is_spi = 0; } else { mpt->is_fc = 0; mpt->is_sas = 0; mpt->is_spi = 1; } mpt->mpt_ini_id = pfp.PortSCSIID; mpt->mpt_max_devices = pfp.MaxDevices; /* * Set our expected role with what this port supports. */ mpt->role = MPT_ROLE_NONE; if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { mpt->role |= MPT_ROLE_INITIATOR; } if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { mpt->role |= MPT_ROLE_TARGET; } if (mpt->role == MPT_ROLE_NONE) { mpt_prt(mpt, "port does not support either target or " "initiator role\n"); return (ENXIO); } if (mpt_enable_ioc(mpt, 0) != MPT_OK) { mpt_prt(mpt, "unable to initialize IOC\n"); return (ENXIO); } /* * Read IOC configuration information. * * We need this to determine whether or not we have certain * settings for Integrated Mirroring (e.g.). */ mpt_read_config_info_ioc(mpt); /* Everything worked */ break; } if (try >= MPT_MAX_TRYS) { mpt_prt(mpt, "failed to initialize IOC"); return (EIO); } return (0); } static int mpt_enable_ioc(struct mpt_softc *mpt, int portenable) { uint32_t pptr; int val; if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { mpt_prt(mpt, "mpt_send_ioc_init failed\n"); return (EIO); } mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { mpt_prt(mpt, "IOC failed to go to run state\n"); return (ENXIO); } mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); /* * Give it reply buffers * * Do *not* exceed global credits. 
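 * As a worked example (PAGE_SIZE assumed to be the usual 4KB): with
 * MPT_REPLY_SIZE of 256 bytes the strict '<' test below admits 15 frames
 * (offsets 0x000 through 0xE00), and the early break further caps the
 * number posted at mpt_global_credits - 1.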
*/ for (val = 0, pptr = mpt->reply_phys; (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); pptr += MPT_REPLY_SIZE) { mpt_free_reply(mpt, pptr); if (++val == mpt->mpt_global_credits - 1) break; } /* * Enable the port if asked. This is only done if we're resetting * the IOC after initial startup. */ if (portenable) { /* * Enable asynchronous event reporting */ mpt_send_event_request(mpt, 1); if (mpt_send_port_enable(mpt, 0) != MPT_OK) { mpt_prt(mpt, "failed to enable port 0\n"); return (ENXIO); } } return (MPT_OK); } Index: head/sys/dev/mpt/mpt.h =================================================================== --- head/sys/dev/mpt/mpt.h (revision 159918) +++ head/sys/dev/mpt/mpt.h (revision 159919) @@ -1,1180 +1,1182 @@ /* $FreeBSD$ */ /*- * Generic defines for LSI '909 FC adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /* * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2004, 2005 Justin T. Gibbs * Copyright (c) 2005, WHEEL Sp. z o.o. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _MPT_H_ #define _MPT_H_ /********************************* OS Includes ********************************/ #include #include #include #include #include #if __FreeBSD_version < 500000 #include #include #include #else #include #include #include #include #include #include #endif #include #include #include #include #include #if __FreeBSD_version < 500000 #include #include #endif #include #if __FreeBSD_version < 500000 #include #include #else #include #include #endif #include #include "opt_ddb.h" /**************************** Register Definitions ****************************/ #include /******************************* MPI Definitions ******************************/ #include #include #include #include #include /* XXX For mpt_debug.c */ #include /****************************** Misc Definitions ******************************/ #define MPT_OK (0) #define MPT_FAIL (0x10000) #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define MPT_ROLE_NONE 0 #define MPT_ROLE_INITIATOR 1 #define MPT_ROLE_TARGET 2 #define MPT_ROLE_BOTH 3 #define MPT_ROLE_DEFAULT MPT_ROLE_INITIATOR /**************************** Forward Declarations ****************************/ struct mpt_softc; struct mpt_personality; typedef struct req_entry request_t; /************************* Personality Module Support *************************/ typedef int mpt_load_handler_t(struct mpt_personality *); typedef int mpt_probe_handler_t(struct mpt_softc *); typedef int mpt_attach_handler_t(struct mpt_softc *); typedef int mpt_enable_handler_t(struct mpt_softc *); typedef int mpt_event_handler_t(struct mpt_softc *, request_t *, MSG_EVENT_NOTIFY_REPLY *); typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/); /* XXX Add return value and use for veto? */ typedef void mpt_shutdown_handler_t(struct mpt_softc *); typedef void mpt_detach_handler_t(struct mpt_softc *); typedef int mpt_unload_handler_t(struct mpt_personality *); struct mpt_personality { const char *name; uint32_t id; /* Assigned identifier. */ u_int use_count; /* Instances using personality*/ mpt_load_handler_t *load; /* configure personailty */ #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load) mpt_probe_handler_t *probe; /* configure personailty */ mpt_attach_handler_t *attach; /* initialize device instance */ mpt_enable_handler_t *enable; /* enable device */ mpt_event_handler_t *event; /* Handle MPI event. */ mpt_reset_handler_t *reset; /* Re-init after reset. */ mpt_shutdown_handler_t *shutdown; /* Shutdown instance. */ mpt_detach_handler_t *detach; /* release device instance */ mpt_unload_handler_t *unload; /* Shutdown personality */ #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload) }; int mpt_modevent(module_t, int, void *); /* Maximum supported number of personalities. */ #define MPT_MAX_PERSONALITIES (15) #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \ MODULE_DEPEND(name, dep, vmin, vpref, vmax) #define DECLARE_MPT_PERSONALITY(name, order) \ static moduledata_t name##_mod = { \ #name, mpt_modevent, &name##_personality \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order); \ MODULE_VERSION(name, 1); \ MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1) /******************************* Bus DMA Support ******************************/ /* XXX Need to update bus_dmamap_sync to take a range argument. 
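 * Until that happens, the shim below simply discards the range, i.e. a
 * call such as (arguments illustrative)
 *
 *	bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
 *	    offset, MPT_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
 *
 * degenerates into a sync of the entire map.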
*/ #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op) \ bus_dmamap_sync(dma_tag, dmamap, op) #if __FreeBSD_version >= 501102 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ busdma_lock_mutex, &Giant, \ dma_tagp) #else #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) #endif struct mpt_map_info { struct mpt_softc *mpt; int error; uint32_t phys; }; void mpt_map_rquest(void *, bus_dma_segment_t *, int, int); /**************************** Kernel Thread Support ***************************/ #if __FreeBSD_version > 500005 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #else #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, fmtstr, arg) #endif /****************************** Timer Facilities ******************************/ #if __FreeBSD_version > 500000 #define mpt_callout_init(c) callout_init(c, /*mpsafe*/0); #else #define mpt_callout_init(c) callout_init(c); #endif /********************************** Endianess *********************************/ static __inline uint64_t u64toh(U64 s) { uint64_t result; result = le32toh(s.Low); result |= ((uint64_t)le32toh(s.High)) << 32; return (result); } /**************************** MPI Transaction State ***************************/ typedef enum { REQ_STATE_NIL = 0x00, REQ_STATE_FREE = 0x01, REQ_STATE_ALLOCATED = 0x02, REQ_STATE_QUEUED = 0x04, REQ_STATE_DONE = 0x08, REQ_STATE_TIMEDOUT = 0x10, REQ_STATE_NEED_WAKEUP = 0x20, REQ_STATE_LOCKED = 0x80, /* can't be freed */ REQ_STATE_MASK = 0xFF } mpt_req_state_t; struct req_entry { TAILQ_ENTRY(req_entry) links; /* Pointer to next in list */ mpt_req_state_t state; /* Request State Information */ uint16_t index; /* Index of this entry */ uint16_t IOCStatus; /* Completion status */ uint16_t ResponseCode; /* TMF Reponse Code */ uint16_t serno; /* serial number */ union ccb *ccb; /* CAM request */ void *req_vbuf; /* Virtual Address of Entry */ void *sense_vbuf; /* Virtual Address of sense data */ bus_addr_t req_pbuf; /* Physical Address of Entry */ bus_addr_t sense_pbuf; /* Physical Address of sense data */ bus_dmamap_t dmap; /* DMA map for data buffers */ struct req_entry *chain; /* for SGE overallocations */ }; /**************************** MPI Target State Info ***************************/ typedef struct { uint32_t reply_desc; /* current reply descriptor */ uint32_t resid; /* current data residual */ uint32_t bytes_xfered; /* current relative offset */ union ccb *ccb; /* pointer to currently active ccb */ request_t *req; /* pointer to currently active assist request */ int nxfers; uint32_t tag_id; enum { TGT_STATE_NIL, TGT_STATE_LOADING, TGT_STATE_LOADED, TGT_STATE_IN_CAM, TGT_STATE_SETTING_UP_FOR_DATA, TGT_STATE_MOVING_DATA, TGT_STATE_MOVING_DATA_AND_STATUS, TGT_STATE_SENDING_STATUS } state; } mpt_tgt_state_t; /* * When we get an incoming command it has its own tag which is called the * IoIndex. 
This is the value we gave that particular command buffer when * we originally assigned it. It's just a number, really. The FC card uses * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which * contains pointers the request_t structures related to that IoIndex. * * What *we* do is construct a tag out of the index for the target command * which owns the incoming ATIO plus a rolling sequence number. */ #define MPT_MAKE_TAGID(mpt, req, ioindex) \ ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff)) #ifdef INVARIANTS #define MPT_TAG_2_REQ(a, b) mpt_tag_2_req(a, (uint32_t) b) #else #define MPT_TAG_2_REQ(mpt, tag) mpt->tgt_cmd_ptrs[tag >> 18] #endif #define MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \ (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)])) STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr); #define MPT_MAX_LUNS 256 typedef struct { struct mpt_hdr_stailq atios; struct mpt_hdr_stailq inots; int enabled; } tgt_resource_t; #define MPT_MAX_ELS 64 /**************************** Handler Registration ****************************/ /* * Global table of registered reply handlers. The * handler is indicated by byte 3 of the request * index submitted to the IOC. This allows the * driver core to perform generic processing without * any knowledge of per-personality behavior. * * MPT_NUM_REPLY_HANDLERS must be a power of 2 * to allow the easy generation of a mask. * * The handler offsets used by the core are hard coded * allowing faster code generation when assigning a handler * to a request. All "personalities" must use the * the handler registration mechanism. * * The IOC handlers that are rarely executed are placed * at the tail of the table to make it more likely that * all commonly executed handlers fit in a single cache * line. */ #define MPT_NUM_REPLY_HANDLERS (32) #define MPT_REPLY_HANDLER_EVENTS MPT_CBI_TO_HID(0) #define MPT_REPLY_HANDLER_CONFIG MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1) #define MPT_REPLY_HANDLER_HANDSHAKE MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2) typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame); typedef union { mpt_reply_handler_t *reply_handler; } mpt_handler_t; typedef enum { MPT_HANDLER_REPLY, MPT_HANDLER_EVENT, MPT_HANDLER_RESET, MPT_HANDLER_SHUTDOWN } mpt_handler_type; struct mpt_handler_record { LIST_ENTRY(mpt_handler_record) links; mpt_handler_t handler; }; LIST_HEAD(mpt_handler_list, mpt_handler_record); /* * The handler_id is currently unused but would contain the * handler ID used in the MsgContext field to allow direction * of replies to the handler. Registrations that don't require * a handler id can pass in NULL for the handler_id. * * Deregistrations for handlers without a handler id should * pass in MPT_HANDLER_ID_NONE. 
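 * For illustration, a personality typically registers and later
 * deregisters a reply handler along these lines (names hypothetical):
 *
 *	mpt_handler_t handler;
 *	uint32_t my_handler_id = MPT_HANDLER_ID_NONE;
 *
 *	handler.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &my_handler_id);
 *	...
 *	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    my_handler_id);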
*/ #define MPT_HANDLER_ID_NONE (0xFFFFFFFF) int mpt_register_handler(struct mpt_softc *, mpt_handler_type, mpt_handler_t, uint32_t *); int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type, mpt_handler_t, uint32_t); /******************* Per-Controller Instance Data Structures ******************/ TAILQ_HEAD(req_queue, req_entry); /* Structure for saving proper values for modifyable PCI config registers */ struct mpt_pci_cfg { uint16_t Command; uint16_t LatencyTimer_LineSize; uint32_t IO_BAR; uint32_t Mem0_BAR[2]; uint32_t Mem1_BAR[2]; uint32_t ROM_BAR; uint8_t IntLine; uint32_t PMCSR; }; typedef enum { MPT_RVF_NONE = 0x0, MPT_RVF_ACTIVE = 0x1, MPT_RVF_ANNOUNCED = 0x2, MPT_RVF_UP2DATE = 0x4, MPT_RVF_REFERENCED = 0x8, MPT_RVF_WCE_CHANGED = 0x10 } mpt_raid_volume_flags; struct mpt_raid_volume { CONFIG_PAGE_RAID_VOL_0 *config_page; MPI_RAID_VOL_INDICATOR sync_progress; mpt_raid_volume_flags flags; u_int quiesced_disks; }; typedef enum { MPT_RDF_NONE = 0x00, MPT_RDF_ACTIVE = 0x01, MPT_RDF_ANNOUNCED = 0x02, MPT_RDF_UP2DATE = 0x04, MPT_RDF_REFERENCED = 0x08, MPT_RDF_QUIESCING = 0x10, MPT_RDF_QUIESCED = 0x20 } mpt_raid_disk_flags; struct mpt_raid_disk { CONFIG_PAGE_RAID_PHYS_DISK_0 config_page; struct mpt_raid_volume *volume; u_int member_number; u_int pass_thru_active; mpt_raid_disk_flags flags; }; struct mpt_evtf_record { MSG_EVENT_NOTIFY_REPLY reply; uint32_t context; LIST_ENTRY(mpt_evtf_record) links; }; LIST_HEAD(mpt_evtf_list, mpt_evtf_record); struct mpt_softc { device_t dev; #if __FreeBSD_version < 500000 uint32_t mpt_islocked; int mpt_splsaved; #else struct mtx mpt_lock; int mpt_locksetup; #endif uint32_t mpt_pers_mask; uint32_t : 8, unit : 8, : 1, twildcard : 1, tenabled : 1, role : 2, /* none, ini, target, both */ : 1, raid_enabled : 1, raid_mwce_set : 1, getreqwaiter : 1, shutdwn_raid : 1, shutdwn_recovery: 1, outofbeer : 1, disabled : 1, is_spi : 1, is_sas : 1, is_fc : 1; u_int verbose; /* * IOC Facts */ uint16_t mpt_global_credits; uint16_t request_frame_size; uint8_t mpt_max_devices; uint8_t mpt_max_buses; uint8_t ioc_facts_flags; uint8_t padding0; /* * Port Facts * XXX - Add multi-port support!. 
*/ uint16_t mpt_ini_id; uint16_t mpt_port_type; uint16_t mpt_proto_flags; uint16_t mpt_max_tgtcmds; /* * Device Configuration Information */ union { struct mpt_spi_cfg { CONFIG_PAGE_SCSI_PORT_0 _port_page0; CONFIG_PAGE_SCSI_PORT_1 _port_page1; CONFIG_PAGE_SCSI_PORT_2 _port_page2; CONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16]; CONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16]; uint16_t _tag_enable; uint16_t _disc_enable; } spi; #define mpt_port_page0 cfg.spi._port_page0 #define mpt_port_page1 cfg.spi._port_page1 #define mpt_port_page2 cfg.spi._port_page2 #define mpt_dev_page0 cfg.spi._dev_page0 #define mpt_dev_page1 cfg.spi._dev_page1 #define mpt_tag_enable cfg.spi._tag_enable #define mpt_disc_enable cfg.spi._disc_enable struct mpi_fc_cfg { CONFIG_PAGE_FC_PORT_0 _port_page0; + uint32_t _port_speed; #define mpt_fcport_page0 cfg.fc._port_page0 +#define mpt_fcport_speed cfg.fc._port_speed } fc; } cfg; /* Controller Info for RAID information */ CONFIG_PAGE_IOC_2 * ioc_page2; CONFIG_PAGE_IOC_3 * ioc_page3; /* Raid Data */ struct mpt_raid_volume* raid_volumes; struct mpt_raid_disk* raid_disks; u_int raid_max_volumes; u_int raid_max_disks; u_int raid_page0_len; u_int raid_wakeup; u_int raid_rescan; u_int raid_resync_rate; u_int raid_mwce_setting; u_int raid_queue_depth; u_int raid_nonopt_volumes; struct proc *raid_thread; struct callout raid_timer; /* * PCI Hardware info */ struct resource * pci_irq; /* Interrupt map for chip */ void * ih; /* Interupt handle */ struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */ /* * DMA Mapping Stuff */ struct resource * pci_reg; /* Register map for chip */ int pci_mem_rid; /* Resource ID */ bus_space_tag_t pci_st; /* Bus tag for registers */ bus_space_handle_t pci_sh; /* Bus handle for registers */ /* PIO versions of above. */ int pci_pio_rid; struct resource * pci_pio_reg; bus_space_tag_t pci_pio_st; bus_space_handle_t pci_pio_sh; bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */ bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */ bus_dmamap_t reply_dmap; /* DMA map for reply memory */ uint8_t *reply; /* KVA of reply memory */ bus_addr_t reply_phys; /* BusAddr of reply memory */ bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */ bus_dma_tag_t request_dmat; /* DMA tag for request memroy */ bus_dmamap_t request_dmap; /* DMA map for request memroy */ uint8_t *request; /* KVA of Request memory */ bus_addr_t request_phys; /* BusAddr of request memory */ uint32_t max_seg_cnt; /* calculated after IOC facts */ /* * Hardware management */ u_int reset_cnt; /* * CAM && Software Management */ request_t *request_pool; struct req_queue request_free_list; struct req_queue request_pending_list; struct req_queue request_timeout_list; struct cam_sim *sim; struct cam_path *path; struct cam_sim *phydisk_sim; struct cam_path *phydisk_path; struct proc *recovery_thread; request_t *tmf_req; /* * Deferred frame acks due to resource shortage. */ struct mpt_evtf_list ack_frames; /* * Target Mode Support */ uint32_t scsi_tgt_handler_id; request_t ** tgt_cmd_ptrs; request_t ** els_cmd_ptrs; /* FC only */ /* * *snork*- this is chosen to be here *just in case* somebody * forgets to point to it exactly and we index off of trt with * CAM_LUN_WILDCARD. 
*/ tgt_resource_t trt_wildcard; /* wildcard luns */ tgt_resource_t trt[MPT_MAX_LUNS]; uint16_t tgt_cmds_allocated; uint16_t els_cmds_allocated; /* FC only */ uint16_t timeouts; /* timeout count */ uint16_t success; /* successes afer timeout */ uint16_t sequence; /* Sequence Number */ uint16_t pad3; /* Paired port in some dual adapters configurations */ struct mpt_softc * mpt2; /* FW Image management */ uint32_t fw_image_size; uint8_t *fw_image; bus_dma_tag_t fw_dmat; /* DMA tag for firmware image */ bus_dmamap_t fw_dmap; /* DMA map for firmware image */ bus_addr_t fw_phys; /* BusAddr of firmware image */ /* Shutdown Event Handler. */ eventhandler_tag eh; TAILQ_ENTRY(mpt_softc) links; }; static __inline void mpt_assign_serno(struct mpt_softc *, request_t *); static __inline void mpt_assign_serno(struct mpt_softc *mpt, request_t *req) { if ((req->serno = mpt->sequence++) == 0) { req->serno = mpt->sequence++; } } /***************************** Locking Primitives *****************************/ #if __FreeBSD_version < 500000 #define MPT_IFLAGS INTR_TYPE_CAM #define MPT_LOCK(mpt) mpt_lockspl(mpt) #define MPT_UNLOCK(mpt) mpt_unlockspl(mpt) #define MPT_OWNED(mpt) mpt->mpt_islocked #define MPTLOCK_2_CAMLOCK MPT_UNLOCK #define CAMLOCK_2_MPTLOCK MPT_LOCK #define MPT_LOCK_SETUP(mpt) #define MPT_LOCK_DESTROY(mpt) static __inline void mpt_lockspl(struct mpt_softc *mpt); static __inline void mpt_unlockspl(struct mpt_softc *mpt); static __inline void mpt_lockspl(struct mpt_softc *mpt) { int s; s = splcam(); if (mpt->mpt_islocked++ == 0) { mpt->mpt_splsaved = s; } else { splx(s); panic("Recursed lock with mask: 0x%x\n", s); } } static __inline void mpt_unlockspl(struct mpt_softc *mpt) { if (mpt->mpt_islocked) { if (--mpt->mpt_islocked == 0) { splx(mpt->mpt_splsaved); } } else panic("Negative lock count\n"); } static __inline int mpt_sleep(struct mpt_softc *mpt, void *ident, int priority, const char *wmesg, int timo) { int saved_cnt; int saved_spl; int error; KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep")); saved_cnt = mpt->mpt_islocked; saved_spl = mpt->mpt_splsaved; mpt->mpt_islocked = 0; error = tsleep(ident, priority, wmesg, timo); KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup")); mpt->mpt_islocked = saved_cnt; mpt->mpt_splsaved = saved_spl; return (error); } #else #ifdef LOCKING_WORKED_AS_IT_SHOULD #error "Shouldn't Be Here!" 
#define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE #define MPT_LOCK_SETUP(mpt) \ mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF); \ mpt->mpt_locksetup = 1 #define MPT_LOCK_DESTROY(mpt) \ if (mpt->mpt_locksetup) { \ mtx_destroy(&mpt->mpt_lock); \ mpt->mpt_locksetup = 0; \ } #define MPT_LOCK(mpt) mtx_lock(&(mpt)->mpt_lock) #define MPT_UNLOCK(mpt) mtx_unlock(&(mpt)->mpt_lock) #define MPT_OWNED(mpt) mtx_owned(&(mpt)->mpt_lock) #define MPTLOCK_2_CAMLOCK(mpt) \ mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant) #define CAMLOCK_2_MPTLOCK(mpt) \ mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock) #define mpt_sleep(mpt, ident, priority, wmesg, timo) \ msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo) #else #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY #define MPT_LOCK_SETUP(mpt) do { } while (0) #define MPT_LOCK_DESTROY(mpt) do { } while (0) #if 0 #define MPT_LOCK(mpt) \ device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); \ KASSERT(mpt->mpt_locksetup == 0, \ ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \ mpt->mpt_locksetup = 1 #define MPT_UNLOCK(mpt) \ device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); \ KASSERT(mpt->mpt_locksetup == 1, \ ("release unowned lock at %s:%d", __FILE__, __LINE__)); \ mpt->mpt_locksetup = 0 #else #define MPT_LOCK(mpt) \ KASSERT(mpt->mpt_locksetup == 0, \ ("recursive lock acquire at %s:%d", __FILE__, __LINE__)); \ mpt->mpt_locksetup = 1 #define MPT_UNLOCK(mpt) \ KASSERT(mpt->mpt_locksetup == 1, \ ("release unowned lock at %s:%d", __FILE__, __LINE__)); \ mpt->mpt_locksetup = 0 #endif #define MPT_OWNED(mpt) mpt->mpt_locksetup #define MPTLOCK_2_CAMLOCK(mpt) MPT_UNLOCK(mpt) #define CAMLOCK_2_MPTLOCK(mpt) MPT_LOCK(mpt) static __inline int mpt_sleep(struct mpt_softc *, void *, int, const char *, int); static __inline int mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t) { int r; MPT_UNLOCK(mpt); r = tsleep(i, p, w, t); MPT_LOCK(mpt); return (r); } #endif #endif /******************************* Register Access ******************************/ static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t); static __inline uint32_t mpt_read(struct mpt_softc *, int); static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t); static __inline uint32_t mpt_pio_read(struct mpt_softc *, int); static __inline void mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val) { bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val); } static __inline uint32_t mpt_read(struct mpt_softc *mpt, int offset) { return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset)); } /* * Some operations (e.g. diagnostic register writes while the ARM proccessor * is disabled), must be performed using "PCI pio" operations. On non-PCI * busses, these operations likely map to normal register accesses. 
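 * The firmware download path above relies on these accessors for exactly
 * that case, e.g. (excerpted from mpt_diag_outsl()):
 *
 *	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
 *	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);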
*/ static __inline void mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val) { bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val); } static __inline uint32_t mpt_pio_read(struct mpt_softc *mpt, int offset) { return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset)); } /*********************** Reply Frame/Request Management ***********************/ /* Max MPT Reply we are willing to accept (must be power of 2) */ #define MPT_REPLY_SIZE 256 /* * Must be less than 16384 in order for target mode to work */ #define MPT_MAX_REQUESTS(mpt) 512 #define MPT_REQUEST_AREA 512 #define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */ #define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA) #define MPT_CONTEXT_CB_SHIFT (16) #define MPT_CBI(handle) (handle >> MPT_CONTEXT_CB_SHIFT) #define MPT_CBI_TO_HID(cbi) ((cbi) << MPT_CONTEXT_CB_SHIFT) #define MPT_CONTEXT_TO_CBI(x) \ (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1)) #define MPT_CONTEXT_REQI_MASK 0xFFFF #define MPT_CONTEXT_TO_REQI(x) ((x) & MPT_CONTEXT_REQI_MASK) /* * Convert a 32bit physical address returned from IOC to an * offset into our reply frame memory or the kvm address needed * to access the data. The returned address is only the low * 32 bits, so mask our base physical address accordingly. */ #define MPT_REPLY_BADDR(x) \ (x << 1) #define MPT_REPLY_OTOV(m, i) \ ((void *)(&m->reply[i])) #define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \ do { \ if (mpt->verbose > MPT_PRT_DEBUG) \ mpt_dump_reply_frame(mpt, reply_frame); \ } while(0) static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt); static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr); /* * Give the reply buffer back to the IOC after we have * finished processing it. */ static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr) { mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr); } /* Get a reply from the IOC */ static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt) { return mpt_read(mpt, MPT_OFFSET_REPLY_Q); } void mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int); /************************** Scatter Gather Managment **************************/ /* MPT_RQSL- size of request frame, in bytes */ #define MPT_RQSL(mpt) (mpt->request_frame_size << 2) /* MPT_NSGL- how many SG entries can fit in a request frame size */ #define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION)) /* MPT_NRFM- how many request frames can fit in each request alloc we make */ #define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt)) /* * MPT_NSGL_FIRST- # of SG elements that can fit after * an I/O request but still within the request frame. * Do this safely based upon SGE_IO_UNION. * * Note that the first element is *within* the SCSI request. 
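 * Worked example (figures illustrative only): with a request_frame_size of
 * 32 words MPT_RQSL is 128 bytes; assuming a 12-byte SGE_IO_UNION that
 * yields MPT_NSGL == 10 SG entries per frame and, with the 512-byte
 * MPT_REQUEST_AREA, MPT_NRFM == 4 frames per request allocation.  Fed into
 * the chain-depth math in mpt_configure_ioc(), max_seg_cnt would start at
 * (MPT_MAX_REQUESTS - 2) * MPT_NRFM == 510 * 4 == 2040 chain areas (before
 * being clipped to the IOC's MaxChainDepth) and finally be multiplied by
 * (MPT_NSGL - 1) == 9 simple SGEs per chain area.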
*/ #define MPT_NSGL_FIRST(mpt) \ ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \ sizeof (SGE_IO_UNION)) /***************************** IOC Initialization *****************************/ int mpt_reset(struct mpt_softc *, int /*reinit*/); /****************************** Debugging ************************************/ typedef struct mpt_decode_entry { char *name; u_int value; u_int mask; } mpt_decode_entry_t; int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries, const char *name, u_int value, u_int *cur_column, u_int wrap_point); void mpt_dump_request(struct mpt_softc *, request_t *); enum { MPT_PRT_ALWAYS, MPT_PRT_FATAL, MPT_PRT_ERROR, MPT_PRT_WARN, MPT_PRT_INFO, MPT_PRT_NEGOTIATION, MPT_PRT_DEBUG, MPT_PRT_DEBUG1, MPT_PRT_DEBUG2, MPT_PRT_DEBUG3, MPT_PRT_TRACE, MPT_PRT_NONE=100 }; #if __FreeBSD_version > 500000 #define mpt_lprt(mpt, level, ...) \ do { \ if (level <= (mpt)->verbose) \ mpt_prt(mpt, __VA_ARGS__); \ } while (0) #define mpt_lprtc(mpt, level, ...) \ do { \ if (level <= (mpt)->debug_level) \ mpt_prtc(mpt, __VA_ARGS__); \ } while (0) #else void mpt_lprt(struct mpt_softc *, int, const char *, ...) __printflike(3, 4); void mpt_lprtc(struct mpt_softc *, int, const char *, ...) __printflike(3, 4); #endif void mpt_prt(struct mpt_softc *, const char *, ...) __printflike(2, 3); void mpt_prtc(struct mpt_softc *, const char *, ...) __printflike(2, 3); /**************************** Target Mode Related ***************************/ static __inline int mpt_cdblen(uint8_t, int); static __inline int mpt_cdblen(uint8_t cdb0, int maxlen) { int group = cdb0 >> 5; switch (group) { case 0: return (6); case 1: return (10); case 4: case 5: return (12); default: return (16); } } #ifdef INVARIANTS static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t); static __inline request_t * mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag) { uint16_t rtg = (tag >> 18); KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag)); KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array")); KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer")); return (mpt->tgt_cmd_ptrs[rtg]); } static __inline int mpt_req_on_free_list(struct mpt_softc *, request_t *); static __inline int mpt_req_on_pending_list(struct mpt_softc *, request_t *); static __inline void mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int); static __inline void mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int); /* * Is request on freelist? */ static __inline int mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req) { request_t *lrq; TAILQ_FOREACH(lrq, &mpt->request_free_list, links) { if (lrq == req) { return (1); } } return (0); } /* * Is request on pending list? */ static __inline int mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req) { request_t *lrq; TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) { if (lrq == req) { return (1); } } return (0); } /* * Make sure that req *is* part of one of the special lists */ static __inline void mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) { int i; for (i = 0; i < mpt->els_cmds_allocated; i++) { if (req == mpt->els_cmd_ptrs[i]) { return; } } for (i = 0; i < mpt->tgt_cmds_allocated; i++) { if (req == mpt->tgt_cmd_ptrs[i]) { return; } } panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function); } /* * Make sure that req is *not* part of one of the special lists. 
*/ static __inline void mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) { int i; for (i = 0; i < mpt->els_cmds_allocated; i++) { KASSERT(req != mpt->els_cmd_ptrs[i], ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); } for (i = 0; i < mpt->tgt_cmds_allocated; i++) { KASSERT(req != mpt->tgt_cmd_ptrs[i], ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); } } #endif typedef enum { MPT_ABORT_TASK_SET=1234, MPT_CLEAR_TASK_SET, MPT_TARGET_RESET, MPT_CLEAR_ACA, MPT_TERMINATE_TASK, MPT_NIL_TMT_VALUE=5678 } mpt_task_mgmt_t; /**************************** Unclassified Routines ***************************/ void mpt_send_cmd(struct mpt_softc *mpt, request_t *req); int mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply); int mpt_wait_req(struct mpt_softc *mpt, request_t *req, mpt_req_state_t state, mpt_req_state_t mask, int sleep_ok, int time_ms); void mpt_enable_ints(struct mpt_softc *mpt); void mpt_disable_ints(struct mpt_softc *mpt); int mpt_attach(struct mpt_softc *mpt); int mpt_shutdown(struct mpt_softc *mpt); int mpt_detach(struct mpt_softc *mpt); int mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd); request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok); void mpt_free_request(struct mpt_softc *mpt, request_t *req); void mpt_intr(void *arg); void mpt_check_doorbell(struct mpt_softc *mpt); void mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame); void mpt_set_config_regs(struct mpt_softc *); int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/, u_int /*Action*/, u_int /*PageVersion*/, u_int /*PageLength*/, u_int /*PageNumber*/, u_int /*PageType*/, uint32_t /*PageAddress*/, bus_addr_t /*addr*/, bus_size_t/*len*/, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/, int /*PageNumber*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, size_t /*len*/, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, size_t /*len*/, int /*sleep_ok*/, int /*timeout_ms*/); static __inline int mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, PageAddress, hdr, len, sleep_ok, timeout_ms)); } static __inline int mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, PageAddress, hdr, len, sleep_ok, timeout_ms)); } /* mpt_debug.c functions */ void mpt_print_reply(void *vmsg); void mpt_print_db(uint32_t mb); void mpt_print_config_reply(void *vmsg); char *mpt_ioc_diag(uint32_t diag); void mpt_req_state(mpt_req_state_t state); void mpt_print_config_request(void *vmsg); void mpt_print_request(void *vmsg); void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg); void mpt_dump_sgl(SGE_IO_UNION *se, int offset); #endif /* _MPT_H_ */ Index: head/sys/dev/mpt/mpt_cam.c =================================================================== --- 
head/sys/dev/mpt/mpt_cam.c (revision 159918) +++ head/sys/dev/mpt/mpt_cam.c (revision 159919) @@ -1,4801 +1,4812 @@ /*- * FreeBSD/CAM specific routines for LSI '909 FC adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. 
* Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ #include "dev/mpt/mpilib/mpi_init.h" #include "dev/mpt/mpilib/mpi_targ.h" #include "dev/mpt/mpilib/mpi_fc.h" #include #include static void mpt_poll(struct cam_sim *); static timeout_t mpt_timeout; static void mpt_action(struct cam_sim *, union ccb *); static int mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); static void mpt_setwidth(struct mpt_softc *, int, int); static void mpt_setsync(struct mpt_softc *, int, int, int); static int mpt_update_spi_config(struct mpt_softc *, int); static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended); static mpt_reply_handler_t mpt_scsi_reply_handler; static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; static mpt_reply_handler_t mpt_fc_els_reply_handler; static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, MSG_DEFAULT_REPLY *); static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); static int mpt_fc_reset_link(struct mpt_softc *, int); static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); static void mpt_recovery_thread(void *arg); static void mpt_recover_commands(struct mpt_softc *mpt); static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, u_int, u_int, u_int, int); static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); static void mpt_post_target_command(struct mpt_softc *, request_t *, int); static int mpt_add_els_buffers(struct mpt_softc *mpt); static int mpt_add_target_commands(struct mpt_softc *mpt); static void mpt_free_els_buffers(struct mpt_softc *mpt); static void mpt_free_target_commands(struct mpt_softc *mpt); static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); static void mpt_target_start_io(struct mpt_softc *, union ccb *); static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, uint8_t, uint8_t const *); static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, tgt_resource_t *, int); static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; static mpt_probe_handler_t mpt_cam_probe; static mpt_attach_handler_t mpt_cam_attach; static mpt_enable_handler_t mpt_cam_enable; static mpt_event_handler_t mpt_cam_event; static mpt_reset_handler_t mpt_cam_ioc_reset; static mpt_detach_handler_t mpt_cam_detach; static struct mpt_personality mpt_cam_personality = { .name = "mpt_cam", .probe = mpt_cam_probe, .attach = mpt_cam_attach, .enable = mpt_cam_enable, .event = mpt_cam_event, .reset = mpt_cam_ioc_reset, .detach = mpt_cam_detach, }; DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); int mpt_cam_probe(struct mpt_softc *mpt) { /* * Only attach to nodes that support the initiator or target * role or have RAID physical devices that need CAM pass-thru support. 
*/ if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { return (0); } return (ENODEV); } int mpt_cam_attach(struct mpt_softc *mpt) { struct cam_devq *devq; mpt_handler_t handler; int maxq; int error; TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))? mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt); handler.reply_handler = mpt_scsi_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { goto cleanup0; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { goto cleanup0; } /* * If we're fibre channel and could support target mode, we register * an ELS reply handler and give it resources. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_fc_els_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { goto cleanup0; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; goto cleanup0; } maxq -= mpt->els_cmds_allocated; } /* * If we support target mode, we register a reply handler for it, * but don't add resources until we actually enable target mode. */ if ((mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_scsi_tgt_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { goto cleanup0; } } /* * We keep one request reserved for timeout TMF requests. */ mpt->tmf_req = mpt_get_request(mpt, FALSE); if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; goto cleanup0; } /* * Mark the request as free even though not on the free list. * There is only one TMF request allowed to be outstanding at * a time and the TMF routines perform their own allocation * tracking using the standard state flags. */ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; goto cleanup0; } /* * The rest of this is CAM foo, for which we need to drop our lock */ MPTLOCK_2_CAMLOCK(mpt); /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); if (devq == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); error = ENOMEM; goto cleanup; } /* * Construct our SIM entry. */ mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, mpt->unit, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); error = ENOMEM; goto cleanup; } /* * Register exactly this bus. */ if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; goto cleanup; } if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; goto cleanup; } /* * Only register a second bus for RAID physical * devices if the controller supports RAID. */ if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { CAMLOCK_2_MPTLOCK(mpt); return (0); } /* * Create a "bus" to export all hidden disks to CAM. 
*/ mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, mpt->unit, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; goto cleanup; } /* * Register this bus. */ if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; goto cleanup; } if (xpt_create_path(&mpt->phydisk_path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; goto cleanup; } CAMLOCK_2_MPTLOCK(mpt); return (0); cleanup: CAMLOCK_2_MPTLOCK(mpt); cleanup0: mpt_cam_detach(mpt); return (error); } /* * Read FC configuration information */ static int mpt_read_config_info_fc(struct mpt_softc *mpt) { char *topology = NULL; - int rv, speed = 0; + int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", mpt->mpt_fcport_page0.Header.PageVersion, mpt->mpt_fcport_page0.Header.PageLength, mpt->mpt_fcport_page0.Header.PageNumber, mpt->mpt_fcport_page0.Header.PageType); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, sizeof(mpt->mpt_fcport_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read FC Port Page 0\n"); return (-1); } - speed = mpt->mpt_fcport_page0.CurrentSpeed; + mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; switch (mpt->mpt_fcport_page0.Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: - speed = 0; + mpt->mpt_fcport_speed = 0; topology = ""; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: topology = "N-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: topology = "NL-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: topology = "F-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: topology = "FL-Port"; break; default: - speed = 0; + mpt->mpt_fcport_speed = 0; topology = "?"; break; } mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " "Speed %u-Gbit\n", topology, mpt->mpt_fcport_page0.WWNN.High, mpt->mpt_fcport_page0.WWNN.Low, mpt->mpt_fcport_page0.WWPN.High, mpt->mpt_fcport_page0.WWPN.Low, - speed); + mpt->mpt_fcport_speed); return (0); } /* * Set FC configuration information. */ static int mpt_set_initial_config_fc(struct mpt_softc *mpt) { #if 0 CONFIG_PAGE_FC_PORT_1 fc; U32 fl; int r, doit = 0; if ((mpt->role & MPT_ROLE_TARGET) == 0) { return (0); } r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, &fc.Header, FALSE, 5000); if (r) { return (mpt_fc_reset_link(mpt, 1)); } r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0, &fc.Header, sizeof (fc), FALSE, 5000); if (r) { return (mpt_fc_reset_link(mpt, 1)); } fl = le32toh(fc.Flags); if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; doit = 1; } if (doit) { const char *cc; mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 1: New Flags %x \n", fl); fc.Flags = htole32(fl); r = mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header, sizeof(fc), FALSE, 5000); if (r != 0) { cc = "FC PORT PAGE1 UPDATE: FAILED\n"; } else { cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n"; } mpt_prt(mpt, cc); } #endif return (0); } /* * Read SAS configuration information. Nothing to do yet. 
*/ static int mpt_read_config_info_sas(struct mpt_softc *mpt) { return (0); } /* * Set SAS configuration information. Nothing to do yet. */ static int mpt_set_initial_config_sas(struct mpt_softc *mpt) { return (0); } /* * Read SCSI configuration information */ static int mpt_read_config_info_spi(struct mpt_softc *mpt) { int rv, i; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, &mpt->mpt_port_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", mpt->mpt_port_page0.Header.PageVersion, mpt->mpt_port_page0.Header.PageLength, mpt->mpt_port_page0.Header.PageNumber, mpt->mpt_port_page0.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, &mpt->mpt_port_page1.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", mpt->mpt_port_page1.Header.PageVersion, mpt->mpt_port_page1.Header.PageLength, mpt->mpt_port_page1.Header.PageNumber, mpt->mpt_port_page1.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, &mpt->mpt_port_page2.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", mpt->mpt_port_page2.Header.PageVersion, mpt->mpt_port_page2.Header.PageLength, mpt->mpt_port_page2.Header.PageNumber, mpt->mpt_port_page2.Header.PageType); for (i = 0; i < 16; i++) { rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, mpt->mpt_dev_page0[i].Header.PageVersion, mpt->mpt_dev_page0[i].Header.PageLength, mpt->mpt_dev_page0[i].Header.PageNumber, mpt->mpt_dev_page0[i].Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, mpt->mpt_dev_page1[i].Header.PageVersion, mpt->mpt_dev_page1[i].Header.PageLength, mpt->mpt_dev_page1[i].Header.PageNumber, mpt->mpt_dev_page1[i].Header.PageType); } /* * At this point, we don't *have* to fail. As long as we have * valid config header information, we can (barely) lurch * along. 
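 *
 * Concretely: the header reads above return -1 on failure, while the
 * current-page reads that follow only log a warning and keep going,
 * so a controller with unreadable port/device pages still attaches.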
*/ rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 0\n"); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", mpt->mpt_port_page0.Capabilities, mpt->mpt_port_page0.PhysicalInterface); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, sizeof(mpt->mpt_port_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 1\n"); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", mpt->mpt_port_page1.Configuration, mpt->mpt_port_page1.OnBusTimerValue); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, sizeof(mpt->mpt_port_page2), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 2\n"); } else { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "Port Page 2: Flags %x Settings %x\n", mpt->mpt_port_page2.PortFlags, mpt->mpt_port_page2.PortSettings); for (i = 0; i < 16; i++) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); } } for (i = 0; i < 16; i++) { rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 0\n", i); continue; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 0: Negotiated Params %x Information %x\n", i, mpt->mpt_dev_page0[i].NegotiatedParameters, mpt->mpt_dev_page0[i].Information); rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 1\n", i); continue; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 1: Requested Params %x Configuration %x\n", i, mpt->mpt_dev_page1[i].RequestedParameters, mpt->mpt_dev_page1[i].Configuration); } return (0); } /* * Validate SPI configuration information. * * In particular, validate SPI Port Page 1. */ static int mpt_set_initial_config_spi(struct mpt_softc *mpt) { int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id; int error; mpt->mpt_disc_enable = 0xff; mpt->mpt_tag_enable = 0; if (mpt->mpt_port_page1.Configuration != pp1val) { CONFIG_PAGE_SCSI_PORT_1 tmp; mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); tmp = mpt->mpt_port_page1; tmp.Configuration = pp1val; error = mpt_write_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } error = mpt_read_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } if (tmp.Configuration != pp1val) { mpt_prt(mpt, "failed to reset SPI Port Page 1 Config value\n"); return (-1); } mpt->mpt_port_page1 = tmp; } /* * The purpose of this exercise is to get * all targets back to async/narrow. * * We skip this step if the BIOS has already negotiated * speeds with the targets and does not require us to * do Domain Validation. 
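 *
 * "Back to async/narrow" just means zeroing each target's Device
 * Page 1 RequestedParameters and Configuration and pushing that out
 * with mpt_update_spi_config(). Whether the BIOS settings are honored
 * is decided below from SPI Port Page 2: the PortSettings negotiation
 * master field, and in principle the DV PortFlags, although that half
 * of the test is currently commented out.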
*/ i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "honoring BIOS transfer negotiations\n"); } else { for (i = 0; i < 16; i++) { mpt->mpt_dev_page1[i].RequestedParameters = 0; mpt->mpt_dev_page1[i].Configuration = 0; (void) mpt_update_spi_config(mpt, i); } } return (0); } int mpt_cam_enable(struct mpt_softc *mpt) { if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { return (EIO); } if (mpt_set_initial_config_fc(mpt)) { return (EIO); } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { return (EIO); } if (mpt_set_initial_config_sas(mpt)) { return (EIO); } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { return (EIO); } if (mpt_set_initial_config_spi(mpt)) { return (EIO); } } return (0); } void mpt_cam_detach(struct mpt_softc *mpt) { mpt_handler_t handler; mpt_terminate_recovery_thread(mpt); handler.reply_handler = mpt_scsi_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_io_handler_id); handler.reply_handler = mpt_scsi_tmf_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_tmf_handler_id); handler.reply_handler = mpt_fc_els_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, fc_els_handler_id); handler.reply_handler = mpt_scsi_tgt_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, mpt->scsi_tgt_handler_id); if (mpt->tmf_req != NULL) { mpt->tmf_req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } if (mpt->sim != NULL) { MPTLOCK_2_CAMLOCK(mpt); xpt_free_path(mpt->path); xpt_bus_deregister(cam_sim_path(mpt->sim)); cam_sim_free(mpt->sim, TRUE); mpt->sim = NULL; CAMLOCK_2_MPTLOCK(mpt); } if (mpt->phydisk_sim != NULL) { MPTLOCK_2_CAMLOCK(mpt); xpt_free_path(mpt->phydisk_path); xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); cam_sim_free(mpt->phydisk_sim, TRUE); mpt->phydisk_sim = NULL; CAMLOCK_2_MPTLOCK(mpt); } } /* This routine is used after a system crash to dump core onto the swap device. */ static void mpt_poll(struct cam_sim *sim) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)cam_sim_softc(sim); MPT_LOCK(mpt); mpt_intr(mpt); MPT_UNLOCK(mpt); } /* * Watchdog timeout routine for SCSI requests. */ static void mpt_timeout(void *arg) { union ccb *ccb; struct mpt_softc *mpt; request_t *req; ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; MPT_LOCK(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); /* XXX: WHAT ARE WE TRYING TO DO HERE? */ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } MPT_UNLOCK(mpt); } /* * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. * * Takes a list of physical segments and builds the SGL for SCSI IO command * and forwards the commard to the IOC after one last check that CAM has not * aborted the transaction. 
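 *
 * Overall shape of what gets built here (the 32-bit variant below is
 * identical apart from using SGE_SIMPLE32/SGE_CHAIN32): as many
 * SIMPLE64 elements as fit are placed directly after the message
 * header, followed if necessary by a CHAIN64 element pointing at a
 * further list in the next MPT_RQSL-sized chunk of the request area;
 * when a request area fills up, another one is allocated with
 * mpt_get_request() and chained on. The element covering the final
 * byte of the transfer carries END_OF_LIST and END_OF_BUFFER.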
*/ static void mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE64 *se; SGE_CHAIN64 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: istgt = 0; sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); MPTLOCK_2_CAMLOCK(mpt); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. */ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE64 pointers and start doing CHAIN64 entries after * that. 
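 *
 * In other words, first_lim below is either nseg (everything fits in
 * the command frame) or MPT_NSGL_FIRST(mpt) - 1, with the remaining
 * slot reserved for the chain element.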
*/ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE64 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0, sizeof (*se)); se->Address.Low = dm_segs->ds_addr; if (sizeof(bus_addr_t) > 4) { se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32; } MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contiained in the current request frame) which points to * SIMPLE64 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN64 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialized the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; if (sizeof (bus_addr_t) > 4) { ce->Address.High = (uint32_t) ((uint64_t)chain_list_addr >> 32); } ce->Address.Low = (uint32_t) chain_list_addr; ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN64); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); } /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. 
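 *
 * (The same rule applies to the first list built directly in the
 * command frame above: each list's final element carries
 * MPI_SGE_FLAGS_LAST_ELEMENT, and only the element describing the
 * final byte of the whole transfer also carries END_OF_LIST and
 * END_OF_BUFFER.)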
*/ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address.Low = dm_segs->ds_addr; if (sizeof (bus_addr_t) > 4) { se->Address.High = ((uint64_t)dm_segs->ds_addr) >> 32; } MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; CAMLOCK_2_MPTLOCK(mpt); nrq = mpt_get_request(mpt, FALSE); MPTLOCK_2_CAMLOCK(mpt); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); MPTLOCK_2_CAMLOCK(mpt); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { ccb->ccb_h.timeout_ch = timeout(mpt_timeout, (caddr_t)ccb, (ccb->ccb_h.timeout * hz) / 1000); } else { callout_handle_init(&ccb->ccb_h.timeout_ch); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } CAMLOCK_2_MPTLOCK(mpt); mpt_send_cmd(mpt, req); MPTLOCK_2_CAMLOCK(mpt); } static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE32 *se; SGE_CHAIN32 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: sglp = 
&((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); MPTLOCK_2_CAMLOCK(mpt); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. */ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE32 pointers and start doing CHAIN32 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE32 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0,sizeof (*se)); se->Address = dm_segs->ds_addr; MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. 
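 *
 * ChainOffset is expressed in 32-bit words measured from the start of
 * the message header, which is what the >> 2 below is for.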
*/ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contiained in the current request frame) which points to * SIMPLE32 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN32 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialized the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; ce->Address = chain_list_addr; ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN32); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); } /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address = dm_segs->ds_addr; MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; CAMLOCK_2_MPTLOCK(mpt); nrq = mpt_get_request(mpt, FALSE); MPTLOCK_2_CAMLOCK(mpt); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
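 *
 * This callback can run well after mpt_start() issued the map request
 * (bus_dmamap_load may have returned EINPROGRESS), so the CCB may have
 * been aborted in the meantime; if its status is no longer
 * CAM_REQ_INPROG the mapping is torn down and the CCB completed here
 * instead of being sent to the IOC.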
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); mpt_free_request(mpt, req); MPTLOCK_2_CAMLOCK(mpt); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { ccb->ccb_h.timeout_ch = timeout(mpt_timeout, (caddr_t)ccb, (ccb->ccb_h.timeout * hz) / 1000); } else { callout_handle_init(&ccb->ccb_h.timeout_ch); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } CAMLOCK_2_MPTLOCK(mpt); mpt_send_cmd(mpt, req); MPTLOCK_2_CAMLOCK(mpt); } static void mpt_start(struct cam_sim *sim, union ccb *ccb) { request_t *req; struct mpt_softc *mpt; MSG_SCSI_IO_REQUEST *mpt_req; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; /* Get the pointer for the physical addapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); CAMLOCK_2_MPTLOCK(mpt); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif MPTLOCK_2_CAMLOCK(mpt); if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } /* * Link the ccb and the request structure so we can find * the other knowing either the request or the ccb */ req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* Now we build the command for the IOC */ mpt_req = req->req_vbuf; memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; CAMLOCK_2_MPTLOCK(mpt); if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { MPTLOCK_2_CAMLOCK(mpt); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } MPTLOCK_2_CAMLOCK(mpt); mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; mpt_req->Bus = 0; /* XXX */ } mpt_req->SenseBufferLength = (csio->sense_len < MPT_SENSE_SIZE) ? 
csio->sense_len : MPT_SENSE_SIZE; /* * We use the message context to find the request structure when we * Get the command completion interrupt from the IOC. */ mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); /* Which physical device to do the I/O on */ mpt_req->TargetID = tgt; /* We assume a single level LUN type */ if (ccb->ccb_h.target_lun >= 256) { mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; } else { mpt_req->LUN[1] = ccb->ccb_h.target_lun; } /* Set the direction of the transfer */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { mpt_req->Control = MPI_SCSIIO_CONTROL_READ; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; } else { mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ACA_TASK: mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; break; case MSG_ORDERED_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_SIMPLE_Q_TAG: default: mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; break; } } else { if (mpt->is_fc || mpt->is_sas) { mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; } else { /* XXX No such thing for a target doing packetized. */ mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; } } if (mpt->is_spi) { if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; } } /* Copy the scsi command block into place */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); } mpt_req->CDBLength = csio->cdb_len; mpt_req->DataLength = csio->dxfer_len; mpt_req->SenseBufferLowAddr = req->sense_pbuf; /* * Do a *short* print here if we're set to MPT_PRT_DEBUG */ if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_start: %s op 0x%x ", (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) { mpt_prtc(mpt, "(%s %u byte%s ", (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)? "read" : "write", csio->dxfer_len, (csio->dxfer_len == 1)? ")" : "s)"); } mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, ccb->ccb_h.target_lun, req, req->serno); } /* * If we have any data to send with this command map it into bus space. */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer to a single buffer. */ if ((ccbh->flags & CAM_DATA_PHYS) == 0) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; int s = splsoftvm(); error = bus_dmamap_load(mpt->buffer_dmat, req->dmap, csio->data_ptr, csio->dxfer_len, cb, req, 0); splx(s); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ xpt_freeze_simq(mpt->sim, 1); ccbh->status |= CAM_RELEASE_SIMQ; } } else { /* * We have been given a pointer to single * physical buffer. */ struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; (*cb)(req, &seg, 1, 0); } } else { /* * We have been given a list of addresses. 
* This case could be easily supported but they are not * currently generated by the CAM subsystem so there * is no point in wasting the time right now. */ struct bus_dma_segment *segs; if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { (*cb)(req, NULL, 0, EFAULT); } else { /* Just use the segments provided */ segs = (struct bus_dma_segment *)csio->data_ptr; (*cb)(req, segs, csio->sglist_cnt, 0); } } } else { (*cb)(req, NULL, 0, 0); } } static int mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, int sleep_ok) { int error; uint16_t status; uint8_t response; error = mpt_scsi_send_tmf(mpt, (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 0, /* XXX How do I get the channel ID? */ tgt != CAM_TARGET_WILDCARD ? tgt : 0, lun != CAM_LUN_WILDCARD ? lun : 0, 0, sleep_ok); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. */ mpt_prt(mpt, "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); return (EIO); } /* Wait for bus reset to be processed by the IOC. */ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, 5000); status = mpt->tmf_req->IOCStatus; response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error) { mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " "Resetting controller.\n"); mpt_reset(mpt, TRUE); return (ETIMEDOUT); } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); return (EIO); } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); return (EIO); } return (0); } static int mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) { int r = 0; request_t *req; PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } fc = req->req_vbuf; memset(fc, 0, sizeof(*fc)); fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; fc->MsgContext = htole32(req->index | fc_els_handler_id); mpt_send_cmd(mpt, req); if (dowait) { r = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 60 * 1000); if (r == 0) { mpt_free_request(mpt, req); } } return (r); } static int mpt_cam_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { switch(msg->Event & 0xFF) { case MPI_EVENT_UNIT_ATTENTION: mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n", (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: /* We generated a bus reset */ mpt_prt(mpt, "IOC Bus Reset Port: %d\n", (msg->Data[0] >> 8) & 0xff); xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_EXT_BUS_RESET: /* Someone else generated a bus reset */ mpt_prt(mpt, "External Bus Reset Detected\n"); /* * These replies don't return EventData like the MPI * spec says they do */ xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_RESCAN: /* * In general this means a device has been added to the loop. */ mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff); /* xpt_async(AC_FOUND_DEVICE, path, NULL); */ break; case MPI_EVENT_LINK_STATUS_CHANGE: mpt_prt(mpt, "Port %d: LinkState: %s\n", (msg->Data[1] >> 8) & 0xff, ((msg->Data[0] & 0xff) == 0)? 
"Failed" : "Active"); break; case MPI_EVENT_LOOP_STATE_CHANGE: switch ((msg->Data[0] >> 16) & 0xff) { case 0x01: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " "(Loop Initialization)\n", (msg->Data[1] >> 8) & 0xff, (msg->Data[0] >> 8) & 0xff, (msg->Data[0] ) & 0xff); switch ((msg->Data[0] >> 8) & 0xff) { case 0xF7: if ((msg->Data[0] & 0xff) == 0xF7) { mpt_prt(mpt, "Device needs AL_PA\n"); } else { mpt_prt(mpt, "Device %02x doesn't like " "FC performance\n", msg->Data[0] & 0xFF); } break; case 0xF8: if ((msg->Data[0] & 0xff) == 0xF7) { mpt_prt(mpt, "Device had loop failure " "at its receiver prior to acquiring" " AL_PA\n"); } else { mpt_prt(mpt, "Device %02x detected loop" " failure at its receiver\n", msg->Data[0] & 0xFF); } break; default: mpt_prt(mpt, "Device %02x requests that device " "%02x reset itself\n", msg->Data[0] & 0xFF, (msg->Data[0] >> 8) & 0xFF); break; } break; case 0x02: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPE(%02x,%02x) (Loop Port Enable)\n", (msg->Data[1] >> 8) & 0xff, /* Port */ (msg->Data[0] >> 8) & 0xff, /* Character 3 */ (msg->Data[0] ) & 0xff /* Character 4 */); break; case 0x03: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPB(%02x,%02x) (Loop Port Bypass)\n", (msg->Data[1] >> 8) & 0xff, /* Port */ (msg->Data[0] >> 8) & 0xff, /* Character 3 */ (msg->Data[0] ) & 0xff /* Character 4 */); break; default: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " "FC event (%02x %02x %02x)\n", (msg->Data[1] >> 8) & 0xff, /* Port */ (msg->Data[0] >> 16) & 0xff, /* Event */ (msg->Data[0] >> 8) & 0xff, /* Character 3 */ (msg->Data[0] ) & 0xff /* Character 4 */); } break; case MPI_EVENT_LOGOUT: mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", (msg->Data[1] >> 8) & 0xff, msg->Data[0]); break; case MPI_EVENT_EVENT_CHANGE: mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n"); break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: /* * Devices are attachin'..... */ mpt_prt(mpt, "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n"); break; default: mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", msg->Event & 0xFF); return (0); } return (1); } /* * Reply path for all SCSI I/O requests, called from our * interrupt handler by extracting our handler index from * the MsgContext field of the reply from the IOC. * * This routine is optimized for the common case of a * completion without error. All exception handling is * offloaded to non-inlined helper routines to minimize * cache footprint. */ static int mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_IO_REQUEST *scsi_req; union ccb *ccb; target_id_t tgt; if (req->state == REQ_STATE_FREE) { mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); return (TRUE); } scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", req, req->serno); return (TRUE); } tgt = scsi_req->TargetID; untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } if (reply_frame == NULL) { /* * Context only reply, completion without error status. 
*/ ccb->csio.resid = 0; mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } else { mpt_scsi_reply_frame_handler(mpt, req, reply_frame); } if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { struct scsi_inquiry_data *iq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { /* * Fake out the device type so that only the * pass-thru device will attach. */ iq->device &= ~0x1F; iq->device |= T_NODEVICE; } } if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", req, req->serno); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); } KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, ("CCB req needed wakeup")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); #endif mpt_free_request(mpt, req); return (TRUE); } static int mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); #endif tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; /* Record IOC Status and Response Code of TMF for any waiters. */ req->IOCStatus = le16toh(tmf_reply->IOCStatus); req->ResponseCode = tmf_reply->ResponseCode; mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n", req, req->serno, le16toh(tmf_reply->IOCStatus)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { req->state |= REQ_STATE_DONE; wakeup(req); } else { mpt->tmf_req->state = REQ_STATE_FREE; } return (TRUE); } /* * XXX: Move to definitions file */ #define ELS 0x22 #define FC4LS 0x32 #define ABTS 0x81 #define BA_ACC 0x84 #define LS_RJT 0x01 #define LS_ACC 0x02 #define PLOGI 0x03 #define LOGO 0x05 #define SRR 0x14 #define PRLI 0x20 #define PRLO 0x21 #define ADISC 0x52 #define RSCN 0x61 static void mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) { MSG_LINK_SERVICE_RSP_REQUEST tmp; PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; /* * We are going to reuse the ELS request to send this response back. */ rsp = &tmp; memset(rsp, 0, sizeof(*rsp)); #ifdef USE_IMMEDIATE_LINK_DATA /* * Apparently the IMMEDIATE stuff doesn't seem to work. */ rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; #endif rsp->RspLength = length; rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; rsp->MsgContext = htole32(req->index | fc_els_handler_id); /* * Copy over information from the original reply frame to * it's correct place in the response. */ memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); /* * And now copy back the temporary area to the original frame. 
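 *
 * The response payload itself sits in the second half of the request
 * area, at offset MPT_RQSL(mpt); unless USE_IMMEDIATE_LINK_DATA is
 * defined, the single simple SGE built below merely points the IOC at
 * that spot.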
*/ memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); rsp = req->req_vbuf; #ifdef USE_IMMEDIATE_LINK_DATA memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); #else { PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; bus_addr_t paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); se->FlagsLength = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT; se->FlagsLength |= (length); se->Address = (uint32_t) paddr; } #endif /* * Send it on... */ mpt_send_cmd(mpt, req); } static int mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; U8 rctl; U8 type; U8 cmd; U16 status = le16toh(reply_frame->IOCStatus); U32 *elsbuf; int ioindex; int do_refresh = TRUE; #ifdef INVARIANTS KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("fc_els_reply_handler: req %p:%u for function %x on freelist!", req, req->serno, rp->Function)); if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } else { mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } #endif mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p function %x\n", req, req->serno, reply_frame, reply_frame->Function); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", status, reply_frame->Function); if (status == MPI_IOCSTATUS_INVALID_STATE) { /* * XXX: to get around shutdown issue */ mpt->disabled = 1; return (TRUE); } return (TRUE); } /* * If the function of a link service response, we recycle the * response to be a refresh for a new link service request. * * The request pointer is bogus in this case and we have to fetch * it based upon the TransactionContext. 
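 *
 * More precisely: for the link service response case handled first
 * below, the IOINDEX is not carried in the reply, so it is recovered
 * by scanning els_cmd_ptrs for this request; the TransactionContext
 * lookup happens further down, where an incoming buffer-post reply is
 * matched back to the els_cmd_ptrs entry that owns the posted buffer.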
*/ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { /* Freddie Uncle Charlie Katie */ /* We don't get the IOINDEX as part of the Link Svc Rsp */ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) if (mpt->els_cmd_ptrs[ioindex] == req) { break; } KASSERT(ioindex < mpt->els_cmds_allocated, ("can't find my mommie!")); /* remove from active list as we're going to re-post it */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); return (TRUE); } if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Async Primitive Send Complete\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Complete\n"); wakeup(req); } return (TRUE); } if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " "Length %d Message Flags %x\n", rp->Function, rp->Flags, rp->MsgLength, rp->MsgFlags); return (TRUE); } if (rp->MsgLength <= 5) { /* * This is just a ack of an original ELS buffer post */ mpt_lprt(mpt, MPT_PRT_DEBUG, "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); return (TRUE); } rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; cmd = be32toh(elsbuf[0]) >> 24; if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); return (TRUE); } ioindex = le32toh(rp->TransactionContext); req = mpt->els_cmd_ptrs[ioindex]; if (rctl == ELS && type == 1) { switch (cmd) { case PRLI: /* * Send back a PRLI ACC */ mpt_prt(mpt, "PRLI from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); elsbuf[0] = htobe32(0x02100014); elsbuf[1] |= htobe32(0x00000100); elsbuf[4] = htobe32(0x00000002); if (mpt->role & MPT_ROLE_TARGET) elsbuf[4] |= htobe32(0x00000010); if (mpt->role & MPT_ROLE_INITIATOR) elsbuf[4] |= htobe32(0x00000020); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; case PRLO: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0x02100014); elsbuf[1] = htobe32(0x08000100); mpt_prt(mpt, "PRLO from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; default: mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); break; } } else if (rctl == ABTS && type == 0) { uint16_t rx_id = le16toh(rp->Rxid); uint16_t ox_id = le16toh(rp->Oxid); request_t *tgt_req = NULL; mpt_prt(mpt, "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); if (rx_id >= mpt->mpt_max_tgtcmds) { mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); } else if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "No TGT CMD PTRS\n"); 
} else { tgt_req = mpt->tgt_cmd_ptrs[rx_id]; } if (tgt_req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); uint8_t *vbuf; union ccb *ccb = tgt->ccb; uint32_t ct_id; vbuf = tgt_req->req_vbuf; vbuf += MPT_RQSL(mpt); /* * Check to make sure we have the correct command * The reply descriptor in the target state should * should contain an IoIndex that should match the * RX_ID. * * It'd be nice to have OX_ID to crosscheck with * as well. */ ct_id = GET_IO_INDEX(tgt->reply_desc); if (ct_id != rx_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", rx_id, ct_id); goto skip; } ccb = tgt->ccb; if (ccb) { mpt_prt(mpt, "CCB (%p): lun %u flags %x status %x\n", ccb, ccb->ccb_h.target_lun, ccb->ccb_h.flags, ccb->ccb_h.status); } mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " "%x nxfers %x\n", tgt->state, tgt->resid, tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); skip: if (mpt_abort_target_cmd(mpt, tgt_req)) { mpt_prt(mpt, "unable to start TargetAbort\n"); } } else { mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); } memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0); elsbuf[1] = htobe32((ox_id << 16) | rx_id); elsbuf[2] = htobe32(0x000ffff); /* * Dork with the reply frame so that the reponse to it * will be correct. */ rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 12); do_refresh = FALSE; } else { mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); } if (do_refresh == TRUE) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); } return (TRUE); } /* * Clean up all SCSI Initiator personality state in response * to a controller reset. */ static void mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) { /* * The pending list is already run down by * the generic handler. Perform the same * operation on the timed out request list. */ mpt_complete_request_chain(mpt, &mpt->request_timeout_list, MPI_IOCSTATUS_INVALID_STATE); /* * XXX: We need to repost ELS and Target Command Buffers? */ /* * Inform the XPT that a bus reset has occurred. */ xpt_async(AC_BUS_RESET, mpt->path, NULL); } /* * Parse additional completion information in the reply * frame for SCSI I/O requests. 
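 *
 * The handler below derives the residual from TransferCount, copies
 * any autosense data into the CCB, and then maps the masked IOCStatus
 * value onto a CAM status code, freezing the CCB whenever the result
 * is not CAM_REQ_CMP.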
*/ static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { union ccb *ccb; MSG_SCSI_IO_REPLY *scsi_io_reply; u_int ioc_status; u_int sstate; u_int loginfo; MPT_DUMP_REPLY_FRAME(mpt, reply_frame); KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, ("MPT SCSI I/O Handler called with incorrect reply type")); KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, ("MPT SCSI I/O Handler called with continuation reply")); scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; ioc_status = le16toh(scsi_io_reply->IOCStatus); loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE; ioc_status &= MPI_IOCSTATUS_MASK; sstate = scsi_io_reply->SCSIState; ccb = req->ccb; ccb->csio.resid = ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.sense_resid = ccb->csio.sense_len - scsi_io_reply->SenseCount; bcopy(req->sense_vbuf, &ccb->csio.sense_data, min(ccb->csio.sense_len, scsi_io_reply->SenseCount)); } if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { /* * Tag messages rejected, but non-tagged retry * was successful. XXXX mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); */ } switch(ioc_status) { case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* * XXX * Linux driver indicates that a zero * transfer length with this error code * indicates a CRC error. * * No need to swap the bytes for checking * against zero. */ if (scsi_io_reply->TransferCount == 0) { mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; } /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI_IOCSTATUS_SUCCESS: case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { /* * Status was never returned for this transaction. */ mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { /* XXX Handle SPI-Packet and FCP-2 reponse info. */ mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); break; case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * Since selection timeouts and "device really not * there" are grouped into this error code, report * selection timeout. Selection timeouts are * typically retried before giving up on the device * whereas "device not there" errors are considered * unretryable. */ mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: mpt_set_ccb_status(ccb, CAM_PATH_INVALID); break; case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: mpt_set_ccb_status(ccb, CAM_TID_INVALID); break; case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: ccb->ccb_h.status = CAM_UA_TERMIO; break; case MPI_IOCSTATUS_INVALID_STATE: /* * The IOC has been reset. Emulate a bus reset. 
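 *
 * (This shares the CAM_SCSI_BUS_RESET disposition of the
 * EXT_TERMINATED case via the fallthrough below, so the command is
 * reported to CAM as if a bus reset had occurred.)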
*/ /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* * Don't clobber any timeout status that has * already been set for this transaction. We * want the SCSI layer to be able to differentiate * between the command we aborted due to timeout * and any innocent bystanders. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); break; case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); break; case MPI_IOCSTATUS_BUSY: mpt_set_ccb_status(ccb, CAM_BUSY); break; case MPI_IOCSTATUS_INVALID_FUNCTION: case MPI_IOCSTATUS_INVALID_SGL: case MPI_IOCSTATUS_INTERNAL_ERROR: case MPI_IOCSTATUS_INVALID_FIELD: default: /* XXX * Some of the above may need to kick * of a recovery action!!!! */ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { mpt_freeze_ccb(ccb); } return (TRUE); } static void mpt_action(struct cam_sim *sim, union ccb *ccb) { struct mpt_softc *mpt; struct ccb_trans_settings *cts; target_id_t tgt; lun_id_t lun; int raid_passthru; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); mpt = (struct mpt_softc *)cam_sim_softc(sim); KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action")); raid_passthru = (sim == mpt->phydisk_sim); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { CAMLOCK_2_MPTLOCK(mpt); if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { MPTLOCK_2_CAMLOCK(mpt); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } MPTLOCK_2_CAMLOCK(mpt); } ccb->ccb_h.ccb_mpt_ptr = mpt; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } } /* Max supported CDB length is 16 bytes */ /* XXX Unless we implement the new 32byte message type */ if (ccb->csio.cdb_len > sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } ccb->csio.scsi_status = SCSI_STATUS_OK; mpt_start(sim, ccb); return; case XPT_RESET_BUS: case XPT_RESET_DEV: mpt_lprt(mpt, MPT_PRT_DEBUG, ccb->ccb_h.func_code == XPT_RESET_BUS ? "XPT_RESET_BUS\n" : "XPT_RESET_DEV\n"); CAMLOCK_2_MPTLOCK(mpt); (void) mpt_bus_reset(mpt, tgt, lun, FALSE); MPTLOCK_2_CAMLOCK(mpt); /* * mpt_bus_reset is always successful in that it * will fall back to a hard reset should a bus * reset attempt fail. 
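 *
 * That is why its return value is discarded above and the CCB is
 * unconditionally completed with CAM_REQ_CMP below.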
*/ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; CAMLOCK_2_MPTLOCK(mpt); switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMED_NOTIFY: ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); break; case XPT_CONT_TARGET_IO: mpt_prt(mpt, "cannot abort active CTIOs yet\n"); ccb->ccb_h.status = CAM_UA_ABORT; break; case XPT_SCSI_IO: ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } MPTLOCK_2_CAMLOCK(mpt); break; } #ifdef CAM_NEW_TRAN_CODE #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) #else #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) #endif #define DP_DISC_ENABLE 0x1 #define DP_DISC_DISABL 0x2 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) #define DP_TQING_ENABLE 0x4 #define DP_TQING_DISABL 0x8 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) #define DP_WIDE 0x10 #define DP_NARROW 0x20 #define DP_WIDTH (DP_WIDE|DP_NARROW) #define DP_SYNC 0x40 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; #endif uint8_t dval; u_int period; u_int offset; int i, j; cts = &ccb->cts; if (mpt->is_fc || mpt->is_sas) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } /* * Skip attempting settings on RAID volume disks. * Other devices on the bus get the normal treatment. */ if (mpt->phydisk_sim && raid_passthru == 0 && mpt_is_raid_volume(mpt, tgt) != 0) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "skipping transfer settings for RAID volumes\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "honoring BIOS transfer negotiations\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } dval = 0; period = 0; offset = 0; #ifndef CAM_NEW_TRAN_CODE if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { dval |= cts->bus_width ? DP_WIDE : DP_NARROW; } if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { dval |= DP_SYNC; period = cts->sync_period; offset = cts->sync_offset; } #else scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? DP_TQING_ENABLE : DP_TQING_DISABL; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
DP_WIDE : DP_NARROW; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { dval |= DP_SYNC; period = spi->sync_period; offset = spi->sync_offset; } #endif CAMLOCK_2_MPTLOCK(mpt); if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { mpt->mpt_disc_enable &= ~(1 << tgt); } if (dval & DP_TQING_ENABLE) { mpt->mpt_tag_enable |= (1 << tgt); } else if (dval & DP_TQING_DISABL) { mpt->mpt_tag_enable &= ~(1 << tgt); } if (dval & DP_WIDTH) { mpt_setwidth(mpt, tgt, 1); } if (dval & DP_SYNC) { mpt_setsync(mpt, tgt, period, offset); } if (mpt_update_spi_config(mpt, tgt)) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } MPTLOCK_2_CAMLOCK(mpt); break; } case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; if (mpt->is_fc) { #ifndef CAM_NEW_TRAN_CODE /* * a lot of normal SCSI things don't make sense. */ cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; /* * How do you measure the width of a high * speed serial bus? Well, in bytes. * * Offset and period make no sense, though, so we set * (above) a 'base' transfer speed to be gigabit. */ cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; #else struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FC; cts->transport_version = 0; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; /* XXX: Need for 2Gb/s */ /* XXX: need a port database for each target */ #endif } else if (mpt->is_sas) { #ifndef CAM_NEW_TRAN_CODE cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; /* * How do you measure the width of a high * speed serial bus? Well, in bytes. * * Offset and period make no sense, though, so we set * (above) a 'base' transfer speed to be gigabit. */ cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; #else struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_3; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; sas->bitrate = 300000; /* XXX: Default 3Gbps */ #endif } else if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } mpt_calc_geometry(ccg, /*extended*/1); KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->target_sprt = 0; cpi->hba_eng_cnt = 0; + cpi->max_target = mpt->mpt_max_devices - 1; + /* + * XXX: FC cards report MAX_DEVICES of 512- but we + * XXX: seem to hang when going higher than 255. + */ + if (cpi->max_target > 255) + cpi->max_target = 255; cpi->max_lun = 7; + cpi->initiator_id = mpt->mpt_ini_id; + cpi->bus_id = cam_sim_bus(sim); - /* XXX Report base speed more accurately for FC/SAS, etc.*/ + /* + * Actual speed for each device varies. + * + * The base speed is the speed of the underlying connection. + * This is strictly determined for SPI (async, narrow). If + * link is up for Fibre Channel, then speed can be gotten + * from that. 
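+ * base_transfer_speed is in KB/s; a 2Gb/s FC login thus ends up
+ * reported as 2 * 100000 = 200000, while SAS is simply assumed to
+ * run at 3Gb/s (300000) below.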
+ */ if (mpt->is_fc) { - /* XXX SHOULD BE BASED UPON IOC FACTS XXX XXX */ - cpi->max_target = 255; cpi->hba_misc = PIM_NOBUSRESET; - cpi->initiator_id = mpt->mpt_ini_id; - cpi->base_transfer_speed = 100000; + cpi->base_transfer_speed = + mpt->mpt_fcport_speed * 100000; cpi->hba_inquiry = PI_TAG_ABLE; } else if (mpt->is_sas) { - cpi->max_target = 63; /* XXX */ cpi->hba_misc = PIM_NOBUSRESET; - cpi->initiator_id = mpt->mpt_ini_id; cpi->base_transfer_speed = 300000; cpi->hba_inquiry = PI_TAG_ABLE; } else { - cpi->max_target = 15; cpi->hba_misc = PIM_SEQSCAN; - cpi->initiator_id = mpt->mpt_ini_id; cpi->base_transfer_speed = 3300; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; } /* * We give our fake RAID passhtru bus a width that is MaxVolumes * wide, restrict it to one lun and have it *not* be a bus * that can have a SCSI bus reset. */ if (raid_passthru) { cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; cpi->initiator_id = cpi->max_target + 1; cpi->max_lun = 0; cpi->hba_misc |= PIM_NOBUSRESET; } if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { cpi->hba_misc |= PIM_NOINITIATOR; } if ((mpt->role & MPT_ROLE_TARGET) != 0) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_EN_LUN: /* Enable LUN as a target */ { int result; CAMLOCK_2_MPTLOCK(mpt); if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); MPTLOCK_2_CAMLOCK(mpt); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } break; } case XPT_NOTIFY_ACK: /* recycle notify ack */ case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tgt_resource_t *trtp; lun_id_t lun = ccb->ccb_h.target_lun; ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = mpt; ccb->ccb_h.flags = 0; if (lun == CAM_LUN_WILDCARD) { if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } trtp = &mpt->trt_wildcard; } else if (lun >= MPT_MAX_LUNS) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } else { trtp = &mpt->trt[lun]; } CAMLOCK_2_MPTLOCK(mpt); if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE ATIO %p lun %d\n", ccb, lun); STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, sim_links.stqe); } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE INOT lun %d\n", lun); STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, sim_links.stqe); } else { mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); MPTLOCK_2_CAMLOCK(mpt); - break; + return; } case XPT_CONT_TARGET_IO: CAMLOCK_2_MPTLOCK(mpt); mpt_target_start_io(mpt, ccb); MPTLOCK_2_CAMLOCK(mpt); - break; + return; + default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static int mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) { #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; #endif target_id_t tgt; uint8_t dval, pval, oval; int rv; if (xpt_path_sim(cts->ccb_h.path) == 
mpt->phydisk_sim) { if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { return (-1); } } else { tgt = cts->ccb_h.target_id; } /* * XXX: We aren't looking Port Page 2 BIOS settings here. * XXX: For goal settings, we pick the max from port page 0 * * For current settings we read the current settings out from * device page 0 for that target. */ if (IS_CURRENT_SETTINGS(cts)) { CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; CAMLOCK_2_MPTLOCK(mpt); tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { MPTLOCK_2_CAMLOCK(mpt); mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } MPTLOCK_2_CAMLOCK(mpt); dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? DP_WIDE : DP_NARROW; dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? DP_DISC_ENABLE : DP_DISC_DISABL; dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? DP_TQING_ENABLE : DP_TQING_DISABL; oval = (tmp.NegotiatedParameters >> 16) & 0xff; pval = (tmp.NegotiatedParameters >> 8) & 0xff; mpt->mpt_dev_page0[tgt] = tmp; } else { /* * XXX: Just make theoretical maximum. */ dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE; oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff; pval = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff; } #ifndef CAM_NEW_TRAN_CODE cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); if (dval & DP_DISC_ENABLE) { cts->flags |= CCB_TRANS_DISC_ENB; } if (dval & DP_TQING_ENABLE) { cts->flags |= CCB_TRANS_TAG_ENB; } if (dval & DP_WIDE) { cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } cts->valid = CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; if (oval) { cts->sync_period = pval; cts->sync_offset = oval; cts->valid |= CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; } #else cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (dval & DP_DISC_ENABLE) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } if (dval & DP_TQING_ENABLE) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } if (oval && pval) { spi->sync_offset = oval; spi->sync_period = pval; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; } spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DP_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else { scsi->valid = 0; } #endif mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt, IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); return (0); } static void mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; if (onoff) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; } else { ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; } } static void mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; ptr->RequestedParameters |= (period << 8) | (offset << 16); if (period < 0xa) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; } if (period < 0x9) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; } } static int mpt_update_spi_config(struct mpt_softc *mpt, int tgt) { CONFIG_PAGE_SCSI_DEVICE_1 tmp; int rv; mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); tmp = mpt->mpt_dev_page1[tgt]; rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); return (-1); } return (0); } static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) { #if __FreeBSD_version >= 500000 cam_calc_geometry(ccg, extended); #else uint32_t size_mb; uint32_t secs_per_cylinder; if (ccg->block_size == 0) { ccg->ccb_h.status = CAM_REQ_INVALID; return; } size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccg->ccb_h.status = CAM_REQ_CMP; #endif } /****************************** Timeout Recovery ******************************/ static int mpt_spawn_recovery_thread(struct mpt_softc *mpt) { int error; error = mpt_kthread_create(mpt_recovery_thread, mpt, &mpt->recovery_thread, /*flags*/0, /*altstack*/0, "mpt_recovery%d", mpt->unit); return (error); } static void mpt_terminate_recovery_thread(struct mpt_softc *mpt) { if (mpt->recovery_thread == NULL) { return; } mpt->shutdwn_recovery = 1; wakeup(mpt); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); } static void mpt_recovery_thread(void *arg) { struct mpt_softc *mpt; #if __FreeBSD_version >= 500000 mtx_lock(&Giant); #endif mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { mpt_sleep(mpt, mpt, PUSER, "idle", 0); } } if (mpt->shutdwn_recovery != 0) { break; } mpt_recover_commands(mpt); } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); MPT_UNLOCK(mpt); #if __FreeBSD_version >= 500000 mtx_unlock(&Giant); #endif kthread_exit(0); } static int mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) { MSG_SCSI_TASK_MGMT *tmf_req; int error; /* * Wait for any current TMF request to complete. 
* We're only allowed to issue one TMF at a time. */ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, sleep_ok, MPT_TMF_MAX_TIMEOUT); if (error != 0) { mpt_reset(mpt, TRUE); return (ETIMEDOUT); } mpt_assign_serno(mpt, mpt->tmf_req); mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; memset(tmf_req, 0, sizeof(*tmf_req)); tmf_req->TargetID = target; tmf_req->Bus = channel; tmf_req->ChainOffset = 0; tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; tmf_req->Reserved = 0; tmf_req->TaskType = type; tmf_req->Reserved1 = 0; tmf_req->MsgFlags = flags; tmf_req->MsgContext = htole32(mpt->tmf_req->index | scsi_tmf_handler_id); memset(&tmf_req->LUN, 0, sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2)); if (lun > 256) { tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); tmf_req->LUN[1] = lun & 0xff; } else { tmf_req->LUN[1] = lun; } tmf_req->TaskMsgContext = abort_ctx; mpt_lprt(mpt, MPT_PRT_INFO, "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, mpt->tmf_req->serno, tmf_req->MsgContext); if (mpt->verbose > MPT_PRT_DEBUG) { mpt_print_request(tmf_req); } KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, ("mpt_scsi_send_tmf: tmf_req already on pending list")); TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); if (error != MPT_OK) { mpt_reset(mpt, TRUE); } return (error); } /* * When a command times out, it is placed on the requeust_timeout_list * and we wake our recovery thread. The MPT-Fusion architecture supports * only a single TMF operation at a time, so we serially abort/bdr, etc, * the timedout transactions. The next TMF is issued either by the * completion handler of the current TMF waking our recovery thread, * or the TMF timeout handler causing a hard reset sequence. */ static void mpt_recover_commands(struct mpt_softc *mpt) { request_t *req; union ccb *ccb; int error; if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * No work to do- leave. */ mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); return; } /* * Flush any commands whose completion coincides with their timeout. */ mpt_intr(mpt); if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ mpt_prt(mpt, "Timedout requests already complete. " "Interrupts may not be functioning.\n"); mpt_enable_ints(mpt); return; } /* * We have no visibility into the current state of the * controller, so attempt to abort the commands in the * order they timed-out. For initiator commands, we * depend on the reply handler pulling requests off * the timeout list. */ while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { uint16_t status; uint8_t response; MSG_REQUEST_HEADER *hdrp = req->req_vbuf; mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", req, req->serno, hdrp->Function); ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "null ccb in timed out request. " "Resetting Controller.\n"); mpt_reset(mpt, TRUE); continue; } mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); /* * Check to see if this is not an initiator command and * deal with it differently if it is. */ switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: break; default: /* * XXX: FIX ME: need to abort target assists... 
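 * For now such requests are simply moved back to the pending list
 * below instead of being aborted.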
*/ mpt_prt(mpt, "just putting it back on the pend q\n"); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); continue; } error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, htole32(req->index | scsi_io_handler_id), TRUE); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. Our queue should be emptied * by the hard reset. */ continue; } error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, 500); status = mpt->tmf_req->IOCStatus; response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error != 0) { /* * If we've errored out,, reset the controller. */ mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " "Resetting controller\n"); mpt_reset(mpt, TRUE); continue; } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); continue; } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); continue; } mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); } } /************************ Target Mode Support ****************************/ static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) { MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; PTR_SGE_TRANSACTION32 tep; PTR_SGE_SIMPLE32 se; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fc = req->req_vbuf; memset(fc, 0, MPT_REQUEST_AREA); fc->BufferCount = 1; fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; fc->MsgContext = htole32(req->index | fc_els_handler_id); /* * Okay, set up ELS buffer pointers. ELS buffer pointers * consist of a TE SGL element (with details length of zero) * followe by a SIMPLE SGL element which holds the address * of the buffer. 
*/ tep = (PTR_SGE_TRANSACTION32) &fc->SGL; tep->ContextSize = 4; tep->Flags = 0; tep->TransactionContext[0] = htole32(ioindex); se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; se->FlagsLength = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT; se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); se->Address = (uint32_t) paddr; mpt_lprt(mpt, MPT_PRT_DEBUG, "add ELS index %d ioindex %d for %p:%u\n", req->index, ioindex, req, req->serno); KASSERT(((req->state & REQ_STATE_LOCKED) != 0), ("mpt_fc_post_els: request not locked")); mpt_send_cmd(mpt, req); } static void mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) { PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; PTR_CMD_BUFFER_DESCRIPTOR cb; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(req->req_vbuf, 0, MPT_REQUEST_AREA); MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; fc = req->req_vbuf; fc->BufferCount = 1; fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); cb = &fc->Buffer[0]; cb->IoIndex = htole16(ioindex); cb->u.PhysicalAddress32 = (U32) paddr; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); } static int mpt_add_els_buffers(struct mpt_softc *mpt) { int i; if (mpt->is_fc == 0) { return (TRUE); } if (mpt->els_cmds_allocated) { return (TRUE); } mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->els_cmd_ptrs == NULL) { return (FALSE); } /* * Feed the chip some ELS buffer resources */ for (i = 0; i < MPT_MAX_ELS; i++) { request_t *req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->els_cmd_ptrs[i] = req; mpt_fc_post_els(mpt, req, i); } if (i == 0) { mpt_prt(mpt, "unable to add ELS buffer resources\n"); free(mpt->els_cmd_ptrs, M_DEVBUF); mpt->els_cmd_ptrs = NULL; return (FALSE); } if (i != MPT_MAX_ELS) { mpt_lprt(mpt, MPT_PRT_INFO, "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); } mpt->els_cmds_allocated = i; return(TRUE); } static int mpt_add_target_commands(struct mpt_softc *mpt) { int i, max; if (mpt->tgt_cmd_ptrs) { return (TRUE); } max = MPT_MAX_REQUESTS(mpt) >> 1; if (max > mpt->mpt_max_tgtcmds) { max = mpt->mpt_max_tgtcmds; } mpt->tgt_cmd_ptrs = malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_add_target_commands: could not allocate cmd ptrs\n"); return (FALSE); } for (i = 0; i < max; i++) { request_t *req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->tgt_cmd_ptrs[i] = req; mpt_post_target_command(mpt, req, i); } if (i == 0) { mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); free(mpt->tgt_cmd_ptrs, M_DEVBUF); mpt->tgt_cmd_ptrs = NULL; return (FALSE); } mpt->tgt_cmds_allocated = i; if (i < max) { mpt_lprt(mpt, MPT_PRT_INFO, "added %d of %d target bufs\n", i, max); } return (i); } static void mpt_free_els_buffers(struct mpt_softc *mpt) { mpt_prt(mpt, "fix me! need to implement mpt_free_els_buffers"); } static void mpt_free_target_commands(struct mpt_softc *mpt) { mpt_prt(mpt, "fix me! 
need to implement mpt_free_target_commands"); } static int mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 1; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (mpt->tenabled == 0) { /* * Try to add some target command resources */ if (mpt_add_target_commands(mpt) == FALSE) { mpt_free_els_buffers(mpt); return (ENOMEM); } if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 1; } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 1; } else { mpt->trt[lun].enabled = 1; } return (0); } static int mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { int i; if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 0; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 0; } else { mpt->trt[lun].enabled = 0; } for (i = 0; i < MPT_MAX_LUNS; i++) { if (mpt->trt[lun].enabled) { break; } } if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { mpt_free_els_buffers(mpt); mpt_free_target_commands(mpt); if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 0; } return (0); } /* * Called with MPT lock held */ static void mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); switch (tgt->state) { case TGT_STATE_IN_CAM: break; case TGT_STATE_MOVING_DATA: mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); return; } if (csio->dxfer_len) { bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* * Record the currently active ccb and the * request for it in our target state area. 
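 * The turbo reply handler uses tgt->ccb and tgt->req to find this
 * transaction again when the TARGET_ASSIST completes.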
*/ tgt->ccb = ccb; tgt->req = req; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas == 0) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); if (csio->ccb_h.target_lun > 256) { ta->LUN[0] = 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); ta->LUN[1] = csio->ccb_h.target_lun & 0xff; } else { ta->LUN[1] = csio->ccb_h.target_lun; } ta->RelativeOffset = tgt->bytes_xfered; ta->DataLength = ccb->csio.dxfer_len; if (ta->DataLength > tgt->resid) { ta->DataLength = tgt->resid; } /* * XXX Should be done after data transfer completes? */ tgt->resid -= csio->dxfer_len; tgt->bytes_xfered += csio->dxfer_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; } #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_AUTO_STATUS; } #endif tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; mpt_lprt(mpt, MPT_PRT_DEBUG, "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); MPTLOCK_2_CAMLOCK(mpt); if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { int error; int s = splsoftvm(); error = bus_dmamap_load(mpt->buffer_dmat, req->dmap, csio->data_ptr, csio->dxfer_len, cb, req, 0); splx(s); if (error == EINPROGRESS) { xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { /* * We have been given a pointer to single * physical buffer. */ struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t) (vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; (*cb)(req, &seg, 1, 0); } } else { /* * We have been given a list of addresses. * This case could be easily supported but they are not * currently generated by the CAM subsystem so there * is no point in wasting the time right now. */ struct bus_dma_segment *sgs; if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { (*cb)(req, NULL, 0, EFAULT); } else { /* Just use the segments provided */ sgs = (struct bus_dma_segment *)csio->data_ptr; (*cb)(req, sgs, csio->sglist_cnt, 0); } } CAMLOCK_2_MPTLOCK(mpt); } else { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; /* * XXX: I don't know why this seems to happen, but * XXX: completing the CCB seems to make things happy. * XXX: This seems to happen if the initiator requests * XXX: enough data that we have to do multiple CTIOs. 
*/ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Meaningless STATUS CCB (%p): flags %x status %x " "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); return; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &csio->sense_data, min(csio->sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); } } /* * Abort queued up CCBs */ static cam_status mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) { struct mpt_hdr_stailq *lp; struct ccb_hdr *srch; int found = 0; union ccb *accb = ccb->cab.abort_ccb; tgt_resource_t *trtp; mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { trtp = &mpt->trt_wildcard; } else { trtp = &mpt->trt[ccb->ccb_h.target_lun]; } if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &trtp->atios; } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { lp = &trtp->inots; } else { return (CAM_REQ_INVALID); } STAILQ_FOREACH(srch, lp, sim_links.stqe) { if (srch == &accb->ccb_h) { found = 1; STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); break; } } if (found) { accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); return (CAM_PATH_INVALID); } /* * Ask the MPT to abort the current target command */ static int mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) { int error; request_t *req; PTR_MSG_TARGET_MODE_ABORT abtp; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (-1); } abtp = req->req_vbuf; memset(abtp, 0, sizeof (*abtp)); abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); error = 0; if (mpt->is_fc || mpt->is_sas) { mpt_send_cmd(mpt, req); } else { error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); } return (error); } /* * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the * FC929 to set bogus FC_RSP fields (nonzero residuals * but w/o RESID fields set). This causes QLogic initiators * to think maybe that a frame was lost. * * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because * we use allocated requests to do TARGET_ASSIST and we * need to know when to release them. 
*/ static void mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, uint8_t status, uint8_t const *sense_data) { uint8_t *cmd_vbuf; mpt_tgt_state_t *tgt; PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; request_t *req; bus_addr_t paddr; int resplen = 0; cmd_vbuf = cmd_req->req_vbuf; cmd_vbuf += MPT_RQSL(mpt); tgt = MPT_TGT_STATE(mpt, cmd_req); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); } else { mpt_prt(mpt, "XXXX could not allocate status req- dropping\n"); } return; } req->ccb = ccb; if (ccb) { ccb->ccb_h.ccb_mpt_ptr = mpt; ccb->ccb_h.ccb_req_ptr = req; } /* * Record the currently active ccb, if any, and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; tgt->state = TGT_STATE_SENDING_STATUS; tp = req->req_vbuf; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(tp, 0, sizeof (*tp)); tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; uint8_t *sts_vbuf; uint32_t *rsp; sts_vbuf = req->req_vbuf; sts_vbuf += MPT_RQSL(mpt); rsp = (uint32_t *) sts_vbuf; memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); /* * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. * It has to be big-endian in memory and is organized * in 32 bit words, which are much easier to deal with * as words which are swizzled as needed. * * All we're filling here is the FC_RSP payload. * We may just have the chip synthesize it if * we have no residual and an OK status. * */ memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); rsp[2] = status; if (tgt->resid) { rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(tgt->resid); #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } if (status == SCSI_STATUS_CHECK_COND) { int i; rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
*/ rsp[4] = htobe32(MPT_SENSE_SIZE); if (sense_data) { memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); } else { mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" "TION but no sense data?\n"); memset(&rsp[8], 0, MPT_SENSE_SIZE); } for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { rsp[i] = htobe32(rsp[i]); } #ifdef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif } #ifndef WE_TRUST_AUTO_GOOD_STATUS resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); #endif rsp[2] = htobe32(rsp[2]); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; tp->StatusCode = status; tp->QueueTag = htole16(sp->Tag); memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); } tp->ReplyWord = htole32(tgt->reply_desc); tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); #ifdef WE_CAN_USE_AUTO_REPOST tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; #endif if (status == SCSI_STATUS_OK && resplen == 0) { tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; } else { tp->StatusDataSGE.u.Address32 = (uint32_t) paddr; tp->StatusDataSGE.FlagsLength = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT; tp->StatusDataSGE.FlagsLength |= resplen; } mpt_lprt(mpt, MPT_PRT_DEBUG, "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, req->serno, tgt->resid); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz); } mpt_send_cmd(mpt, req); } static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, tgt_resource_t *trtp, int init_id) { struct ccb_immed_notify *inot; mpt_tgt_state_t *tgt; tgt = MPT_TGT_STATE(mpt, req); inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); if (inot == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); memset(&inot->sense_data, 0, sizeof (inot->sense_data)); inot->sense_len = 0; memset(inot->message_args, 0, sizeof (inot->message_args)); inot->initiator_id = init_id; /* XXX */ /* * This is a somewhat grotesque attempt to map from task management * to old style SCSI messages. God help us all.
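 * For example, both MPT_ABORT_TASK_SET and MPT_TERMINATE_TASK end
 * up as MSG_ABORT_TAG, since there is no closer old-style message.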
*/ switch (fc) { case MPT_ABORT_TASK_SET: inot->message_args[0] = MSG_ABORT_TAG; break; case MPT_CLEAR_TASK_SET: inot->message_args[0] = MSG_CLEAR_TASK_SET; break; case MPT_TARGET_RESET: inot->message_args[0] = MSG_TARGET_RESET; break; case MPT_CLEAR_ACA: inot->message_args[0] = MSG_CLEAR_ACA; break; case MPT_TERMINATE_TASK: inot->message_args[0] = MSG_ABORT_TAG; break; default: inot->message_args[0] = MSG_NOOP; break; } tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; MPTLOCK_2_CAMLOCK(mpt); xpt_done((union ccb *)inot); CAMLOCK_2_MPTLOCK(mpt); } static void mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) { struct ccb_accept_tio *atiop; lun_id_t lun; int tag_action = 0; mpt_tgt_state_t *tgt; tgt_resource_t *trtp = NULL; U8 *lunptr; U8 *vbuf; U16 itag; U16 ioindex; mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; uint8_t *cdbp; /* * First, DMA sync the received command- which is in the *request* * phys area. * XXX: We could optimize this for a range */ bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, BUS_DMASYNC_POSTREAD); /* * Stash info for the current command where we can get at it later. */ vbuf = req->req_vbuf; vbuf += MPT_RQSL(mpt); /* * Get our state pointer set up. */ tgt = MPT_TGT_STATE(mpt, req); if (tgt->state != TGT_STATE_LOADED) { mpt_tgt_dump_req_state(mpt, req); panic("bad target state in mpt_scsi_tgt_atio"); } memset(tgt, 0, sizeof (mpt_tgt_state_t)); tgt->state = TGT_STATE_IN_CAM; tgt->reply_desc = reply_desc; ioindex = GET_IO_INDEX(reply_desc); if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc; fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; if (fc->FcpCntl[2]) { /* * Task Management Request */ switch (fc->FcpCntl[2]) { case 0x2: fct = MPT_ABORT_TASK_SET; break; case 0x4: fct = MPT_CLEAR_TASK_SET; break; case 0x20: fct = MPT_TARGET_RESET; break; case 0x40: fct = MPT_CLEAR_ACA; break; case 0x80: fct = MPT_TERMINATE_TASK; break; default: mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", fc->FcpCntl[2]); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); return; } } else { switch (fc->FcpCntl[1]) { case 0: tag_action = MSG_SIMPLE_Q_TAG; break; case 1: tag_action = MSG_HEAD_OF_Q_TAG; break; case 2: tag_action = MSG_ORDERED_Q_TAG; break; default: /* * Bah. Ignore Untagged Queing and ACA */ tag_action = MSG_SIMPLE_Q_TAG; break; } } tgt->resid = be32toh(fc->FcpDl); cdbp = fc->FcpCdb; lunptr = fc->FcpLun; itag = be16toh(fc->OptionalOxid); } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; cdbp = ssp->CDB; lunptr = ssp->LogicalUnitNumber; itag = ssp->InitiatorTag; } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; cdbp = sp->CDB; lunptr = sp->LogicalUnitNumber; itag = sp->Tag; } /* * Generate a simple lun */ switch (lunptr[0] & 0xc0) { case 0x40: lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; break; case 0: lun = lunptr[1]; break; default: mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); lun = 0xffff; break; } /* * Deal with non-enabled or bad luns here. 
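 * Such commands go to the wildcard listener if one is enabled;
 * failing that, task management requests are answered with CHECK
 * CONDITION (LUN not supported) sense built below.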
*/ if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || mpt->trt[lun].enabled == 0) { if (mpt->twildcard) { trtp = &mpt->trt_wildcard; } else if (fct != MPT_NIL_TMT_VALUE) { const uint8_t sp[MPT_SENSE_SIZE] = { 0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25 }; mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_CHECK_COND, sp); return; } } else { trtp = &mpt->trt[lun]; } /* * Deal with any task management */ if (fct != MPT_NIL_TMT_VALUE) { if (trtp == NULL) { mpt_prt(mpt, "task mgmt function %x but no listener\n", fct); mpt_scsi_tgt_status(mpt, 0, req, SCSI_STATUS_OK, 0); } else { mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, GET_INITIATOR_INDEX(reply_desc)); } return; } atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); if (atiop == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no ATIOs for lun %u- sending back %s\n", lun, mpt->tenabled? "QUEUE FULL" : "BUSY"); mpt_scsi_tgt_status(mpt, NULL, req, mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, NULL); return; } STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); atiop->ccb_h.ccb_mpt_ptr = mpt; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; atiop->init_id = GET_INITIATOR_INDEX(reply_desc); atiop->cdb_len = mpt_cdblen(cdbp[0], 16); memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); /* * The tag we construct here allows us to find the * original request that the command came in with. * * This way we don't have to depend on anything but the * tag to find things when CCBs show back up from CAM. */ atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); tgt->tag_id = atiop->tag_id; if (tag_action) { atiop->tag_action = tag_action; atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; } if (mpt->verbose >= MPT_PRT_DEBUG) { int i; mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, atiop->ccb_h.target_lun); for (i = 0; i < atiop->cdb_len; i++) { mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, (i == (atiop->cdb_len - 1))? '>' : ' '); } mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", itag, atiop->tag_id, tgt->reply_desc, tgt->resid); } MPTLOCK_2_CAMLOCK(mpt); xpt_done((union ccb *)atiop); CAMLOCK_2_MPTLOCK(mpt); } static void mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, tgt->tag_id, tgt->state); } static void mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) { mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, req->index, req->index, req->state); mpt_tgt_dump_tgt_state(mpt, req); } static int mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int dbg; union ccb *ccb; U16 status; if (reply_frame == NULL) { /* * Figure out what the state of the command is. */ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); #ifdef INVARIANTS mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); if (tgt->req) { mpt_req_not_spcl(mpt, tgt->req, "turbo scsi_tgt_reply associated req", __LINE__); } #endif switch(tgt->state) { case TGT_STATE_LOADED: /* * This is a new command starting. 
*/ mpt_scsi_tgt_atio(mpt, req, reply_desc); break; case TGT_STATE_MOVING_DATA: { uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request moving data"); /* NOTREACHED */ } if (ccb == NULL) { panic("mpt: turbo target reply with null " "associated ccb moving data"); /* NOTREACHED */ } tgt->ccb = NULL; tgt->nxfers++; untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); /* * Free the Target Assist Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * Do we need to send status now? That is, are * we done with all our data transfers? */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); tgt->state = TGT_STATE_IN_CAM; if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); break; } /* * Otherwise, send status (and sense) */ if (ccb->ccb_h.flags & CAM_SEND_SENSE) { sp = sense; memcpy(sp, &ccb->csio.sense_data, min(ccb->csio.sense_len, MPT_SENSE_SIZE)); } mpt_scsi_tgt_status(mpt, ccb, req, ccb->csio.scsi_status, sp); break; } case TGT_STATE_SENDING_STATUS: case TGT_STATE_MOVING_DATA_AND_STATUS: { int ioindex; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request sending status"); /* NOTREACHED */ } if (ccb) { tgt->ccb = NULL; if (tgt->state == TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS tag %x sts %x flgs %x req " "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, ccb->ccb_h.flags, tgt->req); /* * Free the Target Send Status Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); /* * Notify CAM that we're done */ mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("ZERO ccb sts at %d\n", __LINE__)); tgt->ccb = NULL; } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS non-CAM for req %p:%u\n", tgt->req, tgt->req->serno); } TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * And re-post the Command Buffer. * This wil reset the state. 
*/ ioindex = GET_IO_INDEX(reply_desc); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_post_target_command(mpt, req, ioindex); /* * And post a done for anyone who cares */ if (ccb) { if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } MPTLOCK_2_CAMLOCK(mpt); xpt_done(ccb); CAMLOCK_2_MPTLOCK(mpt); } break; } case TGT_STATE_NIL: /* XXX This Never Happens XXX */ tgt->state = TGT_STATE_LOADED; break; default: mpt_prt(mpt, "Unknown Target State 0x%x in Context " "Reply Function\n", tgt->state); } return (TRUE); } status = le16toh(reply_frame->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { dbg = MPT_PRT_ERROR; } else { dbg = MPT_PRT_DEBUG1; } mpt_lprt(mpt, dbg, "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", req, req->serno, reply_frame, reply_frame->Function, status); switch (reply_frame->Function) { case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: { mpt_tgt_state_t *tgt; #ifdef INVARIANTS mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); #endif if (status != MPI_IOCSTATUS_SUCCESS) { /* * XXX What to do? */ break; } tgt = MPT_TGT_STATE(mpt, req); KASSERT(tgt->state == TGT_STATE_LOADING, ("bad state 0x%x on reply to buffer post\n", tgt->state)); mpt_assign_serno(mpt, req); tgt->state = TGT_STATE_LOADED; break; } case MPI_FUNCTION_TARGET_ASSIST: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); #endif mpt_prt(mpt, "target assist completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_STATUS_SEND: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); #endif mpt_prt(mpt, "status send completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_MODE_ABORT: { PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; PTR_MSG_TARGET_MODE_ABORT abtp = (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); #endif mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; } default: mpt_prt(mpt, "Unknown Target Address Reply Function code: " "0x%x\n", reply_frame->Function); break; } return (TRUE); } Index: head/sys/dev/mpt/mpt_pci.c =================================================================== --- head/sys/dev/mpt/mpt_pci.c (revision 159918) +++ head/sys/dev/mpt/mpt_pci.c (revision 159919) @@ -1,902 +1,904 @@ /*- * PCI specific probe and attach routines for LSI Fusion Adapters * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * Partially derived from Matt Jacob's ISP driver. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002 by Matthew Jacob * Feral Software * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /* * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. 
Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #ifndef PCI_VENDOR_LSI #define PCI_VENDOR_LSI 0x1000 #endif #ifndef PCI_PRODUCT_LSI_FC909 #define PCI_PRODUCT_LSI_FC909 0x0620 #endif #ifndef PCI_PRODUCT_LSI_FC909A #define PCI_PRODUCT_LSI_FC909A 0x0621 #endif #ifndef PCI_PRODUCT_LSI_FC919 #define PCI_PRODUCT_LSI_FC919 0x0624 #endif #ifndef PCI_PRODUCT_LSI_FC929 #define PCI_PRODUCT_LSI_FC929 0x0622 #endif #ifndef PCI_PRODUCT_LSI_FC929X #define PCI_PRODUCT_LSI_FC929X 0x0626 #endif #ifndef PCI_PRODUCT_LSI_FC919X #define PCI_PRODUCT_LSI_FC919X 0x0628 #endif #ifndef PCI_PRODUCT_LSI_FC7X04X #define PCI_PRODUCT_LSI_FC7X04X 0x0640 #endif #ifndef PCI_PRODUCT_LSI_1030 #define PCI_PRODUCT_LSI_1030 0x0030 #endif #ifndef PCI_PRODUCT_LSI_SAS1064 #define PCI_PRODUCT_LSI_SAS1064 0x0050 #endif #ifndef PCI_PRODUCT_LSI_SAS1064A #define PCI_PRODUCT_LSI_SAS1064A 0x005C #endif #ifndef PCI_PRODUCT_LSI_SAS1064E #define PCI_PRODUCT_LSI_SAS1064E 0x0056 #endif #ifndef PCI_PRODUCT_LSI_SAS1066 #define PCI_PRODUCT_LSI_SAS1066 0x005E #endif #ifndef PCI_PRODUCT_LSI_SAS1066E #define PCI_PRODUCT_LSI_SAS1066E 0x005A #endif #ifndef PCI_PRODUCT_LSI_SAS1068 #define PCI_PRODUCT_LSI_SAS1068 0x0054 #endif #ifndef PCI_PRODUCT_LSI_SAS1068E #define PCI_PRODUCT_LSI_SAS1068E 0x0058 #endif #ifndef PCI_PRODUCT_LSI_SAS1078 #define PCI_PRODUCT_LSI_SAS1078 0x0060 #endif #ifndef PCIM_CMD_SERRESPEN #define PCIM_CMD_SERRESPEN 0x0100 #endif #define MPT_IO_BAR 0 #define MPT_MEM_BAR 1 static int mpt_pci_probe(device_t); static int mpt_pci_attach(device_t); static void mpt_free_bus_resources(struct mpt_softc *mpt); static int mpt_pci_detach(device_t); static int mpt_pci_shutdown(device_t); static int mpt_dma_mem_alloc(struct mpt_softc *mpt); static void mpt_dma_mem_free(struct mpt_softc *mpt); static void mpt_read_config_regs(struct mpt_softc *mpt); static void mpt_pci_intr(void *); static device_method_t mpt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mpt_pci_probe), DEVMETHOD(device_attach, mpt_pci_attach), DEVMETHOD(device_detach, mpt_pci_detach), DEVMETHOD(device_shutdown, mpt_pci_shutdown), { 0, 0 } }; static driver_t mpt_driver = { "mpt", mpt_methods, sizeof(struct mpt_softc) }; static devclass_t mpt_devclass; DRIVER_MODULE(mpt, pci, mpt_driver, mpt_devclass, 0, 0); MODULE_VERSION(mpt, 1); static int mpt_pci_probe(device_t dev) { char *desc; if (pci_get_vendor(dev) != PCI_VENDOR_LSI) return (ENXIO); switch ((pci_get_device(dev) & ~1)) { case PCI_PRODUCT_LSI_FC909: desc = "LSILogic FC909 FC Adapter"; break; 
case PCI_PRODUCT_LSI_FC909A: desc = "LSILogic FC909A FC Adapter"; break; case PCI_PRODUCT_LSI_FC919: desc = "LSILogic FC919 FC Adapter"; break; case PCI_PRODUCT_LSI_FC929: desc = "LSILogic FC929 FC Adapter"; break; case PCI_PRODUCT_LSI_FC919X: desc = "LSILogic FC919X FC Adapter"; break; case PCI_PRODUCT_LSI_FC929X: desc = "LSILogic FC929X 2Gb/s FC Adapter"; break; case PCI_PRODUCT_LSI_FC7X04X: desc = "LSILogic FC7X04X 4Gb/s FC Adapter"; break; case PCI_PRODUCT_LSI_1030: desc = "LSILogic 1030 Ultra4 Adapter"; break; case PCI_PRODUCT_LSI_SAS1064: case PCI_PRODUCT_LSI_SAS1064A: case PCI_PRODUCT_LSI_SAS1064E: case PCI_PRODUCT_LSI_SAS1066: case PCI_PRODUCT_LSI_SAS1066E: case PCI_PRODUCT_LSI_SAS1068: case PCI_PRODUCT_LSI_SAS1068E: case PCI_PRODUCT_LSI_SAS1078: desc = "LSILogic SAS Adapter"; break; default: return (ENXIO); } device_set_desc(dev, desc); return (0); } #if __FreeBSD_version < 500000 static void mpt_set_options(struct mpt_softc *mpt) { int bitmap; bitmap = 0; if (getenv_int("mpt_disable", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->disabled = 1; } } bitmap = 0; if (getenv_int("mpt_debug", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG; } } bitmap = 0; if (getenv_int("mpt_debug1", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG1; } } bitmap = 0; if (getenv_int("mpt_debug2", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG2; } } bitmap = 0; if (getenv_int("mpt_debug3", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG3; } } } #else static void mpt_set_options(struct mpt_softc *mpt) { int tval; tval = 0; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "disable", &tval) == 0 && tval != 0) { mpt->disabled = 1; } tval = 0; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "debug", &tval) == 0 && tval != 0) { mpt->verbose = tval; } tval = 0; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "role", &tval) == 0 && tval != 0 && tval <= 3) { mpt->role = tval; } } #endif static void mpt_link_peer(struct mpt_softc *mpt) { struct mpt_softc *mpt2; if (mpt->unit == 0) { return; } /* * XXX: depends on probe order */ mpt2 = (struct mpt_softc *)devclass_get_softc(mpt_devclass,mpt->unit-1); if (mpt2 == NULL) { return; } if (pci_get_vendor(mpt2->dev) != pci_get_vendor(mpt->dev)) { return; } if (pci_get_device(mpt2->dev) != pci_get_device(mpt->dev)) { return; } mpt->mpt2 = mpt2; mpt2->mpt2 = mpt; if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_prt(mpt, "linking with peer (mpt%d)\n", device_get_unit(mpt2->dev)); } } static void mpt_unlink_peer(struct mpt_softc *mpt) { if (mpt->mpt2) { mpt->mpt2->mpt2 = NULL; } } static int mpt_pci_attach(device_t dev) { struct mpt_softc *mpt; int iqd; uint32_t data, cmd; /* Allocate the softc structure */ mpt = (struct mpt_softc*)device_get_softc(dev); if (mpt == NULL) { device_printf(dev, "cannot allocate softc\n"); return (ENOMEM); } memset(mpt, 0, sizeof(struct mpt_softc)); switch ((pci_get_device(dev) & ~1)) { case PCI_PRODUCT_LSI_FC909: case PCI_PRODUCT_LSI_FC909A: case PCI_PRODUCT_LSI_FC919: case PCI_PRODUCT_LSI_FC929: case PCI_PRODUCT_LSI_FC919X: case PCI_PRODUCT_LSI_FC7X04X: mpt->is_fc = 1; break; case PCI_PRODUCT_LSI_SAS1064: case PCI_PRODUCT_LSI_SAS1064A: case PCI_PRODUCT_LSI_SAS1064E: case PCI_PRODUCT_LSI_SAS1066: case PCI_PRODUCT_LSI_SAS1066E: case PCI_PRODUCT_LSI_SAS1068: case PCI_PRODUCT_LSI_SAS1068E: case PCI_PRODUCT_LSI_SAS1078: mpt->is_sas = 1; break; default: mpt->is_spi = 
1; break; } mpt->dev = dev; mpt->unit = device_get_unit(dev); mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT; mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT; mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT; mpt->verbose = MPT_PRT_NONE; mpt->role = MPT_ROLE_NONE; mpt_set_options(mpt); if (mpt->verbose == MPT_PRT_NONE) { mpt->verbose = MPT_PRT_WARN; /* Print INFO level (if any) if bootverbose is set */ mpt->verbose += (bootverbose != 0)? 1 : 0; } /* Make sure memory access decoders are enabled */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "Memory accesses disabled"); return (ENXIO); } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set. */ cmd |= PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); /* * Make sure we've disabled the ROM. */ data = pci_read_config(dev, PCIR_BIOS, 4); data &= ~1; pci_write_config(dev, PCIR_BIOS, data, 4); /* * Is this part a dual? * If so, link with our partner (around yet) */ if ((pci_get_device(dev) & ~1) == PCI_PRODUCT_LSI_FC929 || (pci_get_device(dev) & ~1) == PCI_PRODUCT_LSI_FC7X04X || (pci_get_device(dev) & ~1) == PCI_PRODUCT_LSI_1030) { mpt_link_peer(mpt); } /* * Set up register access. PIO mode is required for * certain reset operations (but must be disabled for * some cards otherwise). */ mpt->pci_pio_rid = PCIR_BAR(MPT_IO_BAR); mpt->pci_pio_reg = bus_alloc_resource(dev, SYS_RES_IOPORT, &mpt->pci_pio_rid, 0, ~0, 0, RF_ACTIVE); if (mpt->pci_pio_reg == NULL) { device_printf(dev, "unable to map registers in PIO mode\n"); goto bad; } mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg); mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg); /* Allocate kernel virtual memory for the 9x9's Mem0 region */ mpt->pci_mem_rid = PCIR_BAR(MPT_MEM_BAR); mpt->pci_reg = bus_alloc_resource(dev, SYS_RES_MEMORY, &mpt->pci_mem_rid, 0, ~0, 0, RF_ACTIVE); if (mpt->pci_reg == NULL) { device_printf(dev, "Unable to memory map registers.\n"); if (mpt->is_sas) { device_printf(dev, "Giving Up.\n"); goto bad; } device_printf(dev, "Falling back to PIO mode.\n"); mpt->pci_st = mpt->pci_pio_st; mpt->pci_sh = mpt->pci_pio_sh; } else { mpt->pci_st = rman_get_bustag(mpt->pci_reg); mpt->pci_sh = rman_get_bushandle(mpt->pci_reg); } /* Get a handle to the interrupt */ iqd = 0; #if __FreeBSD_version < 500000 mpt->pci_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); #else mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, RF_ACTIVE | RF_SHAREABLE); #endif if (mpt->pci_irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } MPT_LOCK_SETUP(mpt); /* Disable interrupts at the part */ mpt_disable_ints(mpt); /* Register the interrupt handler */ if (bus_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, mpt_pci_intr, mpt, &mpt->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } /* Allocate dma memory */ /* XXX JGibbs -Should really be done based on IOCFacts. */ if (mpt_dma_mem_alloc(mpt)) { mpt_prt(mpt, "Could not allocate DMA memory\n"); goto bad; } /* * Save the PCI config register values * * Hard resets are known to screw up the BAR for diagnostic * memory accesses (Mem1). 
* * Using Mem1 is known to make the chip stop responding to * configuration space transfers, so we need to save it now */ mpt_read_config_regs(mpt); /* * Disable PIO until we need it */ - pci_disable_io(dev, SYS_RES_IOPORT); + if (mpt->is_sas) { + pci_disable_io(dev, SYS_RES_IOPORT); + } /* Initialize the hardware */ if (mpt->disabled == 0) { MPT_LOCK(mpt); if (mpt_attach(mpt) != 0) { MPT_UNLOCK(mpt); goto bad; } MPT_UNLOCK(mpt); } else { mpt_prt(mpt, "device disabled at user request\n"); goto bad; } mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown, dev, SHUTDOWN_PRI_DEFAULT); if (mpt->eh == NULL) { mpt_prt(mpt, "shutdown event registration failed\n"); MPT_LOCK(mpt); (void) mpt_detach(mpt); MPT_UNLOCK(mpt); goto bad; } KASSERT(MPT_OWNED(mpt) == 0, ("leaving attach with device locked")); return (0); bad: mpt_dma_mem_free(mpt); mpt_free_bus_resources(mpt); mpt_unlink_peer(mpt); MPT_LOCK_DESTROY(mpt); /* * but return zero to preserve unit numbering */ return (0); } /* * Free bus resources */ static void mpt_free_bus_resources(struct mpt_softc *mpt) { if (mpt->ih) { bus_teardown_intr(mpt->dev, mpt->pci_irq, mpt->ih); mpt->ih = 0; } if (mpt->pci_irq) { bus_release_resource(mpt->dev, SYS_RES_IRQ, 0, mpt->pci_irq); mpt->pci_irq = 0; } if (mpt->pci_pio_reg) { bus_release_resource(mpt->dev, SYS_RES_IOPORT, mpt->pci_pio_rid, mpt->pci_pio_reg); mpt->pci_pio_reg = 0; } if (mpt->pci_reg) { bus_release_resource(mpt->dev, SYS_RES_MEMORY, mpt->pci_mem_rid, mpt->pci_reg); mpt->pci_reg = 0; } MPT_LOCK_DESTROY(mpt); } /* * Disconnect ourselves from the system. */ static int mpt_pci_detach(device_t dev) { struct mpt_softc *mpt; mpt = (struct mpt_softc*)device_get_softc(dev); if (mpt) { MPT_LOCK(mpt); mpt_disable_ints(mpt); mpt_detach(mpt); mpt_reset(mpt, /*reinit*/FALSE); mpt_dma_mem_free(mpt); mpt_free_bus_resources(mpt); mpt_raid_free_mem(mpt); if (mpt->eh != NULL) { EVENTHANDLER_DEREGISTER(shutdown_final, mpt->eh); } MPT_UNLOCK(mpt); } return(0); } /* * Disable the hardware */ static int mpt_pci_shutdown(device_t dev) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)device_get_softc(dev); if (mpt) { int r; MPT_LOCK(mpt); r = mpt_shutdown(mpt); MPT_UNLOCK(mpt); return (r); } return(0); } static int mpt_dma_mem_alloc(struct mpt_softc *mpt) { int i, error, nsegs; uint8_t *vptr; uint32_t pptr, end; size_t len; struct mpt_map_info mi; /* Check if we already have allocated the reply memory */ if (mpt->reply_phys != 0) { return 0; } len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt); #ifdef RELENG_4 mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); } memset(mpt->request_pool, 0, len); #else mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); } #endif /* * Create a parent dma tag for this device. * * Align at byte boundaries, * Limit to 32-bit addressing for request/reply queues.
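* (The reply, request, and data-buffer tags created below are all children of this parent tag.)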
*/ if (mpt_dma_tag_create(mpt, /*parent*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/BUS_SPACE_MAXSIZE_32BIT, /*maxsegsz*/BUS_SPACE_UNRESTRICTED, /*flags*/0, &mpt->parent_dmat) != 0) { mpt_prt(mpt, "cannot create parent dma tag\n"); return (1); } /* Create a child tag for reply buffers */ if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, &mpt->reply_dmat) != 0) { mpt_prt(mpt, "cannot create a dma tag for replies\n"); return (1); } /* Allocate some DMA accessible memory for replies */ if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply, BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) { mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n", (u_long) (2 * PAGE_SIZE)); return (1); } mi.mpt = mpt; mi.error = 0; /* Load and lock it into "bus space" */ bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply, 2 * PAGE_SIZE, mpt_map_rquest, &mi, 0); if (mi.error) { mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n", mi.error); return (1); } mpt->reply_phys = mi.phys; /* Create a child tag for data buffers */ /* * XXX: we should say that nsegs is 'unrestricted', but that * XXX: tickles a horrible bug in the busdma code. Instead, * XXX: we'll derive a reasonable segment limit from MAXPHYS */ nsegs = (MAXPHYS / PAGE_SIZE) + 1; if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, nsegs, BUS_SPACE_MAXSIZE_32BIT, 0, &mpt->buffer_dmat) != 0) { mpt_prt(mpt, "cannot create a dma tag for data buffers\n"); return (1); } /* Create a child tag for request buffers */ if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0, &mpt->request_dmat) != 0) { mpt_prt(mpt, "cannot create a dma tag for requests\n"); return (1); } /* Allocate some DMA accessible memory for requests */ if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request, BUS_DMA_NOWAIT, &mpt->request_dmap) != 0) { mpt_prt(mpt, "cannot allocate %d bytes of request memory\n", MPT_REQ_MEM_SIZE(mpt)); return (1); } mi.mpt = mpt; mi.error = 0; /* Load and lock it into "bus space" */ bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request, MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0); if (mi.error) { mpt_prt(mpt, "error %d loading dma map for DMA request queue\n", mi.error); return (1); } mpt->request_phys = mi.phys; /* * Now create per-request dma maps */ i = 0; pptr = mpt->request_phys; vptr = mpt->request; end = pptr + MPT_REQ_MEM_SIZE(mpt); while(pptr < end) { request_t *req = &mpt->request_pool[i]; req->index = i++; /* Store location of Request Data */ req->req_pbuf = pptr; req->req_vbuf = vptr; pptr += MPT_REQUEST_AREA; vptr += MPT_REQUEST_AREA; req->sense_pbuf = (pptr - MPT_SENSE_SIZE); req->sense_vbuf = (vptr - MPT_SENSE_SIZE); error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap); if (error) { mpt_prt(mpt, "error %d creating per-cmd DMA maps\n", error); return (1); } } return (0); } /* Deallocate memory that was allocated by mpt_dma_mem_alloc */ static void mpt_dma_mem_free(struct mpt_softc *mpt) { int i; /* Make sure we aren't double destroying */ if (mpt->reply_dmat == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n"); return; } for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap); } bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap); bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap); bus_dma_tag_destroy(mpt->request_dmat); bus_dma_tag_destroy(mpt->buffer_dmat); bus_dmamap_unload(mpt->reply_dmat, mpt->reply_dmap); bus_dmamem_free(mpt->reply_dmat, mpt->reply, mpt->reply_dmap); bus_dma_tag_destroy(mpt->reply_dmat); bus_dma_tag_destroy(mpt->parent_dmat); mpt->reply_dmat = 0; free(mpt->request_pool, M_DEVBUF); mpt->request_pool = 0; } /* Reads modifiable (via PCI transactions) config registers */ static void mpt_read_config_regs(struct mpt_softc *mpt) { mpt->pci_cfg.Command = pci_read_config(mpt->dev, PCIR_COMMAND, 2); mpt->pci_cfg.LatencyTimer_LineSize = pci_read_config(mpt->dev, PCIR_CACHELNSZ, 2); mpt->pci_cfg.IO_BAR = pci_read_config(mpt->dev, PCIR_BAR(0), 4); mpt->pci_cfg.Mem0_BAR[0] = pci_read_config(mpt->dev, PCIR_BAR(1), 4); mpt->pci_cfg.Mem0_BAR[1] = pci_read_config(mpt->dev, PCIR_BAR(2), 4); mpt->pci_cfg.Mem1_BAR[0] = pci_read_config(mpt->dev, PCIR_BAR(3), 4); mpt->pci_cfg.Mem1_BAR[1] = pci_read_config(mpt->dev, PCIR_BAR(4), 4); mpt->pci_cfg.ROM_BAR = pci_read_config(mpt->dev, PCIR_BIOS, 4); mpt->pci_cfg.IntLine = pci_read_config(mpt->dev, PCIR_INTLINE, 1); mpt->pci_cfg.PMCSR = pci_read_config(mpt->dev, 0x44, 4); } /* Sets modifiable config registers */ void mpt_set_config_regs(struct mpt_softc *mpt) { uint32_t val; #define MPT_CHECK(reg, offset, size) \ val = pci_read_config(mpt->dev, offset, size); \ if (mpt->pci_cfg.reg != val) { \ mpt_prt(mpt, \ "Restoring " #reg " to 0x%X from 0x%X\n", \ mpt->pci_cfg.reg, val); \ } if (mpt->verbose >= MPT_PRT_DEBUG) { MPT_CHECK(Command, PCIR_COMMAND, 2); MPT_CHECK(LatencyTimer_LineSize, PCIR_CACHELNSZ, 2); MPT_CHECK(IO_BAR, PCIR_BAR(0), 4); MPT_CHECK(Mem0_BAR[0], PCIR_BAR(1), 4); MPT_CHECK(Mem0_BAR[1], PCIR_BAR(2), 4); MPT_CHECK(Mem1_BAR[0], PCIR_BAR(3), 4); MPT_CHECK(Mem1_BAR[1], PCIR_BAR(4), 4); MPT_CHECK(ROM_BAR, PCIR_BIOS, 4); MPT_CHECK(IntLine, PCIR_INTLINE, 1); MPT_CHECK(PMCSR, 0x44, 4); } #undef MPT_CHECK pci_write_config(mpt->dev, PCIR_COMMAND, mpt->pci_cfg.Command, 2); pci_write_config(mpt->dev, PCIR_CACHELNSZ, mpt->pci_cfg.LatencyTimer_LineSize, 2); pci_write_config(mpt->dev, PCIR_BAR(0), mpt->pci_cfg.IO_BAR, 4); pci_write_config(mpt->dev, PCIR_BAR(1), mpt->pci_cfg.Mem0_BAR[0], 4); pci_write_config(mpt->dev, PCIR_BAR(2), mpt->pci_cfg.Mem0_BAR[1], 4); pci_write_config(mpt->dev, PCIR_BAR(3), mpt->pci_cfg.Mem1_BAR[0], 4); pci_write_config(mpt->dev, PCIR_BAR(4), mpt->pci_cfg.Mem1_BAR[1], 4); pci_write_config(mpt->dev, PCIR_BIOS, mpt->pci_cfg.ROM_BAR, 4); pci_write_config(mpt->dev, PCIR_INTLINE, mpt->pci_cfg.IntLine, 1); pci_write_config(mpt->dev, 0x44, mpt->pci_cfg.PMCSR, 4); } static void mpt_pci_intr(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); mpt_intr(mpt); MPT_UNLOCK(mpt); }
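For reference, mpt_set_options() reads its per-unit knobs with resource_int_value(), so on __FreeBSD_version >= 500000 kernels they can be supplied through the loader hints mechanism that resource_int_value(9) consults. A minimal sketch, with purely illustrative unit numbers and values:

    # /boot/device.hints (illustrative)
    hint.mpt.0.disable="1"   # sets mpt->disabled for mpt0, so attach bails out
    hint.mpt.1.debug="3"     # sets mpt->verbose = 3 for mpt1
    hint.mpt.1.role="1"      # sets mpt->role for mpt1 (accepted range is 1..3)

On pre-5.0 kernels the equivalent controls are the mpt_disable/mpt_debug* kernel environment variables, each interpreted as a bitmap indexed by unit number.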