Index: head/sys/dev/mpr/mpr.c =================================================================== --- head/sys/dev/mpr/mpr.c (revision 299264) +++ head/sys/dev/mpr/mpr.c (revision 299265) @@ -1,2801 +1,2796 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT3 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag); static int mpr_init_queues(struct mpr_softc *sc); static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag); static int mpr_transition_operational(struct mpr_softc *sc); static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching); static void mpr_iocfacts_free(struct mpr_softc *sc); static void mpr_startup(void *arg); static int mpr_send_iocinit(struct mpr_softc *sc); static int mpr_alloc_queues(struct mpr_softc *sc); static int mpr_alloc_replies(struct mpr_softc *sc); static int mpr_alloc_requests(struct mpr_softc *sc); static int mpr_attach_log(struct mpr_softc *sc); static __inline void mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm); static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply); -static void mpr_config_complete(struct mpr_softc *sc, - struct mpr_command *cm); +static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm); static void mpr_periodic(void *); static int mpr_reregister_events(struct mpr_softc *sc); -static void mpr_enqueue_request(struct mpr_softc *sc, - struct mpr_command *cm); -static int mpr_get_iocfacts(struct mpr_softc *sc, - MPI2_IOC_FACTS_REPLY *facts); +static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm); +static int 
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts); static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag); SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters"); MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory"); /* * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of * any state and back to its initialization state machine. */ static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; /* * Added this union to smoothly convert le64toh(cm->cm_desc.Words). * The compiler only supports uint64_t to be passed as an argument. * Otherwise it will throw this error: * "aggregate value used where an integer was expected" */ typedef union _reply_descriptor { u64 word; struct { u32 low; u32 high; } u; } reply_descriptor, address_descriptor; /* Rate limit chain-fail messages to 1 per minute */ static struct timeval mpr_chainfail_interval = { 60, 0 }; /* * sleep_flag can be either CAN_SLEEP or NO_SLEEP. * If this function is called from process context, it can sleep and there is * no harm in sleeping. If this function is called from an interrupt handler, * we cannot sleep and need the NO_SLEEP flag set. Based on the sleep flag, * the driver will call either msleep, pause or DELAY. msleep and pause are * variants of the same thing, but pause is used when mpr_mtx is not held by * the driver. */ static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag) { uint32_t reg; int i, error, tries = 0; uint8_t first_wait_done = FALSE; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* Clear any pending interrupts */ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* * Force NO_SLEEP for threads that are prohibited from sleeping, * e.g. threads from an interrupt handler are prohibited from sleeping. */ #if __FreeBSD_version >= 1000029 if (curthread->td_no_sleeping) #else //__FreeBSD_version < 1000029 if (curthread->td_pflags & TDP_NOSLEEPING) #endif //__FreeBSD_version >= 1000029 sleep_flag = NO_SLEEP; /* Push the magic sequence */ error = ETIMEDOUT; while (tries++ < 20) { for (i = 0; i < sizeof(mpt2_reset_magic); i++) mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, mpt2_reset_magic[i]); /* wait 100 msec */ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdiag", hz/10); else if (sleep_flag == CAN_SLEEP) pause("mprdiag", hz/10); else DELAY(100 * 1000); reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { error = 0; break; } } if (error) return (error); /* Send the actual reset. XXX need to refresh the reg? */ mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg | MPI2_DIAG_RESET_ADAPTER); /* Wait up to 300 seconds in 50ms intervals */ error = ETIMEDOUT; for (i = 0; i < 6000; i++) { /* * Wait 50 msec. If this is the first time through, wait 256 * msec to satisfy Diag Reset timing requirements. */ if (first_wait_done) { if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdiag", hz/20); else if (sleep_flag == CAN_SLEEP) pause("mprdiag", hz/20); else DELAY(50 * 1000); } else { DELAY(256 * 1000); first_wait_done = TRUE; } /* * Check for the RESET_ADAPTER bit to be cleared first, then * wait for the RESET state to be cleared, which takes a little * longer.
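 *
 * In outline, the whole diag reset is: unlock the diagnostic register
 * by clocking in the magic byte sequence, set MPI2_DIAG_RESET_ADAPTER,
 * then poll in two stages. A condensed sketch; the read_reg(),
 * write_reg() and sleep_ms() helpers are hypothetical stand-ins for
 * mpr_regread(), mpr_regwrite() and the msleep/pause/DELAY selection
 * above:
 *
 *	for (i = 0; i < sizeof(mpt2_reset_magic); i++)
 *		write_reg(MPI2_WRITE_SEQUENCE_OFFSET, mpt2_reset_magic[i]);
 *	write_reg(MPI2_HOST_DIAGNOSTIC_OFFSET,
 *	    read_reg(MPI2_HOST_DIAGNOSTIC_OFFSET) | MPI2_DIAG_RESET_ADAPTER);
 *	for (i = 0; i < 6000; i++) {
 *		sleep_ms(50);
 *		if (read_reg(MPI2_HOST_DIAGNOSTIC_OFFSET) &
 *		    MPI2_DIAG_RESET_ADAPTER)
 *			continue;
 *		if ((read_reg(MPI2_DOORBELL_OFFSET) &
 *		    MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET)
 *			break;
 *	}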
*/ reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); if (reg & MPI2_DIAG_RESET_ADAPTER) { continue; } reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { error = 0; break; } } if (error) return (error); mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); return (0); } static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag) { MPR_FUNCTRACE(sc); mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI2_DOORBELL_FUNCTION_SHIFT); if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) { mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed : <%s>\n", __func__); return (ETIMEDOUT); } return (0); } static int mpr_transition_ready(struct mpr_softc *sc) { uint32_t reg, state; int error, tries = 0; int sleep_flags; MPR_FUNCTRACE(sc); /* If we are in attach call, do not sleep */ sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE) ? CAN_SLEEP : NO_SLEEP; error = 0; while (tries++ < 1200) { reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg); /* * Ensure the IOC is ready to talk. If it's not, try * resetting it. */ if (reg & MPI2_DOORBELL_USED) { mpr_diag_reset(sc, sleep_flags); DELAY(50000); continue; } /* Is the adapter owned by another peer? */ if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { device_printf(sc->mpr_dev, "IOC is under the control " "of another peer host, aborting initialization.\n"); return (ENXIO); } state = reg & MPI2_IOC_STATE_MASK; if (state == MPI2_IOC_STATE_READY) { /* Ready to go! */ error = 0; break; } else if (state == MPI2_IOC_STATE_FAULT) { mpr_dprint(sc, MPR_FAULT, "IOC in fault state 0x%x\n", state & MPI2_DOORBELL_FAULT_CODE_MASK); mpr_diag_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_OPERATIONAL) { /* Need to take ownership */ mpr_message_unit_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_RESET) { /* Wait a bit, IOC might be in transition */ mpr_dprint(sc, MPR_FAULT, "IOC in unexpected reset state\n"); } else { mpr_dprint(sc, MPR_FAULT, "IOC in unknown state 0x%x\n", state); error = EINVAL; break; } /* Wait 50ms for things to settle down. */ DELAY(50000); } if (error) device_printf(sc->mpr_dev, "Cannot transition IOC to ready\n"); return (error); } static int mpr_transition_operational(struct mpr_softc *sc) { uint32_t reg, state; int error; MPR_FUNCTRACE(sc); error = 0; reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg); state = reg & MPI2_IOC_STATE_MASK; if (state != MPI2_IOC_STATE_READY) { if ((error = mpr_transition_ready(sc)) != 0) { mpr_dprint(sc, MPR_FAULT, "%s failed to transition ready\n", __func__); return (error); } } error = mpr_send_iocinit(sc); return (error); } /* * This is called during attach and when re-initializing due to a Diag Reset. * IOC Facts is used to allocate many of the structures needed by the driver. * If called from attach, de-allocation is not required because the driver has * not allocated any structures yet, but if called from a Diag Reset, previously * allocated structures based on IOC Facts will need to be freed and * re-allocated based on the latest IOC Facts.
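 *
 * A condensed sketch of that flow; facts_changed() is a hypothetical
 * helper comparing the fields tested below (MsgVersion, RequestCredit,
 * MaxTargets, and so on):
 *
 *	saved_facts = *sc->facts;
 *	error = mpr_get_iocfacts(sc, sc->facts);
 *	if (!attaching && facts_changed(&saved_facts, sc->facts)) {
 *		mpr_iocfacts_free(sc);
 *		error = mpr_alloc_queues(sc);
 *	}
 *
 * with the allocation step standing in for the full queue, reply and
 * request reallocation done near the end of this function.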
*/ static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching) { int error; Mpi2IOCFactsReply_t saved_facts; uint8_t saved_mode, reallocating; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* Save old IOC Facts and then only reallocate if Facts have changed */ if (!attaching) { bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); } /* * Get IOC Facts. In all cases throughout this function, panic if doing * a re-initialization and only return the error if attaching so the OS * can handle it. */ if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) { if (attaching) { mpr_dprint(sc, MPR_FAULT, "%s failed to get IOC Facts " "with error %d\n", __func__, error); return (error); } else { panic("%s failed to get IOC Facts with error %d\n", __func__, error); } } mpr_print_iocfacts(sc, sc->facts); snprintf(sc->fw_version, sizeof(sc->fw_version), "%02d.%02d.%02d.%02d", sc->facts->FWVersion.Struct.Major, sc->facts->FWVersion.Struct.Minor, sc->facts->FWVersion.Struct.Unit, sc->facts->FWVersion.Struct.Dev); mpr_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version, MPR_DRIVER_VERSION); mpr_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"); /* * If the chip doesn't support event replay then a hard reset will be * required to trigger a full discovery. Do the reset here then * retransition to Ready. A hard reset might have already been done, * but it doesn't hurt to do it again. Only do this if attaching, not * for a Diag Reset. */ if (attaching) { if ((sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) { mpr_diag_reset(sc, NO_SLEEP); if ((error = mpr_transition_ready(sc)) != 0) { mpr_dprint(sc, MPR_FAULT, "%s failed to " "transition to ready with error %d\n", __func__, error); return (error); } } } /* * Set flag if IR Firmware is loaded. If the RAID Capability has * changed from the previous IOC Facts, log a warning, but only if * checking this after a Diag Reset and not during attach. 
*/ saved_mode = sc->ir_firmware; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) sc->ir_firmware = 1; if (!attaching) { if (sc->ir_firmware != saved_mode) { mpr_dprint(sc, MPR_FAULT, "%s new IR/IT mode in IOC " "Facts does not match previous mode\n", __func__); } } /* Only deallocate and reallocate if relevant IOC Facts have changed */ reallocating = FALSE; if ((!attaching) && ((saved_facts.MsgVersion != sc->facts->MsgVersion) || (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || (saved_facts.RequestCredit != sc->facts->RequestCredit) || (saved_facts.ProductID != sc->facts->ProductID) || (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || (saved_facts.IOCRequestFrameSize != sc->facts->IOCRequestFrameSize) || (saved_facts.MaxTargets != sc->facts->MaxTargets) || (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || (saved_facts.MaxReplyDescriptorPostQueueDepth != sc->facts->MaxReplyDescriptorPostQueueDepth) || (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || (saved_facts.MaxPersistentEntries != sc->facts->MaxPersistentEntries))) { reallocating = TRUE; } /* * Some things should be done if attaching or re-allocating after a Diag * Reset, but are not needed after a Diag Reset if the FW has not * changed. */ if (attaching || reallocating) { /* * Check if controller supports FW diag buffers and set flag to * enable each type. */ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. enabled = TRUE; /* * Set flag if EEDP is supported and if TLR is supported. */ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) sc->eedp_enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) sc->control_TLR = TRUE; /* * Size the queues. Since the reply queues always need one free * entry, we'll just deduct one reply message here. */ sc->num_reqs = MIN(MPR_REQ_FRAMES, sc->facts->RequestCredit); sc->num_replies = MIN(MPR_REPLY_FRAMES + MPR_EVT_REPLY_FRAMES, sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; /* * Initialize all Tail Queues */ TAILQ_INIT(&sc->req_list); TAILQ_INIT(&sc->high_priority_req_list); TAILQ_INIT(&sc->chain_list); TAILQ_INIT(&sc->tm_list); } /* * If doing a Diag Reset and the FW is significantly different * (reallocating will be set above in IOC Facts comparison), then all * buffers based on the IOC Facts will need to be freed before they are * reallocated. */ if (reallocating) { mpr_iocfacts_free(sc); mprsas_realloc_targets(sc, saved_facts.MaxTargets); } /* * Any deallocation has been completed. Now start reallocating * if needed. Will only need to reallocate if attaching or if the new * IOC Facts are different from the previous IOC Facts after a Diag * Reset. Targets have already been allocated above if needed. 
*/ if (attaching || reallocating) { if (((error = mpr_alloc_queues(sc)) != 0) || ((error = mpr_alloc_replies(sc)) != 0) || ((error = mpr_alloc_requests(sc)) != 0)) { if (attaching) { mpr_dprint(sc, MPR_FAULT, "%s failed to alloc " "queues with error %d\n", __func__, error); mpr_free(sc); return (error); } else { panic("%s failed to alloc queues with error " "%d\n", __func__, error); } } } /* Always initialize the queues */ bzero(sc->free_queue, sc->fqdepth * 4); mpr_init_queues(sc); /* * Always get the chip out of the reset state, but only panic if not * attaching. If attaching and there is an error, that is handled by * the OS. */ error = mpr_transition_operational(sc); if (error != 0) { if (attaching) { - mpr_printf(sc, "%s failed to transition to " - "operational with error %d\n", __func__, error); + mpr_printf(sc, "%s failed to transition to operational " + "with error %d\n", __func__, error); mpr_free(sc); return (error); } else { panic("%s failed to transition to operational with " "error %d\n", __func__, error); } } /* * Finish the queue initialization. * These are set here instead of in mpr_init_queues() because the * IOC resets these values during the state transition in * mpr_transition_operational(). The free index is set to 1 * because the corresponding index in the IOC is set to 0, and the * IOC treats the queues as full if both are set to the same value. * Hence the reason that the queue can't hold all of the possible * replies. */ sc->replypostindex = 0; mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); /* * Attach the subsystems so they can prepare their event masks. */ /* XXX Should be dynamic so that IM/IR and user modules can attach */ if (attaching) { if (((error = mpr_attach_log(sc)) != 0) || ((error = mpr_attach_sas(sc)) != 0) || ((error = mpr_attach_user(sc)) != 0)) { mpr_printf(sc, "%s failed to attach all subsystems: " "error %d\n", __func__, error); mpr_free(sc); return (error); } if ((error = mpr_pci_setup_interrupts(sc)) != 0) { mpr_printf(sc, "%s failed to setup interrupts\n", __func__); mpr_free(sc); return (error); } } return (error); } /* * This is called if memory is being freed (during detach for example) and * when buffers need to be reallocated due to a Diag Reset.
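 *
 * Every DMA buffer released below was created in three steps, which are
 * undone here in strict reverse order. A minimal sketch of the pattern
 * for a single buffer (error handling omitted; "size" is arbitrary):
 *
 *	bus_dma_tag_create(parent, align, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL,
 *	    &tag);
 *	bus_dmamem_alloc(tag, (void **)&vaddr, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(tag, map, vaddr, size, mpr_memaddr_cb, &busaddr, 0);
 *
 * and the teardown, in reverse:
 *
 *	bus_dmamap_unload(tag, map);
 *	bus_dmamem_free(tag, vaddr, map);
 *	bus_dma_tag_destroy(tag);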
*/ static void mpr_iocfacts_free(struct mpr_softc *sc) { struct mpr_command *cm; int i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if (sc->free_busaddr != 0) bus_dmamap_unload(sc->queues_dmat, sc->queues_map); if (sc->free_queue != NULL) bus_dmamem_free(sc->queues_dmat, sc->free_queue, sc->queues_map); if (sc->queues_dmat != NULL) bus_dma_tag_destroy(sc->queues_dmat); if (sc->chain_busaddr != 0) bus_dmamap_unload(sc->chain_dmat, sc->chain_map); if (sc->chain_frames != NULL) bus_dmamem_free(sc->chain_dmat, sc->chain_frames, sc->chain_map); if (sc->chain_dmat != NULL) bus_dma_tag_destroy(sc->chain_dmat); if (sc->sense_busaddr != 0) bus_dmamap_unload(sc->sense_dmat, sc->sense_map); if (sc->sense_frames != NULL) bus_dmamem_free(sc->sense_dmat, sc->sense_frames, sc->sense_map); if (sc->sense_dmat != NULL) bus_dma_tag_destroy(sc->sense_dmat); if (sc->reply_busaddr != 0) bus_dmamap_unload(sc->reply_dmat, sc->reply_map); if (sc->reply_frames != NULL) bus_dmamem_free(sc->reply_dmat, sc->reply_frames, sc->reply_map); if (sc->reply_dmat != NULL) bus_dma_tag_destroy(sc->reply_dmat); if (sc->req_busaddr != 0) bus_dmamap_unload(sc->req_dmat, sc->req_map); if (sc->req_frames != NULL) bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); if (sc->req_dmat != NULL) bus_dma_tag_destroy(sc->req_dmat); if (sc->chains != NULL) free(sc->chains, M_MPR); if (sc->commands != NULL) { for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); } free(sc->commands, M_MPR); } if (sc->buffer_dmat != NULL) bus_dma_tag_destroy(sc->buffer_dmat); } /* * The terms diag reset and hard reset are used interchangeably in the MPI * docs to mean resetting the controller chip. In this code diag reset * cleans everything up, and the hard reset function just sends the reset * sequence to the chip. This should probably be refactored so that every * subsystem gets a reset notification of some sort, and can clean up * appropriately. */ int mpr_reinit(struct mpr_softc *sc) { int error; struct mprsas_softc *sassc; sassc = sc->sassc; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) { mpr_dprint(sc, MPR_INIT, "%s reset already in progress\n", - __func__); + __func__); return 0; } mpr_dprint(sc, MPR_INFO, "Reinitializing controller,\n"); /* make sure the completion callbacks can recognize they're getting * a NULL cm_reply due to a reset. */ sc->mpr_flags |= MPR_FLAGS_DIAGRESET; /* * Mask interrupts here. */ mpr_dprint(sc, MPR_INIT, "%s mask interrupts\n", __func__); mpr_mask_intr(sc); error = mpr_diag_reset(sc, CAN_SLEEP); if (error != 0) { panic("%s hard reset failed with error %d\n", __func__, error); } /* Restore the PCI state, including the MSI-X registers */ mpr_pci_restore(sc); /* Give the I/O subsystem special priority to get itself prepared */ mprsas_handle_reinit(sc); /* * Get IOC Facts and allocate all structures based on this information. * The attach function will also call mpr_iocfacts_allocate at startup. * If relevant values have changed in IOC Facts, this function will free * all of the memory based on IOC Facts and reallocate that memory. */ if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) { panic("%s IOC Facts based allocation failed with error %d\n", __func__, error); } /* * Mapping structures will be re-allocated after getting IOC Page8, so * free these structures here. */ mpr_mapping_exit(sc); /* * The static page function currently read is IOC Page8. Others can be * added in future. 
It's possible that the values in IOC Page8 have * changed after a Diag Reset due to user modification, so always read * these. Interrupts are masked, so unmask them before getting config * pages. */ mpr_unmask_intr(sc); sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET; mpr_base_static_config_pages(sc); /* * Some mapping info is based on IOC Page8 data, so re-initialize the * mapping tables. */ mpr_mapping_initialize(sc); /* * Restart will reload the event masks clobbered by the reset, and * then enable the port. */ mpr_reregister_events(sc); /* the end of discovery will release the simq, so we're done. */ mpr_dprint(sc, MPR_INFO, "%s finished sc %p post %u free %u\n", __func__, sc, sc->replypostindex, sc->replyfreeindex); mprsas_release_simq_reinit(sassc); return 0; } /* Wait for the chip to ACK a word that we've put into its FIFO. * Wait for <timeout> seconds. In a single loop iteration, busy-wait * for 500 microseconds. * Total is [ 0.5 * (2000 * <timeout>) ] in milliseconds. */ static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag) { u32 cntdn, count; u32 int_status; u32 doorbell; count = 0; cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; do { int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { mpr_dprint(sc, MPR_INIT, "%s: successful count(%d), " "timeout(%d)\n", __func__, count, timeout); return 0; } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mpr_dprint(sc, MPR_FAULT, "fault_state(0x%04x)!\n", doorbell); return (EFAULT); } } else if (int_status == 0xFFFFFFFF) goto out; /* * If it can sleep, sleep for 1 millisecond, else busy loop for * 0.5 millisecond. */ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", hz/1000); else if (sleep_flag == CAN_SLEEP) pause("mprdba", hz/1000); else DELAY(500); count++; } while (--cntdn); out: mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), " "int_status(%x)!\n", __func__, count, int_status); return (ETIMEDOUT); } /* Wait for the chip to signal that the next word in its FIFO can be fetched */ static int mpr_wait_db_int(struct mpr_softc *sc) { int retry; for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) { if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & MPI2_HIS_IOC2SYS_DB_STATUS) != 0) return (0); DELAY(2000); } return (ETIMEDOUT); } /* Step through the synchronous command state machine, i.e. "Doorbell mode" */ static int mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, int req_sz, int reply_sz, int timeout) { uint32_t *data32; uint16_t *data16; int i, count, ioc_sz, residual; int sleep_flags = CAN_SLEEP; #if __FreeBSD_version >= 1000029 if (curthread->td_no_sleeping) #else //__FreeBSD_version < 1000029 if (curthread->td_pflags & TDP_NOSLEEPING) #endif //__FreeBSD_version >= 1000029 sleep_flags = NO_SLEEP; /* Step 1 */ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Step 2 */ if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) return (EBUSY); /* Step 3 * Announce that a message is coming through the doorbell. Messages * are pushed at 32bit words, so round up if needed.
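 *
 * For example, a 25-byte request rounds up to (25 + 3) / 4 = 7 dwords,
 * while an exact multiple such as 24 bytes gives (24 + 3) / 4 = 6
 * dwords with no padding.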
*/ count = (req_sz + 3) / 4; mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); /* Step 4 */ if (mpr_wait_db_int(sc) || (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n"); return (ENXIO); } mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n"); return (ENXIO); } /* Step 5 */ /* Clock out the message data synchronously in 32-bit dwords*/ data32 = (uint32_t *)req; for (i = 0; i < count; i++) { mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i])); if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout while writing doorbell\n"); return (ENXIO); } } /* Step 6 */ /* Clock in the reply in 16-bit words. The total length of the * message is always in the 4th byte, so clock out the first 2 words * manually, then loop the rest. */ data16 = (uint16_t *)reply; if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n"); return (ENXIO); } data16[0] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n"); return (ENXIO); } data16[1] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Number of 32bit words in the message */ ioc_sz = reply->MsgLength; /* * Figure out how many 16bit words to clock in without overrunning. * The precision loss with dividing reply_sz can safely be * ignored because the messages can only be multiples of 32bits. */ residual = 0; count = MIN((reply_sz / 4), ioc_sz) * 2; if (count < ioc_sz * 2) { residual = ioc_sz * 2 - count; mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d " "residual message words\n", residual); } for (i = 2; i < count; i++) { if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell %d\n", i); return (ENXIO); } data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* * Pull out residual words that won't fit into the provided buffer. * This keeps the chip from hanging due to a driver programming * error. 
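 *
 * Worked example: with a 16-byte reply buffer (reply_sz / 4 = 4 dwords)
 * and an IOC message of ioc_sz = 6 dwords, the loop above clocked in
 * count = MIN(4, 6) * 2 = 8 16-bit words, leaving residual =
 * 6 * 2 - 8 = 4 words to be read and discarded here.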
*/ while (residual--) { if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n"); return (ENXIO); } (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* Step 7 */ if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n"); return (ENXIO); } if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n"); mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); return (0); } static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm) { reply_descriptor rd; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n", cm->cm_desc.Default.SMID, cm, cm->cm_ccb); if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags & MPR_FLAGS_SHUTDOWN)) mtx_assert(&sc->mpr_mtx, MA_OWNED); if (++sc->io_cmds_active > sc->io_cmds_highwater) sc->io_cmds_highwater++; rd.u.low = cm->cm_desc.Words.Low; rd.u.high = cm->cm_desc.Words.High; rd.word = htole64(rd.word); /* TODO: We may need to make the regwrite below atomic */ mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, rd.u.low); mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, rd.u.high); } /* * Just the FACTS, ma'am. */ static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts) { MPI2_DEFAULT_REPLY *reply; MPI2_IOC_FACTS_REQUEST request; int error, req_sz, reply_sz; MPR_FUNCTRACE(sc); req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); reply = (MPI2_DEFAULT_REPLY *)facts; bzero(&request, req_sz); request.Function = MPI2_FUNCTION_IOC_FACTS; error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5); return (error); } static int mpr_send_iocinit(struct mpr_softc *sc) { MPI2_IOC_INIT_REQUEST init; MPI2_DEFAULT_REPLY reply; int req_sz, reply_sz, error; struct timeval now; uint64_t time_in_msec; MPR_FUNCTRACE(sc); req_sz = sizeof(MPI2_IOC_INIT_REQUEST); reply_sz = sizeof(MPI2_IOC_INIT_REPLY); bzero(&init, req_sz); bzero(&reply, reply_sz); /* * Fill in the init block. Note that most addresses are * deliberately in the lower 32bits of memory. This is a * micro-optimization for PCI/PCIX, though it's not clear if it helps PCIe.
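 *
 * The 64-bit millisecond timestamp below is split the same way as the
 * queue addresses. For example, time_in_msec = 0x00000172ABCDEF01
 * yields TimeStamp.High = htole32(0x00000172) and TimeStamp.Low =
 * htole32(0xABCDEF01).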
*/ init.Function = MPI2_FUNCTION_IOC_INIT; init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; init.MsgVersion = htole16(MPI2_VERSION); init.HeaderVersion = htole16(MPI2_HEADER_VERSION); init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize); init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); init.ReplyFreeQueueDepth = htole16(sc->fqdepth); init.SenseBufferAddressHigh = 0; init.SystemReplyAddressHigh = 0; init.SystemRequestFrameBaseAddress.High = 0; init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr); init.ReplyDescriptorPostQueueAddress.High = 0; init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr); init.ReplyFreeQueueAddress.High = 0; init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); getmicrotime(&now); time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000); init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF); init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF); error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus); return (error); } void mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } static int mpr_alloc_queues(struct mpr_softc *sc) { bus_addr_t queues_busaddr; uint8_t *queues; int qsize, fqsize, pqsize; /* * The reply free queue contains 4 byte entries in multiples of 16 and * aligned on a 16 byte boundary. There must always be an unused entry. * This queue supplies fresh reply frames for the firmware to use. * * The reply descriptor post queue contains 8 byte entries in * multiples of 16 and aligned on a 16 byte boundary. This queue * contains filled-in reply frames sent from the firmware to the host. * * These two queues are allocated together for simplicity. */ sc->fqdepth = roundup2(sc->num_replies + 1, 16); sc->pqdepth = roundup2(sc->num_replies + 1, 16); fqsize= sc->fqdepth * 4; pqsize = sc->pqdepth * 8; qsize = fqsize + pqsize; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ qsize, /* maxsize */ 1, /* nsegments */ qsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->queues_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate queues DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, &sc->queues_map)) { device_printf(sc->mpr_dev, "Cannot allocate queues memory\n"); return (ENOMEM); } bzero(queues, qsize); bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, mpr_memaddr_cb, &queues_busaddr, 0); sc->free_queue = (uint32_t *)queues; sc->free_busaddr = queues_busaddr; sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); sc->post_busaddr = queues_busaddr + fqsize; return (0); } static int mpr_alloc_replies(struct mpr_softc *sc) { int rsize, num_replies; /* * sc->num_replies should be one less than sc->fqdepth. We need to * allocate space for sc->fqdepth replies, but only sc->num_replies * replies can be used at once. 
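 *
 * Worked example of the sizing, assuming an illustrative
 * sc->num_replies of 1023: mpr_alloc_queues() rounds the free queue up
 * to roundup2(1023 + 1, 16) = 1024 entries, so 1024 reply frames are
 * allocated below while at most 1023 can ever be outstanding, keeping
 * one free queue entry permanently unused as the hardware requires.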
*/ num_replies = max(sc->fqdepth, sc->num_replies); rsize = sc->facts->ReplyFrameSize * num_replies * 4; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->reply_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate replies DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, BUS_DMA_NOWAIT, &sc->reply_map)) { device_printf(sc->mpr_dev, "Cannot allocate replies memory\n"); return (ENOMEM); } bzero(sc->reply_frames, rsize); bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, mpr_memaddr_cb, &sc->reply_busaddr, 0); return (0); } static int mpr_alloc_requests(struct mpr_softc *sc) { struct mpr_command *cm; struct mpr_chain *chain; int i, rsize, nsegs; rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->req_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate request DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, BUS_DMA_NOWAIT, &sc->req_map)) { device_printf(sc->mpr_dev, "Cannot allocate request memory\n"); return (ENOMEM); } bzero(sc->req_frames, rsize); bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, mpr_memaddr_cb, &sc->req_busaddr, 0); rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->chain_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate chain DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, BUS_DMA_NOWAIT, &sc->chain_map)) { device_printf(sc->mpr_dev, "Cannot allocate chain memory\n"); return (ENOMEM); } bzero(sc->chain_frames, rsize); bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, mpr_memaddr_cb, &sc->chain_busaddr, 0); rsize = MPR_SENSE_LEN * sc->num_reqs; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sense_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate sense DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, BUS_DMA_NOWAIT, &sc->sense_map)) { device_printf(sc->mpr_dev, "Cannot allocate sense memory\n"); return (ENOMEM); } bzero(sc->sense_frames, rsize); bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, mpr_memaddr_cb, &sc->sense_busaddr, 0); sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR, M_WAITOK | M_ZERO); if (!sc->chains) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } for 
(i = 0; i < sc->max_chains; i++) { chain = &sc->chains[i]; chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + i * sc->facts->IOCRequestFrameSize * 4); chain->chain_busaddr = sc->chain_busaddr + i * sc->facts->IOCRequestFrameSize * 4; mpr_free_chain(sc, chain); sc->chain_free_lowwater++; } /* XXX Need to pick a more precise value */ nsegs = (MAXPHYS / PAGE_SIZE) + 1; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ nsegs, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mpr_mtx, /* lockarg */ &sc->buffer_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate buffer DMA tag\n"); return (ENOMEM); } /* * SMID 0 cannot be used as a free command per the firmware spec. * Just drop that command instead of risking accounting bugs. */ sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs, M_MPR, M_WAITOK | M_ZERO); if (!sc->commands) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; cm->cm_req = sc->req_frames + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_req_busaddr = sc->req_busaddr + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_sense = &sc->sense_frames[i]; cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; cm->cm_desc.Default.SMID = i; cm->cm_sc = sc; TAILQ_INIT(&cm->cm_chain_list); callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); /* XXX Is a failure here a critical problem? */ if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) if (i <= sc->facts->HighPriorityCredit) mpr_free_high_priority_command(sc, cm); else mpr_free_command(sc, cm); else { panic("failed to allocate command %d\n", i); sc->num_reqs = i; break; } } return (0); } static int mpr_init_queues(struct mpr_softc *sc) { int i; memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); /* * According to the spec, we need to use one less reply than we * have space for on the queue. So sc->num_replies (the number we * use) should be less than sc->fqdepth (allocated size). */ if (sc->num_replies >= sc->fqdepth) return (EINVAL); /* * Initialize all of the free queue entries. */ for (i = 0; i < sc->fqdepth; i++) sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); sc->replyfreeindex = sc->num_replies; return (0); } /* Get the driver parameter tunables. Lowest priority are the driver defaults. * Next are the global settings, if they exist. Highest are the per-unit * settings, if they exist. */ static void mpr_get_tunables(struct mpr_softc *sc) { char tmpstr[80]; /* XXX default to some debugging for now */ sc->mpr_debug = MPR_INFO | MPR_FAULT; sc->disable_msix = 0; sc->disable_msi = 0; sc->max_chains = MPR_CHAIN_FRAMES; sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; /* * Grab the global variables. 
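 *
 * For example, in /boot/loader.conf a global setting can be overridden
 * for one adapter (the values shown are illustrative only):
 *
 *	hw.mpr.max_chains="2048"
 *	dev.mpr.0.max_chains="4096"
 *
 * Here every mpr instance gets 2048 chain frames except mpr0, whose
 * per-unit tunable wins because it is fetched after the global one.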
*/ TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug); TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix); TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi); TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains); TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids", device_get_unit(sc->mpr_dev)); TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); } static void mpr_setup_sysctl(struct mpr_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. */ snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", device_get_unit(sc->mpr_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0, "mpr debug level"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, "Disable the use of MSI-X interrupts"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0, "Disable the use of MSI interrupts"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version, strlen(sc->fw_version), "firmware version"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION, strlen(MPR_DRIVER_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_active", CTLFLAG_RD, &sc->io_cmds_active, 0, "number of currently active commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "maximum active commands seen"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_free", CTLFLAG_RD, &sc->chain_free, 0, "number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 
OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_chains", CTLFLAG_RD, &sc->max_chains, 0,"maximum chain frames that will be allocated"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, "enable SSU to SATA SSD/HDD at shutdown"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, &sc->chain_alloc_fail, "chain allocation failures"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "spinup_wait_time", CTLFLAG_RD, &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " "spinup after SATA ID error"); } int mpr_attach(struct mpr_softc *sc) { int error; mpr_get_tunables(sc); MPR_FUNCTRACE(sc); mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); TAILQ_INIT(&sc->event_list); timevalclear(&sc->lastfail); if ((error = mpr_transition_ready(sc)) != 0) { mpr_printf(sc, "%s failed to transition ready\n", __func__); return (error); } sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, M_ZERO|M_NOWAIT); if (!sc->facts) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } /* * Get IOC Facts and allocate all structures based on this information. * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC * Facts. If relevant values have changed in IOC Facts, this function * will free all of the memory based on IOC Facts and reallocate that * memory. If this fails, any allocated memory should already be freed. */ if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { mpr_dprint(sc, MPR_FAULT, "%s IOC Facts based allocation " "failed with error %d\n", __func__, error); return (error); } /* Start the periodic watchdog check on the IOC Doorbell */ mpr_periodic(sc); /* * The portenable will kick off discovery events that will drive the * rest of the initialization process. The CAM/SAS module will * hold up the boot sequence until discovery is complete. */ sc->mpr_ich.ich_func = mpr_startup; sc->mpr_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mpr_ich) != 0) { mpr_dprint(sc, MPR_ERROR, "Cannot establish MPR config hook\n"); error = EINVAL; } /* * Allow IR to shutdown gracefully when shutdown occurs. */ sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); if (sc->shutdown_eh == NULL) mpr_dprint(sc, MPR_ERROR, "shutdown event registration " "failed\n"); mpr_setup_sysctl(sc); sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; return (error); } /* Run through any late-start handlers. */ static void mpr_startup(void *arg) { struct mpr_softc *sc; sc = (struct mpr_softc *)arg; mpr_lock(sc); mpr_unmask_intr(sc); /* initialize device mapping tables */ mpr_base_static_config_pages(sc); mpr_mapping_initialize(sc); mprsas_startup(sc); mpr_unlock(sc); } /* Periodic watchdog. Is called with the driver lock already held. 
*/ static void mpr_periodic(void *arg) { struct mpr_softc *sc; uint32_t db; sc = (struct mpr_softc *)arg; if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) return; db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { panic("TEMPERATURE FAULT: STOPPING."); } mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); mpr_reinit(sc); } callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); } static void mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *event) { MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; mpr_print_event(sc, event); switch (event->Event) { case MPI2_EVENT_LOG_DATA: mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); if (sc->mpr_debug & MPR_EVENT) hexdump(event->EventData, event->EventDataLength, NULL, 0); break; case MPI2_EVENT_LOG_ENTRY_ADDED: entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " "0x%x Sequence %d:\n", entry->LogEntryQualifier, entry->LogSequence); break; default: break; } return; } static int mpr_attach_log(struct mpr_softc *sc) { uint8_t events[16]; bzero(events, 16); setbit(events, MPI2_EVENT_LOG_DATA); setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); mpr_register_events(sc, events, mpr_log_evt_handler, NULL, &sc->mpr_log_eh); return (0); } static int mpr_detach_log(struct mpr_softc *sc) { if (sc->mpr_log_eh != NULL) mpr_deregister_events(sc, sc->mpr_log_eh); return (0); } /* * Free all of the driver resources and detach submodules. Should be called * without the lock held. */ int mpr_free(struct mpr_softc *sc) { int error; /* Turn off the watchdog */ mpr_lock(sc); sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; mpr_unlock(sc); /* Lock must not be held for this */ callout_drain(&sc->periodic); if (((error = mpr_detach_log(sc)) != 0) || ((error = mpr_detach_sas(sc)) != 0)) return (error); mpr_detach_user(sc); /* Put the IOC back in the READY state. */ mpr_lock(sc); if ((error = mpr_transition_ready(sc)) != 0) { mpr_unlock(sc); return (error); } mpr_unlock(sc); if (sc->facts != NULL) free(sc->facts, M_MPR); /* * Free all buffers that are based on IOC Facts. A Diag Reset may need * to free these buffers too. 
*/ mpr_iocfacts_free(sc); if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); /* Deregister the shutdown function */ if (sc->shutdown_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); mtx_destroy(&sc->mpr_mtx); return (0); } static __inline void mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) { MPR_FUNCTRACE(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); return; } if (cm->cm_flags & MPR_CM_FLAGS_POLLED) cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mpr_dprint(sc, MPR_TRACE, - "%s cm %p calling cm_complete %p data %p reply %p\n", - __func__, cm, cm->cm_complete, cm->cm_complete_data, - cm->cm_reply); + "%s cm %p calling cm_complete %p data %p reply %p\n", + __func__, cm, cm->cm_complete, cm->cm_complete_data, + cm->cm_reply); cm->cm_complete(sc, cm); } if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); wakeup(cm); } if (sc->io_cmds_active != 0) { sc->io_cmds_active--; } else { mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " "out of sync - resynching to 0\n"); } } static void mpr_sas_log_info(struct mpr_softc *sc, u32 log_info) { union loginfo_type { u32 loginfo; struct { u32 subcode:16; u32 code:8; u32 originator:4; u32 bus_type:4; } dw; }; union loginfo_type sas_loginfo; char *originator_str = NULL; sas_loginfo.loginfo = log_info; if (sas_loginfo.dw.bus_type != 3 /*SAS*/) return; /* each nexus loss loginfo */ if (log_info == 0x31170000) return; /* eat the loginfos associated with task aborts */ if ((log_info == 0x30050000) || (log_info == 0x31140000) || (log_info == 0x31130000)) return; switch (sas_loginfo.dw.originator) { case 0: originator_str = "IOP"; break; case 1: originator_str = "PL"; break; case 2: originator_str = "IR"; break; } mpr_dprint(sc, MPR_INFO, "log_info(0x%08x): originator(%s), " - "code(0x%02x), sub_code(0x%04x)\n", log_info, - originator_str, sas_loginfo.dw.code, - sas_loginfo.dw.subcode); + "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, + sas_loginfo.dw.code, sas_loginfo.dw.subcode); } static void mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) { MPI2DefaultReply_t *mpi_reply; u16 sc_status; mpi_reply = (MPI2DefaultReply_t*)reply; sc_status = le16toh(mpi_reply->IOCStatus); if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); } void mpr_intr(void *data) { struct mpr_softc *sc; uint32_t status; sc = (struct mpr_softc *)data; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* * Check interrupt status register to flush the bus. This is * needed for both INTx interrupts and driver-driven polling. */ status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) return; mpr_lock(sc); mpr_intr_locked(data); mpr_unlock(sc); return; } /* * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the * chip. Hopefully this theory is correct. */ void mpr_intr_msi(void *data) { struct mpr_softc *sc; sc = (struct mpr_softc *)data; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); mpr_lock(sc); mpr_intr_locked(data); mpr_unlock(sc); return; } /* * The locking is overly broad and simplistic, but easy to deal with for now.
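 *
 * A condensed sketch of the ring walk below (simplified: the real loop
 * advances replypostindex before completing each command, so that a
 * completion handler that triggers a diag reset can safely zero it):
 *
 *	while (!descriptor_unused(&post_queue[idx])) {
 *		process(&post_queue[idx]);
 *		mark_unused(&post_queue[idx]);
 *		if (++idx >= pqdepth)
 *			idx = 0;
 *	}
 *	write_reg(MPI2_REPLY_POST_HOST_INDEX_OFFSET, idx);
 *
 * The descriptor_unused(), process(), mark_unused() and write_reg()
 * helpers are hypothetical; "unused" means the descriptor reads back
 * as all 0xff bytes.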
*/ void mpr_intr_locked(void *data) { MPI2_REPLY_DESCRIPTORS_UNION *desc; struct mpr_softc *sc; struct mpr_command *cm = NULL; uint8_t flags; u_int pq; MPI2_DIAG_RELEASE_REPLY *rel_rep; mpr_fw_diagnostic_buffer_t *pBuffer; sc = (struct mpr_softc *)data; pq = sc->replypostindex; mpr_dprint(sc, MPR_TRACE, "%s sc %p starting with replypostindex %u\n", __func__, sc, sc->replypostindex); for ( ;; ) { cm = NULL; desc = &sc->post_queue[sc->replypostindex]; flags = desc->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || (le32toh(desc->Words.High) == 0xffffffff)) break; /* increment the replypostindex now, so that event handlers * and cm completion handlers which decide to do a diag * reset can zero it without it getting incremented again * afterwards, and we break out of this loop on the next * iteration since the reply post queue has been cleared to * 0xFF and all descriptors look unused (which they are). */ if (++sc->replypostindex >= sc->pqdepth) sc->replypostindex = 0; switch (flags) { case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; cm->cm_reply = NULL; break; case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: { uint32_t baddr; uint8_t *reply; /* * Re-compose the reply address from the address * sent back from the chip. The ReplyFrameAddress * is the lower 32 bits of the physical address of * particular reply frame. Convert that address to * host format, and then use that to provide the * offset against the virtual address base * (sc->reply_frames). */ baddr = le32toh(desc->AddressReply.ReplyFrameAddress); reply = sc->reply_frames + (baddr - ((uint32_t)sc->reply_busaddr)); /* * Make sure the reply we got back is in a valid * range. If not, go ahead and panic here, since * we'll probably panic as soon as we deference the * reply pointer anyway. */ if ((reply < sc->reply_frames) || (reply > (sc->reply_frames + (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { printf("%s: WARNING: reply %p out of range!\n", __func__, reply); printf("%s: reply_frames %p, fqdepth %d, " "frame size %d\n", __func__, sc->reply_frames, sc->fqdepth, sc->facts->ReplyFrameSize * 4); printf("%s: baddr %#x,\n", __func__, baddr); /* LSI-TODO. See Linux Code for Graceful exit */ panic("Reply address out of range"); } if (le16toh(desc->AddressReply.SMID) == 0) { if (((MPI2_DEFAULT_REPLY *)reply)->Function == MPI2_FUNCTION_DIAG_BUFFER_POST) { /* * If SMID is 0 for Diag Buffer Post, * this implies that the reply is due to * a release function with a status that * the buffer has been released. Set * the buffer flags accordingly. */ rel_rep = (MPI2_DIAG_RELEASE_REPLY *)reply; if (le16toh(rel_rep->IOCStatus) == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) { pBuffer = &sc->fw_diag_buffer_list[ rel_rep->BufferType]; pBuffer->valid_data = TRUE; pBuffer->owned_by_firmware = FALSE; pBuffer->immediate = FALSE; } } else mpr_dispatch_event(sc, baddr, (MPI2_EVENT_NOTIFICATION_REPLY *) reply); } else { cm = &sc->commands[ le16toh(desc->AddressReply.SMID)]; cm->cm_reply = reply; cm->cm_reply_data = le32toh(desc->AddressReply. 
ReplyFrameAddress); } break; } case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: default: /* Unhandled */ mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", desc->Default.ReplyFlags); cm = NULL; break; } if (cm != NULL) { // Print Error reply frame if (cm->cm_reply) mpr_display_reply_info(sc,cm->cm_reply); mpr_complete_command(sc, cm); } desc->Words.Low = 0xffffffff; desc->Words.High = 0xffffffff; } if (pq != sc->replypostindex) { mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n", __func__, sc, sc->replypostindex); mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); } return; } static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply) { struct mpr_event_handle *eh; int event, handled = 0; event = le16toh(reply->Event); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { if (isset(eh->mask, event)) { eh->callback(sc, data, reply); handled++; } } if (handled == 0) mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", le16toh(event)); /* * This is the only place that the event/reply should be freed. * Anything wanting to hold onto the event data should have * already copied it into their own storage. */ mpr_free_reply(sc, data); } static void mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) { mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if (cm->cm_reply) mpr_print_event(sc, (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); mpr_free_command(sc, cm); /* next, send a port enable */ mprsas_startup(sc); } /* * For both register_events and update_events, the caller supplies a bitmap * of events that it _wants_. These functions then turn that into a bitmask * suitable for the controller. 
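 *
 * For example, mpr_attach_log() above registers for two log events this
 * way (my_handler stands in for the caller's mpr_evt_callback_t):
 *
 *	uint8_t events[16];
 *
 *	bzero(events, 16);
 *	setbit(events, MPI2_EVENT_LOG_DATA);
 *	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
 *	mpr_register_events(sc, events, my_handler, NULL, &handle);
 *
 * The controller-side mask is inverted: a set bit blocks an event, so
 * the update path starts from all ones and clears the bit for every
 * event any registered handler wants.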
*/ int mpr_register_events(struct mpr_softc *sc, uint8_t *mask, mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) { struct mpr_event_handle *eh; int error = 0; eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); if (!eh) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } eh->callback = cb; eh->data = data; TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); if (mask != NULL) error = mpr_update_events(sc, eh, mask); *handle = eh; return (error); } int mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, uint8_t *mask) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; MPI2_EVENT_NOTIFICATION_REPLY *reply; struct mpr_command *cm; struct mpr_event_handle *eh; int error, i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((mask != NULL) && (handle != NULL)) bcopy(mask, &handle->mask[0], 16); memset(sc->event_mask, 0xff, 16); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { for (i = 0; i < 16; i++) sc->event_mask[i] &= ~eh->mask[i]; } if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPR_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); } #else bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_request_polled(sc, cm); reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; if ((reply == NULL) || (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; if (reply) mpr_print_event(sc, reply); mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); mpr_free_command(sc, cm); return (error); } static int mpr_reregister_events(struct mpr_softc *sc) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; struct mpr_command *cm; struct mpr_event_handle *eh; int error, i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* first, reregister events */ memset(sc->event_mask, 0xff, 16); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { for (i = 0; i < 16; i++) sc->event_mask[i] &= ~eh->mask[i]; } if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPR_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); } #else bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; cm->cm_complete = mpr_reregister_events_complete; error = mpr_map_command(sc, cm); mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, error); return (error); } int mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) { TAILQ_REMOVE(&sc->event_list, handle, eh_list); free(handle, M_MPR); return (mpr_update_events(sc, NULL, NULL)); } /* * Add a chain element as the next SGE for the specified command. * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are * only required for IEEE commands. Therefore there is no code for commands * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands * shouldn't be requesting chains). 
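 *
 * Worked example of the chain sizing done below, assuming an
 * illustrative IOCRequestFrameSize of 32 dwords: a chain frame is
 * 32 * 4 = 128 bytes and holds 128 / 16 = 8 IEEE SGEs (sgc_size is
 * sizeof(MPI25_IEEE_SGE_CHAIN64) = 16 bytes). If more segments remain
 * than fit in the next frame, NextChainOffset is set to 8 - 1 = 7 so
 * the last slot of that frame can hold a further chain element.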
*/ static int mpr_add_chain(struct mpr_command *cm, int segsleft) { struct mpr_softc *sc = cm->cm_sc; MPI2_REQUEST_HEADER *req; MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; struct mpr_chain *chain; int space, sgc_size, current_segs, rem_segs, segs_per_frame; uint8_t next_chain_offset = 0; /* * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 * only IEEE commands should be requesting chains. Return some error * code other than 0. */ if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " "an MPI SGL.\n"); return(ENOBUFS); } sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); if (cm->cm_sglsize < sgc_size) panic("MPR: Need SGE Error Code\n"); chain = mpr_alloc_chain(cm->cm_sc); if (chain == NULL) return (ENOBUFS); space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; /* * Note: a double-linked list is used to make it easier to walk for * debugging. */ TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); /* * Need to know if the number of frames left is more than 1 or not. If * more than 1 frame is required, NextChainOffset will need to be set, * which will just be the last segment of the frame. */ rem_segs = 0; if (cm->cm_sglsize < (sgc_size * segsleft)) { /* * rem_segs is the number of segements remaining after the * segments that will go into the current frame. Since it is * known that at least one more frame is required, account for * the chain element. To know if more than one more frame is * required, just check if there will be a remainder after using * the current frame (with this chain) and the next frame. If * so the NextChainOffset must be the last element of the next * frame. */ current_segs = (cm->cm_sglsize / sgc_size) - 1; rem_segs = segsleft - current_segs; segs_per_frame = space / sgc_size; if (rem_segs > segs_per_frame) { next_chain_offset = segs_per_frame - 1; } } ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; ieee_sgc->Length = next_chain_offset ? htole32((uint32_t)space) : htole32((uint32_t)rem_segs * (uint32_t)sgc_size); ieee_sgc->NextChainOffset = next_chain_offset; ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); ieee_sgc->Address.Low = htole32(chain->chain_busaddr); ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; req = (MPI2_REQUEST_HEADER *)cm->cm_req; req->ChainOffset = ((sc->facts->IOCRequestFrameSize * 4) - sgc_size) >> 4; cm->cm_sglsize = space; return (0); } /* * Add one scatter-gather element to the scatter-gather list for a command. * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a * chain, so don't consider any chain additions. */ int mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, int segsleft) { uint32_t saved_buf_len, saved_address_low, saved_address_high; u32 sge_flags; /* * case 1: >=1 more segment, no room for anything (error) * case 2: 1 more segment and enough room for it */ if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { mpr_dprint(cm->cm_sc, MPR_ERROR, "%s: warning: Not enough room for MPI SGL in frame.\n", __func__); return(ENOBUFS); } KASSERT(segsleft == 1, ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", segsleft)); /* * There is one more segment left to add for the MPI SGL and there is * enough room in the frame to add it. 
This is the normal case because * MPI SGL's don't have chains, otherwise something is wrong. * * If this is a bi-directional request, need to account for that * here. Save the pre-filled sge values. These will be used * either for the 2nd SGL or for a single direction SGL. If * cm_out_len is non-zero, this is a bi-directional request, so * fill in the OUT SGL first, then the IN SGL, otherwise just * fill in the IN SGL. Note that at this time, when filling in * 2 SGL's for a bi-directional request, they both use the same * DMA buffer (same cm command). */ saved_buf_len = sge->FlagsLength & 0x00FFFFFF; saved_address_low = sge->Address.Low; saved_address_high = sge->Address.High; if (cm->cm_out_len) { sge->FlagsLength = cm->cm_out_len | ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT); cm->cm_sglsize -= len; /* Endian Safe code */ sge_flags = sge->FlagsLength; sge->FlagsLength = htole32(sge_flags); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sge, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); } sge->FlagsLength = saved_buf_len | ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT); if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { sge->FlagsLength |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << MPI2_SGE_FLAGS_SHIFT); } else { sge->FlagsLength |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << MPI2_SGE_FLAGS_SHIFT); } sge->Address.Low = saved_address_low; sge->Address.High = saved_address_high; cm->cm_sglsize -= len; /* Endian Safe code */ sge_flags = sge->FlagsLength; sge->FlagsLength = htole32(sge_flags); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sge, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); return (0); } /* * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- * gather list for a command. Maintain cm_sglsize and cm_sge as the * remaining size and pointer to the next SGE to fill in, respectively. */ int mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) { MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); uint32_t saved_buf_len, saved_address_low, saved_address_high; uint32_t sge_length; /* * case 1: No room for chain or segment (error). * case 2: Two or more segments left but only room for chain. * case 3: Last segment and room for it, so set flags. */ /* * There should be room for at least one element, or there is a big * problem. */ if (cm->cm_sglsize < ieee_sge_size) panic("MPR: Need SGE Error Code\n"); if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { if ((error = mpr_add_chain(cm, segsleft)) != 0) return (error); } if (segsleft == 1) { /* * If this is a bi-directional request, need to account for that * here. Save the pre-filled sge values. These will be used * either for the 2nd SGL or for a single direction SGL. If * cm_out_len is non-zero, this is a bi-directional request, so * fill in the OUT SGL first, then the IN SGL, otherwise just * fill in the IN SGL. Note that at this time, when filling in * 2 SGL's for a bi-directional request, they both use the same * DMA buffer (same cm command). 
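 *
 * A minimal sketch of the resulting layout for a hypothetical
 * bi-directional request with cm_out_len set:
 *
 *	frame:  [ OUT simple SGE           ]  Length = cm_out_len
 *	        [ IN  simple SGE, EOL flag ]  Length = data-in length
 *
 * Both elements point at the same DMA address; only Length, Flags, and
 * the END_OF_LIST marker differ. Each element is byte-swapped with
 * htole32() immediately before the bcopy() into the request frame, and
 * the saved host-endian values are restored into the scratch copy
 * before the next element is built.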
*/ saved_buf_len = sge->Length; saved_address_low = sge->Address.Low; saved_address_high = sge->Address.High; if (cm->cm_out_len) { sge->Length = cm->cm_out_len; sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); cm->cm_sglsize -= ieee_sge_size; /* Endian Safe code */ sge_length = sge->Length; sge->Length = htole32(sge_length); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sgep, cm->cm_sge, ieee_sge_size); cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + ieee_sge_size); } sge->Length = saved_buf_len; sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | MPI25_IEEE_SGE_FLAGS_END_OF_LIST); sge->Address.Low = saved_address_low; sge->Address.High = saved_address_high; } cm->cm_sglsize -= ieee_sge_size; /* Endian Safe code */ sge_length = sge->Length; sge->Length = htole32(sge_length); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sgep, cm->cm_sge, ieee_sge_size); cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + ieee_sge_size); return (0); } /* * Add one dma segment to the scatter-gather list for a command. */ int mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, int segsleft) { MPI2_SGE_SIMPLE64 sge; MPI2_IEEE_SGE_SIMPLE64 ieee_sge; if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); ieee_sge.Length = len; mpr_from_u64(pa, &ieee_sge.Address); return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); } else { /* * This driver always uses 64-bit address elements for * simplicity. */ flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING; /* Set Endian safe macro in mpr_push_sge */ sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); mpr_from_u64(pa, &sge.Address); return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); } } static void mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mpr_softc *sc; struct mpr_command *cm; u_int i, dir, sflags; cm = (struct mpr_command *)arg; sc = cm->cm_sc; /* * In this case, just print out a warning and let the chip tell the * user they did the wrong thing. */ if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { - mpr_dprint(sc, MPR_ERROR, - "%s: warning: busdma returned %d segments, " - "more than the %d allowed\n", __func__, nsegs, - cm->cm_max_segs); + mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " + "segments, more than the %d allowed\n", __func__, nsegs, + cm->cm_max_segs); } /* * Set up DMA direction flags. Bi-directional requests are also handled * here. In that case, both direction flags will be set. */ sflags = 0; if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { /* * We have to add a special case for SMP passthrough, there * is no easy way to generically handle it. The first * S/G element is used for the command (therefore the * direction bit needs to be set). The second one is used * for the reply. We'll leave it to the caller to make * sure we only have two buffers. */ /* * Even though the busdma man page says it doesn't make * sense to have both direction flags, it does in this case. * We have one s/g element being accessed in each direction. */ dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; /* * Set the direction flag on the first buffer in the SMP * passthrough request. We'll clear it for the second one. 
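 *
 * As a restatement of the segment loop below (not additional driver
 * code), an SMP passthrough with its two buffers ends up as:
 *
 *	segs[0] (SMP request):  MPI2_SGE_FLAGS_DIRECTION set
 *	                        (host to IOC), END_OF_BUFFER set
 *	segs[1] (SMP reply):    direction bit cleared (IOC to host)
 *
 * and the map is synced with both BUS_DMASYNC_PREREAD and
 * BUS_DMASYNC_PREWRITE, since data flows both ways in one mapping.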
*/ sflags |= MPI2_SGE_FLAGS_DIRECTION | MPI2_SGE_FLAGS_END_OF_BUFFER; } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; dir = BUS_DMASYNC_PREWRITE; } else dir = BUS_DMASYNC_PREREAD; for (i = 0; i < nsegs; i++) { if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { sflags &= ~MPI2_SGE_FLAGS_DIRECTION; } error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, sflags, nsegs - i); if (error != 0) { /* Resource shortage, roll back! */ if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) mpr_dprint(sc, MPR_INFO, "Out of chain frames, " "consider increasing hw.mpr.max_chains.\n"); cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; mpr_complete_command(sc, cm); return; } } bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); mpr_enqueue_request(sc, cm); return; } static void mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { mpr_data_cb(arg, segs, nsegs, error); } /* * This is the routine to enqueue commands ansynchronously. * Note that the only error path here is from bus_dmamap_load(), which can * return EINPROGRESS if it is waiting for resources. Other than this, it's * assumed that if you have a command in-hand, then you have enough credits * to use it. */ int mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) { int error = 0; if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, &cm->cm_uio, mpr_data_cb2, cm, 0); } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, mpr_data_cb, cm, 0); } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); } else { /* Add a zero-length element as needed */ if (cm->cm_sge != NULL) mpr_add_dmaseg(cm, 0, 0, 0, 1); mpr_enqueue_request(sc, cm); } return (error); } /* * This is the routine to enqueue commands synchronously. An error of * EINPROGRESS from mpr_map_command() is ignored since the command will * be executed and enqueued automatically. Other errors come from msleep(). */ int mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, int timeout, int sleep_flag) { int error, rc; struct timeval cur_time, start_time; if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) return EBUSY; cm->cm_complete = NULL; cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); error = mpr_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) return (error); // Check for context and wait for 50 mSec at a time until time has // expired or the command has finished. If msleep can't be used, need // to poll. #if __FreeBSD_version >= 1000029 if (curthread->td_no_sleeping) #else //__FreeBSD_version < 1000029 if (curthread->td_pflags & TDP_NOSLEEPING) #endif //__FreeBSD_version >= 1000029 sleep_flag = NO_SLEEP; getmicrotime(&start_time); if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); } else { while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { mpr_intr_locked(sc); if (sleep_flag == CAN_SLEEP) pause("mprwait", hz/20); else DELAY(50000); getmicrotime(&cur_time); if ((cur_time.tv_sec - start_time.tv_sec) > timeout) { error = EWOULDBLOCK; break; } } } if (error == EWOULDBLOCK) { mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); rc = mpr_reinit(sc); mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? 
"success" : "failed"); error = ETIMEDOUT; } return (error); } /* * This is the routine to enqueue a command synchonously and poll for * completion. Its use should be rare. */ int mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm) { int error, timeout = 0, rc; struct timeval cur_time, start_time; error = 0; cm->cm_flags |= MPR_CM_FLAGS_POLLED; cm->cm_complete = NULL; mpr_map_command(sc, cm); getmicrotime(&start_time); while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { mpr_intr_locked(sc); if (mtx_owned(&sc->mpr_mtx)) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprpoll", hz/20); else pause("mprpoll", hz/20); /* * Check for real-time timeout and fail if more than 60 seconds. */ getmicrotime(&cur_time); timeout = cur_time.tv_sec - start_time.tv_sec; if (timeout > 60) { mpr_dprint(sc, MPR_FAULT, "polling failed\n"); error = ETIMEDOUT; break; } } if (error) { mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); rc = mpr_reinit(sc); - mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? - "success" : "failed"); + mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : + "failed"); } return (error); } /* * The MPT driver had a verbose interface for config pages. In this driver, * reduce it to much simpler terms, similar to the Linux driver. */ int mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params) { MPI2_CONFIG_REQUEST *req; struct mpr_command *cm; int error; if (sc->mpr_flags & MPR_FLAGS_BUSY) { return (EBUSY); } cm = mpr_alloc_command(sc); if (cm == NULL) { return (EBUSY); } req = (MPI2_CONFIG_REQUEST *)cm->cm_req; req->Function = MPI2_FUNCTION_CONFIG; req->Action = params->action; req->SGLFlags = 0; req->ChainOffset = 0; req->PageAddress = params->page_address; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Ext; req->ExtPageType = hdr->ExtPageType; req->ExtPageLength = hdr->ExtPageLength; req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; req->Header.PageLength = 0; /* Must be set to zero */ req->Header.PageNumber = hdr->PageNumber; req->Header.PageVersion = hdr->PageVersion; } else { MPI2_CONFIG_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Struct; req->Header.PageType = hdr->PageType; req->Header.PageNumber = hdr->PageNumber; req->Header.PageLength = hdr->PageLength; req->Header.PageVersion = hdr->PageVersion; } cm->cm_data = params->buffer; cm->cm_length = params->length; if (cm->cm_data != NULL) { cm->cm_sge = &req->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; } else cm->cm_sge = NULL; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete_data = params; if (params->callback != NULL) { cm->cm_complete = mpr_config_complete; return (mpr_map_command(sc, cm)); } else { error = mpr_wait_command(sc, cm, 0, CAN_SLEEP); if (error) { mpr_dprint(sc, MPR_FAULT, "Error %d reading config page\n", error); mpr_free_command(sc, cm); return (error); } mpr_config_complete(sc, cm); } return (0); } int mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params) { return (EINVAL); } static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_CONFIG_REPLY *reply; struct mpr_config_params *params; MPR_FUNCTRACE(sc); params = cm->cm_complete_data; if (cm->cm_data != NULL) { bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } /* * XXX KDM need to do more error 
recovery? This results in the * device in question not getting probed. */ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (reply == NULL) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } params->status = reply->IOCStatus; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { params->hdr.Ext.ExtPageType = reply->ExtPageType; params->hdr.Ext.ExtPageLength = reply->ExtPageLength; params->hdr.Ext.PageType = reply->Header.PageType; params->hdr.Ext.PageNumber = reply->Header.PageNumber; params->hdr.Ext.PageVersion = reply->Header.PageVersion; } else { params->hdr.Struct.PageType = reply->Header.PageType; params->hdr.Struct.PageNumber = reply->Header.PageNumber; params->hdr.Struct.PageLength = reply->Header.PageLength; params->hdr.Struct.PageVersion = reply->Header.PageVersion; } done: mpr_free_command(sc, cm); if (params->callback != NULL) params->callback(sc, params); return; } Index: head/sys/dev/mpr/mpr_config.c =================================================================== --- head/sys/dev/mpr/mpr_config.c (revision 299264) +++ head/sys/dev/mpr/mpr_config.c (revision 299265) @@ -1,1303 +1,1303 @@ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD */ #include __FBSDID("$FreeBSD$"); /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * mpr_config_get_ioc_pg8 - obtain ioc page 8 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
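 *
 * Like the other config page readers in this file, this follows the
 * usual two-transaction MPI pattern; as a rough sketch:
 *
 *	1. Action = MPI2_CONFIG_ACTION_PAGE_HEADER, no payload; the IOC
 *	   fills in the page header, which carries the real page length.
 *	2. cm_length = le16toh(reply->Header.PageLength) * 4
 *	   (PageLength is in 32-bit words); allocate a buffer of that
 *	   size, then issue Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT
 *	   with a DATAIN SGE pointing at the buffer.
 *
 * The result is then bcopy()d into the caller's config_page, in most
 * readers bounded by MIN(cm_length, sizeof page) so that a
 * longer-than-expected page cannot overrun the caller's structure.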
*/ int mpr_config_get_ioc_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; MPI2_CONFIG_PAGE_IOC_8 *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; request->Header.PageNumber = 8; request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. */ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; request->Header.PageNumber = 8; request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; request->Header.PageLength = mpi_reply->Header.PageLength; cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc((cm->cm_length), M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, (sizeof(Mpi2IOCPage8_t)))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_get_iounit_pg8 - obtain iounit page 8 * @sc: per adapter object * @mpi_reply: 
reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpr_config_get_iounit_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; MPI2_CONFIG_PAGE_IO_UNIT_8 *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; request->Header.PageNumber = 8; request->Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. 
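 *
 * (A hedged explanation, since the handling is flagged for review here:
 * the reply frame attached to a command appears to be returned to the
 * reply free queue only when the command is freed, so re-using the same
 * command for the second transaction without this free/alloc pair could
 * leave the reply-free and reply-post indexes out of step.)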
*/ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; request->Header.PageNumber = 8; request->Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; request->Header.PageLength = mpi_reply->Header.PageLength; cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc((cm->cm_length), M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, (sizeof(Mpi2IOUnitPage8_t)))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_base_static_config_pages - static start of day config pages. * @sc: per adapter object * * Return nothing. */ void mpr_base_static_config_pages(struct mpr_softc *sc) { Mpi2ConfigReply_t mpi_reply; int retry; retry = 0; while (mpr_config_get_ioc_pg8(sc, &mpi_reply, &sc->ioc_pg8)) { retry++; if (retry > 5) { /* We need to Handle this situation */ /*FIXME*/ break; } } retry = 0; while (mpr_config_get_iounit_pg8(sc, &mpi_reply, &sc->iounit_pg8)) { retry++; if (retry > 5) { /* We need to Handle this situation */ /*FIXME*/ break; } } } /** * mpr_config_get_dpm_pg0 - obtain driver persistent mapping page0 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @sz: size of buffer passed in config_page * Context: sleep. * * Returns 0 for success, non-zero for failure. 
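 *
 * The number of mapping entries wanted is encoded into the high bits of
 * PageAddress; a sketch of the encoding used below:
 *
 *	request->PageAddress =
 *	    sc->max_dpm_entries << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
 *
 * so 'sz' must be large enough for max_dpm_entries mapping entries plus
 * the extended page header, or the trailing entries are silently dropped
 * by the MIN(cm_length, sz) copy at the end.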
*/ int mpr_config_get_dpm_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2DriverMappingPage0_t *config_page, u16 sz) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2DriverMappingPage0_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); memset(config_page, 0, sz); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION; request->PageAddress = sc->max_dpm_entries << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. 
*/ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_NVRAM; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION; request->PageAddress = sc->max_dpm_entries << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT; request->ExtPageLength = mpi_reply->ExtPageLength; cm->cm_length = le16toh(request->ExtPageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO|M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, sz)); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_set_dpm_pg0 - write an entry in driver persistent mapping page0 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @entry_idx: entry index in DPM Page0 to be modified * Context: sleep. * * Returns 0 for success, non-zero for failure. 
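 *
 * Unlike the readers above, this issues a WRITE_NVRAM action for a
 * single entry; a sketch of the PageAddress encoding used below:
 *
 *	request->PageAddress  = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
 *	request->PageAddress |= entry_idx;    entry count 1, at entry_idx
 *
 * The caller's config_page is copied into a bounce buffer and pushed to
 * the IOC through a DATAOUT SGE rather than DATAIN.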
*/ int mpr_config_set_dpm_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2DriverMappingPage0_t *config_page, u16 entry_idx) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION; /* We can remove below two lines ????*/ request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT; request->PageAddress |= htole16(entry_idx); cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. 
*/ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION; request->ExtPageLength = mpi_reply->ExtPageLength; request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT; request->PageAddress |= htole16(entry_idx); cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAOUT; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } bcopy(config_page, page, MIN(cm->cm_length, (sizeof(Mpi2DriverMappingPage0_t)))); cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request to write page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page written with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_get_sas_device_pg0 - obtain sas device page 0 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: device handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
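 *
 * A hypothetical enumeration loop over all attached SAS devices (a
 * sketch only; the GET_NEXT_HANDLE form constant and the 0xFFFF seed
 * handle are assumptions based on MPI2 config page conventions):
 *
 *	Mpi2ConfigReply_t rep;
 *	Mpi2SasDevicePage0_t pg0;
 *	u16 handle = 0xFFFF;
 *
 *	while (mpr_config_get_sas_device_pg0(sc, &rep, &pg0,
 *	    MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle) == 0)
 *		handle = le16toh(pg0.DevHandle);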
*/ int mpr_config_get_sas_device_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u16 handle) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2SasDevicePage0_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. */ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; request->ExtPageLength = mpi_reply->ExtPageLength; request->PageAddress = htole32(form | handle); cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, 
sizeof(Mpi2SasDevicePage0_t))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_get_bios_pg3 - obtain BIOS page 3 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpr_config_get_bios_pg3(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage3_t *config_page) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2BiosPage3_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; request->Header.PageNumber = 3; request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. 
*/ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; request->Header.PageNumber = 3; request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; request->Header.PageLength = mpi_reply->Header.PageLength; cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2BiosPage3_t))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_get_raid_volume_pg0 - obtain raid volume page 0 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @page_address: form and handle value used to get page * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpr_config_get_raid_volume_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2RaidVolPage0_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; /* * This page must be polled because the IOC isn't ready yet when this * page is needed. 
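 *
 * mpr_request_polled() spins on MPR_CM_FLAGS_COMPLETE, calling
 * mpr_intr_locked() by hand between short sleeps, with a hard 60 second
 * cap; roughly:
 *
 *	while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
 *		mpr_intr_locked(sc);
 *		... pause ~50 msec, bail out after 60 seconds ...
 *	}
 *
 * so it works even before interrupts are wired up, which is exactly the
 * situation during early RAID volume discovery.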
*/ error = mpr_request_polled(sc, cm); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: poll for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. */ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; request->Header.PageNumber = 0; request->Header.PageLength = mpi_reply->Header.PageLength; request->Header.PageVersion = mpi_reply->Header.PageVersion; request->PageAddress = page_address; cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; /* * This page must be polled because the IOC isn't ready yet when this * page is needed. */ error = mpr_request_polled(sc, cm); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: poll for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, cm->cm_length); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } /** * mpr_config_get_raid_volume_pg1 - obtain raid volume page 1 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @form: GET_NEXT_HANDLE or HANDLE * @handle: volume handle * Context: sleep. * * Returns 0 for success, non-zero for failure. 
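 *
 * mpr_config_get_volume_wwid() further down is a thin wrapper over this
 * call; a sketch of the WWID assembly it performs on the returned page:
 *
 *	Mpi2ConfigReply_t rep;
 *	Mpi2RaidVolPage1_t pg1;
 *	u64 wwid;
 *
 *	if (mpr_config_get_raid_volume_pg1(sc, &rep, &pg1,
 *	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, volume_handle) == 0)
 *		wwid = le64toh((u64)pg1.WWID.High << 32 | pg1.WWID.Low);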
*/ int mpr_config_get_raid_volume_pg1(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u16 handle) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2RaidVolPage1_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; request->Header.PageNumber = 1; request->Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. */ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; request->Header.PageNumber = 1; request->Header.PageLength = mpi_reply->Header.PageLength; request->Header.PageVersion = mpi_reply->Header.PageVersion; request->PageAddress = htole32(form | handle); cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2RaidVolPage1_t))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return 
(error); } /** * mpr_config_get_volume_wwid - returns wwid given the volume handle * @sc: per adapter object * @volume_handle: volume handle * @wwid: volume wwid * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpr_config_get_volume_wwid(struct mpr_softc *sc, u16 volume_handle, u64 *wwid) { Mpi2ConfigReply_t mpi_reply; Mpi2RaidVolPage1_t raid_vol_pg1; *wwid = 0; if (!(mpr_config_get_raid_volume_pg1(sc, &mpi_reply, &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, volume_handle))) { *wwid = le64toh((u64)raid_vol_pg1.WWID.High << 32 | raid_vol_pg1.WWID.Low); return 0; } else return -1; } /** * mpr_config_get_pd_pg0 - obtain raid phys disk page 0 * @sc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page * @page_address: form and handle value used to get page * Context: sleep. * * Returns 0 for success, non-zero for failure. */ int mpr_config_get_raid_pd_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 page_address) { MPI2_CONFIG_REQUEST *request; MPI2_CONFIG_REPLY *reply; struct mpr_command *cm; Mpi2RaidPhysDiskPage0_t *page = NULL; int error = 0; u16 ioc_status; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; request->Header.PageNumber = 0; request->Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; /* * This page must be polled because the IOC isn't ready yet when this * page is needed. */ error = mpr_request_polled(sc, cm); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: poll for header completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: header read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } /* We have to do free and alloc for the reply-free and reply-post * counters to match - Need to review the reply FIFO handling. 
*/ mpr_free_command(sc, cm); if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed @ line %d\n", __func__, __LINE__); error = EBUSY; goto out; } request = (MPI2_CONFIG_REQUEST *)cm->cm_req; bzero(request, sizeof(MPI2_CONFIG_REQUEST)); request->Function = MPI2_FUNCTION_CONFIG; request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; request->Header.PageNumber = 0; request->Header.PageLength = mpi_reply->Header.PageLength; request->Header.PageVersion = mpi_reply->Header.PageVersion; request->PageAddress = page_address; cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4; cm->cm_sge = &request->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT); if (!page) { printf("%s: page alloc failed\n", __func__); error = ENOMEM; goto out; } cm->cm_data = page; /* * This page must be polled because the IOC isn't ready yet when this * page is needed. */ error = mpr_request_polled(sc, cm); reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: poll for page completed with error %d", __func__, error); error = ENXIO; goto out; } ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK; bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY)); if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { /* FIXME */ /* If the poll returns error then we need to do diag reset */ printf("%s: page read with error; iocstatus = 0x%x\n", __func__, ioc_status); error = ENXIO; goto out; } bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2RaidPhysDiskPage0_t))); out: free(page, M_MPR); if (cm) mpr_free_command(sc, cm); return (error); } Index: head/sys/dev/mpr/mpr_ioctl.h =================================================================== --- head/sys/dev/mpr/mpr_ioctl.h (revision 299264) +++ head/sys/dev/mpr/mpr_ioctl.h (revision 299265) @@ -1,387 +1,387 @@ /*- * Copyright (c) 2008 Yahoo!, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD userland interface * * $FreeBSD$ */ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #ifndef _MPR_IOCTL_H_ #define _MPR_IOCTL_H_ #include #include #include #include /* * For the read header requests, the header should include the page * type or extended page type, page number, and page version. The * buffer and length are unused. The completed header is returned in * the 'header' member. * * For the read page and write page requests, 'buf' should point to a * buffer of 'len' bytes which holds the entire page (including the * header). * * All requests specify the page address in 'page_address'. */ struct mpr_cfg_page_req { MPI2_CONFIG_PAGE_HEADER header; uint32_t page_address; void *buf; int len; uint16_t ioc_status; }; struct mpr_ext_cfg_page_req { MPI2_CONFIG_EXTENDED_PAGE_HEADER header; uint32_t page_address; void *buf; int len; uint16_t ioc_status; }; struct mpr_raid_action { uint8_t action; uint8_t volume_bus; uint8_t volume_id; uint8_t phys_disk_num; uint32_t action_data_word; void *buf; int len; uint32_t volume_status; uint32_t action_data[4]; uint16_t action_status; uint16_t ioc_status; uint8_t write; }; struct mpr_usr_command { void *req; uint32_t req_len; void *rpl; uint32_t rpl_len; void *buf; int len; uint32_t flags; }; typedef struct mpr_pci_bits { union { struct { uint32_t DeviceNumber :5; uint32_t FunctionNumber :3; uint32_t BusNumber :24; } bits; uint32_t AsDWORD; } u; uint32_t PciSegmentId; } mpr_pci_bits_t; /* * The following is the MPRIOCTL_GET_ADAPTER_DATA data structure. 
This data * structure is setup so that we hopefully are properly aligned for both * 32-bit and 64-bit mode applications. * * Adapter Type - Value = 6 = SCSI Protocol through SAS-3 adapter * * MPI Port Number - The PCI Function number for this device * * PCI Device HW Id - The PCI device number for this device * */ #define MPRIOCTL_ADAPTER_TYPE_SAS3 6 typedef struct mpr_adapter_data { uint32_t StructureLength; uint32_t AdapterType; uint32_t MpiPortNumber; uint32_t PCIDeviceHwId; uint32_t PCIDeviceHwRev; uint32_t SubSystemId; uint32_t SubsystemVendorId; uint32_t Reserved1; uint32_t MpiFirmwareVersion; uint32_t BiosVersion; uint8_t DriverVersion[32]; uint8_t Reserved2; uint8_t ScsiId; uint16_t Reserved3; mpr_pci_bits_t PciInformation; } mpr_adapter_data_t; typedef struct mpr_update_flash { uint64_t PtrBuffer; uint32_t ImageChecksum; uint32_t ImageOffset; uint32_t ImageSize; uint32_t ImageType; } mpr_update_flash_t; #define MPR_PASS_THRU_DIRECTION_NONE 0 #define MPR_PASS_THRU_DIRECTION_READ 1 #define MPR_PASS_THRU_DIRECTION_WRITE 2 #define MPR_PASS_THRU_DIRECTION_BOTH 3 typedef struct mpr_pass_thru { uint64_t PtrRequest; uint64_t PtrReply; uint64_t PtrData; uint32_t RequestSize; uint32_t ReplySize; uint32_t DataSize; uint32_t DataDirection; uint64_t PtrDataOut; uint32_t DataOutSize; uint32_t Timeout; } mpr_pass_thru_t; /* * Event queue defines */ #define MPR_EVENT_QUEUE_SIZE (50) /* Max Events stored in driver */ #define MPR_MAX_EVENT_DATA_LENGTH (48) /* Size of each event in Dwords */ typedef struct mpr_event_query { uint16_t Entries; uint16_t Reserved; uint32_t Types[4]; } mpr_event_query_t; typedef struct mpr_event_enable { uint32_t Types[4]; } mpr_event_enable_t; /* * Event record entry for ioctl. */ typedef struct mpr_event_entry { uint32_t Type; uint32_t Number; uint32_t Data[MPR_MAX_EVENT_DATA_LENGTH]; } mpr_event_entry_t; typedef struct mpr_event_report { uint32_t Size; uint64_t PtrEvents; } mpr_event_report_t; typedef struct mpr_pci_info { uint32_t BusNumber; uint8_t DeviceNumber; uint8_t FunctionNumber; uint16_t InterruptVector; uint8_t PciHeader[256]; } mpr_pci_info_t; typedef struct mpr_diag_action { uint32_t Action; uint32_t Length; uint64_t PtrDiagAction; uint32_t ReturnCode; } mpr_diag_action_t; #define MPR_FW_DIAGNOSTIC_UID_NOT_FOUND (0xFF) #define MPR_FW_DIAG_NEW (0x806E6577) #define MPR_FW_DIAG_TYPE_REGISTER (0x00000001) #define MPR_FW_DIAG_TYPE_UNREGISTER (0x00000002) #define MPR_FW_DIAG_TYPE_QUERY (0x00000003) #define MPR_FW_DIAG_TYPE_READ_BUFFER (0x00000004) #define MPR_FW_DIAG_TYPE_RELEASE (0x00000005) #define MPR_FW_DIAG_INVALID_UID (0x00000000) #define MPR_DIAG_SUCCESS 0 #define MPR_DIAG_FAILURE 1 #define MPR_FW_DIAG_ERROR_SUCCESS (0x00000000) #define MPR_FW_DIAG_ERROR_FAILURE (0x00000001) #define MPR_FW_DIAG_ERROR_INVALID_PARAMETER (0x00000002) #define MPR_FW_DIAG_ERROR_POST_FAILED (0x00000010) #define MPR_FW_DIAG_ERROR_INVALID_UID (0x00000011) #define MPR_FW_DIAG_ERROR_RELEASE_FAILED (0x00000012) #define MPR_FW_DIAG_ERROR_NO_BUFFER (0x00000013) #define MPR_FW_DIAG_ERROR_ALREADY_RELEASED (0x00000014) typedef struct mpr_fw_diag_register { uint8_t ExtendedType; uint8_t BufferType; uint16_t ApplicationFlags; uint32_t DiagnosticFlags; uint32_t ProductSpecific[23]; uint32_t RequestedBufferSize; uint32_t UniqueId; } mpr_fw_diag_register_t; typedef struct mpr_fw_diag_unregister { uint32_t UniqueId; } mpr_fw_diag_unregister_t; #define MPR_FW_DIAG_FLAG_APP_OWNED (0x0001) #define MPR_FW_DIAG_FLAG_BUFFER_VALID (0x0002) #define MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS (0x0004) typedef 
struct mpr_fw_diag_query { uint8_t ExtendedType; uint8_t BufferType; uint16_t ApplicationFlags; uint32_t DiagnosticFlags; uint32_t ProductSpecific[23]; uint32_t TotalBufferSize; uint32_t DriverAddedBufferSize; uint32_t UniqueId; } mpr_fw_diag_query_t; typedef struct mpr_fw_diag_release { uint32_t UniqueId; } mpr_fw_diag_release_t; #define MPR_FW_DIAG_FLAG_REREGISTER (0x0001) #define MPR_FW_DIAG_FLAG_FORCE_RELEASE (0x0002) typedef struct mpr_diag_read_buffer { uint8_t Status; uint8_t Reserved; uint16_t Flags; uint32_t StartingOffset; uint32_t BytesToRead; uint32_t UniqueId; uint64_t PtrDataBuffer; } mpr_diag_read_buffer_t; /* * Register Access */ #define REG_IO_READ 1 #define REG_IO_WRITE 2 #define REG_MEM_READ 3 #define REG_MEM_WRITE 4 typedef struct mpr_reg_access { uint32_t Command; uint32_t RegOffset; uint32_t RegData; } mpr_reg_access_t; typedef struct mpr_btdh_mapping { uint16_t TargetID; uint16_t Bus; uint16_t DevHandle; uint16_t Reserved; } mpr_btdh_mapping_t; #define MPRIO_MPR_COMMAND_FLAG_VERBOSE 0x01 #define MPRIO_MPR_COMMAND_FLAG_DEBUG 0x02 #define MPRIO_READ_CFG_HEADER _IOWR('M', 200, struct mpr_cfg_page_req) #define MPRIO_READ_CFG_PAGE _IOWR('M', 201, struct mpr_cfg_page_req) #define MPRIO_READ_EXT_CFG_HEADER _IOWR('M', 202, struct mpr_ext_cfg_page_req) #define MPRIO_READ_EXT_CFG_PAGE _IOWR('M', 203, struct mpr_ext_cfg_page_req) #define MPRIO_WRITE_CFG_PAGE _IOWR('M', 204, struct mpr_cfg_page_req) #define MPRIO_RAID_ACTION _IOWR('M', 205, struct mpr_raid_action) #define MPRIO_MPR_COMMAND _IOWR('M', 210, struct mpr_usr_command) #define MPTIOCTL ('I') #define MPTIOCTL_GET_ADAPTER_DATA _IOWR(MPTIOCTL, 1,\ struct mpr_adapter_data) #define MPTIOCTL_UPDATE_FLASH _IOWR(MPTIOCTL, 2,\ struct mpr_update_flash) #define MPTIOCTL_RESET_ADAPTER _IO(MPTIOCTL, 3) #define MPTIOCTL_PASS_THRU _IOWR(MPTIOCTL, 4,\ struct mpr_pass_thru) #define MPTIOCTL_EVENT_QUERY _IOWR(MPTIOCTL, 5,\ struct mpr_event_query) #define MPTIOCTL_EVENT_ENABLE _IOWR(MPTIOCTL, 6,\ struct mpr_event_enable) #define MPTIOCTL_EVENT_REPORT _IOWR(MPTIOCTL, 7,\ struct mpr_event_report) #define MPTIOCTL_GET_PCI_INFO _IOWR(MPTIOCTL, 8,\ struct mpr_pci_info) #define MPTIOCTL_DIAG_ACTION _IOWR(MPTIOCTL, 9,\ struct mpr_diag_action) #define MPTIOCTL_REG_ACCESS _IOWR(MPTIOCTL, 10,\ struct mpr_reg_access) #define MPTIOCTL_BTDH_MAPPING _IOWR(MPTIOCTL, 11,\ struct mpr_btdh_mapping) #endif /* !_MPR_IOCTL_H_ */ Index: head/sys/dev/mpr/mpr_mapping.c =================================================================== --- head/sys/dev/mpr/mpr_mapping.c (revision 299264) +++ head/sys/dev/mpr/mpr_mapping.c (revision 299265) @@ -1,2267 +1,2267 @@ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago
Technologies (LSI) MPT-Fusion Host Adapter FreeBSD */ #include __FBSDID("$FreeBSD$"); /* TODO Move headers
to mprvar */ #include #include #include #include #include #include #include #include #include #include
#include #include #include #include #include #include #include #include #include #include #include
#include #include #include #include #include /** * _mapping_clear_map_entry - Clear a particular mapping
entry. * @map_entry: map table entry * * Returns nothing. */ static inline void
_mapping_clear_map_entry(struct dev_mapping_table *map_entry) { map_entry->physical_id = 0;
map_entry->device_info = 0; map_entry->phy_bits = 0; map_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
map_entry->dev_handle = 0; map_entry->channel = -1; map_entry->id = -1; map_entry->missing_count = 0;
map_entry->init_complete = 0; map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /** *
_mapping_clear_enc_entry - Clear a particular enclosure table entry. * @enc_entry: enclosure table entry * *
Returns nothing. */ static inline void _mapping_clear_enc_entry(struct enc_mapping_table *enc_entry) {
enc_entry->enclosure_id = 0; enc_entry->start_index = MPR_MAPTABLE_BAD_IDX; enc_entry->phy_bits = 0;
enc_entry->dpm_entry_num = MPR_DPM_BAD_IDX; enc_entry->enc_handle = 0; enc_entry->num_slots = 0;
enc_entry->start_slot = 0; enc_entry->missing_count = 0; enc_entry->removal_flag = 0;
enc_entry->skip_search = 0; enc_entry->init_complete = 0; } /** * _mapping_commit_enc_entry - write a
particular enc entry in DPM page0. * @sc: per adapter object * @et_entry: enclosure table entry * * Returns
0 for success, non-zero for failure.
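 *
 * Worked example (values assumed for illustration): for an enclosure
 * entry with num_slots = 24 and missing_count = 1, the committed
 * MappingInformation word is built as
 *
 *	MappingInformation = (24 << MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT) | 1;
 *
 * slot count in the upper field, missing count in the low mask bits,
 * and the result is byte-swapped with htole16() only for the write.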
*/ static int _mapping_commit_enc_entry(struct mpr_softc *sc, struct enc_mapping_table *et_entry) {
Mpi2DriverMap0Entry_t *dpm_entry; struct dev_mapping_table *mt_entry; Mpi2ConfigReply_t mpi_reply;
Mpi2DriverMappingPage0_t config_page; if (!sc->is_dpm_enable) return 0; memset(&config_page, 0,
sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *) sc->dpm_pg0,
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += et_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = ( 0xFFFFFFFF & et_entry->enclosure_id);
dpm_entry->PhysicalIdentifier.High = ( et_entry->enclosure_id >> 32); mt_entry =
&sc->mapping_table[et_entry->start_index]; dpm_entry->DeviceIndex = htole16(mt_entry->id);
dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<=
MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; dpm_entry->MappingInformation |= et_entry->missing_count;
dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation); dpm_entry->PhysicalBitsMapping =
htole32(et_entry->phy_bits); dpm_entry->Reserved1 = 0; memcpy(&config_page.Entry, (u8 *)dpm_entry,
sizeof(Mpi2DriverMap0Entry_t)); if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
et_entry->dpm_entry_num)) { printf("%s: write of dpm entry %d for enclosure failed\n", __func__,
et_entry->dpm_entry_num); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping =
le32toh(dpm_entry->PhysicalBitsMapping); return -1; } dpm_entry->MappingInformation = le16toh(dpm_entry->
MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->PhysicalBitsMapping); return 0; } /** *
_mapping_commit_map_entry - write a particular map table entry in DPM page0. * @sc: per adapter object *
@mt_entry: map table entry * * Returns 0 for success, non-zero for failure.
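 *
 * Note on the endian handling in both commit routines (a descriptive
 * sketch of the existing pattern, not new behavior): the cached DPM
 * entry is swapped to little-endian only for the duration of the
 * config-page write and then swapped back, so the in-memory copy
 * stays host-endian:
 *
 *	dpm_entry->DeviceIndex = htole16(mt_entry->id);
 *	(void)mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page, ...);
 *	dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);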
*/ static int _mapping_commit_map_entry(struct mpr_softc *sc, struct dev_mapping_table *mt_entry) {
Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; if
(!sc->is_dpm_enable) return 0; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry =
(Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry =
dpm_entry + mt_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF &
mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
dpm_entry->DeviceIndex = htole16(mt_entry->id); dpm_entry->MappingInformation =
htole16(mt_entry->missing_count); dpm_entry->PhysicalBitsMapping = 0; dpm_entry->Reserved1 = 0;
dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation); memcpy(&config_page.Entry, (u8
*)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
mt_entry->dpm_entry_num)) { printf("%s: write of dpm entry %d for device failed\n", __func__,
mt_entry->dpm_entry_num); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation);
dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return -1; } dpm_entry->MappingInformation =
le16toh(dpm_entry->MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return 0;
} /** * _mapping_get_ir_maprange - get start and end index for IR map range. * @sc: per adapter object *
@start_idx: place holder for start index * @end_idx: place holder for end index * * The IR volumes can be
mapped either at the start or at the end of the mapping * table; this function gets the detail of where the
IR volume mapping starts * and ends in the device mapping table. * * Returns nothing. */ static void
_mapping_get_ir_maprange(struct mpr_softc *sc, u32 *start_idx, u32 *end_idx) { u16 volume_mapping_flags; u16
ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags)
& MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { *start_idx = 0; if (ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) *start_idx = 1; } else *start_idx = sc->max_devices -
sc->max_volumes; *end_idx = *start_idx + sc->max_volumes - 1; } /** * _mapping_get_enc_idx_from_id - get
enclosure index from enclosure ID * @sc: per adapter object * @enc_id: enclosure logical identifier *
@phy_bits: phy bit mask used to disambiguate enclosures with the same ID * * Returns the index of enclosure
entry on success or bad index. */ static u8 _mapping_get_enc_idx_from_id(struct mpr_softc *sc, u64 enc_id,
u64 phy_bits) { struct enc_mapping_table *et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx <
sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if
((et_entry->enclosure_id == le64toh(enc_id)) && (!et_entry->phy_bits || (et_entry->phy_bits &
le32toh(phy_bits)))) return enc_idx; } return MPR_ENCTABLE_BAD_IDX; } /** *
_mapping_get_enc_idx_from_handle - get enclosure index from handle * @sc: per adapter object * @handle:
enclosure handle * * Returns the index of enclosure entry on success or bad index.
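 *
 * Entries whose missing_count is non-zero are skipped, so callers
 * resolve only enclosures that are currently present. Typical usage,
 * matching the topology handlers later in this file:
 *
 *	enc_idx = _mapping_get_enc_idx_from_handle(sc, enc_handle);
 *	if (enc_idx == MPR_ENCTABLE_BAD_IDX)
 *		return;
 *	et_entry = &sc->enclosure_table[enc_idx];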
*/ static u8 _mapping_get_enc_idx_from_handle(struct mpr_softc *sc, u16 handle) { struct enc_mapping_table
*et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry =
&sc->enclosure_table[enc_idx]; if (et_entry->missing_count) continue; if (et_entry->enc_handle == handle)
return enc_idx; } return MPR_ENCTABLE_BAD_IDX; } /** * _mapping_get_high_missing_et_idx - get missing
enclosure index * @sc: per adapter object * * Searches through the enclosure table and identifies the
enclosure entry * with the highest missing count and returns its index * * Returns the index of enclosure
entry on success or bad index. */ static u8 _mapping_get_high_missing_et_idx(struct mpr_softc *sc) { struct
enc_mapping_table *et_entry; u8 high_missing_count = 0; u8 enc_idx, high_idx = MPR_ENCTABLE_BAD_IDX; for
(enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if
((et_entry->missing_count > high_missing_count) && !et_entry->skip_search) { high_missing_count =
et_entry->missing_count; high_idx = enc_idx; } } return high_idx; } /** * _mapping_get_high_missing_mt_idx -
get missing map table index * @sc: per adapter object * * Searches through the map table and identifies the
device entry * with the highest missing count and returns its index * * Returns the index of map table entry
on success or bad index. */ static u32 _mapping_get_high_missing_mt_idx(struct mpr_softc *sc) { u32 map_idx,
high_idx = MPR_ENCTABLE_BAD_IDX; u8 high_missing_count = 0; u32 start_idx, end_idx, start_idx_ir,
end_idx_ir; struct dev_mapping_table *mt_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); start_idx =
0; start_idx_ir = 0; end_idx_ir = 0; end_idx = sc->max_devices; if (ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) start_idx = 1; if (sc->ir_firmware) { _mapping_get_ir_maprange(sc,
&start_idx_ir, &end_idx_ir); if (start_idx == start_idx_ir) start_idx = end_idx_ir + 1; else end_idx =
start_idx_ir; } mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx < end_idx;
map_idx++, mt_entry++) { if (mt_entry->missing_count > high_missing_count) { high_missing_count =
mt_entry->missing_count; high_idx = map_idx; } } return high_idx; } /** * _mapping_get_ir_mt_idx_from_wwid -
get map table index from volume WWID * @sc: per adapter object * @wwid: world wide unique ID of the volume *
* Returns the index of map table entry on success or bad index. */ static u32
_mapping_get_ir_mt_idx_from_wwid(struct mpr_softc *sc, u64 wwid) { u32 start_idx, end_idx, map_idx; struct
dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry =
&sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if
(mt_entry->physical_id == wwid) return map_idx; return MPR_MAPTABLE_BAD_IDX; } /** *
_mapping_get_mt_idx_from_id - get map table index from a device ID * @sc: per adapter object * @dev_id:
device identifier (SAS Address) * * Returns the index of map table entry on success or bad index.
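 *
 * The 64-bit device ID compared against physical_id here is assembled
 * from the two 32-bit halves reported by the firmware, following the
 * pattern used throughout this file:
 *
 *	sas_address = sas_device_pg0.SASAddress.High;
 *	sas_address = (sas_address << 32) | sas_device_pg0.SASAddress.Low;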
*/ static u32 _mapping_get_mt_idx_from_id(struct mpr_softc *sc, u64 dev_id) { u32 map_idx; struct
dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry =
&sc->mapping_table[map_idx]; if (mt_entry->physical_id == dev_id) return map_idx; } return
MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle *
@sc: per adapter object * @volHandle: volume device handle * * Returns the index of map table entry on
success or bad index. */ static u32 _mapping_get_ir_mt_idx_from_handle(struct mpr_softc *sc, u16 volHandle)
{ u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc,
&start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <=
end_idx; map_idx++, mt_entry++) if (mt_entry->dev_handle == volHandle) return map_idx; return
MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_mt_idx_from_handle - get map table index from handle * @sc: per
adapter object * @handle: device handle * * Returns the index of map table entry on success or bad index. */
static u32 _mapping_get_mt_idx_from_handle(struct mpr_softc *sc, u16 handle) { u32 map_idx; struct
dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry =
&sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle) return map_idx; } return
MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_free_ir_mt_idx - get first free index for a volume * @sc: per
adapter object * * Searches through the mapping table for a free index for a volume; if no free * index is
found, it falls back to the volume entry with the highest missing * count * * Returns the index of map table
entry on success or bad index. */ static u32 _mapping_get_free_ir_mt_idx(struct mpr_softc *sc) { u8
high_missing_count = 0; u32 start_idx, end_idx, map_idx; u32 high_idx = MPR_MAPTABLE_BAD_IDX; struct
dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry =
&sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if
(!(mt_entry->device_info & MPR_MAP_IN_USE)) return map_idx; mt_entry = &sc->mapping_table[start_idx]; for
(map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) { if (mt_entry->missing_count >
high_missing_count) { high_missing_count = mt_entry->missing_count; high_idx = map_idx; } } return high_idx;
} /** * _mapping_get_free_mt_idx - get first free index for a device * @sc: per adapter object * @start_idx:
offset in the table to start search * * Returns the index of map table entry on success or bad index. */
static u32 _mapping_get_free_mt_idx(struct mpr_softc *sc, u32 start_idx) { u32 map_idx, max_idx =
sc->max_devices; struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx]; u16
volume_mapping_flags; volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags ==
MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) max_idx -= sc->max_volumes; for (map_idx = start_idx; map_idx <
max_idx; map_idx++, mt_entry++) if (!(mt_entry->device_info & (MPR_MAP_IN_USE | MPR_DEV_RESERVED))) return
map_idx; return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_dpm_idx_from_id - get DPM index from ID * @sc:
per adapter object * @id: volume WWID or enclosure ID or device ID * @phy_bits: phy bit mask to match, or 0
* * Returns the index of DPM entry on success or bad index.
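 *
 * A phy_bits argument of 0 means "match on the ID alone"; otherwise an
 * entry matches when the bitmaps overlap, i.e. the test used in the
 * loop below is
 *
 *	(id == PhysicalIdentifier) &&
 *	    (!phy_bits || !dpm_entry->PhysicalBitsMapping ||
 *	    (phy_bits & dpm_entry->PhysicalBitsMapping))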
*/ static u16 _mapping_get_dpm_idx_from_id(struct mpr_softc *sc, u64 id, u32 phy_bits) { u16 entry_num; uint64_t PhysicalIdentifier; Mpi2DriverMap0Entry_t *dpm_entry; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High; PhysicalIdentifier = (PhysicalIdentifier << 32) | dpm_entry->PhysicalIdentifier.Low; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) if ((id == PhysicalIdentifier) && (!phy_bits || !dpm_entry->PhysicalBitsMapping || (phy_bits & dpm_entry->PhysicalBitsMapping))) return entry_num; return MPR_DPM_BAD_IDX; } /** * _mapping_get_free_dpm_idx - get first available DPM index * @sc: per adapter object * * Returns the index of DPM entry on success or bad index. */ static u32 _mapping_get_free_dpm_idx(struct mpr_softc *sc) { u16 entry_num; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { if (!sc->dpm_entry_used[entry_num]) return entry_num; } return MPR_DPM_BAD_IDX; } /** * _mapping_update_ir_missing_cnt - Updates missing count for a volume * @sc: per adapter object * @map_idx: map table index of the volume * @element: IR configuration change element * @wwid: IR volume ID. * * Updates the missing count in the map table and in the DPM entry for a volume * * Returns nothing. */ static void _mapping_update_ir_missing_cnt(struct mpr_softc *sc, u32 map_idx, Mpi2EventIrConfigElement_t *element, u64 wwid) { struct dev_mapping_table *mt_entry; u8 missing_cnt, reason = element->ReasonCode; u16 dpm_idx; Mpi2DriverMap0Entry_t *dpm_entry; if (!sc->is_dpm_enable) return; mt_entry = &sc->mapping_table[map_idx]; if (reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) { mt_entry->missing_count = 0; } else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) { mt_entry->missing_count = 0; mt_entry->init_complete = 0; } else if ((reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED) || (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)) { if (!mt_entry->init_complete) { if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) mt_entry->missing_count++; else mt_entry->init_complete = 1; } if (!mt_entry->missing_count) mt_entry->missing_count++; mt_entry->dev_handle = 0; } dpm_idx = mt_entry->dpm_entry_num; if (dpm_idx == MPR_DPM_BAD_IDX) { if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) || (reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED)) dpm_idx = _mapping_get_dpm_idx_from_id(sc, mt_entry->physical_id, 0); else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) return; } if (dpm_idx != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; if ((mt_entry->physical_id == le64toh((u64)dpm_entry->PhysicalIdentifier.High | dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt == mt_entry->missing_count)) mt_entry->init_complete = 1; } else { dpm_idx = _mapping_get_free_dpm_idx(sc); mt_entry->init_complete = 0; } if ((dpm_idx != MPR_DPM_BAD_IDX) && !mt_entry->init_complete) { mt_entry->init_complete = 1; mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = map_idx; dpm_entry->MappingInformation = mt_entry->missing_count; dpm_entry->PhysicalBitsMapping = 
0; dpm_entry->Reserved1 = 0; sc->dpm_flush_entry[dpm_idx] = 1; sc->dpm_entry_used[dpm_idx] = 1; } else if
(dpm_idx == MPR_DPM_BAD_IDX) { printf("%s: no space to add entry in DPM table\n", __func__);
mt_entry->init_complete = 1; } } /** * _mapping_add_to_removal_table - mark an entry for removal * @sc: per
adapter object * @handle: handle of the enclosure/device/volume * @dpm_idx: DPM entry number of the entry *
* Adds the handle or DPM entry number to the removal table. * * Returns nothing. */ static void
_mapping_add_to_removal_table(struct mpr_softc *sc, u16 handle, u16 dpm_idx) { struct map_removal_table
*remove_entry; u32 i; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); remove_entry = sc->removal_table; for
(i = 0; i < sc->max_devices; i++, remove_entry++) { if (remove_entry->dev_handle ||
remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX) continue; if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { if (dpm_idx)
remove_entry->dpm_entry_num = dpm_idx; if (remove_entry->dpm_entry_num == MPR_DPM_BAD_IDX)
remove_entry->dev_handle = handle; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) remove_entry->dev_handle = handle; break; } } /** *
_mapping_update_missing_count - Update missing count for a device * @sc: per adapter object * @topo_change:
Topology change event entry * * Searches through the topology change list; if any device is found not *
responding, its associated map table entry and DPM entry are updated * * Returns nothing. */ static void
_mapping_update_missing_count(struct mpr_softc *sc, struct _map_topology_change *topo_change) { u16
ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u8 entry; struct _map_phy_change *phy_change; u32 map_idx;
struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; for (entry = 0; entry <
topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if
(!phy_change->dev_handle || (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) continue;
map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change-> dev_handle); phy_change->is_processed = 1; if
(map_idx == MPR_MAPTABLE_BAD_IDX) { printf("%s: device is already removed from mapping " "table\n",
__func__); continue; } mt_entry = &sc->mapping_table[map_idx]; if (!mt_entry->init_complete) { if
(mt_entry->missing_count < MPR_MAX_MISSING_COUNT) mt_entry->missing_count++; else mt_entry->init_complete =
1; } if (!mt_entry->missing_count) mt_entry->missing_count++; _mapping_add_to_removal_table(sc,
mt_entry->dev_handle, 0); mt_entry->dev_handle = 0; if (((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) &&
sc->is_dpm_enable && !mt_entry->init_complete && mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry =
(Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry +=
mt_entry->dpm_entry_num; dpm_entry->MappingInformation = mt_entry->missing_count;
sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1; } mt_entry->init_complete = 1; } } /** *
_mapping_find_enc_map_space - find map table entries for an enclosure * @sc: per adapter object * @et_entry:
enclosure entry * * Searches through the mapping table, defragmenting it if necessary, to * provide
contiguous space in the map table for a particular enclosure entry * * Returns start index in map table or
bad index.
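 *
 * Sketch of the contiguous-space search used below (slot_is_usable()
 * is a placeholder for the criteria actually tested, first a match on
 * the enclosure's ID/phy bits, then plain unreserved entries):
 *
 *	if (slot_is_usable(mt_entry)) {
 *		if (++num_found == et_entry->num_slots)
 *			return (map_idx - num_found) + 1;
 *	} else
 *		num_found = 0;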
*/ static u32 _mapping_find_enc_map_space(struct mpr_softc *sc, struct enc_mapping_table *et_entry) { u16 vol_mapping_flags; u32 skip_count, end_of_table, map_idx, enc_idx; u16 num_found; u32 start_idx = MPR_MAPTABLE_BAD_IDX; struct dev_mapping_table *mt_entry; struct enc_mapping_table *enc_entry; unsigned char done_flag = 0, found_space; u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); skip_count = sc->num_rsvd_entries; num_found = 0; vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (!sc->ir_firmware) end_of_table = sc->max_devices; else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) end_of_table = sc->max_devices; else end_of_table = sc->max_devices - sc->max_volumes; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if ((et_entry->enclosure_id == mt_entry->physical_id) && (!mt_entry->phy_bits || (mt_entry->phy_bits & et_entry->phy_bits))) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; return start_idx; } } else num_found = 0; } for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPR_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; return start_idx; } } else num_found = 0; } while (!done_flag) { enc_idx = _mapping_get_high_missing_et_idx(sc); if (enc_idx == MPR_ENCTABLE_BAD_IDX) return MPR_MAPTABLE_BAD_IDX; enc_entry = &sc->enclosure_table[enc_idx]; /*VSP FIXME*/ enc_entry->skip_search = 1; mt_entry = &sc->mapping_table[enc_entry->start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info &= ~MPR_DEV_RESERVED; found_space = 0; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPR_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; found_space = 1; } } else num_found = 0; } if (!found_space) continue; for (map_idx = start_idx; map_idx < (start_idx + num_found); map_idx++) { enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (map_idx < enc_entry->start_index || map_idx > (enc_entry->start_index + enc_entry->num_slots)) continue; if (!enc_entry->removal_flag) { enc_entry->removal_flag = 1; _mapping_add_to_removal_table(sc, 0, enc_entry->dpm_entry_num); } mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->device_info & MPR_MAP_IN_USE) { _mapping_add_to_removal_table(sc, mt_entry->dev_handle, 0); _mapping_clear_map_entry(mt_entry); } if (map_idx == (enc_entry->start_index + enc_entry->num_slots - 1)) _mapping_clear_enc_entry(et_entry); } } enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (!enc_entry->removal_flag) { mt_entry = &sc->mapping_table[enc_entry-> start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info |= MPR_DEV_RESERVED; et_entry->skip_search = 0; } } done_flag = 1; } return start_idx; } /** * _mapping_get_dev_info -get information about newly added devices * @sc: per adapter object * @topo_change: 
Topology change event entry * * Search through the topology change event list and issues sas device pg0 * requests for the newly added device and reserved entries in tables * * Returns nothing */ static void _mapping_get_dev_info(struct mpr_softc *sc, struct _map_topology_change *topo_change) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u8 entry, enc_idx, phy_idx, sata_end_device; u32 map_idx, index, device_info; struct _map_phy_change *phy_change, *tmp_phy_change; uint64_t sas_address; struct enc_mapping_table *et_entry; struct dev_mapping_table *mt_entry; u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; int rc = 1; for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (phy_change->is_processed || !phy_change->dev_handle || phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) continue; if (mpr_config_get_sas_device_pg0(sc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, phy_change->dev_handle)) { phy_change->is_processed = 1; continue; } /* * Always get SATA Identify information because this is used * to determine if Start/Stop Unit should be sent to the drive * when the system is shutdown. */ device_info = le32toh(sas_device_pg0.DeviceInfo); sas_address = sas_device_pg0.SASAddress.High; sas_address = (sas_address << 32) | sas_device_pg0.SASAddress.Low; sata_end_device = 0; if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) { sata_end_device = 1; rc = mprsas_get_sas_address_for_sata_disk(sc, &sas_address, phy_change->dev_handle, device_info, &phy_change->is_SATA_SSD); if (rc) { mpr_dprint(sc, MPR_ERROR, "%s: failed to get " "disk type (SSD or HDD) and SAS Address " "for SATA device with handle 0x%04x\n", __func__, phy_change->dev_handle); } else { mpr_dprint(sc, MPR_INFO, "SAS Address for SATA " "device = %jx\n", sas_address); } } phy_change->physical_id = sas_address; phy_change->slot = le16toh(sas_device_pg0.Slot); phy_change->device_info = le32toh(sas_device_pg0.DeviceInfo); if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { enc_idx = _mapping_get_enc_idx_from_handle(sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_MAPPING, "%s: failed to add " "the device with handle 0x%04x because the " "enclosure is not in the mapping table\n", __func__, phy_change->dev_handle); continue; } if (!((phy_change->device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (phy_change->device_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET | MPI2_SAS_DEVICE_INFO_STP_TARGET | MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) { phy_change->is_processed = 1; continue; } et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX) continue; if (!topo_change->exp_handle) { map_idx = sc->num_rsvd_entries; et_entry->start_index = map_idx; } else { map_idx = _mapping_find_enc_map_space(sc, et_entry); et_entry->start_index = map_idx; if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { phy_change->is_processed = 1; for (phy_idx = 0; phy_idx < topo_change->num_entries; phy_idx++) { tmp_phy_change = &topo_change->phy_details [phy_idx]; if (tmp_phy_change->reason == add_code) tmp_phy_change-> is_processed = 1; } break; } } mt_entry = &sc->mapping_table[map_idx]; for (index = map_idx; index < (et_entry->num_slots + map_idx); index++, mt_entry++) { mt_entry->device_info = MPR_DEV_RESERVED; 
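				/*
				 * Reserve the enclosure's whole slot range and
				 * stamp each entry with the enclosure's identity
				 * so later per-slot lookups resolve to it.
				 */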
mt_entry->physical_id = et_entry->enclosure_id; mt_entry->phy_bits = et_entry->phy_bits; } } } } /** *
_mapping_set_mid_to_eid - set map table data from enclosure table * @sc: per adapter object * @et_entry:
enclosure entry * * Returns nothing */ static inline void _mapping_set_mid_to_eid(struct mpr_softc *sc,
struct enc_mapping_table *et_entry) { struct dev_mapping_table *mt_entry; u16 slots = et_entry->num_slots,
map_idx; u32 start_idx = et_entry->start_index; if (start_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry =
&sc->mapping_table[start_idx]; for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
mt_entry->physical_id = et_entry->enclosure_id; } } /** * _mapping_clear_removed_entries - mark the entries
to be cleared * @sc: per adapter object * * Searches through the removal table and marks the entries which
need to be * flushed to DPM, and also updates the map table and enclosure table by * clearing the
corresponding entries. * * Returns nothing */ static void _mapping_clear_removed_entries(struct mpr_softc
*sc) { u32 remove_idx; struct map_removal_table *remove_entry; Mpi2DriverMap0Entry_t *dpm_entry; u8
done_flag = 0, num_entries, m, i; struct enc_mapping_table *et_entry, *from, *to; u16 ioc_pg8_flags =
le16toh(sc->ioc_pg8.Flags); if (sc->is_dpm_enable) { remove_entry = sc->removal_table; for (remove_idx = 0;
remove_idx < sc->max_devices; remove_idx++, remove_entry++) { if (remove_entry->dpm_entry_num !=
MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 +
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += remove_entry->dpm_entry_num;
dpm_entry->PhysicalIdentifier.Low = 0; dpm_entry->PhysicalIdentifier.High = 0; dpm_entry->DeviceIndex = 0;
dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_flush_entry[remove_entry->
dpm_entry_num] = 1; sc->dpm_entry_used[remove_entry->dpm_entry_num] = 0; remove_entry->dpm_entry_num =
MPR_DPM_BAD_IDX; } } } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { num_entries = sc->num_enc_table_entries; while (!done_flag) {
done_flag = 1; et_entry = sc->enclosure_table; for (i = 0; i < num_entries; i++, et_entry++) { if
(!et_entry->enc_handle && et_entry-> init_complete) { done_flag = 0; if (i != (num_entries - 1)) { from =
&sc->enclosure_table [i+1]; to = &sc->enclosure_table[i]; for (m = i; m < (num_entries - 1); m++, from++,
to++) { _mapping_set_mid_to_eid (sc, to); *to = *from; } _mapping_clear_enc_entry(to);
sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } else { _mapping_clear_enc_entry
(et_entry); sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } } } } } } /** *
_mapping_add_new_device - add the new device into the mapping table * @sc: per adapter object *
@topo_change: Topology change event entry * * Searches through the topology change event list and updates
the map table, * enclosure table and DPM pages for the newly added devices.
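 *
 * In enclosure/slot mapping mode the target index for a new device is
 * derived directly from its slot position within the enclosure, as
 * computed below:
 *
 *	map_idx = et_entry->start_index + phy_change->slot -
 *	    et_entry->start_slot;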
* * Returns nothing */ static void _mapping_add_new_device(struct mpr_softc *sc, struct
_map_topology_change *topo_change) { u8 enc_idx, missing_cnt, is_removed = 0; u16 dpm_idx; u32 search_idx,
map_idx; u32 entry; struct dev_mapping_table *mt_entry; struct enc_mapping_table *et_entry; struct
_map_phy_change *phy_change; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2DriverMap0Entry_t
*dpm_entry; uint64_t temp64_var; u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 hdr_sz =
sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry];
if (phy_change->is_processed) continue; if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED ||
!phy_change->dev_handle) { phy_change->is_processed = 1; continue; } if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { enc_idx =
_mapping_get_enc_idx_from_handle (sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
phy_change->is_processed = 1; printf("%s: failed to add the device with " "handle 0x%04x because the
enclosure is " "not in the mapping table\n", __func__, phy_change->dev_handle); continue; } et_entry =
&sc->enclosure_table[enc_idx]; if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { phy_change->is_processed
= 1; if (!sc->mt_full_retry) { sc->mt_add_device_failed = 1; continue; } printf("%s: failed to add the
device with " "handle 0x%04x because there is no free " "space available in the mapping table\n", __func__,
phy_change->dev_handle); continue; } map_idx = et_entry->start_index + phy_change->slot -
et_entry->start_slot; mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id =
phy_change->physical_id; mt_entry->channel = 0; mt_entry->id = map_idx; mt_entry->dev_handle =
phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
mt_entry->device_info = phy_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); if
(sc->is_dpm_enable) { dpm_idx = et_entry->dpm_entry_num; if (dpm_idx == MPR_DPM_BAD_IDX) dpm_idx =
_mapping_get_dpm_idx_from_id (sc, et_entry->enclosure_id, et_entry->phy_bits); if (dpm_idx ==
MPR_DPM_BAD_IDX) { dpm_idx = _mapping_get_free_dpm_idx(sc); if (dpm_idx != MPR_DPM_BAD_IDX) { dpm_entry =
(Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry->
PhysicalIdentifier.Low = (0xFFFFFFFF & et_entry->enclosure_id); dpm_entry-> PhysicalIdentifier.High = (
et_entry->enclosure_id >> 32); dpm_entry->DeviceIndex = (U16)et_entry->start_index;
dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift;
dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; et_entry->dpm_entry_num = dpm_idx; /* FIXME Do I need
to set the dpm_idx in mt_entry too */ sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1;
phy_change->is_processed = 1; } else { phy_change->is_processed = 1; mpr_dprint(sc, MPR_INFO, "%s: " "failed
to add the device " "with handle 0x%04x to " "persistent table because " "there is no free space "
"available\n", __func__, phy_change->dev_handle); } } else { et_entry->dpm_entry_num = dpm_idx;
mt_entry->dpm_entry_num = dpm_idx; } } /* FIXME Why not mt_entry too?
*/ et_entry->init_complete = 1; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { map_idx = _mapping_get_mt_idx_from_id (sc, phy_change->physical_id); if (map_idx == MPR_MAPTABLE_BAD_IDX) { search_idx = sc->num_rsvd_entries; if (topo_change->exp_handle) search_idx += max_num_phy_ids; map_idx = _mapping_get_free_mt_idx(sc, search_idx); } if (map_idx == MPR_MAPTABLE_BAD_IDX) { map_idx = _mapping_get_high_missing_mt_idx(sc); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle) { _mapping_add_to_removal_table (sc, mt_entry->dev_handle, 0); is_removed = 1; } mt_entry->init_complete = 0; } } if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = phy_change->physical_id; mt_entry->channel = 0; mt_entry->id = map_idx; mt_entry->dev_handle = phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->device_info = phy_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); } else { phy_change->is_processed = 1; if (!sc->mt_full_retry) { sc->mt_add_device_failed = 1; continue; } printf("%s: failed to add the device with " "handle 0x%04x because there is no free " "space available in the mapping table\n", __func__, phy_change->dev_handle); continue; } if (sc->is_dpm_enable) { if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_idx = mt_entry->dpm_entry_num; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; missing_cnt = dpm_entry-> MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; temp64_var = dpm_entry-> PhysicalIdentifier.High; temp64_var = (temp64_var << 32) | dpm_entry->PhysicalIdentifier.Low; if ((mt_entry->physical_id == temp64_var) && !missing_cnt) mt_entry->init_complete = 1; } else { dpm_idx = _mapping_get_free_dpm_idx(sc); mt_entry->init_complete = 0; } if (dpm_idx != MPR_DPM_BAD_IDX && !mt_entry->init_complete) { mt_entry->init_complete = 1; mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = (U16) map_idx; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; phy_change->is_processed = 1; } else if (dpm_idx == MPR_DPM_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_INFO, "%s: " "failed to add the device " "with handle 0x%04x to " "persistent table because " "there is no free space " "available\n", __func__, phy_change->dev_handle); } } mt_entry->init_complete = 1; } phy_change->is_processed = 1; } if (is_removed) _mapping_clear_removed_entries(sc); } /** * _mapping_flush_dpm_pages -Flush the DPM pages to NVRAM * @sc: per adapter object * * Returns nothing */ static void _mapping_flush_dpm_pages(struct mpr_softc *sc) { Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; u16 entry_num; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { if (!sc->dpm_flush_entry[entry_num]) continue; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += entry_num; 
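		/*
		 * Swap the cached entry to little-endian just long enough
		 * to copy it into the config page for the write; it is
		 * converted back to host order below once the write has
		 * been attempted.
		 */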
dpm_entry->MappingInformation = htole16(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex =
htole16(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = htole32(dpm_entry-> PhysicalBitsMapping);
memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); /* TODO-How to handle failed
writes? */ if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page, entry_num)) { printf("%s: write of dpm
entry %d for device failed\n", __func__, entry_num); } else sc->dpm_flush_entry[entry_num] = 0;
dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex =
le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry-> PhysicalBitsMapping);
} } /** * mpr_mapping_allocate_memory - allocates the memory required for mapping tables * @sc: per adapter
object * * Allocates the memory for all the tables required for host mapping * * Return 0 on success or
non-zero on failure. */ int mpr_mapping_allocate_memory(struct mpr_softc *sc) { uint32_t dpm_pg0_sz;
sc->mapping_table = malloc((sizeof(struct dev_mapping_table) * sc->max_devices), M_MPR, M_ZERO|M_NOWAIT); if
(!sc->mapping_table) goto free_resources; sc->removal_table = malloc((sizeof(struct map_removal_table) *
sc->max_devices), M_MPR, M_ZERO|M_NOWAIT); if (!sc->removal_table) goto free_resources; sc->enclosure_table
= malloc((sizeof(struct enc_mapping_table) * sc->max_enclosures), M_MPR, M_ZERO|M_NOWAIT); if
(!sc->enclosure_table) goto free_resources; sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries),
M_MPR, M_ZERO|M_NOWAIT); if (!sc->dpm_entry_used) goto free_resources; sc->dpm_flush_entry =
malloc((sizeof(u8) * sc->max_dpm_entries), M_MPR, M_ZERO|M_NOWAIT); if (!sc->dpm_flush_entry) goto
free_resources; dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries *
sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPR, M_ZERO|M_NOWAIT); if
(!sc->dpm_pg0) { printf("%s: memory alloc failed for dpm page; disabling dpm\n", __func__);
sc->is_dpm_enable = 0; } return 0; free_resources: free(sc->mapping_table, M_MPR); free(sc->removal_table,
M_MPR); free(sc->enclosure_table, M_MPR); free(sc->dpm_entry_used, M_MPR); free(sc->dpm_flush_entry, M_MPR);
free(sc->dpm_pg0, M_MPR); printf("%s: device initialization failed due to failure in mapping " "table memory
allocation\n", __func__); return -1; } /** * mpr_mapping_free_memory - frees the memory allocated for
mapping tables * @sc: per adapter object * * Returns nothing.
*/ void mpr_mapping_free_memory(struct mpr_softc *sc) { free(sc->mapping_table, M_MPR); free(sc->removal_table, M_MPR); free(sc->enclosure_table, M_MPR); free(sc->dpm_entry_used, M_MPR); free(sc->dpm_flush_entry, M_MPR); free(sc->dpm_pg0, M_MPR); } static void _mapping_process_dpm_pg0(struct mpr_softc *sc) { u8 missing_cnt, enc_idx; u16 slot_id, entry_num, num_slots; u32 map_idx, dev_idx, start_idx, end_idx; struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); struct enc_mapping_table *et_entry; u64 physical_id; u32 phy_bits = 0; if (sc->ir_firmware) _mapping_get_ir_maprange(sc, &start_idx, &end_idx); dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) { physical_id = dpm_entry->PhysicalIdentifier.High; physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; if (!physical_id) { sc->dpm_entry_used[entry_num] = 0; continue; } sc->dpm_entry_used[entry_num] = 1; dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; dev_idx = le16toh(dpm_entry->DeviceIndex); phy_bits = le32toh(dpm_entry->PhysicalBitsMapping); if (sc->ir_firmware && (dev_idx >= start_idx) && (dev_idx <= end_idx)) { mt_entry = &sc->mapping_table[dev_idx]; mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (mt_entry->physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->channel = MPR_RAID_CHANNEL; mt_entry->id = dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info = MPR_DEV_RESERVED; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { if (dev_idx < (sc->num_rsvd_entries + max_num_phy_ids)) { slot_id = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1) slot_id = 1; num_slots = max_num_phy_ids; } else { slot_id = 0; num_slots = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_SLOT_MASK; num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; } enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { printf("%s: enclosure entries exceed max " "enclosures of %d\n", __func__, sc->max_enclosures); break; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; physical_id = dpm_entry->PhysicalIdentifier.High; et_entry->enclosure_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; et_entry->start_index = dev_idx; et_entry->dpm_entry_num = entry_num; et_entry->num_slots = num_slots; et_entry->start_slot = slot_id; et_entry->missing_count = missing_cnt; et_entry->phy_bits = phy_bits; mt_entry = &sc->mapping_table[dev_idx]; for (map_idx = dev_idx; map_idx < (dev_idx + num_slots); map_idx++, mt_entry++) { if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { printf("%s: conflict in mapping table " "for enclosure %d\n", __func__, enc_idx); break; } physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->channel = 0; mt_entry->id = dev_idx; mt_entry->dpm_entry_num = entry_num; mt_entry->missing_count = missing_cnt; mt_entry->device_info = MPR_DEV_RESERVED; } } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == 
MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { map_idx = dev_idx; mt_entry =
&sc->mapping_table[map_idx]; if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { printf("%s: conflict in
mapping table for " "device %d\n", __func__, map_idx); break; } physical_id =
dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) |
dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->channel = 0; mt_entry->id =
dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info =
MPR_DEV_RESERVED; } } /* close the loop for DPM table */ } /** * mpr_mapping_check_devices - start of the
day check for device availability * @sc: per adapter object * @sleep_flag: Flag indicating whether this
function can sleep or not * * Returns nothing. */ void mpr_mapping_check_devices(struct mpr_softc *sc, int
sleep_flag) { u32 i; /* u32 cntdn, i; u32 timeout = 60;*/ struct dev_mapping_table *mt_entry; u16
ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); struct enc_mapping_table *et_entry; u32 start_idx, end_idx; /*
We need to uncomment this when this function is called * from port enable completion */ #if 0
sc->track_mapping_events = 0; cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; do { if
(!sc->pending_map_events) break; if (sleep_flag == CAN_SLEEP) pause("mpr_pause", (hz/1000));/* 1msec sleep */
else DELAY(500); /* 500 useconds delay */ } while (--cntdn); if (!cntdn) printf("%s: there are %d" " pending
events after %d seconds of delay\n", __func__, sc->pending_map_events, timeout); #endif
sc->pending_map_events = 0; if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { et_entry = sc->enclosure_table; for (i = 0; i <
sc->num_enc_table_entries; i++, et_entry++) { if (!et_entry->init_complete) { if (et_entry->missing_count <
MPR_MAX_MISSING_COUNT) { et_entry->missing_count++; if (et_entry->dpm_entry_num != MPR_DPM_BAD_IDX)
_mapping_commit_enc_entry(sc, et_entry); } et_entry->init_complete = 1; } } if (!sc->ir_firmware) return;
_mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (i =
start_idx; i < (end_idx + 1); i++, mt_entry++) { if (mt_entry->device_info & MPR_DEV_RESERVED &&
!mt_entry->physical_id) mt_entry->init_complete = 1; else if (mt_entry->device_info & MPR_DEV_RESERVED) { if
(!mt_entry->init_complete) { if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) {
mt_entry->missing_count++; if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) _mapping_commit_map_entry(sc,
mt_entry); } mt_entry->init_complete = 1; } } } } else if ((ioc_pg8_flags &
MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { mt_entry =
sc->mapping_table; for (i = 0; i < sc->max_devices; i++, mt_entry++) { if (mt_entry->device_info &
MPR_DEV_RESERVED && !mt_entry->physical_id) mt_entry->init_complete = 1; else if (mt_entry->device_info &
MPR_DEV_RESERVED) { if (!mt_entry->init_complete) { if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) {
mt_entry->missing_count++; if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) _mapping_commit_map_entry(sc,
mt_entry); } mt_entry->init_complete = 1; } } } } } /** * mpr_mapping_is_reinit_required - check whether an
event replay is required * @sc: per adapter object * * Checks the per-IOC flags and decides whether a
replay of events is required * * Returns 1 if reinit of the IOC is required, 0 if not.
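 *
 * Illustrative caller sketch (not the driver's literal code): after a
 * mapping pass in which some device could not be placed, the stored
 * topology events are replayed exactly once:
 *
 *	if (mpr_mapping_is_reinit_required(sc))
 *		... replay the stored topology events ...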
*/ int mpr_mapping_is_reinit_required(struct mpr_softc *sc) { if (!sc->mt_full_retry && sc->mt_add_device_failed) { sc->mt_full_retry = 1; sc->mt_add_device_failed = 0; _mapping_flush_dpm_pages(sc); return 1; } sc->mt_full_retry = 1; return 0; } /** * mpr_mapping_initialize - initialize mapping tables * @sc: per adapter object * * Read controller persistent mapping tables into internal data area. * * Return 0 for success or non-zero for failure. */ int mpr_mapping_initialize(struct mpr_softc *sc) { uint16_t volume_mapping_flags, dpm_pg0_sz; uint32_t i; Mpi2ConfigReply_t mpi_reply; int error; uint8_t retry_count; uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); /* The additional 1 accounts for the virtual enclosure * created for the controller */ sc->max_enclosures = sc->facts->MaxEnclosures + 1; sc->max_expanders = sc->facts->MaxSasExpanders; sc->max_volumes = sc->facts->MaxVolumes; sc->max_devices = sc->facts->MaxTargets + sc->max_volumes; sc->pending_map_events = 0; sc->num_enc_table_entries = 0; sc->num_rsvd_entries = 0; sc->num_channels = 1; sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries; sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0; sc->track_mapping_events = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING) sc->is_dpm_enable = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) sc->num_rsvd_entries = 1; volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)) sc->num_rsvd_entries += sc->max_volumes; error = mpr_mapping_allocate_memory(sc); if (error) return (error); for (i = 0; i < sc->max_devices; i++) _mapping_clear_map_entry(sc->mapping_table + i); for (i = 0; i < sc->max_enclosures; i++) _mapping_clear_enc_entry(sc->enclosure_table + i); for (i = 0; i < sc->max_devices; i++) { sc->removal_table[i].dev_handle = 0; sc->removal_table[i].dpm_entry_num = MPR_DPM_BAD_IDX; } memset(sc->dpm_entry_used, 0, sc->max_dpm_entries); memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries); if (sc->is_dpm_enable) { dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); retry_count = 0; retry_read_dpm: if (mpr_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0, dpm_pg0_sz)) { if (retry_count < 3) { retry_count++; goto retry_read_dpm; } printf("%s: dpm page read failed; disabling dpm\n", __func__); sc->is_dpm_enable = 0; } } if (sc->is_dpm_enable) _mapping_process_dpm_pg0(sc); sc->track_mapping_events = 1; return 0; } /** * mpr_mapping_exit - clear mapping table and associated memory * @sc: per adapter object * * Returns nothing. */ void mpr_mapping_exit(struct mpr_softc *sc) { _mapping_flush_dpm_pages(sc); mpr_mapping_free_memory(sc); } /** * mpr_mapping_get_sas_id - assign a target id for sas device * @sc: per adapter object * @sas_address: sas address of the device * @handle: device handle * * Returns valid ID on success or BAD_ID. */ unsigned int mpr_mapping_get_sas_id(struct mpr_softc *sc, uint64_t sas_address, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle && mt_entry->physical_id == sas_address) return mt_entry->id; } return MPR_MAP_BAD_ID; } /** * mpr_mapping_get_sas_id_from_handle - find a target id in mapping table using * only the dev handle. 
This is just a wrapper function for the local function * _mapping_get_mt_idx_from_handle. * @sc: per adapter object * @handle: device handle * * Returns valid ID on success or BAD_ID. */ unsigned int mpr_mapping_get_sas_id_from_handle(struct mpr_softc *sc, u16 handle) { return (_mapping_get_mt_idx_from_handle(sc, handle)); } /** * mpr_mapping_get_raid_id - assign a target id for raid device * @sc: per adapter object * @wwid: world wide identifier for raid volume * @handle: device handle * * Returns valid ID on success or BAD_ID. */ unsigned int mpr_mapping_get_raid_id(struct mpr_softc *sc, u64 wwid, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle && mt_entry->physical_id == wwid) return mt_entry->id; } return MPR_MAP_BAD_ID; } /** * mpr_mapping_get_raid_id_from_handle - find raid device in mapping table * using only the volume dev handle. This is just a wrapper function for the * local function _mapping_get_ir_mt_idx_from_handle. * @sc: per adapter object * @volHandle: volume device handle * * Returns valid ID on success or BAD_ID. */ unsigned int mpr_mapping_get_raid_id_from_handle(struct mpr_softc *sc, u16 volHandle) { return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle)); } /** * mpr_mapping_enclosure_dev_status_change_event - handle enclosure events * @sc: per adapter object * @event_data: event data payload * * Return nothing. */ void mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc, Mpi2EventDataSasEnclDevStatusChange_t *event_data) { u8 enc_idx, missing_count; struct enc_mapping_table *et_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 update_phy_bits = 0; u32 saved_phy_bits; uint64_t temp64_var; if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) != MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) goto out; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) { if (!event_data->NumSlots) { printf("%s: enclosure with handle = 0x%x reported 0 " "slots\n", __func__, le16toh(event_data->EnclosureHandle)); goto out; } temp64_var = event_data->EnclosureLogicalID.High; temp64_var = (temp64_var << 32) | event_data->EnclosureLogicalID.Low; enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var, event_data->PhyBits); if (enc_idx != MPR_ENCTABLE_BAD_IDX) { et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->init_complete && !et_entry->missing_count) { printf("%s: enclosure %d is already present " "with handle = 0x%x\n",__func__, enc_idx, et_entry->enc_handle); goto out; } et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->start_slot = le16toh(event_data->StartSlot); saved_phy_bits = et_entry->phy_bits; et_entry->phy_bits |= le32toh(event_data->PhyBits); if (saved_phy_bits != et_entry->phy_bits) update_phy_bits = 1; if (et_entry->missing_count || update_phy_bits) { et_entry->missing_count = 0; if (sc->is_dpm_enable && et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; missing_count = (u8)(dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK); if (!et_entry->init_complete && ( missing_count || update_phy_bits)) { dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = 
et_entry->phy_bits; sc->dpm_flush_entry[et_entry-> dpm_entry_num] = 1; } } } } else { enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { printf("%s: enclosure can not be added; " "mapping table is full\n", __func__); goto out; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->enclosure_id = event_data-> EnclosureLogicalID.High; et_entry->enclosure_id = ( et_entry->enclosure_id << 32) | event_data->EnclosureLogicalID.Low; et_entry->start_index = MPR_MAPTABLE_BAD_IDX; et_entry->dpm_entry_num = MPR_DPM_BAD_IDX; et_entry->num_slots = le16toh(event_data->NumSlots); et_entry->start_slot = le16toh(event_data->StartSlot); et_entry->phy_bits = le32toh(event_data->PhyBits); } et_entry->init_complete = 1; } else if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) { enc_idx = _mapping_get_enc_idx_from_handle(sc, le16toh(event_data->EnclosureHandle)); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { printf("%s: cannot unmap enclosure %d because it has " "already been deleted", __func__, enc_idx); goto out; } et_entry = &sc->enclosure_table[enc_idx]; if (!et_entry->init_complete) { if (et_entry->missing_count < MPR_MAX_MISSING_COUNT) et_entry->missing_count++; else et_entry->init_complete = 1; } if (!et_entry->missing_count) et_entry->missing_count++; if (sc->is_dpm_enable && !et_entry->init_complete && et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->MappingInformation |= et_entry->missing_count; sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1; } et_entry->init_complete = 1; } out: _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mpr_mapping_topology_change_event - handle topology change events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. 
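 *
 * A rough sketch of how the firmware-event layer would be expected to
 * dispatch into this handler (hypothetical; "fw_event" and its
 * event_data member are assumed names for the event wrapper):
 *
 *	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 *		mpr_mapping_topology_change_event(sc,
 *		    (Mpi2EventDataSasTopologyChangeList_t *)
 *		    fw_event->event_data);
 *		break;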
*/ void mpr_mapping_topology_change_event(struct mpr_softc *sc, Mpi2EventDataSasTopologyChangeList_t *event_data) { struct _map_topology_change topo_change; struct _map_phy_change *phy_change; Mpi2EventSasTopoPhyEntry_t *event_phy_change; u8 i, num_entries; topo_change.enc_handle = le16toh(event_data->EnclosureHandle); topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle); num_entries = event_data->NumEntries; topo_change.num_entries = num_entries; topo_change.start_phy_num = event_data->StartPhyNum; topo_change.num_phys = event_data->NumPhys; topo_change.exp_status = event_data->ExpStatus; event_phy_change = event_data->PHY; topo_change.phy_details = NULL; if (!num_entries) goto out; phy_change = malloc(sizeof(struct _map_phy_change) * num_entries, M_MPR, M_NOWAIT|M_ZERO); topo_change.phy_details = phy_change; if (!phy_change) goto out; for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) { phy_change->dev_handle = le16toh(event_phy_change-> AttachedDevHandle); phy_change->reason = event_phy_change->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; } _mapping_update_missing_count(sc, &topo_change); _mapping_get_dev_info(sc, &topo_change); _mapping_clear_removed_entries(sc); _mapping_add_new_device(sc, &topo_change); out: free(topo_change.phy_details, M_MPR); _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * _mapping_check_update_ir_mt_idx - Check and update IR map table index * @sc: per adapter object * @event_data: event data payload * @evt_idx: current event index * @map_idx: current index and the placeholder for new map table index * @wwid_table: world wide name for volumes in the element table * * Walks the remaining IR events and checks whether any later add/create * event claims the candidate index; if so, advances to the next free * index and rescans, returning failure when no free index is available. * * Returns 0 on success and 1 on failure */ static int _mapping_check_update_ir_mt_idx(struct mpr_softc *sc, Mpi2EventDataIrConfigChangeList_t *event_data, int evt_idx, u32 *map_idx, u64 *wwid_table) { struct dev_mapping_table *mt_entry; u32 st_idx, end_idx, mt_idx = *map_idx; u8 match = 0; Mpi2EventIrConfigElement_t *element; u16 element_flags; int i; mt_entry = &sc->mapping_table[mt_idx]; _mapping_get_ir_maprange(sc, &st_idx, &end_idx); search_again: match = 0; for (i = evt_idx + 1; i < event_data->NumElements; i++) { element = (Mpi2EventIrConfigElement_t *) &event_data->ConfigElement[i]; element_flags = le16toh(element->ElementFlags); if ((element_flags & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) != MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) continue; if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED || element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) { if (mt_entry->physical_id == wwid_table[i]) { match = 1; break; } } } if (match) { do { mt_idx++; if (mt_idx > end_idx) return 1; mt_entry = &sc->mapping_table[mt_idx]; } while (mt_entry->device_info & MPR_MAP_IN_USE); goto search_again; } *map_idx = mt_idx; return 0; } /** * mpr_mapping_ir_config_change_event - handle IR config change list events * @sc: per adapter object * * Returns nothing. 
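 *
 * The handler makes two passes over the element list: the first pass
 * looks up a WWID for each volume element and flags any volume that is
 * already mapped as MPR_MAP_IN_USE; the second pass adds, moves, or
 * retires map entries according to each element's ReasonCode. For
 * foreign-configuration events only the first pass runs.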
*/ void mpr_mapping_ir_config_change_event(struct mpr_softc *sc, Mpi2EventDataIrConfigChangeList_t *event_data) { Mpi2EventIrConfigElement_t *element; int i; u64 *wwid_table; u32 map_idx, flags; struct dev_mapping_table *mt_entry; u16 element_flags; u8 log_full_error = 0; wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPR, M_NOWAIT | M_ZERO); if (!wwid_table) goto out; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; flags = le32toh(event_data->Flags); for (i = 0; i < event_data->NumElements; i++, element++) { element_flags = le16toh(element->ElementFlags); if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) continue; if ((element_flags & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) == MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) { mpr_config_get_volume_wwid(sc, le16toh(element->VolDevHandle), &wwid_table[i]); map_idx = _mapping_get_ir_mt_idx_from_wwid(sc, wwid_table[i]); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->device_info |= MPR_MAP_IN_USE; } } } if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) goto out; else { element = (Mpi2EventIrConfigElement_t *)&event_data-> ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED || element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) { map_idx = _mapping_get_ir_mt_idx_from_wwid (sc, wwid_table[i]); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->channel = MPR_RAID_CHANNEL; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh (element->VolDevHandle); mt_entry->device_info = MPR_DEV_RESERVED | MPR_MAP_IN_USE; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); continue; } map_idx = _mapping_get_free_ir_mt_idx(sc); if (map_idx == MPR_MAPTABLE_BAD_IDX) log_full_error = 1; else if (i < (event_data->NumElements - 1)) { log_full_error = _mapping_check_update_ir_mt_idx (sc, event_data, i, &map_idx, wwid_table); } if (log_full_error) { printf("%s: no space to add the RAID " "volume with handle 0x%04x in " "mapping table\n", __func__, le16toh (element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = wwid_table[i]; mt_entry->channel = MPR_RAID_CHANNEL; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh(element-> VolDevHandle); mt_entry->device_info = MPR_DEV_RESERVED | MPR_MAP_IN_USE; mt_entry->init_complete = 0; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_REMOVED) { map_idx = _mapping_get_ir_mt_idx_from_wwid(sc, wwid_table[i]); if (map_idx == MPR_MAPTABLE_BAD_IDX) { printf("%s: failed to remove a volume " "because it has already been " "removed\n", __func__); continue; } _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { map_idx = _mapping_get_mt_idx_from_handle(sc, le16toh(element->VolDevHandle)); if (map_idx == MPR_MAPTABLE_BAD_IDX) { printf("%s: failed to remove volume " "with handle 0x%04x because it has " "already been removed\n", __func__, le16toh(element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; _mapping_update_ir_missing_cnt(sc, map_idx, element, mt_entry->physical_id); } } 
} out: _mapping_flush_dpm_pages(sc); free(wwid_table, M_MPR); if (sc->pending_map_events) sc->pending_map_events--; } Index: head/sys/dev/mpr/mpr_mapping.h =================================================================== --- head/sys/dev/mpr/mpr_mapping.h (revision 299264) +++ head/sys/dev/mpr/mpr_mapping.h (revision 299265) @@ -1,75 +1,75 @@ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #ifndef _MPR_MAPPING_H #define _MPR_MAPPING_H /** * struct _map_phy_change - PHY entries received in Topology change list * @physical_id: SAS address of the device attached with the associate PHY * @device_info: bitfield provides detailed info about the device * @dev_handle: device handle for the device pointed by this entry * @slot: slot ID * @is_processed: Flag to indicate whether this entry is processed or not * @is_SATA_SSD: 1 if this is a SATA device AND an SSD, 0 otherwise */ struct _map_phy_change { uint64_t physical_id; uint32_t device_info; uint16_t dev_handle; uint16_t slot; uint8_t reason; uint8_t is_processed; uint8_t is_SATA_SSD; uint8_t reserved; }; /** * struct _map_topology_change - entries to be removed from mapping table * @dpm_entry_num: index of this device in device persistent map table * @dev_handle: device handle for the device pointed by this entry */ struct _map_topology_change { uint16_t enc_handle; uint16_t exp_handle; uint8_t num_entries; uint8_t start_phy_num; uint8_t num_phys; uint8_t exp_status; struct _map_phy_change *phy_details; }; extern int mprsas_get_sas_address_for_sata_disk(struct mpr_softc *ioc, u64 *sas_address, u16 handle, u32 device_info, u8 *is_SATA_SSD); #endif Index: head/sys/dev/mpr/mpr_sas.c =================================================================== --- head/sys/dev/mpr/mpr_sas.c (revision 299264) +++ head/sys/dev/mpr/mpr_sas.c (revision 299265) @@ -1,3546 +1,3537 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT3 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 900026 #include #endif #include #include #include #include #include #include #include #include #include #include #include #define MPRSAS_DISCOVERY_TIMEOUT 20 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ /* * static array to check SCSI OpCode for EEDP protection bits */ #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP static uint8_t op_code_prot[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory"); static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *); static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *); static void mprsas_action(struct cam_sim *sim, union ccb *ccb); static void mprsas_poll(struct cam_sim *sim); static void mprsas_scsiio_timeout(void *data); -static void mprsas_abort_complete(struct mpr_softc *sc, - struct mpr_command *cm); +static void mprsas_abort_complete(struct mpr_softc *sc, struct 
mpr_command *cm); static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *); static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *); static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *); -static void mprsas_resetdev_complete(struct mpr_softc *, - struct mpr_command *); +static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *); static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, struct mpr_command *cm); static void mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); #if (__FreeBSD_version < 901503) || \ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, struct ccb_getdev *cgd); static void mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb); #endif static int mprsas_send_portenable(struct mpr_softc *sc); static void mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm); #if __FreeBSD_version >= 900026 -static void mprsas_smpio_complete(struct mpr_softc *sc, - struct mpr_command *cm); -static void mprsas_send_smpcmd(struct mprsas_softc *sassc, - union ccb *ccb, uint64_t sasaddr); +static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm); +static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, + uint64_t sasaddr); static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb); #endif //FreeBSD_version >= 900026 struct mprsas_target * mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start, uint16_t handle) { struct mprsas_target *target; int i; for (i = start; i < sassc->maxtargets; i++) { target = &sassc->targets[i]; if (target->handle == handle) return (target); } return (NULL); } /* we need to freeze the simq during attach and diag reset, to avoid failing * commands before device handles have been found by discovery. Since * discovery involves reading config pages and possibly sending commands, * discovery actions may continue even after we receive the end of discovery * event, so refcount discovery actions instead of assuming we can unfreeze * the simq when we get the event. */ void mprsas_startup_increment(struct mprsas_softc *sassc) { MPR_FUNCTRACE(sassc->sc); if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { if (sassc->startup_refcount++ == 0) { /* just starting, freeze the simq */ mpr_dprint(sassc->sc, MPR_INIT, "%s freezing simq\n", __func__); #if (__FreeBSD_version >= 1000039) || \ ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) xpt_hold_boot(); #endif xpt_freeze_simq(sassc->sim, 1); } mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } void mprsas_release_simq_reinit(struct mprsas_softc *sassc) { if (sassc->flags & MPRSAS_QUEUE_FROZEN) { sassc->flags &= ~MPRSAS_QUEUE_FROZEN; xpt_release_simq(sassc->sim, 1); mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); } } void mprsas_startup_decrement(struct mprsas_softc *sassc) { MPR_FUNCTRACE(sassc->sc); if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { if (--sassc->startup_refcount == 0) { /* finished all discovery-related actions, release * the simq and rescan for the latest topology. 
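 *
 * (Discovery-related code is expected to bracket its work with these
 * calls, e.g.:
 *
 *	mprsas_startup_increment(sassc);
 *	...issue config page reads or other discovery I/O...
 *	mprsas_startup_decrement(sassc);
 *
 * so the simq stays frozen until the last such action drains.)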
*/ mpr_dprint(sassc->sc, MPR_INIT, "%s releasing simq\n", __func__); sassc->flags &= ~MPRSAS_IN_STARTUP; xpt_release_simq(sassc->sim, 1); #if (__FreeBSD_version >= 1000039) || \ ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) xpt_release_boot(); #else mprsas_rescan_target(sassc->sc, NULL); #endif } mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, sassc->startup_refcount); } } /* The firmware requires us to stop sending commands when we're doing task * management, so refcount the TMs and keep the simq frozen when any are in * use. */ struct mpr_command * mprsas_alloc_tm(struct mpr_softc *sc) { struct mpr_command *tm; MPR_FUNCTRACE(sc); tm = mpr_alloc_high_priority_command(sc); return tm; } void mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) { MPR_FUNCTRACE(sc); if (tm == NULL) return; /* * For TM's the devq is frozen for the device. Unfreeze it here and * free the resources used for freezing the devq. Must clear the * INRESET flag as well or scsi I/O will not work. */ if (tm->cm_targ != NULL) { tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET; } if (tm->cm_ccb) { mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n", tm->cm_targ->tid); xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); xpt_free_path(tm->cm_ccb->ccb_h.path); xpt_free_ccb(tm->cm_ccb); } mpr_free_high_priority_command(sc, tm); } void mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) { struct mprsas_softc *sassc = sc->sassc; path_id_t pathid; target_id_t targetid; union ccb *ccb; MPR_FUNCTRACE(sc); pathid = cam_sim_path(sassc->sim); if (targ == NULL) targetid = CAM_TARGET_WILDCARD; else targetid = targ - sassc->targets; /* * Allocate a CCB and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n"); xpt_free_ccb(ccb); return; } if (targetid == CAM_TARGET_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else ccb->ccb_h.func_code = XPT_SCAN_TGT; mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid); xpt_rescan(ccb); } static void mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...) { struct sbuf sb; va_list ap; char str[192]; char path_str[64]; if (cm == NULL) return; /* No need to be in here if debugging isn't enabled */ if ((cm->cm_sc->mpr_debug & level) == 0) return; sbuf_new(&sb, str, sizeof(str), 0); va_start(ap, fmt); if (cm->cm_ccb != NULL) { xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, sizeof(path_str)); sbuf_cat(&sb, path_str); if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { scsi_command_string(&cm->cm_ccb->csio, &sb); sbuf_printf(&sb, "length %d ", cm->cm_ccb->csio.dxfer_len); } } else { sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", cam_sim_name(cm->cm_sc->sassc->sim), cam_sim_unit(cm->cm_sc->sassc->sim), cam_sim_bus(cm->cm_sc->sassc->sim), cm->cm_targ ? 
cm->cm_targ->tid : 0xFFFFFFFF, cm->cm_lun); } sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); sbuf_vprintf(&sb, fmt, ap); sbuf_finish(&sb); mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); va_end(ap); } static void mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; struct mprsas_target *targ; uint16_t handle; MPR_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; if (reply == NULL) { /* XXX retry the remove after the diag reset completes? */ mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " "device 0x%x\n", reply->IOCStatus, handle); mprsas_free_tm(sc, tm); return; } mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", reply->TerminationCount); mpr_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. */ if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_level_valid = 0x0; targ->encl_level = 0x0; targ->connector_name[0] = ' '; targ->connector_name[1] = ' '; targ->connector_name[2] = ' '; targ->connector_name[3] = ' '; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; targ->scsi_req_desc_type = 0; } mprsas_free_tm(sc, tm); } /* * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. * Otherwise Volume Delete is same as Bare Drive Removal. */ void mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *cm; struct mprsas_target *targ = NULL; MPR_FUNCTRACE(sassc->sc); sc = sassc->sc; targ = mprsas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ mpr_dprint(sc, MPR_ERROR, "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); return; } targ->flags |= MPRSAS_TARGET_INREMOVAL; cm = mprsas_alloc_tm(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", __func__); return; } mprsas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; req->DevHandle = targ->handle; req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; cm->cm_complete = mprsas_remove_volume; cm->cm_complete_data = (void *)(uintptr_t)handle; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); mpr_map_command(sc, cm); } /* * The MPT3 firmware performs debounce on the link to avoid transient link * errors and false removals. 
When it does decide that link has been lost * and a device needs to go away, it expects that the host will perform a * target reset and then an op remove. The reset has the side-effect of * aborting any outstanding requests for the device, which is required for * the op-remove to succeed. It's not clear if the host should check for * the device coming back alive after the reset. */ void mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *cm; struct mprsas_target *targ = NULL; MPR_FUNCTRACE(sassc->sc); sc = sassc->sc; targ = mprsas_find_target_by_handle(sassc, 0, handle); if (targ == NULL) { /* FIXME: what is the action? */ /* We don't know about this device? */ mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", __func__, handle); return; } targ->flags |= MPRSAS_TARGET_INREMOVAL; cm = mprsas_alloc_tm(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", __func__); return; } mprsas_rescan_target(sc, targ); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; memset(req, 0, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; cm->cm_targ = targ; cm->cm_data = NULL; cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; cm->cm_complete = mprsas_remove_device; cm->cm_complete_data = (void *)(uintptr_t)handle; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); mpr_map_command(sc, cm); } static void mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; struct mprsas_target *targ; struct mpr_command *next_cm; uint16_t handle; MPR_FUNCTRACE(sc); reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, handle); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { /* XXX retry the remove after the diag reset completes? 
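 * (For now the TM is just freed; the rediscovery pass that follows a
 * diag reset is presumably what notices the device again.)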
*/ mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) { mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " "device 0x%x\n", le16toh(reply->IOCStatus), handle); mprsas_free_tm(sc, tm); return; } mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", le32toh(reply->TerminationCount)); mpr_free_reply(sc, tm->cm_reply_data); tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ /* Reuse the existing command */ req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; memset(req, 0, sizeof(*req)); req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; req->DevHandle = htole16(handle); tm->cm_data = NULL; tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; tm->cm_complete = mprsas_remove_complete; tm->cm_complete_data = (void *)(uintptr_t)handle; mpr_map_command(sc, tm); mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n", targ->tid, handle); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " "connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) { union ccb *ccb; mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm); ccb = tm->cm_complete_data; mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); mprsas_scsiio_complete(sc, tm); } } static void mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; uint16_t handle; struct mprsas_target *targ; struct mprsas_lun *lun; MPR_FUNCTRACE(sc); reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; handle = (uint16_t)(uintptr_t)tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, handle); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { /* most likely a chip reset */ mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device " "0x%04x\n", __func__, handle); mprsas_free_tm(sc, tm); return; } mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__, handle, le16toh(reply->IOCStatus)); /* * Don't clear target if remove fails because things will get confusing. * Leave the devname and sasaddr intact so that we know to avoid reusing * this target id if possible, and so we can assign the same target id * to this device if it comes back in the future. 
*/ if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) { targ = tm->cm_targ; targ->handle = 0x0; targ->encl_handle = 0x0; targ->encl_level_valid = 0x0; targ->encl_level = 0x0; targ->connector_name[0] = ' '; targ->connector_name[1] = ' '; targ->connector_name[2] = ' '; targ->connector_name[3] = ' '; targ->encl_slot = 0x0; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; targ->devinfo = 0x0; targ->flags = 0x0; targ->scsi_req_desc_type = 0; while (!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPR); } } mprsas_free_tm(sc, tm); } static int mprsas_register_events(struct mpr_softc *sc) { uint8_t events[16]; bzero(events, 16); setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_DISCOVERY); setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); setbit(events, MPI2_EVENT_IR_VOLUME); setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); setbit(events, MPI2_EVENT_TEMP_THRESHOLD); mpr_register_events(sc, events, mprsas_evt_handler, NULL, &sc->sassc->mprsas_eh); return (0); } int mpr_attach_sas(struct mpr_softc *sc) { struct mprsas_softc *sassc; cam_status status; int unit, error = 0; MPR_FUNCTRACE(sc); sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO); if (!sassc) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } /* * XXX MaxTargets could change during a reinit. Since we don't * resize the targets[] array during such an event, cache the value * of MaxTargets here so that we don't get into trouble later. This * should move into the reinit logic. */ sassc->maxtargets = sc->facts->MaxTargets; sassc->targets = malloc(sizeof(struct mprsas_target) * sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO); if (!sassc->targets) { device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", __func__, __LINE__); free(sassc, M_MPR); return (ENOMEM); } sc->sassc = sassc; sassc->sc = sc; if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n"); error = ENOMEM; goto out; } unit = device_get_unit(sc->mpr_dev); sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc, unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq); if (sassc->sim == NULL) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n"); error = EINVAL; goto out; } TAILQ_INIT(&sassc->ev_queue); /* Initialize taskqueue for Event Handling */ TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc); sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sassc->ev_tq); taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", device_get_nameunit(sc->mpr_dev)); mpr_lock(sc); /* * XXX There should be a bus for every port on the adapter, but since * we're just going to fake the topology for now, we'll pretend that * everything is just a target on a single bus. */ if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) { mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n", error); mpr_unlock(sc); goto out; } /* * Assume that discovery events will start right away. * * Hold off boot until discovery is complete. 
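 *
 * (On kernels new enough to have it, the boot hold is taken via
 * xpt_hold_boot() in mprsas_startup_increment() and dropped via
 * xpt_release_boot() in mprsas_startup_decrement() once the discovery
 * refcount drains.)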
*/ sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY; sc->sassc->startup_refcount = 0; mprsas_startup_increment(sassc); callout_init(&sassc->discovery_callout, 1 /*mpsafe*/); /* * Register for async events so we can determine the EEDP * capabilities of devices. */ status = xpt_create_path(&sassc->path, /*periph*/NULL, cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { mpr_printf(sc, "Error %#x creating sim path\n", status); sassc->path = NULL; } else { int event; #if (__FreeBSD_version >= 1000006) || \ ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE; #else event = AC_FOUND_DEVICE; #endif /* * Prior to the CAM locking improvements, we can't call * xpt_register_async() with a particular path specified. * * If a path isn't specified, xpt_register_async() will * generate a wildcard path and acquire the XPT lock while * it calls xpt_action() to execute the XPT_SASYNC_CB CCB. * It will then drop the XPT lock once that is done. * * If a path is specified for xpt_register_async(), it will * not acquire and drop the XPT lock around the call to * xpt_action(). xpt_action() asserts that the caller * holds the SIM lock, so the SIM lock has to be held when * calling xpt_register_async() when the path is specified. * * But xpt_register_async calls xpt_for_all_devices(), * which calls xptbustraverse(), which will acquire each * SIM lock. When it traverses our particular bus, it will * necessarily acquire the SIM lock, which will lead to a * recursive lock acquisition. * * The CAM locking changes fix this problem by acquiring * the XPT topology lock around bus traversal in * xptbustraverse(), so the caller can hold the SIM lock * and it does not cause a recursive lock acquisition. * * These __FreeBSD_version values are approximate, especially * for stable/10, which is two months later than the actual * change. */ #if (__FreeBSD_version < 1000703) || \ ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) mpr_unlock(sc); status = xpt_register_async(event, mprsas_async, sc, NULL); mpr_lock(sc); #else status = xpt_register_async(event, mprsas_async, sc, sassc->path); #endif if (status != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "Error %#x registering async handler for " "AC_ADVINFO_CHANGED events\n", status); xpt_free_path(sassc->path); sassc->path = NULL; } } if (status != CAM_REQ_CMP) { /* * EEDP use is the exception, not the rule. * Warn the user, but do not fail to attach. */ mpr_printf(sc, "EEDP capabilities disabled.\n"); } mpr_unlock(sc); mprsas_register_events(sc); out: if (error) mpr_detach_sas(sc); return (error); } int mpr_detach_sas(struct mpr_softc *sc) { struct mprsas_softc *sassc; struct mprsas_lun *lun, *lun_tmp; struct mprsas_target *targ; int i; MPR_FUNCTRACE(sc); if (sc->sassc == NULL) return (0); sassc = sc->sassc; mpr_deregister_events(sc, sassc->mprsas_eh); /* * Drain and free the event handling taskqueue with the lock * unheld so that any parallel processing tasks drain properly * without deadlocking. */ if (sassc->ev_tq != NULL) taskqueue_free(sassc->ev_tq); /* Make sure CAM doesn't wedge if we had to bail out early. 
*/ mpr_lock(sc); /* Deregister our async handler */ if (sassc->path != NULL) { xpt_register_async(0, mprsas_async, sc, sassc->path); xpt_free_path(sassc->path); sassc->path = NULL; } if (sassc->flags & MPRSAS_IN_STARTUP) xpt_release_simq(sassc->sim, 1); if (sassc->sim != NULL) { xpt_bus_deregister(cam_sim_path(sassc->sim)); cam_sim_free(sassc->sim, FALSE); } sassc->flags |= MPRSAS_SHUTDOWN; mpr_unlock(sc); if (sassc->devq != NULL) cam_simq_free(sassc->devq); for (i = 0; i < sassc->maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPR); } } free(sassc->targets, M_MPR); free(sassc, M_MPR); sc->sassc = NULL; return (0); } void mprsas_discovery_end(struct mprsas_softc *sassc) { struct mpr_softc *sc = sassc->sc; MPR_FUNCTRACE(sc); if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING) callout_stop(&sassc->discovery_callout); } static void mprsas_action(struct cam_sim *sim, union ccb *ccb) { struct mprsas_softc *sassc; sassc = cam_sim_softc(sim); MPR_FUNCTRACE(sassc->sc); mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n", ccb->ccb_h.func_code); mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; #if (__FreeBSD_version >= 1000039) || \ ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN; #else cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; #endif cpi->hba_eng_cnt = 0; cpi->max_target = sassc->maxtargets - 1; cpi->max_lun = 255; cpi->initiator_id = sassc->maxtargets - 1; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); /* * XXXSLM-I think this needs to change based on config page or * something instead of hardcoded to 150000. */ cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC; #if __FreeBSD_version >= 800001 /* * XXXSLM-probably need to base this number on max SGL's and * page size. 
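 *
 * (A hypothetical version of that calculation, assuming a field such
 * as sc->max_sgl_entries holding the per-request S/G segment limit:
 *
 *	cpi->maxio = (sc->max_sgl_entries - 1) * PAGE_SIZE;
 *
 * one segment is held back so a misaligned buffer still fits.)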
*/ cpi->maxio = 256 * 1024; #endif mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; struct mprsas_target *targ; cts = &ccb->cts; sas = &cts->xport_specific.sas; scsi = &cts->proto_specific.scsi; KASSERT(cts->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n", cts->ccb_h.target_id)); targ = &sassc->targets[cts->ccb_h.target_id]; if (targ->handle == 0x0) { mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; } cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; switch (targ->linkrate) { case 0x08: sas->bitrate = 150000; break; case 0x09: sas->bitrate = 300000; break; case 0x0a: sas->bitrate = 600000; break; case 0x0b: sas->bitrate = 1200000; break; default: sas->valid = 0; } cts->protocol = PROTO_SCSI; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, /*extended*/1); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_RESET_DEV: - mpr_dprint(sassc->sc, MPR_XINFO, - "mprsas_action XPT_RESET_DEV\n"); + mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action " + "XPT_RESET_DEV\n"); mprsas_action_resetdev(sassc, ccb); return; case XPT_RESET_BUS: case XPT_ABORT: case XPT_TERM_IO: - mpr_dprint(sassc->sc, MPR_XINFO, - "mprsas_action faking success for abort or reset\n"); + mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success " + "for abort or reset\n"); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); break; case XPT_SCSI_IO: mprsas_action_scsiio(sassc, ccb); return; #if __FreeBSD_version >= 900026 case XPT_SMP_IO: mprsas_action_smpio(sassc, ccb); return; #endif default: mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL); break; } xpt_done(ccb); } static void mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code, target_id_t target_id, lun_id_t lun_id) { path_id_t path_id = cam_sim_path(sc->sassc->sim); struct cam_path *path; mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__, ac_code, target_id, (uintmax_t)lun_id); if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "unable to create path for reset " "notification\n"); return; } xpt_async(ac_code, path, NULL); xpt_free_path(path); } static void mprsas_complete_all_commands(struct mpr_softc *sc) { struct mpr_command *cm; int i; int completed; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); /* complete all commands with a NULL reply */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; cm->cm_reply = NULL; completed = 0; if (cm->cm_flags & MPR_CM_FLAGS_POLLED) cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mprsas_log_command(cm, MPR_RECOVERY, - "completing cm %p state %x ccb %p for diag " - "reset\n", cm, cm->cm_state, cm->cm_ccb); + "completing cm %p state %x ccb %p for diag reset\n", + cm, cm->cm_state, cm->cm_ccb); cm->cm_complete(sc, cm); completed = 1; } if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { mprsas_log_command(cm, MPR_RECOVERY, "waking up cm %p state %x ccb %p for diag reset\n", cm, cm->cm_state, cm->cm_ccb); wakeup(cm); completed = 1; } if (cm->cm_sc->io_cmds_active != 0) { cm->cm_sc->io_cmds_active--; } else { mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: " "io_cmds_active is out of sync - resynching to " "0\n"); } if ((completed == 0) && (cm->cm_state != 
MPR_CM_STATE_FREE)) { /* this should never happen, but if it does, log */ mprsas_log_command(cm, MPR_RECOVERY, "cm %p state %x flags 0x%x ccb %p during diag " "reset\n", cm, cm->cm_state, cm->cm_flags, cm->cm_ccb); } } } void mprsas_handle_reinit(struct mpr_softc *sc) { int i; /* Go back into startup mode and freeze the simq, so that CAM * doesn't send any commands until after we've rediscovered all * targets and found the proper device handles for them. * * After the reset, portenable will trigger discovery, and after all * discovery-related activities have finished, the simq will be * released. */ mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__); sc->sassc->flags |= MPRSAS_IN_STARTUP; sc->sassc->flags |= MPRSAS_IN_DISCOVERY; mprsas_startup_increment(sc->sassc); /* notify CAM of a bus reset */ mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); /* complete and cleanup after all outstanding commands */ mprsas_complete_all_commands(sc); mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n", __func__, sc->sassc->startup_refcount); /* zero all the target handles, since they may change after the * reset, and we have to rediscover all the targets and use the new * handles. */ for (i = 0; i < sc->sassc->maxtargets; i++) { if (sc->sassc->targets[i].outstanding != 0) mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", i, sc->sassc->targets[i].outstanding); sc->sassc->targets[i].handle = 0x0; sc->sassc->targets[i].exp_dev_handle = 0x0; sc->sassc->targets[i].outstanding = 0; sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET; } } static void mprsas_tm_timeout(void *data) { struct mpr_command *tm = data; struct mpr_softc *sc = tm->cm_sc; mtx_assert(&sc->mpr_mtx, MA_OWNED); - mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, - "task mgmt %p timed out\n", tm); + mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed " + "out\n", tm); mpr_reinit(sc); } static void -mprsas_logical_unit_reset_complete(struct mpr_softc *sc, - struct mpr_command *tm) +mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; unsigned int cm_count = 0; struct mpr_command *cm; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! " "This should not happen!\n", __func__, tm->cm_flags); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { - mprsas_log_command(tm, MPR_RECOVERY, - "NULL reset reply for tm %p\n", tm); + mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm " + "%p\n", tm); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mpr_reinit(sc); } return; } mprsas_log_command(tm, MPR_RECOVERY, "logical unit reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); /* See if there are any outstanding commands for this LUN. * This could be made more efficient by using a per-LU data * structure of some sort. 
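 *
 * (For example, a hypothetical per-LUN outstanding counter, raised
 * when a command is queued in mprsas_action_scsiio() and dropped in
 * mprsas_scsiio_complete(), would turn this walk into a single read.)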
*/ TAILQ_FOREACH(cm, &targ->commands, cm_link) { if (cm->cm_lun == tm->cm_lun) cm_count++; } if (cm_count == 0) { mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, "logical unit %u finished recovery after reset\n", tm->cm_lun, tm); mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, tm->cm_lun); /* we've finished recovery for this logical unit. check and * see if some other logical unit has a timedout command * that needs to be processed. */ cm = TAILQ_FIRST(&targ->timedout_commands); if (cm) { mprsas_send_abort(sc, tm, cm); } else { targ->tm = NULL; mprsas_free_tm(sc, tm); } } else { /* if we still have commands for this LUN, the reset * effectively failed, regardless of the status reported. * Escalate to a target reset. */ mprsas_log_command(tm, MPR_RECOVERY, "logical unit reset complete for tm %p, but still have %u " "command(s)\n", tm, cm_count); mprsas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); } } static void mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target " "reset! This should not happen!\n", __func__, tm->cm_flags); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { - mprsas_log_command(tm, MPR_RECOVERY, - "NULL reset reply for tm %p\n", tm); + mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm " + "%p\n", tm); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. */ mpr_reinit(sc); } return; } mprsas_log_command(tm, MPR_RECOVERY, "target reset status 0x%x code 0x%x count %u\n", le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); if (targ->outstanding == 0) { /* we've finished recovery for this target and all * of its logical units. */ mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, "recovery finished after target reset\n"); mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* after a target reset, if this target still has * outstanding commands, the reset effectively failed, * regardless of the status reported. escalate. 
*/ mprsas_log_command(tm, MPR_RECOVERY, "target reset complete for tm %p, but still have %u " "command(s)\n", tm, targ->outstanding); mpr_reinit(sc); } } #define MPR_RESET_TIMEOUT 30 int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *target; int err; target = tm->cm_targ; if (target->handle == 0) { mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id " "%d\n", __func__, target->tid); return -1; } req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(target->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = type; if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { /* XXX Need to handle invalid LUNs */ MPR_SET_LUN(req->LUN, tm->cm_lun); tm->cm_targ->logical_unit_resets++; mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, "sending logical unit reset\n"); tm->cm_complete = mprsas_logical_unit_reset_complete; mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun); } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { /* * Target reset method = * SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_targ->target_resets++; mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, "sending target reset\n"); tm->cm_complete = mprsas_target_reset_complete; mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD); } else { mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type); return -1; } mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid, target->handle); if (target->encl_level_valid) { mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " "connector name (%4s)\n", target->encl_level, target->encl_slot, target->connector_name); } tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete_data = (void *)tm; callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz, mprsas_tm_timeout, tm); err = mpr_map_command(sc, tm); if (err) mprsas_log_command(tm, MPR_RECOVERY, "error %d sending reset type %u\n", err, type); return err; } static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm) { struct mpr_command *cm; MPI2_SCSI_TASK_MANAGE_REPLY *reply; MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; callout_stop(&tm->cm_callout); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; targ = tm->cm_targ; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mprsas_log_command(tm, MPR_RECOVERY, "cm_flags = %#x for abort %p TaskMID %u!\n", tm->cm_flags, tm, le16toh(req->TaskMID)); mprsas_free_tm(sc, tm); return; } if (reply == NULL) { mprsas_log_command(tm, MPR_RECOVERY, "NULL abort reply for tm %p TaskMID %u\n", tm, le16toh(req->TaskMID)); if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { /* this completion was due to a reset, just cleanup */ targ->tm = NULL; mprsas_free_tm(sc, tm); } else { /* we should have gotten a reply. 
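 * (A NULL reply here without MPR_FLAGS_DIAGRESET set means the abort
 * TM itself was lost, so a full controller reinit is the only safe
 * recovery.)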
*/ mpr_reinit(sc); } return; } mprsas_log_command(tm, MPR_RECOVERY, "abort TaskMID %u status 0x%x code 0x%x count %u\n", le16toh(req->TaskMID), le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), le32toh(reply->TerminationCount)); cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); if (cm == NULL) { /* if there are no more timedout commands, we're done with * error recovery for this target. */ mprsas_log_command(tm, MPR_RECOVERY, "finished recovery after aborting TaskMID %u\n", le16toh(req->TaskMID)); targ->tm = NULL; mprsas_free_tm(sc, tm); } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { /* abort success, but we have more timedout commands to abort */ mprsas_log_command(tm, MPR_RECOVERY, "continuing recovery after aborting TaskMID %u\n", le16toh(req->TaskMID)); mprsas_send_abort(sc, tm, cm); } else { /* we didn't get a command completion, so the abort * failed as far as we're concerned. escalate. */ mprsas_log_command(tm, MPR_RECOVERY, "abort failed for TaskMID %u tm %p\n", le16toh(req->TaskMID), tm); mprsas_send_reset(sc, tm, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); } } #define MPR_ABORT_TIMEOUT 5 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, struct mpr_command *cm) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mprsas_target *targ; int err; targ = cm->cm_targ; if (targ->handle == 0) { mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id %d\n", __func__, cm->cm_ccb->ccb_h.target_id); return -1; } mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, "Aborting command %p\n", cm); req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; /* XXX Need to handle invalid LUNs */ MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); req->TaskMID = htole16(cm->cm_desc.Default.SMID); tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete = mprsas_abort_complete; tm->cm_complete_data = (void *)tm; tm->cm_targ = cm->cm_targ; tm->cm_lun = cm->cm_lun; callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz, mprsas_tm_timeout, tm); targ->aborts++; mpr_dprint(sc, MPR_INFO, "Sending abort from %s for target ID %d\n", __func__, targ->tid); mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun); err = mpr_map_command(sc, tm); if (err) mprsas_log_command(tm, MPR_RECOVERY, "error %d sending abort for cm %p SMID %u\n", err, cm, le16toh(req->TaskMID)); return err; } static void mprsas_scsiio_timeout(void *data) { struct mpr_softc *sc; struct mpr_command *cm; struct mprsas_target *targ; cm = (struct mpr_command *)data; sc = cm->cm_sc; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm); /* * Run the interrupt handler to make sure it's not pending. This * isn't perfect because the command could have already completed * and been re-used, though this is unlikely.
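Note the pattern used by this timeout handler: a per-command callout is armed at submit time (callout_reset) and cancelled on completion (callout_stop), and the handler first drains the completion path because the timer can race a completion already sitting on the reply queue. A self-contained sketch of that check, with hypothetical names that stand in for the driver's structures:

#include <stdbool.h>

enum cmd_state { CMD_INFLIGHT, CMD_FREE, CMD_TIMEDOUT };

struct cmd {
	enum cmd_state state;
};

/* Stand-in for mpr_intr_locked(): drain completions already posted. */
static void
poll_completions(void)
{
}

/*
 * Sketch of the race check above: only declare a timeout if the
 * command is still in flight after polling the completion path.
 */
static bool
timeout_fired(struct cmd *c)
{
	poll_completions();
	if (c->state == CMD_FREE)
		return (false);		/* completed just before the timer fired */
	c->state = CMD_TIMEDOUT;	/* hand off to abort/reset recovery */
	return (true);
}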
*/ mpr_intr_locked(sc); if (cm->cm_state == MPR_CM_STATE_FREE) { mprsas_log_command(cm, MPR_XINFO, "SCSI command %p almost timed out\n", cm); return; } if (cm->cm_ccb == NULL) { mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n"); return; } targ = cm->cm_targ; targ->timeouts++; - mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p " - "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, - targ->handle); + mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p target " + "%u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, targ->handle); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, " "connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } /* XXX first, check the firmware state, to see if it's still * operational. if not, do a diag reset. */ mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); cm->cm_state = MPR_CM_STATE_TIMEDOUT; TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); if (targ->tm != NULL) { /* target already in recovery, just queue up another * timedout command to be processed later. */ mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for " "processing by tm %p\n", cm, targ->tm); } else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n", cm, targ->tm); /* start recovery by aborting the first timedout command */ mprsas_send_abort(sc, targ->tm, cm); } else { /* XXX queue this target up for recovery once a TM becomes * available. The firmware only has a limited number of * HighPriority credits for the high priority requests used * for task management, and we ran out. * * Isilon: don't worry about this for now, since we have * more credits than disks in an enclosure, and limit * ourselves to one TM per target for recovery. */ - mpr_dprint(sc, MPR_RECOVERY, - "timedout cm %p failed to allocate a tm\n", cm); + mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to " + "allocate a tm\n", cm); } } static void mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) { MPI2_SCSI_IO_REQUEST *req; struct ccb_scsiio *csio; struct mpr_softc *sc; struct mprsas_target *targ; struct mprsas_lun *lun; struct mpr_command *cm; uint8_t i, lba_byte, *ref_tag_addr; uint16_t eedp_flags; uint32_t mpi_control; sc = sassc->sc; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); csio = &ccb->csio; KASSERT(csio->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SCSI_IO\n", csio->ccb_h.target_id)); targ = &sassc->targets[csio->ccb_h.target_id]; mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); if (targ->handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", __func__, csio->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) { mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO " "supported %u\n", __func__, csio->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * Sometimes, it is possible to get a command that is not "In * Progress" and was actually aborted by the upper layer. Check for * this here and complete the command without error. */ if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for " "target %u\n", __func__, csio->ccb_h.target_id); xpt_done(ccb); return; } /* * If devinfo is 0 this will be a volume. 
In that case don't tell CAM * that the volume has timed out. We want volumes to be enumerated * until they are deleted/removed, not just failed. */ if (targ->flags & MPRSAS_TARGET_INREMOVAL) { if (targ->devinfo == 0) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); xpt_done(ccb); return; } if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) { mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } /* * If target has a reset in progress, freeze the devq and return. The * devq will be released when the TM reset is finished. */ if (targ->flags & MPRSAS_TARGET_INRESET) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n", __func__, targ->tid); xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } cm = mpr_alloc_command(sc); if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) { if (cm != NULL) { mpr_free_command(sc, cm); } if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPRSAS_QUEUE_FROZEN; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return; } req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; req->MsgFlags = 0; req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); req->SenseBufferLength = MPR_SENSE_LEN; req->SGLFlags = 0; req->ChainOffset = 0; req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ req->SGLOffset1= 0; req->SGLOffset2= 0; req->SGLOffset3= 0; req->SkipCount = 0; req->DataLength = htole32(csio->dxfer_len); req->BidirectionalDataLength = 0; req->IoFlags = htole16(csio->cdb_len); req->EEDPFlags = 0; /* Note: BiDirectional transfers are not supported */ switch (csio->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: mpi_control = MPI2_SCSIIO_CONTROL_READ; cm->cm_flags |= MPR_CM_FLAGS_DATAIN; break; case CAM_DIR_OUT: mpi_control = MPI2_SCSIIO_CONTROL_WRITE; cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; break; case CAM_DIR_NONE: default: mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; break; } if (csio->cdb_len == 32) mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; /* * It looks like the hardware doesn't require an explicit tag * number for each transaction. SAM Task Management not supported * at the moment. */ switch (csio->tag_action) { case MSG_HEAD_OF_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ORDERED_Q_TAG: mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_ACA_TASK: mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; break; case CAM_TAG_ACTION_NONE: case MSG_SIMPLE_Q_TAG: default: mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; break; } mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; req->Control = htole32(mpi_control); if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { mpr_free_command(sc, cm); mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID); xpt_done(ccb); return; } if (csio->ccb_h.flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); req->IoFlags = htole16(csio->cdb_len); /* * Check if EEDP is supported and enabled. If it is then check if the * SCSI opcode could be using EEDP. If so, make sure the LUN exists and * is formatted for EEDP support. If all of this is true, set CDB up * for EEDP transfer. 
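For reference, the EEDP reference-tag seeding that follows takes the low four bytes of the logical block address straight out of the CDB: the LBA field starts at byte 2 for 10- and 12-byte CDBs, and the low half of the 8-byte LBA sits at byte 6 for 16-byte CDBs. A standalone sketch of that extraction (hypothetical helper, not the driver's code):

#include <stdint.h>

/*
 * Illustrative only: derive the 32-bit EEDP primary reference tag from
 * the (big-endian) LBA field of a SCSI CDB, mirroring the lba_byte
 * selection used below.
 */
static uint32_t
eedp_ref_tag_from_cdb(const uint8_t *cdb, int cdb_len)
{
	int lba_byte = (cdb_len == 16) ? 6 : 2;

	return ((uint32_t)cdb[lba_byte] << 24 |
	    (uint32_t)cdb[lba_byte + 1] << 16 |
	    (uint32_t)cdb[lba_byte + 2] << 8 |
	    (uint32_t)cdb[lba_byte + 3]);
}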
*/ eedp_flags = op_code_prot[req->CDB.CDB32[0]]; if (sc->eedp_enabled && eedp_flags) { SLIST_FOREACH(lun, &targ->luns, lun_link) { if (lun->lun_id == csio->ccb_h.target_lun) { break; } } if ((lun != NULL) && (lun->eedp_formatted)) { req->EEDPBlockSize = htole16(lun->eedp_block_size); eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); req->EEDPFlags = htole16(eedp_flags); /* * If CDB less than 32, fill in Primary Ref Tag with * low 4 bytes of LBA. If CDB is 32, tag stuff is * already there. Also, set protection bit. FreeBSD * currently does not support CDBs bigger than 16, but * the code doesn't hurt, and will be here for the * future. */ if (csio->cdb_len != 32) { lba_byte = (csio->cdb_len == 16) ? 6 : 2; ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. PrimaryReferenceTag; for (i = 0; i < 4; i++) { *ref_tag_addr = req->CDB.CDB32[lba_byte + i]; ref_tag_addr++; } req->CDB.EEDP32.PrimaryReferenceTag = htole32(req-> CDB.EEDP32.PrimaryReferenceTag); req->CDB.EEDP32.PrimaryApplicationTagMask = 0xFFFF; req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 0x20; } else { eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; req->EEDPFlags = htole16(eedp_flags); req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 0x1F) | 0x20; } } } cm->cm_length = csio->dxfer_len; if (cm->cm_length != 0) { cm->cm_data = ccb; cm->cm_flags |= MPR_CM_FLAGS_USE_CCB; } else { cm->cm_data = NULL; } cm->cm_sge = &req->SGL; cm->cm_sglsize = (32 - 24) * 4; cm->cm_complete = mprsas_scsiio_complete; cm->cm_complete_data = ccb; cm->cm_targ = targ; cm->cm_lun = csio->ccb_h.target_lun; cm->cm_ccb = ccb; /* * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0) * and set descriptor type. */ if (targ->scsi_req_desc_type == MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; cm->cm_desc.FastPathSCSIIO.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle); } else { cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); } callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, mprsas_scsiio_timeout, cm, 0); targ->issued++; targ->outstanding++; TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); ccb->ccb_h.status |= CAM_SIM_QUEUED; mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", __func__, cm, ccb, targ->outstanding); mpr_map_command(sc, cm); return; } static void mpr_response_code(struct mpr_softc *sc, u8 response_code) { char *desc; switch (response_code) { case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: desc = "task management request completed"; break; case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: desc = "invalid frame"; break; case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: desc = "task management request not supported"; break; case MPI2_SCSITASKMGMT_RSP_TM_FAILED: desc = "task management request failed"; break; case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: desc = "task management request succeeded"; break; case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: desc = "invalid lun"; break; case 0xA: desc = "overlapped tag attempted"; break; case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; default: desc = "unknown"; break; } mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code, desc); } /** * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request */ static void mpr_sc_failed_io_info(struct mpr_softc *sc, struct
ccb_scsiio *csio, Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16toh(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; u8 scsi_status = mpi_reply->SCSIStatus; char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; char *desc_scsi_state = sc->tmp_string; u32 log_info = le32toh(mpi_reply->IOCLogInfo); if (log_info == 0x31170000) return; switch (ioc_status) { case MPI2_IOCSTATUS_SUCCESS: desc_ioc_state = "success"; break; case MPI2_IOCSTATUS_INVALID_FUNCTION: desc_ioc_state = "invalid function"; break; case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: desc_ioc_state = "scsi recovered error"; break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: desc_ioc_state = "scsi invalid dev handle"; break; case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: desc_ioc_state = "scsi device not there"; break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: desc_ioc_state = "scsi data overrun"; break; case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: desc_ioc_state = "scsi data underrun"; break; case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: desc_ioc_state = "scsi io data error"; break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: desc_ioc_state = "scsi protocol error"; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: desc_ioc_state = "scsi task terminated"; break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: desc_ioc_state = "scsi residual mismatch"; break; case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: desc_ioc_state = "scsi task mgmt failed"; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: desc_ioc_state = "scsi ioc terminated"; break; case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: desc_ioc_state = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: desc_ioc_state = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: desc_ioc_state = "eedp app tag error"; break; default: desc_ioc_state = "unknown"; break; } switch (scsi_status) { case MPI2_SCSI_STATUS_GOOD: desc_scsi_status = "good"; break; case MPI2_SCSI_STATUS_CHECK_CONDITION: desc_scsi_status = "check condition"; break; case MPI2_SCSI_STATUS_CONDITION_MET: desc_scsi_status = "condition met"; break; case MPI2_SCSI_STATUS_BUSY: desc_scsi_status = "busy"; break; case MPI2_SCSI_STATUS_INTERMEDIATE: desc_scsi_status = "intermediate"; break; case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: desc_scsi_status = "intermediate condmet"; break; case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: desc_scsi_status = "reservation conflict"; break; case MPI2_SCSI_STATUS_COMMAND_TERMINATED: desc_scsi_status = "command terminated"; break; case MPI2_SCSI_STATUS_TASK_SET_FULL: desc_scsi_status = "task set full"; break; case MPI2_SCSI_STATUS_ACA_ACTIVE: desc_scsi_status = "aca active"; break; case MPI2_SCSI_STATUS_TASK_ABORTED: desc_scsi_status = "task aborted"; break; default: desc_scsi_status = "unknown"; break; } desc_scsi_state[0] = '\0'; if (!scsi_state) desc_scsi_state = " "; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) strcat(desc_scsi_state, "response info "); if (scsi_state & MPI2_SCSI_STATE_TERMINATED) strcat(desc_scsi_state, "state terminated "); if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) strcat(desc_scsi_state, "no status "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) strcat(desc_scsi_state, "autosense failed "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) strcat(desc_scsi_state, "autosense valid "); mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", 
le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); if (targ->encl_level_valid) { mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, " "connector name (%4s)\n", targ->encl_level, targ->encl_slot, targ->connector_name); } /* TODO: add more detail about underflow data here. */ mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), " "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); if (sc->mpr_debug & MPR_XINFO && scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n"); scsi_sense_print(csio); mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n"); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32toh(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; mpr_response_code(sc, response_bytes[0]); } } static void mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_SCSI_IO_REPLY *rep; union ccb *ccb; struct ccb_scsiio *csio; struct mprsas_softc *sassc; struct scsi_vpd_supported_page_list *vpd_list = NULL; u8 *TLR_bits, TLR_on; int dir = 0, i; u16 alloc_len; struct mprsas_target *target; target_id_t target_id; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_TRACE, "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, cm->cm_targ->outstanding); callout_stop(&cm->cm_callout); mtx_assert(&sc->mpr_mtx, MA_OWNED); sassc = sc->sassc; ccb = cm->cm_complete_data; csio = &ccb->csio; target_id = csio->ccb_h.target_id; rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; /* * XXX KDM if the chain allocation fails, does it matter if we do * the sync and unload here? It is simpler to do it in every case, * assuming it doesn't cause problems. */ if (cm->cm_data != NULL) { if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) dir = BUS_DMASYNC_POSTREAD; else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) dir = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } cm->cm_targ->completed++; cm->cm_targ->outstanding--; TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED); if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) { TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); if (cm->cm_reply != NULL) mprsas_log_command(cm, MPR_RECOVERY, "completed timedout cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mprsas_log_command(cm, MPR_RECOVERY, "completed timedout cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if (cm->cm_targ->tm != NULL) { if (cm->cm_reply != NULL) mprsas_log_command(cm, MPR_RECOVERY, "completed cm %p ccb %p during recovery " "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); else mprsas_log_command(cm, MPR_RECOVERY, "completed cm %p ccb %p during recovery\n", cm, cm->cm_ccb); } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { mprsas_log_command(cm, MPR_RECOVERY, "reset completed cm %p ccb %p\n", cm, cm->cm_ccb); } if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { /* * We ran into an error after we tried to map the command, * so we're getting a callback without queueing the command * to the hardware. So we set the status here, and it will * be retained below.
We'll go through the "fast path", * because there can be no reply when we haven't actually * gone out to the hardware. */ mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ); /* * Currently the only error included in the mask is * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of * chain frames. We need to freeze the queue until we get * a command that completed without this error, which will * hopefully have some chain frames attached that we can * use. If we wanted to get smarter about it, we would * only unfreeze the queue in this condition when we're * sure that we're getting some chain frames back. That's * probably unnecessary. */ if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { xpt_freeze_simq(sassc->sim, 1); sassc->flags |= MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_INFO, "Error sending command, " "freezing SIM queue\n"); } } /* * If this is a Start Stop Unit command and it was issued by the driver * during shutdown, decrement the refcount to account for all of the * commands that were sent. All SSU commands should be completed before * shutdown completes, meaning SSU_refcount will be 0 after SSU_started * is TRUE. */ if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) { mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n"); sc->SSU_refcount--; } /* Take the fast path to completion */ if (cm->cm_reply == NULL) { if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) { if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET); else { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); csio->scsi_status = SCSI_STATUS_OK; } if (sassc->flags & MPRSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_XINFO, "Unfreezing SIM queue\n"); } } /* * There are two scenarios where the status won't be * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is * set, the second is in the MPR_FLAGS_DIAGRESET above. */ if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { /* * Freeze the dev queue so that commands are * executed in the correct order after error * recovery. */ ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mpr_free_command(sc, cm); xpt_done(ccb); return; } mprsas_log_command(cm, MPR_XINFO, "ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: csio->resid = cm->cm_length - le32toh(rep->TransferCount); /* FALLTHROUGH */ case MPI2_IOCSTATUS_SUCCESS: case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) mprsas_log_command(cm, MPR_XINFO, "recovered error\n"); /* Completion failed at the transport level. */ if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | MPI2_SCSI_STATE_TERMINATED)) { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } /* In a modern packetized environment, an autosense failure * implies that there's not much else that can be done to * recover the command. */ if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL); break; } /* * CAM doesn't care about SAS Response Info data, but if this * state is set, check whether TLR should be done. If not, clear * the TLR_bits for the target.
*/ if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) == MPR_SCSI_RI_INVALID_FRAME)) { sc->mapping_table[target_id].TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /* * Intentionally override the normal SCSI status reporting * for these two cases. These are likely to happen in a * multi-initiator environment, and we want to make sure that * CAM retries these commands rather than fail them. */ if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; } /* Handle normal status and sense */ csio->scsi_status = rep->SCSIStatus; if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR); if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { int sense_len, returned_sense_len; returned_sense_len = min(le32toh(rep->SenseCount), sizeof(struct scsi_sense_data)); if (returned_sense_len < csio->sense_len) csio->sense_resid = csio->sense_len - returned_sense_len; else csio->sense_resid = 0; sense_len = min(returned_sense_len, csio->sense_len - csio->sense_resid); bzero(&csio->sense_data, sizeof(csio->sense_data)); bcopy(cm->cm_sense, &csio->sense_data, sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } /* * Check if this is an INQUIRY command. If it's a VPD inquiry, * and it's page code 0 (Supported Page List), and there is * inquiry data, and this is for a sequential access device, and * the device is an SSP target, and TLR is supported by the * controller, turn the TLR_bits value ON if page 0x90 is * supported. */ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && (csio->cdb_io.cdb_bytes[1] & SI_EVPD) && (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) && ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && (csio->data_ptr != NULL) && ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && (sc->control_TLR) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { vpd_list = (struct scsi_vpd_supported_page_list *) csio->data_ptr; TLR_bits = &sc->mapping_table[target_id].TLR_bits; *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) + csio->cdb_io.cdb_bytes[4]; alloc_len -= csio->resid; for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { if (vpd_list->list[i] == 0x90) { *TLR_bits = TLR_on; break; } } } /* * If this is a SATA direct-access end device, mark it so that * a SCSI StartStopUnit command will be sent to it when the * driver is being shutdown. */ if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && (sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && ((sc->mapping_table[target_id].device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == MPI2_SAS_DEVICE_INFO_END_DEVICE)) { target = &sassc->targets[target_id]; target->supports_SSU = TRUE; mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n", target_id); } break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * If devinfo is 0 this will be a volume. In that case don't * tell CAM that the volume is not there. We want volumes to * be enumerated until they are deleted/removed, not just * failed. 
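The TLR check above scans the returned Supported VPD Pages payload for page 0x90 (Protocol Specific Logical Unit Information, per SPC); TLR is only turned on for SSP targets that advertise it. A standalone sketch of that scan, with hypothetical names:

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative only: report whether VPD page 0x90 appears in the page
 * list returned by INQUIRY VPD page 0x00 (Supported VPD Pages).
 */
static int
vpd_supports_page_0x90(const uint8_t *page_list, size_t nentries)
{
	size_t i;

	for (i = 0; i < nentries; i++)
		if (page_list[i] == 0x90)
			return (1);
	return (0);
}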
*/ if (cm->cm_targ->devinfo == 0) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); break; case MPI2_IOCSTATUS_INVALID_SGL: mpr_print_scsiio_cmd(sc, cm); mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR); break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: /* * This is one of the responses that comes back when an I/O * has been aborted. If it is because of a timeout that we * initiated, just set the status to CAM_CMD_TIMEOUT. * Otherwise set it to CAM_REQ_ABORTED. The effect on the * command is the same (it gets retried, subject to the * retry counter), the only difference is what gets printed * on the console. */ if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT); else mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: /* resid is ignored for this condition */ csio->resid = 0; mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR); break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: /* * These can sometimes be transient transport-related * errors, and sometimes persistent drive-related errors. * We used to retry these without decrementing the retry * count by returning CAM_REQUEUE_REQ. Unfortunately, if * we hit a persistent drive problem that returns one of * these error codes, we would retry indefinitely. So, * return CAM_REQ_CMP_ERR so that we decrement the retry * count and avoid infinite retries. We're taking the * potential risk of flagging false failures in the event * of a topology-related error (e.g. a SAS expander problem * causes a command addressed to a drive to fail), while * avoiding an infinite retry loop. */ mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); mprsas_log_command(cm, MPR_INFO, "terminated ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); break; case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INTERNAL_ERROR: case MPI2_IOCSTATUS_INVALID_VPID: case MPI2_IOCSTATUS_INVALID_FIELD: case MPI2_IOCSTATUS_INVALID_STATE: case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: default: mprsas_log_command(cm, MPR_XINFO, "completed ioc %x scsi %x state %x xfer %u\n", le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, le32toh(rep->TransferCount)); csio->resid = cm->cm_length; mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); break; } mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ); if (sassc->flags & MPRSAS_QUEUE_FROZEN) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; sassc->flags &= ~MPRSAS_QUEUE_FROZEN; mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM " "queue\n"); } if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); } mpr_free_command(sc, cm); xpt_done(ccb); } #if __FreeBSD_version >= 900026 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_SMP_PASSTHROUGH_REPLY *rpl; MPI2_SMP_PASSTHROUGH_REQUEST *req; uint64_t sasaddr; union ccb *ccb; ccb = cm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and SMP * commands require two S/G elements only. That should be handled * in the standard request size.
*/ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP " "request!\n", __func__, cm->cm_flags); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply; if (rpl == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; sasaddr = le32toh(req->SASAddress.Low); sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32; if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS || rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) { mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n", __func__, le16toh(rpl->IOCStatus), rpl->SASStatus); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx " "completed successfully\n", __func__, (uintmax_t)sasaddr); if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED) mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); else mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR); bailout: /* * We sync in both directions because we had DMAs in the S/G list * in both directions. */ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); mpr_free_command(sc, cm); xpt_done(ccb); } static void -mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, - uint64_t sasaddr) +mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr) { struct mpr_command *cm; uint8_t *request, *response; MPI2_SMP_PASSTHROUGH_REQUEST *req; struct mpr_softc *sc; struct sglist *sg; int error; sc = sassc->sc; sg = NULL; error = 0; #if (__FreeBSD_version >= 1000028) || \ ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000)) switch (ccb->ccb_h.flags & CAM_DATA_MASK) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: /* * XXX We don't yet support physical addresses here. */ mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " "supported\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; case CAM_DATA_SG: /* * The chip does not support more than one buffer for the * request or response. */ if ((ccb->smpio.smp_request_sglist_cnt > 1) || (ccb->smpio.smp_response_sglist_cnt > 1)) { - mpr_dprint(sc, MPR_ERROR, - "%s: multiple request or response buffer segments " - "not supported for SMP\n", __func__); + mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " + "response buffer segments not supported for SMP\n", + __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } /* * The CAM_SCATTER_VALID flag was originally implemented * for the XPT_SCSI_IO CCB, which only has one data pointer. * We have two. So, just take that flag to mean that we * might have S/G lists, and look at the S/G segment count * to figure out whether that is the case for each individual * buffer. 
*/ if (ccb->smpio.smp_request_sglist_cnt != 0) { bus_dma_segment_t *req_sg; req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; } else request = ccb->smpio.smp_request; if (ccb->smpio.smp_response_sglist_cnt != 0) { bus_dma_segment_t *rsp_sg; rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; break; case CAM_DATA_VADDR: request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; break; default: mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } #else /* __FreeBSD_version < 1000028 */ /* * XXX We don't yet support physical addresses here. */ if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " "supported\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } /* * If the user wants to send an S/G list, check to make sure they * have single buffers. */ if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { /* * The chip does not support more than one buffer for the * request or response. */ if ((ccb->smpio.smp_request_sglist_cnt > 1) || (ccb->smpio.smp_response_sglist_cnt > 1)) { mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " "response buffer segments not supported for SMP\n", __func__); mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); xpt_done(ccb); return; } /* * The CAM_SCATTER_VALID flag was originally implemented * for the XPT_SCSI_IO CCB, which only has one data pointer. * We have two. So, just take that flag to mean that we * might have S/G lists, and look at the S/G segment count * to figure out whether that is the case for each individual * buffer. */ if (ccb->smpio.smp_request_sglist_cnt != 0) { bus_dma_segment_t *req_sg; req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; } else request = ccb->smpio.smp_request; if (ccb->smpio.smp_response_sglist_cnt != 0) { bus_dma_segment_t *rsp_sg; rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; } else response = ccb->smpio.smp_response; } else { request = ccb->smpio.smp_request; response = ccb->smpio.smp_response; } #endif /* __FreeBSD_version < 1000028 */ cm = mpr_alloc_command(sc); if (cm == NULL) { - mpr_dprint(sc, MPR_ERROR, - "%s: cannot allocate command\n", __func__); + mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n", + __func__); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; bzero(req, sizeof(*req)); req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; /* Allow the chip to use any route to this SAS address. */ req->PhysicalPort = 0xff; req->RequestDataLength = htole16(ccb->smpio.smp_request_len); req->SGLFlags = MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address " "%#jx\n", __func__, (uintmax_t)sasaddr); mpr_init_sge(cm, req, &req->SGL); /* * Set up a uio to pass into mpr_map_command(). This allows us to * do one map command, and one busdma call in there. */ cm->cm_uio.uio_iov = cm->cm_iovec; cm->cm_uio.uio_iovcnt = 2; cm->cm_uio.uio_segflg = UIO_SYSSPACE; /* * The read/write flag isn't used by busdma, but set it just in * case. This isn't exactly accurate, either, since we're going in * both directions. 
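The SMP mapping below is a two-element uio: one iovec for the outbound request frame and one for the inbound response frame, so a single busdma load covers both directions. A standalone sketch of the iovec layout (illustrative, not driver code):

#include <sys/uio.h>
#include <stddef.h>

/* Illustrative only: the two-segment layout used for SMP passthrough. */
static void
smp_fill_iov(struct iovec iov[2], void *req, size_t reqlen,
    void *rsp, size_t rsplen)
{
	iov[0].iov_base = req;	/* segment 0: SMP request (data-out) */
	iov[0].iov_len = reqlen;
	iov[1].iov_base = rsp;	/* segment 1: SMP response (data-in) */
	iov[1].iov_len = rsplen;
}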
*/ cm->cm_uio.uio_rw = UIO_WRITE; cm->cm_iovec[0].iov_base = request; cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); cm->cm_iovec[1].iov_base = response; cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + cm->cm_iovec[1].iov_len; /* * Trigger a warning message in mpr_data_cb() for the user if we * wind up exceeding two S/G segments. The chip expects one * segment for the request and another for the response. */ cm->cm_max_segs = 2; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mprsas_smpio_complete; cm->cm_complete_data = ccb; /* * Tell the mapping code that we're using a uio, and that this is * an SMP passthrough request. There is a little special-case * logic there (in mpr_data_cb()) to handle the bidirectional * transfer. */ cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS | MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT; /* The chip data format is little endian. */ req->SASAddress.High = htole32(sasaddr >> 32); req->SASAddress.Low = htole32(sasaddr); /* * XXX Note that we don't have a timeout/abort mechanism here. * From the manual, it looks like task management requests only * work for SCSI IO and SATA passthrough requests. We may need to * have a mechanism to retry requests in the event of a chip reset * at least. Hopefully the chip will ensure that any errors short * of that are relayed back to the driver. */ error = mpr_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) { mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from " "mpr_map_command()\n", __func__, error); goto bailout_error; } return; bailout_error: mpr_free_command(sc, cm); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb) { struct mpr_softc *sc; struct mprsas_target *targ; uint64_t sasaddr = 0; sc = sassc->sc; /* * Make sure the target exists. */ KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id)); targ = &sassc->targets[ccb->ccb_h.target_id]; if (targ->handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n", __func__, ccb->ccb_h.target_id); mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); xpt_done(ccb); return; } /* * If this device has an embedded SMP target, we'll talk to it * directly; otherwise, we need to figure out the expander's address. */ if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) sasaddr = targ->sasaddr; /* * If we don't have a SAS address for the expander yet, try * grabbing it from the page 0x83 information cached in the * transport layer for this target. LSI expanders report the * expander SAS address as the port-associated SAS address in * Inquiry VPD page 0x83. Maxim expanders don't report it in page * 0x83. * * XXX KDM disable this for now, but leave it commented out so that * it is obvious that this is another possible way to get the SAS * address. * * The parent handle method below is a little more reliable, and * the other benefit is that it works for devices other than SES * devices. So you can send an SMP request to a da(4) device and it * will get routed to the expander that device is attached to. * (Assuming the da(4) device doesn't contain an SMP target...) */ #if 0 if (sasaddr == 0) sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); #endif /* * If we still don't have a SAS address for the expander, look for * the parent device of this device, which is probably the expander.
*/ if (sasaddr == 0) { #ifdef OLD_MPR_PROBE struct mprsas_target *parent_target; #endif if (targ->parent_handle == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " "a valid parent handle!\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } #ifdef OLD_MPR_PROBE parent_target = mprsas_find_target_by_handle(sassc, 0, targ->parent_handle); if (parent_target == NULL) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " "a valid parent target!\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if ((parent_target->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " "does not have an SMP target!\n", __func__, targ->handle, parent_target->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = parent_target->sasaddr; #else /* OLD_MPR_PROBE */ if ((targ->parent_devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " "does not have an SMP target!\n", __func__, targ->handle, targ->parent_handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } if (targ->parent_sasaddr == 0x0) { mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle " "%d does not have a valid SAS address!\n", __func__, targ->handle, targ->parent_handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } sasaddr = targ->parent_sasaddr; #endif /* OLD_MPR_PROBE */ } if (sasaddr == 0) { mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for " "handle %d\n", __func__, targ->handle); mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); goto bailout; } mprsas_send_smpcmd(sassc, ccb, sasaddr); return; bailout: xpt_done(ccb); } #endif //__FreeBSD_version >= 900026 static void mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; struct mpr_softc *sc; struct mpr_command *tm; struct mprsas_target *targ; MPR_FUNCTRACE(sassc->sc); mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); - KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, - ("Target %d out of bounds in XPT_RESET_DEV\n", - ccb->ccb_h.target_id)); + KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of " + "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id)); sc = sassc->sc; tm = mpr_alloc_command(sc); if (tm == NULL) { - mpr_dprint(sc, MPR_ERROR, - "command alloc failure in mprsas_action_resetdev\n"); + mpr_dprint(sc, MPR_ERROR, "command alloc failure in " + "mprsas_action_resetdev\n"); mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); xpt_done(ccb); return; } targ = &sassc->targets[ccb->ccb_h.target_id]; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; req->DevHandle = htole16(targ->handle); req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; /* SAS Hard Link Reset / SATA Link Reset */ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; tm->cm_data = NULL; tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; tm->cm_complete = mprsas_resetdev_complete; tm->cm_complete_data = ccb; mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", __func__, targ->tid); tm->cm_targ = targ; targ->flags |= MPRSAS_TARGET_INRESET; mpr_map_command(sc, tm); } static void mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm) { MPI2_SCSI_TASK_MANAGE_REPLY *resp; union ccb *ccb; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; ccb = 
tm->cm_complete_data; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * task management commands don't have S/G lists. */ if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { MPI2_SCSI_TASK_MANAGE_REQUEST *req; req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of " "handle %#04x! This should not happen!\n", __func__, tm->cm_flags, req->DevHandle); mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); goto bailout; } - mpr_dprint(sc, MPR_XINFO, - "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__, - le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); + mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", + __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, CAM_LUN_WILDCARD); } else mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); bailout: mprsas_free_tm(sc, tm); xpt_done(ccb); } static void mprsas_poll(struct cam_sim *sim) { struct mprsas_softc *sassc; sassc = cam_sim_softc(sim); if (sassc->sc->mpr_debug & MPR_TRACE) { /* frequent debug messages during a panic just slow * everything down too much. */ mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n", __func__); sassc->sc->mpr_debug &= ~MPR_TRACE; } mpr_intr_locked(sassc->sc); } static void mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct mpr_softc *sc; sc = (struct mpr_softc *)callback_arg; switch (code) { #if (__FreeBSD_version >= 1000006) || \ ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) case AC_ADVINFO_CHANGED: { struct mprsas_target *target; struct mprsas_softc *sassc; struct scsi_read_capacity_data_long rcap_buf; struct ccb_dev_advinfo cdai; struct mprsas_lun *lun; lun_id_t lunid; int found_lun; uintptr_t buftype; buftype = (uintptr_t)arg; found_lun = 0; sassc = sc->sassc; /* * We're only interested in read capacity data changes. */ if (buftype != CDAI_TYPE_RCAPLONG) break; /* * See the comment in mpr_attach_sas() for a detailed * explanation. In these versions of FreeBSD we register * for all events and filter out the events that don't * apply to us. */ #if (__FreeBSD_version < 1000703) || \ ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) if (xpt_path_path_id(path) != sassc->sim->path_id) break; #endif /* * We should have a handle for this, but check to make sure. 
*/ KASSERT(xpt_path_target_id(path) < sassc->maxtargets, ("Target %d out of bounds in mprsas_async\n", xpt_path_target_id(path))); target = &sassc->targets[xpt_path_target_id(path)]; if (target->handle == 0) break; lunid = xpt_path_lun_id(path); SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = 1; break; } } if (found_lun == 0) { lun = malloc(sizeof(struct mprsas_lun), M_MPR, M_NOWAIT | M_ZERO); if (lun == NULL) { mpr_dprint(sc, MPR_ERROR, "Unable to alloc " "LUN for EEDP support.\n"); break; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } bzero(&rcap_buf, sizeof(rcap_buf)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.ccb_h.flags = CAM_DIR_IN; cdai.buftype = CDAI_TYPE_RCAPLONG; #if (__FreeBSD_version >= 1100061) || \ ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000)) cdai.flags = CDAI_FLAG_NONE; #else cdai.flags = 0; #endif cdai.bufsiz = sizeof(rcap_buf); cdai.buf = (uint8_t *)&rcap_buf; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP) && (rcap_buf.prot & SRC16_PROT_EN)) { lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf.length); } else { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; } break; } #endif case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; /* * See the comment in mpr_attach_sas() for a detailed * explanation. In these versions of FreeBSD we register * for all events and filter out the events that don't * apply to us. */ #if (__FreeBSD_version < 1000703) || \ ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) if (xpt_path_path_id(path) != sc->sassc->sim->path_id) break; #endif cgd = arg; #if (__FreeBSD_version < 901503) || \ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) mprsas_check_eedp(sc, path, cgd); #endif break; } default: break; } } #if (__FreeBSD_version < 901503) || \ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, struct ccb_getdev *cgd) { struct mprsas_softc *sassc = sc->sassc; struct ccb_scsiio *csio; struct scsi_read_capacity_16 *scsi_cmd; struct scsi_read_capacity_eedp *rcap_buf; path_id_t pathid; target_id_t targetid; lun_id_t lunid; union ccb *ccb; struct cam_path *local_path; struct mprsas_target *target; struct mprsas_lun *lun; uint8_t found_lun; char path_str[64]; pathid = cam_sim_path(sassc->sim); targetid = xpt_path_target_id(path); lunid = xpt_path_lun_id(path); - KASSERT(targetid < sassc->maxtargets, - ("Target %d out of bounds in mprsas_check_eedp\n", targetid)); + KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in " + "mprsas_check_eedp\n", targetid)); target = &sassc->targets[targetid]; if (target->handle == 0x0) return; /* * Determine if the device is EEDP capable. * * If this flag is set in the inquiry data, the device supports * protection information, and must support the 16 byte read capacity - * command, otherwise continue without sending read cap 16 + * command, otherwise continue without sending read cap 16. */ if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) return; /* * Issue a READ CAPACITY 16 command. This info is used to determine if * the LUN is formatted for EEDP support. 
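The probe issued below is a plain READ CAPACITY(16), i.e. SERVICE ACTION IN(16) with opcode 0x9E and service action 0x10; the PROT_EN bit of the returned parameter data indicates whether the LUN is formatted with protection information. A standalone sketch of the CDB layout (illustrative only, not the driver's structures):

#include <stdint.h>
#include <string.h>

/* Illustrative only: build a READ CAPACITY(16) CDB. */
static void
build_read_cap16_cdb(uint8_t cdb[16], uint32_t alloc_len)
{
	memset(cdb, 0, 16);
	cdb[0] = 0x9e;			/* SERVICE ACTION IN(16) */
	cdb[1] = 0x10;			/* service action: READ CAPACITY(16) */
	cdb[10] = (alloc_len >> 24) & 0xff;	/* allocation length, big-endian */
	cdb[11] = (alloc_len >> 16) & 0xff;
	cdb[12] = (alloc_len >> 8) & 0xff;
	cdb[13] = alloc_len & 0xff;
}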
*/ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " "support.\n"); return; } - if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) - != CAM_REQ_CMP) { + if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) != + CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " - "support\n"); + "support.\n"); xpt_free_ccb(ccb); return; } /* * If LUN is already in list, don't create a new one. */ found_lun = FALSE; SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id == lunid) { found_lun = TRUE; break; } } if (!found_lun) { lun = malloc(sizeof(struct mprsas_lun), M_MPR, M_NOWAIT | M_ZERO); if (lun == NULL) { mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " "EEDP support.\n"); xpt_free_path(local_path); xpt_free_ccb(ccb); return; } lun->lun_id = lunid; SLIST_INSERT_HEAD(&target->luns, lun, lun_link); } xpt_path_string(local_path, path_str, sizeof(path_str)); mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", path_str, target->handle); /* * Issue a READ CAPACITY 16 command for the LUN. The * mprsas_read_cap_done function will load the read cap info into the * LUN struct. */ rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, M_NOWAIT | M_ZERO); if (rcap_buf == NULL) { mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity " "buffer for EEDP support.\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); csio = &ccb->csio; csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = CAM_DIR_IN; csio->ccb_h.retry_count = 4; csio->ccb_h.cbfcnp = mprsas_read_cap_done; csio->ccb_h.timeout = 60000; csio->data_ptr = (uint8_t *)rcap_buf; csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); csio->sense_len = MPR_SENSE_LEN; csio->cdb_len = sizeof(*scsi_cmd); csio->tag_action = MSG_SIMPLE_Q_TAG; scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = 0x9E; scsi_cmd->service_action = SRC16_SERVICE_ACTION; ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); ccb->ccb_h.ppriv_ptr1 = sassc; xpt_action(ccb); } static void mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) { struct mprsas_softc *sassc; struct mprsas_target *target; struct mprsas_lun *lun; struct scsi_read_capacity_eedp *rcap_buf; if (done_ccb == NULL) return; /* * The driver needs to release the devq when the SCSI command was * generated internally by the driver. Currently there is a single * place where the driver issues a SCSI command internally. If more * internal commands are added in the future, they must release the * devq as well, since those commands will not go back through * cam_periph. */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; xpt_release_devq(done_ccb->ccb_h.path, /*count*/ 1, /*run_queue*/TRUE); } rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; /* * Get the LUN ID for the path and look it up in the LUN list for the * target.
*/ sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; - KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, - ("Target %d out of bounds in mprsas_read_cap_done\n", - done_ccb->ccb_h.target_id)); + KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out " + "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id)); target = &sassc->targets[done_ccb->ccb_h.target_id]; SLIST_FOREACH(lun, &target->luns, lun_link) { if (lun->lun_id != done_ccb->ccb_h.target_lun) continue; /* * Got the LUN in the target's LUN list. Fill it in with EEDP * info. If the READ CAP 16 command had some SCSI error (common * if command is not supported), mark the lun as not supporting * EEDP and set the block size to 0. */ if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { lun->eedp_formatted = FALSE; lun->eedp_block_size = 0; break; } if (rcap_buf->protect & 0x01) { mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID " "%d is formatted for EEDP support.\n", done_ccb->ccb_h.target_lun, done_ccb->ccb_h.target_id); lun->eedp_formatted = TRUE; lun->eedp_block_size = scsi_4btoul(rcap_buf->length); } break; } // Finished with this CCB and path. free(rcap_buf, M_MPR); xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } #endif /* (__FreeBSD_version < 901503) || \ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ void mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, struct mprsas_target *target, lun_id_t lun_id) { union ccb *ccb; path_id_t path_id; /* * Set the INRESET flag for this target so that no I/O will be sent to * the target until the reset has completed. If an I/O request does * happen, the devq will be frozen. The CCB holds the path which is * used to release the devq. The devq is released and the CCB is freed * when the TM completes. */ ccb = xpt_alloc_ccb_nowait(); if (ccb) { path_id = cam_sim_path(sc->sassc->sim); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, target->tid, lun_id) != CAM_REQ_CMP) { xpt_free_ccb(ccb); } else { tm->cm_ccb = ccb; tm->cm_targ = target; target->flags |= MPRSAS_TARGET_INRESET; } } } int mprsas_startup(struct mpr_softc *sc) { /* * Send the port enable message and set the wait_for_port_enable flag. * This flag helps to keep the simq frozen until all discovery events * are processed. */ sc->wait_for_port_enable = 1; mprsas_send_portenable(sc); return (0); } static int mprsas_send_portenable(struct mpr_softc *sc) { MPI2_PORT_ENABLE_REQUEST *request; struct mpr_command *cm; MPR_FUNCTRACE(sc); if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; request->Function = MPI2_FUNCTION_PORT_ENABLE; request->MsgFlags = 0; request->VP_ID = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete = mprsas_portenable_complete; cm->cm_data = NULL; cm->cm_sge = NULL; mpr_map_command(sc, cm); mpr_dprint(sc, MPR_XINFO, "mpr_send_portenable finished cm %p req %p complete %p\n", cm, cm->cm_req, cm->cm_complete); return (0); } static void mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_PORT_ENABLE_REPLY *reply; struct mprsas_softc *sassc; MPR_FUNCTRACE(sc); sassc = sc->sassc; /* * Currently there should be no way we can hit this case. It only * happens when we have a failure to allocate chain frames, and * port enable commands don't have S/G lists. 
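mprsas_send_portenable() and its completion handler form a simple gate: attach-time code waits until port_enable_complete is set, and the completion handler sets the flag and calls wakeup() on its address. A userland sketch of the same handshake, using a pthread condition variable in place of the kernel's msleep()/wakeup() (illustrative only):

#include <pthread.h>

struct pe_gate {
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	int complete;
};

/* Waiter: block until discovery's PORT ENABLE has finished. */
static void
pe_wait(struct pe_gate *g)
{
	pthread_mutex_lock(&g->mtx);
	while (!g->complete)
		pthread_cond_wait(&g->cv, &g->mtx);
	pthread_mutex_unlock(&g->mtx);
}

/* Waker: called from the completion handler. */
static void
pe_complete(struct pe_gate *g)
{
	pthread_mutex_lock(&g->mtx);
	g->complete = 1;
	pthread_cond_signal(&g->cv);
	pthread_mutex_unlock(&g->mtx);
}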
*/ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! " "This should not happen!\n", __func__, cm->cm_flags); } reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; if (reply == NULL) mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); mpr_free_command(sc, cm); if (sc->mpr_ich.ich_arg != NULL) { mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n"); config_intrhook_disestablish(&sc->mpr_ich); sc->mpr_ich.ich_arg = NULL; } /* * Done waiting for port enable to complete. Decrement the refcount. * If refcount is 0, discovery is complete and a rescan of the bus can * take place. */ sc->wait_for_port_enable = 0; sc->port_enable_complete = 1; wakeup(&sc->port_enable_complete); mprsas_startup_decrement(sassc); } int mprsas_check_id(struct mprsas_softc *sassc, int id) { struct mpr_softc *sc = sassc->sc; char *ids; char *name; ids = &sc->exclude_ids[0]; while((name = strsep(&ids, ",")) != NULL) { if (name[0] == '\0') continue; if (strtol(name, NULL, 0) == (long)id) return (1); } return (0); } void mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets) { struct mprsas_softc *sassc; struct mprsas_lun *lun, *lun_tmp; struct mprsas_target *targ; int i; sassc = sc->sassc; /* * The number of targets is based on IOC Facts, so free all of * the allocated LUNs for each target and then the target buffer * itself. */ for (i=0; i< maxtargets; i++) { targ = &sassc->targets[i]; SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { free(lun, M_MPR); } } free(sassc->targets, M_MPR); sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets, M_MPR, M_WAITOK|M_ZERO); if (!sassc->targets) { panic("%s failed to alloc targets with error %d\n", __func__, ENOMEM); } } Index: head/sys/dev/mpr/mpr_sas.h =================================================================== --- head/sys/dev/mpr/mpr_sas.h (revision 299264) +++ head/sys/dev/mpr/mpr_sas.h (revision 299265) @@ -1,179 +1,179 @@ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ struct mpr_fw_event_work; struct mprsas_lun { SLIST_ENTRY(mprsas_lun) lun_link; lun_id_t lun_id; uint8_t eedp_formatted; uint32_t eedp_block_size; }; struct mprsas_target { uint16_t handle; uint8_t linkrate; uint8_t encl_level_valid; uint8_t encl_level; char connector_name[4]; uint64_t devname; uint32_t devinfo; uint16_t encl_handle; uint16_t encl_slot; uint8_t flags; #define MPRSAS_TARGET_INABORT (1 << 0) #define MPRSAS_TARGET_INRESET (1 << 1) #define MPRSAS_TARGET_INDIAGRESET (1 << 2) #define MPRSAS_TARGET_INREMOVAL (1 << 3) #define MPR_TARGET_FLAGS_RAID_COMPONENT (1 << 4) #define MPR_TARGET_FLAGS_VOLUME (1 << 5) #define MPR_TARGET_IS_SATA_SSD (1 << 6) #define MPRSAS_TARGET_INRECOVERY (MPRSAS_TARGET_INABORT | \ MPRSAS_TARGET_INRESET | MPRSAS_TARGET_INDIAGRESET) uint16_t tid; SLIST_HEAD(, mprsas_lun) luns; TAILQ_HEAD(, mpr_command) commands; struct mpr_command *tm; TAILQ_HEAD(, mpr_command) timedout_commands; uint16_t exp_dev_handle; uint16_t phy_num; uint64_t sasaddr; uint16_t parent_handle; uint64_t parent_sasaddr; uint32_t parent_devinfo; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; TAILQ_ENTRY(mprsas_target) sysctl_link; uint64_t issued; uint64_t completed; unsigned int outstanding; unsigned int timeouts; unsigned int aborts; unsigned int logical_unit_resets; unsigned int target_resets; uint8_t scsi_req_desc_type; uint8_t stop_at_shutdown; uint8_t supports_SSU; }; struct mprsas_softc { struct mpr_softc *sc; u_int flags; #define MPRSAS_IN_DISCOVERY (1 << 0) #define MPRSAS_IN_STARTUP (1 << 1) #define MPRSAS_DISCOVERY_TIMEOUT_PENDING (1 << 2) #define MPRSAS_QUEUE_FROZEN (1 << 3) #define MPRSAS_SHUTDOWN (1 << 4) u_int maxtargets; struct mprsas_target *targets; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; struct intr_config_hook sas_ich; struct callout discovery_callout; struct mpr_event_handle *mprsas_eh; u_int startup_refcount; struct proc *sysctl_proc; struct taskqueue *ev_tq; struct task ev_task; TAILQ_HEAD(, mpr_fw_event_work) ev_queue; }; MALLOC_DECLARE(M_MPRSAS); /* * Abstracted so that the driver can be backwards and forwards compatible * with future versions of CAM that will provide this functionality. */ #define MPR_SET_LUN(lun, ccblun) \ mprsas_set_lun(lun, ccblun) static __inline int mprsas_set_lun(uint8_t *lun, u_int ccblun) { uint64_t *newlun; newlun = (uint64_t *)lun; *newlun = 0; if (ccblun <= 0xff) { /* Peripheral device address method, LUN is 0 to 255 */ lun[1] = ccblun; } else if (ccblun <= 0x3fff) { /* Flat space address method, LUN is <= 16383 */ scsi_ulto2b(ccblun, lun); lun[0] |= 0x40; } else if (ccblun <= 0xffffff) { /* Extended flat space address method, LUN is <= 16777215 */ scsi_ulto3b(ccblun, &lun[1]); /* Extended Flat space address method */ lun[0] = 0xc0; /* Length = 1, i.e.
LUN is 3 bytes long */ lun[0] |= 0x10; /* Extended Address Method */ lun[0] |= 0x02; } else { return (EINVAL); } return (0); } static __inline void mprsas_set_ccbstatus(union ccb *ccb, int status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline int mprsas_get_ccbstatus(union ccb *ccb) { return (ccb->ccb_h.status & CAM_STATUS_MASK); } #define MPR_SET_SINGLE_LUN(req, lun) \ do { \ bzero((req)->LUN, 8); \ (req)->LUN[1] = lun; \ } while(0) void mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ); void mprsas_discovery_end(struct mprsas_softc *sassc); void mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, struct mprsas_target *target, lun_id_t lun_id); void mprsas_startup_increment(struct mprsas_softc *sassc); void mprsas_startup_decrement(struct mprsas_softc *sassc); void mprsas_firmware_event_work(void *arg, int pending); int mprsas_check_id(struct mprsas_softc *sassc, int id); Index: head/sys/dev/mpr/mpr_sas_lsi.c =================================================================== --- head/sys/dev/mpr/mpr_sas_lsi.c (revision 299264) +++ head/sys/dev/mpr/mpr_sas_lsi.c (revision 299265) @@ -1,1394 +1,1392 @@ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD */ #include __FBSDID("$FreeBSD$"); -/* Communications core for LSI MPT3 */ +/* Communications core for Avago Technologies (LSI) MPT3 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* For Hashed SAS Address creation for SATA Drives */ #define MPT2SAS_SN_LEN 20 #define MPT2SAS_MN_LEN 40 struct mpr_fw_event_work { u16 event; void *event_data; TAILQ_ENTRY(mpr_fw_event_work) ev_link; }; union _sata_sas_address { u8 wwid[8]; struct { u32 high; u32 low; } word; }; /* * define the IDENTIFY DEVICE structure */ struct _ata_identify_device_data { u16 reserved1[10]; /* 0-9 */ u16 serial_number[10]; /* 10-19 */ u16 reserved2[7]; /* 20-26 */ u16 model_number[20]; /* 27-46*/ u16 reserved3[170]; /* 47-216 */ u16 rotational_speed; /* 217 */ u16 reserved4[38]; /* 218-255 */ }; static u32 event_count; static void mprsas_fw_work(struct mpr_softc *sc, struct mpr_fw_event_work *fw_event); static void mprsas_fw_event_free(struct mpr_softc *, struct mpr_fw_event_work *); static int mprsas_add_device(struct mpr_softc *sc, u16 handle, u8 linkrate); static int mprsas_get_sata_identify(struct mpr_softc *sc, u16 handle, Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz, u32 devinfo); static void mprsas_ata_id_timeout(void *data); int mprsas_get_sas_address_for_sata_disk(struct mpr_softc *sc, u64 *sas_address, u16 handle, u32 device_info, u8 *is_SATA_SSD); static int mprsas_volume_add(struct mpr_softc *sc, u16 handle); static void mprsas_SSU_to_SATA_devices(struct mpr_softc *sc); static void mprsas_stop_unit_done(struct cam_periph *periph, union ccb *done_ccb); void mprsas_evt_handler(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *event) { struct mpr_fw_event_work *fw_event; u16 sz; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); mpr_print_evt_sas(sc, event); mprsas_record_event(sc, event); fw_event = malloc(sizeof(struct mpr_fw_event_work), M_MPR, M_ZERO|M_NOWAIT); if (!fw_event) { printf("%s: allocate failed for fw_event\n", __func__); return; } sz = le16toh(event->EventDataLength) * 4; fw_event->event_data = malloc(sz, M_MPR, M_ZERO|M_NOWAIT); if (!fw_event->event_data) { printf("%s: allocate failed for event_data\n", __func__); free(fw_event, M_MPR); return; } bcopy(event->EventData, fw_event->event_data, sz); fw_event->event = event->Event; if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || event->Event == MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE || event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) && sc->track_mapping_events) sc->pending_map_events++; /* * When wait_for_port_enable flag is set, make sure that all the events * are processed. Increment the startup_refcount and decrement it after * events are processed. 
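The handler above runs in a context that cannot sleep, so it only allocates with M_NOWAIT, copies the event data, and defers all real work to a taskqueue. A generic sketch of that defer pattern with illustrative names; the driver's drain loop additionally holds the softc lock, omitted here, and ex_tq/ex_task are assumed to be set up at attach time with taskqueue_create() and TASK_INIT():

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

struct ex_work {
	TAILQ_ENTRY(ex_work)	link;
};
static TAILQ_HEAD(, ex_work)	ex_queue = TAILQ_HEAD_INITIALIZER(ex_queue);
static struct taskqueue		*ex_tq;		/* from taskqueue_create() */
static struct task		ex_task;	/* TASK_INIT(&ex_task, 0, ex_drain, NULL) */

/* Producer, interrupt-safe: M_NOWAIT may fail, in which case we drop. */
static void
ex_enqueue(void)
{
	struct ex_work *w;

	if ((w = malloc(sizeof(*w), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
		return;
	TAILQ_INSERT_TAIL(&ex_queue, w, link);
	taskqueue_enqueue(ex_tq, &ex_task);
}

/* Consumer, process context: drain everything queued so far. */
static void
ex_drain(void *arg, int pending)
{
	struct ex_work *w;

	while ((w = TAILQ_FIRST(&ex_queue)) != NULL) {
		TAILQ_REMOVE(&ex_queue, w, link);
		/* ...process the work item here... */
		free(w, M_DEVBUF);
	}
}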
*/ if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) && sc->wait_for_port_enable) mprsas_startup_increment(sc->sassc); TAILQ_INSERT_TAIL(&sc->sassc->ev_queue, fw_event, ev_link); taskqueue_enqueue(sc->sassc->ev_tq, &sc->sassc->ev_task); } static void mprsas_fw_event_free(struct mpr_softc *sc, struct mpr_fw_event_work *fw_event) { free(fw_event->event_data, M_MPR); free(fw_event, M_MPR); } /** * _mpr_fw_work - delayed task for processing firmware events * @sc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void mprsas_fw_work(struct mpr_softc *sc, struct mpr_fw_event_work *fw_event) { struct mprsas_softc *sassc; sassc = sc->sassc; mpr_dprint(sc, MPR_EVENT, "(%d)->(%s) Working on Event: [%x]\n", event_count++, __func__, fw_event->event); switch (fw_event->event) { case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: { MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data; MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy; int i; data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *) fw_event->event_data; mpr_mapping_topology_change_event(sc, fw_event->event_data); for (i = 0; i < data->NumEntries; i++) { phy = &data->PHY[i]; switch (phy->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK) { case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: if (mprsas_add_device(sc, le16toh(phy->AttachedDevHandle), phy->LinkRate)) { printf("%s: failed to add device with " "handle 0x%x\n", __func__, le16toh(phy->AttachedDevHandle)); mprsas_prepare_remove(sassc, le16toh( phy->AttachedDevHandle)); } break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: mprsas_prepare_remove(sassc, le16toh( phy->AttachedDevHandle)); break; case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: default: break; } } /* * refcount was incremented for this event in * mprsas_evt_handler. Decrement it here because the event has * been processed. */ mprsas_startup_decrement(sassc); break; } case MPI2_EVENT_SAS_DISCOVERY: { MPI2_EVENT_DATA_SAS_DISCOVERY *data; data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)fw_event->event_data; if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_STARTED) - mpr_dprint(sc, MPR_TRACE,"SAS discovery start " - "event\n"); + mpr_dprint(sc, MPR_TRACE,"SAS discovery start event\n"); if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_COMPLETED) { mpr_dprint(sc, MPR_TRACE,"SAS discovery stop event\n"); sassc->flags &= ~MPRSAS_IN_DISCOVERY; mprsas_discovery_end(sassc); } break; } case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: { Mpi2EventDataSasEnclDevStatusChange_t *data; data = (Mpi2EventDataSasEnclDevStatusChange_t *) fw_event->event_data; mpr_mapping_enclosure_dev_status_change_event(sc, fw_event->event_data); break; } case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: { Mpi2EventIrConfigElement_t *element; int i; u8 foreign_config, reason; u16 elementType; Mpi2EventDataIrConfigChangeList_t *event_data; struct mprsas_target *targ; unsigned int id; event_data = fw_event->event_data; foreign_config = (le32toh(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; id = mpr_mapping_get_raid_id_from_handle(sc, element->VolDevHandle); mpr_mapping_ir_config_change_event(sc, event_data); for (i = 0; i < event_data->NumElements; i++, element++) { reason = element->ReasonCode; elementType = le16toh(element->ElementFlags) & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; /* * check for element type of Phys Disk or Hot Spare */ if ((elementType != MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT) && (elementType != MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT)) // do next element goto skip_fp_send; /* * check for reason of Hide, Unhide, PD Created, or PD * Deleted */ if ((reason != MPI2_EVENT_IR_CHANGE_RC_HIDE) && (reason != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) && (reason != MPI2_EVENT_IR_CHANGE_RC_PD_CREATED) && (reason != MPI2_EVENT_IR_CHANGE_RC_PD_DELETED)) goto skip_fp_send; // check for a reason of Hide or PD Created if ((reason == MPI2_EVENT_IR_CHANGE_RC_HIDE) || (reason == MPI2_EVENT_IR_CHANGE_RC_PD_CREATED)) { // build RAID Action message Mpi2RaidActionRequest_t *action; Mpi2RaidActionReply_t *reply; struct mpr_command *cm; int error = 0; if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed\n", __func__); return; } mpr_dprint(sc, MPR_EVENT, "Sending FP action " "from " "MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST " ":\n"); action = (MPI2_RAID_ACTION_REQUEST *)cm->cm_req; action->Function = MPI2_FUNCTION_RAID_ACTION; action->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; action->PhysDiskNum = element->PhysDiskNum; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; error = mpr_request_polled(sc, cm); reply = (Mpi2RaidActionReply_t *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the poll returns error then we * need to do diag reset */ printf("%s: poll for page completed " "with error %d", __func__, error); } if (reply && (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { mpr_dprint(sc, MPR_ERROR, "%s: error " "sending RaidActionPage; " "iocstatus = 0x%x\n", __func__, le16toh(reply->IOCStatus)); } if (cm) mpr_free_command(sc, cm); } skip_fp_send: mpr_dprint(sc, MPR_EVENT, "Received " "MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST Reason " "code %x:\n", element->ReasonCode); switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: case MPI2_EVENT_IR_CHANGE_RC_ADDED: if (!foreign_config) { if (mprsas_volume_add(sc, le16toh(element->VolDevHandle))) { printf("%s: failed to add RAID " "volume with handle 0x%x\n", __func__, le16toh(element-> VolDevHandle)); } } break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: case MPI2_EVENT_IR_CHANGE_RC_REMOVED: /* * Rescan after volume is deleted or removed. */ if (!foreign_config) { if (id == MPR_MAP_BAD_ID) { printf("%s: could not get ID " "for volume with handle " "0x%04x\n", __func__, le16toh(element-> VolDevHandle)); break; } targ = &sassc->targets[id]; targ->handle = 0x0; targ->encl_slot = 0x0; targ->encl_handle = 0x0; targ->encl_level_valid = 0x0; targ->encl_level = 0x0; targ->connector_name[0] = ' '; targ->connector_name[1] = ' '; targ->connector_name[2] = ' '; targ->connector_name[3] = ' '; targ->exp_dev_handle = 0x0; targ->phy_num = 0x0; targ->linkrate = 0x0; mprsas_rescan_target(sc, targ); printf("RAID target id 0x%x removed\n", targ->tid); } break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: case MPI2_EVENT_IR_CHANGE_RC_HIDE: /* * Phys Disk of a volume has been created. Hide * it from the OS. 
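The two guards earlier in this hunk, element type first and reason code second, read more clearly as predicates over the same MPI2 constants; a sketch with illustrative helper names:

/* Element must be a volume's phys disk or a hot spare. */
static int
ex_is_pd_or_spare(uint16_t elem_type)
{
	return (elem_type == MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT ||
	    elem_type == MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT);
}

/* Reason must be one of Hide, Unhide, PD Created, or PD Deleted. */
static int
ex_reason_needs_fp_action(uint8_t reason)
{
	return (reason == MPI2_EVENT_IR_CHANGE_RC_HIDE ||
	    reason == MPI2_EVENT_IR_CHANGE_RC_UNHIDE ||
	    reason == MPI2_EVENT_IR_CHANGE_RC_PD_CREATED ||
	    reason == MPI2_EVENT_IR_CHANGE_RC_PD_DELETED);
}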
*/ targ = mprsas_find_target_by_handle(sassc, 0, element->PhysDiskDevHandle); if (targ == NULL) break; targ->flags |= MPR_TARGET_FLAGS_RAID_COMPONENT; mprsas_rescan_target(sc, targ); break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: /* * Phys Disk of a volume has been deleted. * Expose it to the OS. */ if (mprsas_add_device(sc, le16toh(element->PhysDiskDevHandle), 0)) { printf("%s: failed to add device with " "handle 0x%x\n", __func__, le16toh(element-> PhysDiskDevHandle)); mprsas_prepare_remove(sassc, le16toh(element-> PhysDiskDevHandle)); } break; } } /* * refcount was incremented for this event in * mprsas_evt_handler. Decrement it here because the event has * been processed. */ mprsas_startup_decrement(sassc); break; } case MPI2_EVENT_IR_VOLUME: { Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; /* * Informational only. */ mpr_dprint(sc, MPR_EVENT, "Received IR Volume event:\n"); switch (event_data->ReasonCode) { case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED: mpr_dprint(sc, MPR_EVENT, " Volume Settings " "changed from 0x%x to 0x%x for Volume with " "handle 0x%x", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), le16toh(event_data->VolDevHandle)); break; case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED: mpr_dprint(sc, MPR_EVENT, " Volume Status " "changed from 0x%x to 0x%x for Volume with " "handle 0x%x", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), le16toh(event_data->VolDevHandle)); break; case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED: mpr_dprint(sc, MPR_EVENT, " Volume State " "changed from 0x%x to 0x%x for Volume with " "handle 0x%x", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), le16toh(event_data->VolDevHandle)); u32 state; struct mprsas_target *targ; state = le32toh(event_data->NewValue); switch (state) { case MPI2_RAID_VOL_STATE_MISSING: case MPI2_RAID_VOL_STATE_FAILED: mprsas_prepare_volume_remove(sassc, event_data->VolDevHandle); break; case MPI2_RAID_VOL_STATE_ONLINE: case MPI2_RAID_VOL_STATE_DEGRADED: case MPI2_RAID_VOL_STATE_OPTIMAL: targ = mprsas_find_target_by_handle(sassc, 0, event_data->VolDevHandle); if (targ) { printf("%s %d: Volume handle " "0x%x is already added\n", __func__, __LINE__, event_data->VolDevHandle); break; } if (mprsas_volume_add(sc, le16toh(event_data-> VolDevHandle))) { printf("%s: failed to add RAID " "volume with handle 0x%x\n", __func__, le16toh( event_data->VolDevHandle)); } break; default: break; } break; default: break; } break; } case MPI2_EVENT_IR_PHYSICAL_DISK: { Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; struct mprsas_target *targ; /* * Informational only.
*/ mpr_dprint(sc, MPR_EVENT, "Received IR Phys Disk event:\n"); switch (event_data->ReasonCode) { case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED: mpr_dprint(sc, MPR_EVENT, " Phys Disk Settings " "changed from 0x%x to 0x%x for Phys Disk Number " "%d and handle 0x%x at Enclosure handle 0x%x, Slot " "%d", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), event_data->PhysDiskNum, le16toh(event_data->PhysDiskDevHandle), le16toh(event_data->EnclosureHandle), le16toh(event_data->Slot)); break; case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED: mpr_dprint(sc, MPR_EVENT, " Phys Disk Status changed " "from 0x%x to 0x%x for Phys Disk Number %d and " "handle 0x%x at Enclosure handle 0x%x, Slot %d", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), event_data->PhysDiskNum, le16toh(event_data->PhysDiskDevHandle), le16toh(event_data->EnclosureHandle), le16toh(event_data->Slot)); break; case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED: mpr_dprint(sc, MPR_EVENT, " Phys Disk State changed " "from 0x%x to 0x%x for Phys Disk Number %d and " "handle 0x%x at Enclosure handle 0x%x, Slot %d", le32toh(event_data->PreviousValue), le32toh(event_data->NewValue), event_data->PhysDiskNum, le16toh(event_data->PhysDiskDevHandle), le16toh(event_data->EnclosureHandle), le16toh(event_data->Slot)); switch (event_data->NewValue) { case MPI2_RAID_PD_STATE_ONLINE: case MPI2_RAID_PD_STATE_DEGRADED: case MPI2_RAID_PD_STATE_REBUILDING: case MPI2_RAID_PD_STATE_OPTIMAL: case MPI2_RAID_PD_STATE_HOT_SPARE: targ = mprsas_find_target_by_handle( sassc, 0, event_data->PhysDiskDevHandle); if (targ) { targ->flags |= MPR_TARGET_FLAGS_RAID_COMPONENT; printf("%s %d: Found Target " "for handle 0x%x.\n", __func__, __LINE__ , event_data-> PhysDiskDevHandle); } break; case MPI2_RAID_PD_STATE_OFFLINE: case MPI2_RAID_PD_STATE_NOT_CONFIGURED: case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: default: targ = mprsas_find_target_by_handle( sassc, 0, event_data->PhysDiskDevHandle); if (targ) { targ->flags |= ~MPR_TARGET_FLAGS_RAID_COMPONENT; printf("%s %d: Found Target " "for handle 0x%x. \n", __func__, __LINE__ , event_data-> PhysDiskDevHandle); } break; } default: break; } break; } case MPI2_EVENT_IR_OPERATION_STATUS: { Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; /* * Informational only. */ mpr_dprint(sc, MPR_EVENT, "Received IR Op Status event:\n"); mpr_dprint(sc, MPR_EVENT, " RAID Operation of %d is %d " "percent complete for Volume with handle 0x%x", event_data->RAIDOperation, event_data->PercentComplete, le16toh(event_data->VolDevHandle)); break; } case MPI2_EVENT_TEMP_THRESHOLD: { pMpi2EventDataTemperature_t temp_event; temp_event = (pMpi2EventDataTemperature_t)fw_event->event_data; /* * The Temp Sensor Count must be greater than the event's Sensor * Num to be valid. If valid, print the temp thresholds that * have been exceeded. */ if (sc->iounit_pg8.NumSensors > temp_event->SensorNum) { mpr_dprint(sc, MPR_FAULT, "Temperature Threshold flags " "%s %s %s %s exceeded for Sensor: %d !!!\n", ((temp_event->Status & 0x01) == 1) ? "0 " : " ", ((temp_event->Status & 0x02) == 2) ? "1 " : " ", ((temp_event->Status & 0x04) == 4) ? "2 " : " ", ((temp_event->Status & 0x08) == 8) ? 
"3 " : " ", temp_event->SensorNum); mpr_dprint(sc, MPR_FAULT, "Current Temp in Celsius: " "%d\n", temp_event->CurrentTemperature); } break; } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: default: mpr_dprint(sc, MPR_TRACE,"Unhandled event 0x%0X\n", fw_event->event); break; } mpr_dprint(sc, MPR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count, __func__, fw_event->event); mprsas_fw_event_free(sc, fw_event); } void mprsas_firmware_event_work(void *arg, int pending) { struct mpr_fw_event_work *fw_event; struct mpr_softc *sc; sc = (struct mpr_softc *)arg; mpr_lock(sc); while ((fw_event = TAILQ_FIRST(&sc->sassc->ev_queue)) != NULL) { TAILQ_REMOVE(&sc->sassc->ev_queue, fw_event, ev_link); mprsas_fw_work(sc, fw_event); } mpr_unlock(sc); } static int mprsas_add_device(struct mpr_softc *sc, u16 handle, u8 linkrate){ char devstring[80]; struct mprsas_softc *sassc; struct mprsas_target *targ; Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t config_page; uint64_t sas_address, parent_sas_address = 0; u32 device_info, parent_devinfo = 0; unsigned int id; int ret = 1, error = 0, i; struct mprsas_lun *lun; u8 is_SATA_SSD = 0; struct mpr_command *cm; sassc = sc->sassc; mprsas_startup_increment(sassc); if ((mpr_config_get_sas_device_pg0(sc, &mpi_reply, &config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { printf("%s: error reading SAS device page0\n", __func__); error = ENXIO; goto out; } device_info = le32toh(config_page.DeviceInfo); if (((device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) && (le16toh(config_page.ParentDevHandle) != 0)) { Mpi2ConfigReply_t tmp_mpi_reply; Mpi2SasDevicePage0_t parent_config_page; if ((mpr_config_get_sas_device_pg0(sc, &tmp_mpi_reply, &parent_config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, le16toh(config_page.ParentDevHandle)))) { printf("%s: error reading SAS device %#x page0\n", __func__, le16toh(config_page.ParentDevHandle)); } else { parent_sas_address = parent_config_page.SASAddress.High; parent_sas_address = (parent_sas_address << 32) | parent_config_page.SASAddress.Low; parent_devinfo = le32toh(parent_config_page.DeviceInfo); } } /* TODO Check proper endianness */ sas_address = config_page.SASAddress.High; sas_address = (sas_address << 32) | config_page.SASAddress.Low; mpr_dprint(sc, MPR_INFO, "SAS Address from SAS device page0 = %jx\n", sas_address); /* * Always get SATA Identify information because this is used to * determine if Start/Stop Unit should be sent to the drive when the * system is shutdown. */ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) { ret = mprsas_get_sas_address_for_sata_disk(sc, &sas_address, handle, device_info, &is_SATA_SSD); if (ret) { mpr_dprint(sc, MPR_ERROR, "%s: failed to get disk type " "(SSD or HDD) for SATA device with handle 0x%04x\n", __func__, handle); } else { mpr_dprint(sc, MPR_INFO, "SAS Address from SATA " "device = %jx\n", sas_address); } } id = mpr_mapping_get_sas_id(sc, sas_address, handle); if (id == MPR_MAP_BAD_ID) { printf("failure at %s:%d/%s()! 
Could not get ID for device " "with handle 0x%04x\n", __FILE__, __LINE__, __func__, handle); error = ENXIO; goto out; } if (mprsas_check_id(sassc, id) != 0) { device_printf(sc->mpr_dev, "Excluding target id %d\n", id); error = ENXIO; goto out; } mpr_dprint(sc, MPR_MAPPING, "SAS Address from SAS device page0 = %jx\n", sas_address); targ = &sassc->targets[id]; targ->devinfo = device_info; targ->devname = le32toh(config_page.DeviceName.High); targ->devname = (targ->devname << 32) | le32toh(config_page.DeviceName.Low); targ->encl_handle = le16toh(config_page.EnclosureHandle); targ->encl_slot = le16toh(config_page.Slot); targ->encl_level = config_page.EnclosureLevel; targ->connector_name[0] = config_page.ConnectorName[0]; targ->connector_name[1] = config_page.ConnectorName[1]; targ->connector_name[2] = config_page.ConnectorName[2]; targ->connector_name[3] = config_page.ConnectorName[3]; targ->handle = handle; targ->parent_handle = le16toh(config_page.ParentDevHandle); targ->sasaddr = mpr_to_u64(&config_page.SASAddress); targ->parent_sasaddr = le64toh(parent_sas_address); targ->parent_devinfo = parent_devinfo; targ->tid = id; targ->linkrate = (linkrate>>4); targ->flags = 0; if (is_SATA_SSD) { targ->flags = MPR_TARGET_IS_SATA_SSD; } if (le16toh(config_page.Flags) & MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) { targ->scsi_req_desc_type = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; } if (le16toh(config_page.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { targ->encl_level_valid = TRUE; } TAILQ_INIT(&targ->commands); TAILQ_INIT(&targ->timedout_commands); while (!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPR); } SLIST_INIT(&targ->luns); mpr_describe_devinfo(targ->devinfo, devstring, 80); mpr_dprint(sc, (MPR_INFO|MPR_MAPPING), "Found device <%s> <%s> " "handle<0x%04x> enclosureHandle<0x%04x> slot %d\n", devstring, mpr_describe_table(mpr_linkrate_names, targ->linkrate), targ->handle, targ->encl_handle, targ->encl_slot); if (targ->encl_level_valid) { mpr_dprint(sc, (MPR_INFO|MPR_MAPPING), "At enclosure level %d " "and connector name (%4s)\n", targ->encl_level, targ->connector_name); } #if ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000039)) || \ (__FreeBSD_version < 902502) if ((sassc->flags & MPRSAS_IN_STARTUP) == 0) #endif mprsas_rescan_target(sc, targ); mpr_dprint(sc, MPR_MAPPING, "Target id 0x%x added\n", targ->tid); /* * Check all commands to see if the SATA_ID_TIMEOUT flag has been set. * If so, send a Target Reset TM to the target that was just created. * An Abort Task TM should be used instead of a Target Reset, but that * would be much more difficult because targets have not been fully * discovered yet, and LUN's haven't been setup. So, just reset the * target instead of the LUN. */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { targ->timeouts++; cm->cm_state = MPR_CM_STATE_TIMEDOUT; if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { mpr_dprint(sc, MPR_INFO, "%s: sending Target " "Reset for stuck SATA identify command " "(cm = %p)\n", __func__, cm); targ->tm->cm_targ = targ; mprsas_send_reset(sc, targ->tm, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); } else { mpr_dprint(sc, MPR_ERROR, "Failed to allocate " - "tm for Target Reset after SATA ID " - "command timed out (cm %p)\n", cm); + "tm for Target Reset after SATA ID command " + "timed out (cm %p)\n", cm); } /* * No need to check for more since the target is * already being reset. 
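Several places in this hunk splice a 64-bit identifier (SASAddress, DeviceName, the parent SAS address) out of two 32-bit halves; the idiom, sketched with an illustrative name:

#include <stdint.h>

/* Sketch of the High/Low dword splice used for SAS addresses above. */
static inline uint64_t
ex_join64(uint32_t high, uint32_t low)
{
	return (((uint64_t)high << 32) | (uint64_t)low);
}
/* e.g.: sas_address = ex_join64(page.SASAddress.High, page.SASAddress.Low) */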
*/ break; } } out: /* * Free the commands that may not have been freed from the SATA ID call */ for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { mpr_free_command(sc, cm); } } mprsas_startup_decrement(sassc); return (error); } int mprsas_get_sas_address_for_sata_disk(struct mpr_softc *sc, u64 *sas_address, u16 handle, u32 device_info, u8 *is_SATA_SSD) { Mpi2SataPassthroughReply_t mpi_reply; int i, rc, try_count; u32 *bufferptr; union _sata_sas_address hash_address; struct _ata_identify_device_data ata_identify; u8 buffer[MPT2SAS_MN_LEN + MPT2SAS_SN_LEN]; u32 ioc_status; u8 sas_status; memset(&ata_identify, 0, sizeof(ata_identify)); try_count = 0; do { rc = mprsas_get_sata_identify(sc, handle, &mpi_reply, (char *)&ata_identify, sizeof(ata_identify), device_info); try_count++; ioc_status = le16toh(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; sas_status = mpi_reply.SASStatus; switch (ioc_status) { case MPI2_IOCSTATUS_SUCCESS: break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* No sense sleeping. this error won't get better */ break; default: if (sc->spinup_wait_time > 0) { mpr_dprint(sc, MPR_INFO, "Sleeping %d seconds " "after SATA ID error to wait for spinup\n", sc->spinup_wait_time); msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprid", sc->spinup_wait_time * hz); } } } while (((rc && (rc != EWOULDBLOCK)) || - (ioc_status && - (ioc_status != MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR)) - || sas_status) && (try_count < 5)); + (ioc_status && (ioc_status != MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR)) + || sas_status) && (try_count < 5)); if (rc == 0 && !ioc_status && !sas_status) { mpr_dprint(sc, MPR_MAPPING, "%s: got SATA identify " "successfully for handle = 0x%x with try_count = %d\n", __func__, handle, try_count); } else { mpr_dprint(sc, MPR_MAPPING, "%s: handle = 0x%x failed\n", __func__, handle); return -1; } /* Copy & byteswap the 40 byte model number to a buffer */ for (i = 0; i < MPT2SAS_MN_LEN; i += 2) { buffer[i] = ((u8 *)ata_identify.model_number)[i + 1]; buffer[i + 1] = ((u8 *)ata_identify.model_number)[i]; } /* Copy & byteswap the 20 byte serial number to a buffer */ for (i = 0; i < MPT2SAS_SN_LEN; i += 2) { buffer[MPT2SAS_MN_LEN + i] = ((u8 *)ata_identify.serial_number)[i + 1]; buffer[MPT2SAS_MN_LEN + i + 1] = ((u8 *)ata_identify.serial_number)[i]; } bufferptr = (u32 *)buffer; /* There are 60 bytes to hash down to 8. 60 isn't divisible by 8, * so loop through the first 56 bytes (7*8), * and then add in the last dword. */ hash_address.word.low = 0; hash_address.word.high = 0; for (i = 0; (i < ((MPT2SAS_MN_LEN+MPT2SAS_SN_LEN)/8)); i++) { hash_address.word.low += *bufferptr; bufferptr++; hash_address.word.high += *bufferptr; bufferptr++; } /* Add the last dword */ hash_address.word.low += *bufferptr; /* Make sure the hash doesn't start with 5, because it could clash * with a SAS address. Change 5 to a D. 
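Taken together, the hashed-WWID scheme above is: byte-swap the 40-byte model number and 20-byte serial number into one 60-byte buffer, fold the first 56 bytes into two 32-bit accumulators a dword pair at a time, add the last dword to the low half, and finally nudge a leading nibble of 5 to D (the adjustment that immediately follows) so the result cannot collide with a real SAS address. A standalone sketch with illustrative names; the driver assembles the final WWID byte-by-byte through a union, a byte-order detail this sketch glosses over:

#include <stdint.h>

#define EX_MN_LEN	40	/* ATA model number bytes */
#define EX_SN_LEN	20	/* ATA serial number bytes */

static uint64_t
ex_hash_wwid(const uint8_t *mn, const uint8_t *sn)
{
	uint8_t buf[EX_MN_LEN + EX_SN_LEN];
	uint32_t lo = 0, hi = 0;
	const uint32_t *p;
	int i;

	/* ATA IDENTIFY strings are word-swapped; undo that. */
	for (i = 0; i < EX_MN_LEN; i += 2) {
		buf[i] = mn[i + 1];
		buf[i + 1] = mn[i];
	}
	for (i = 0; i < EX_SN_LEN; i += 2) {
		buf[EX_MN_LEN + i] = sn[i + 1];
		buf[EX_MN_LEN + i + 1] = sn[i];
	}

	/* Fold 56 bytes as dword pairs, then add in the 15th dword. */
	p = (const uint32_t *)buf;	/* same u32 walk the driver does */
	for (i = 0; i < (EX_MN_LEN + EX_SN_LEN) / 8; i++) {
		lo += *p++;
		hi += *p++;
	}
	lo += *p;

	/* A leading nibble of 5 would look like a SAS address: make it D. */
	if ((hi & 0x000000f0) == 0x00000050)
		hi |= 0x00000080;
	return (((uint64_t)hi << 32) | lo);
}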
*/ if ((hash_address.word.high & 0x000000F0) == (0x00000050)) hash_address.word.high |= 0x00000080; *sas_address = (u64)hash_address.wwid[0] << 56 | (u64)hash_address.wwid[1] << 48 | (u64)hash_address.wwid[2] << 40 | (u64)hash_address.wwid[3] << 32 | (u64)hash_address.wwid[4] << 24 | (u64)hash_address.wwid[5] << 16 | (u64)hash_address.wwid[6] << 8 | (u64)hash_address.wwid[7]; if (ata_identify.rotational_speed == 1) { *is_SATA_SSD = 1; } return 0; } static int mprsas_get_sata_identify(struct mpr_softc *sc, u16 handle, Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz, u32 devinfo) { Mpi2SataPassthroughRequest_t *mpi_request; Mpi2SataPassthroughReply_t *reply; struct mpr_command *cm; char *buffer; int error = 0; buffer = malloc( sz, M_MPR, M_NOWAIT | M_ZERO); if (!buffer) return ENOMEM; if ((cm = mpr_alloc_command(sc)) == NULL) { free(buffer, M_MPR); return (EBUSY); } mpi_request = (MPI2_SATA_PASSTHROUGH_REQUEST *)cm->cm_req; bzero(mpi_request,sizeof(MPI2_SATA_PASSTHROUGH_REQUEST)); mpi_request->Function = MPI2_FUNCTION_SATA_PASSTHROUGH; mpi_request->VF_ID = 0; mpi_request->DevHandle = htole16(handle); mpi_request->PassthroughFlags = (MPI2_SATA_PT_REQ_PT_FLAGS_PIO | MPI2_SATA_PT_REQ_PT_FLAGS_READ); mpi_request->DataLength = htole32(sz); mpi_request->CommandFIS[0] = 0x27; mpi_request->CommandFIS[1] = 0x80; mpi_request->CommandFIS[2] = (devinfo & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? 0xA1 : 0xEC; cm->cm_sge = &mpi_request->SGL; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_DATAIN; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = buffer; cm->cm_length = htole32(sz); /* * Start a timeout counter specifically for the SATA ID command. This * is used to fix a problem where the FW does not send a reply sometimes * when a bad disk is in the topology. So, this is used to timeout the * command so that processing can continue normally. */ mpr_dprint(sc, MPR_XINFO, "%s start timeout counter for SATA ID " "command\n", __func__); callout_reset(&cm->cm_callout, MPR_ATA_ID_TIMEOUT * hz, mprsas_ata_id_timeout, cm); error = mpr_wait_command(sc, cm, 60, CAN_SLEEP); mpr_dprint(sc, MPR_XINFO, "%s stop timeout counter for SATA ID " "command\n", __func__); callout_stop(&cm->cm_callout); reply = (Mpi2SataPassthroughReply_t *)cm->cm_reply; if (error || (reply == NULL)) { /* FIXME */ /* * If the request returns an error then we need to do a diag * reset */ printf("%s: request for page completed with error %d", __func__, error); error = ENXIO; goto out; } bcopy(buffer, id_buffer, sz); bcopy(reply, mpi_reply, sizeof(Mpi2SataPassthroughReply_t)); if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { printf("%s: error reading SATA PASSTHRU; iocstatus = 0x%x\n", __func__, reply->IOCStatus); error = ENXIO; goto out; } out: /* * If the SATA_ID_TIMEOUT flag has been set for this command, don't free * it. The command will be freed after sending a target reset TM. If * the command did timeout, use EWOULDBLOCK. 
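The three CommandFIS bytes set in mprsas_get_sata_identify() above are standard SATA values; spelled out for reference (the helper and its arguments are illustrative, not driver fields):

#include <stdint.h>

/* Sketch: the IDENTIFY command FIS built above, annotated. */
static void
ex_build_identify_fis(uint8_t fis[20], int atapi)
{
	fis[0] = 0x27;		/* FIS type: Register Host-to-Device */
	fis[1] = 0x80;		/* C bit set: FIS carries a new command */
	fis[2] = atapi ? 0xA1	/* IDENTIFY PACKET DEVICE (ATAPI) */
		       : 0xEC;	/* IDENTIFY DEVICE */
}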
*/ if ((cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) == 0) mpr_free_command(sc, cm); else if (error == 0) error = EWOULDBLOCK; free(buffer, M_MPR); return (error); } static void mprsas_ata_id_timeout(void *data) { struct mpr_softc *sc; struct mpr_command *cm; cm = (struct mpr_command *)data; sc = cm->cm_sc; mtx_assert(&sc->mpr_mtx, MA_OWNED); mpr_dprint(sc, MPR_INFO, "%s checking ATA ID command %p sc %p\n", __func__, cm, sc); if ((callout_pending(&cm->cm_callout)) || (!callout_active(&cm->cm_callout))) { - mpr_dprint(sc, MPR_INFO, "%s ATA ID command almost timed " - "out\n", __func__); + mpr_dprint(sc, MPR_INFO, "%s ATA ID command almost timed out\n", + __func__); return; } callout_deactivate(&cm->cm_callout); /* * Run the interrupt handler to make sure it's not pending. This * isn't perfect because the command could have already completed * and been re-used, though this is unlikely. */ mpr_intr_locked(sc); if (cm->cm_state == MPR_CM_STATE_FREE) { - mpr_dprint(sc, MPR_INFO, "%s ATA ID command almost timed " - "out\n", __func__); + mpr_dprint(sc, MPR_INFO, "%s ATA ID command almost timed out\n", + __func__); return; } mpr_dprint(sc, MPR_INFO, "ATA ID command timeout cm %p\n", cm); /* - * Send wakeup() to the sleeping thread that issued this ATA ID - * command. wakeup() will cause msleep to return a 0 (not EWOULDBLOCK), - * and this will keep reinit() from being called. This way, an Abort - * Task TM can be issued so that the timed out command can be cleared. - * The Abort Task cannot be sent from here because the driver has not - * completed setting up targets. Instead, the command is flagged so - * that special handling will be used to send the abort. + * Send wakeup() to the sleeping thread that issued this ATA ID command. + * wakeup() will cause msleep to return a 0 (not EWOULDBLOCK), and this + * will keep reinit() from being called. This way, an Abort Task TM can + * be issued so that the timed out command can be cleared. The Abort + * Task cannot be sent from here because the driver has not completed + * setting up targets. Instead, the command is flagged so that special + * handling will be used to send the abort. 
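The wakeup() trick described above hinges on the msleep(9) return convention: 0 for an explicit wakeup, EWOULDBLOCK when the timeout expires first. The waiter's side, sketched with assumed names:

/* Sketch: how the waiter distinguishes completion from timeout. */
static int
ex_wait_for_reply(struct mpr_softc *sc, struct mpr_command *cm, int timo)
{
	int error;

	error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timo * hz);
	if (error == EWOULDBLOCK) {
		/* Genuine timeout: no reply and no faked wakeup; this is
		 * the path that normally escalates to a reinit. */
		return (error);
	}
	/* 0: either a real reply arrived, or mprsas_ata_id_timeout()
	 * flagged the command and called wakeup(cm) to avoid reinit. */
	return (0);
}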
*/ cm->cm_flags |= MPR_CM_FLAGS_SATA_ID_TIMEOUT; wakeup(cm); } static int mprsas_volume_add(struct mpr_softc *sc, u16 handle) { struct mprsas_softc *sassc; struct mprsas_target *targ; u64 wwid; unsigned int id; int error = 0; struct mprsas_lun *lun; sassc = sc->sassc; mprsas_startup_increment(sassc); /* wwid is endian safe */ mpr_config_get_volume_wwid(sc, handle, &wwid); if (!wwid) { printf("%s: invalid WWID; cannot add volume to mapping table\n", __func__); error = ENXIO; goto out; } id = mpr_mapping_get_raid_id(sc, wwid, handle); if (id == MPR_MAP_BAD_ID) { printf("%s: could not get ID for volume with handle 0x%04x and " "WWID 0x%016llx\n", __func__, handle, (unsigned long long)wwid); error = ENXIO; goto out; } targ = &sassc->targets[id]; targ->tid = id; targ->handle = handle; targ->devname = wwid; TAILQ_INIT(&targ->commands); TAILQ_INIT(&targ->timedout_commands); while (!SLIST_EMPTY(&targ->luns)) { lun = SLIST_FIRST(&targ->luns); SLIST_REMOVE_HEAD(&targ->luns, lun_link); free(lun, M_MPR); } SLIST_INIT(&targ->luns); #if ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000039)) || \ (__FreeBSD_version < 902502) if ((sassc->flags & MPRSAS_IN_STARTUP) == 0) #endif mprsas_rescan_target(sc, targ); mpr_dprint(sc, MPR_MAPPING, "RAID target id %d added (WWID = 0x%jx)\n", targ->tid, wwid); out: mprsas_startup_decrement(sassc); return (error); } /** * mprsas_SSU_to_SATA_devices * @sc: per adapter object * * Looks through the target list and issues a StartStopUnit SCSI command to each * SATA direct-access device. This helps to ensure that data corruption is * avoided when the system is being shut down. This must be called after the IR * System Shutdown RAID Action is sent if in IR mode. * * Return nothing. */ static void mprsas_SSU_to_SATA_devices(struct mpr_softc *sc) { struct mprsas_softc *sassc = sc->sassc; union ccb *ccb; path_id_t pathid = cam_sim_path(sassc->sim); target_id_t targetid; struct mprsas_target *target; char path_str[64]; struct timeval cur_time, start_time; mpr_lock(sc); /* * For each target, issue a StartStopUnit command to stop the device. */ sc->SSU_started = TRUE; sc->SSU_refcount = 0; for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) { target = &sassc->targets[targetid]; if (target->handle == 0x0) { continue; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpr_dprint(sc, MPR_FAULT, "Unable to alloc CCB to stop " "unit.\n"); return; } /* * The stop_at_shutdown flag will be set if this device is * a SATA direct-access end device. */ if (target->stop_at_shutdown) { if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpr_dprint(sc, MPR_ERROR, "Unable to create " "path to stop unit.\n"); xpt_free_ccb(ccb); return; } xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); mpr_dprint(sc, MPR_INFO, "Sending StopUnit: path %s " "handle %d\n", path_str, target->handle); /* * Issue a START STOP UNIT command for the target. * Increment the SSU counter to be used to count the * number of required replies. */ mpr_dprint(sc, MPR_INFO, "Incrementing SSU count\n"); sc->SSU_refcount++; ccb->ccb_h.target_id = xpt_path_target_id(ccb->ccb_h.path); ccb->ccb_h.ppriv_ptr1 = sassc; scsi_start_stop(&ccb->csio, /*retries*/0, mprsas_stop_unit_done, MSG_SIMPLE_Q_TAG, /*start*/FALSE, /*load/eject*/0, /*immediate*/FALSE, MPR_SENSE_LEN, /*timeout*/10000); xpt_action(ccb); } } mpr_unlock(sc); /* * Wait until all of the SSU commands have completed or time has * expired (60 seconds). Pause for 100ms each time through. 
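For reference, the START STOP UNIT CCB built in the loop above, with its positional arguments annotated; start=FALSE is what makes it a spin-down, and immediate=FALSE makes the command complete only after the drive has actually stopped:

scsi_start_stop(&ccb->csio,
    /*retries*/ 0,
    mprsas_stop_unit_done,	/* completion callback */
    MSG_SIMPLE_Q_TAG,
    /*start*/ FALSE,		/* FALSE = STOP UNIT, i.e. spin down */
    /*load_eject*/ 0,
    /*immediate*/ FALSE,	/* report completion after the stop */
    MPR_SENSE_LEN,
    /*timeout*/ 10000);		/* CAM timeout, in milliseconds */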
If any * command times out, the target will be reset in the SCSI command * timeout routine. */ getmicrotime(&start_time); while (sc->SSU_refcount) { pause("mprwait", hz/10); getmicrotime(&cur_time); if ((cur_time.tv_sec - start_time.tv_sec) > 60) { mpr_dprint(sc, MPR_ERROR, "Time has expired waiting " "for SSU commands to complete.\n"); break; } } } static void mprsas_stop_unit_done(struct cam_periph *periph, union ccb *done_ccb) { struct mprsas_softc *sassc; char path_str[64]; sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str)); mpr_dprint(sassc->sc, MPR_INFO, "Completing stop unit for %s\n", path_str); if (done_ccb == NULL) return; /* * Nothing more to do except free the CCB and path. If the command * timed out, an abort reset, then target reset will be issued during * the SCSI Command process. */ xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } /** * mprsas_ir_shutdown - IR shutdown notification * @sc: per adapter object * * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that * the host system is shutting down. * * Return nothing. */ void mprsas_ir_shutdown(struct mpr_softc *sc) { u16 volume_mapping_flags; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); struct dev_mapping_table *mt_entry; u32 start_idx, end_idx; unsigned int id, found_volume = 0; struct mpr_command *cm; Mpi2RaidActionRequest_t *action; target_id_t targetid; struct mprsas_target *target; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* is IR firmware build loaded? */ if (!sc->ir_firmware) goto out; /* are there any volumes? Look at IR target IDs. */ // TODO-later, this should be looked up in the RAID config structure // when it is implemented. volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { start_idx = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) start_idx = 1; } else start_idx = sc->max_devices - sc->max_volumes; end_idx = start_idx + sc->max_volumes - 1; for (id = start_idx; id < end_idx; id++) { mt_entry = &sc->mapping_table[id]; if ((mt_entry->physical_id != 0) && (mt_entry->missing_count == 0)) { found_volume = 1; break; } } if (!found_volume) goto out; if ((cm = mpr_alloc_command(sc)) == NULL) { printf("%s: command alloc failed\n", __func__); goto out; } action = (MPI2_RAID_ACTION_REQUEST *)cm->cm_req; action->Function = MPI2_FUNCTION_RAID_ACTION; action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; mpr_lock(sc); mpr_wait_command(sc, cm, 5, CAN_SLEEP); mpr_unlock(sc); /* * Don't check for reply, just leave. */ if (cm) mpr_free_command(sc, cm); out: /* * All of the targets must have the correct value set for * 'stop_at_shutdown' for the current 'enable_ssu' sysctl variable. * * The possible values for the 'enable_ssu' variable are: * 0: disable to SSD and HDD * 1: disable only to HDD (default) * 2: disable only to SSD * 3: enable to SSD and HDD * anything else will default to 1. 
*/ for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) { target = &sc->sassc->targets[targetid]; if (target->handle == 0x0) { continue; } if (target->supports_SSU) { switch (sc->enable_ssu) { case MPR_SSU_DISABLE_SSD_DISABLE_HDD: target->stop_at_shutdown = FALSE; break; case MPR_SSU_DISABLE_SSD_ENABLE_HDD: target->stop_at_shutdown = TRUE; if (target->flags & MPR_TARGET_IS_SATA_SSD) { target->stop_at_shutdown = FALSE; } break; case MPR_SSU_ENABLE_SSD_ENABLE_HDD: target->stop_at_shutdown = TRUE; break; case MPR_SSU_ENABLE_SSD_DISABLE_HDD: default: target->stop_at_shutdown = TRUE; if ((target->flags & MPR_TARGET_IS_SATA_SSD) == 0) { target->stop_at_shutdown = FALSE; } break; } } } mprsas_SSU_to_SATA_devices(sc); } Index: head/sys/dev/mpr/mpr_table.c =================================================================== --- head/sys/dev/mpr/mpr_table.c (revision 299264) +++ head/sys/dev/mpr/mpr_table.c (revision 299265) @@ -1,517 +1,516 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* Debugging tables for MPT2 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include char * mpr_describe_table(struct mpr_table_lookup *table, u_int code) { int i; for (i = 0; table[i].string != NULL; i++) { if (table[i].code == code) return(table[i].string); } return(table[i+1].string); } struct mpr_table_lookup mpr_event_names[] = { {"LogData", 0x01}, {"StateChange", 0x02}, {"HardResetReceived", 0x05}, {"EventChange", 0x0a}, {"TaskSetFull", 0x0e}, {"SasDeviceStatusChange", 0x0f}, {"IrOperationStatus", 0x14}, {"SasDiscovery", 0x16}, {"SasBroadcastPrimitive", 0x17}, {"SasInitDeviceStatusChange", 0x18}, {"SasInitTableOverflow", 0x19}, {"SasTopologyChangeList", 0x1c}, {"SasEnclDeviceStatusChange", 0x1d}, {"IrVolume", 0x1e}, {"IrPhysicalDisk", 0x1f}, {"IrConfigurationChangeList", 0x20}, {"LogEntryAdded", 0x21}, {"SasPhyCounter", 0x22}, {"GpioInterrupt", 0x23}, {"HbdPhyEvent", 0x24}, {NULL, 0}, {"Unknown Event", 0} }; struct mpr_table_lookup mpr_phystatus_names[] = { {"NewTargetAdded", 0x01}, {"TargetGone", 0x02}, {"PHYLinkStatusChange", 0x03}, {"PHYLinkStatusUnchanged", 0x04}, {"TargetMissing", 0x05}, {NULL, 0}, {"Unknown Status", 0} }; struct mpr_table_lookup mpr_linkrate_names[] = { {"PHY disabled", 0x01}, {"Speed Negotiation Failed", 0x02}, {"SATA OOB Complete", 0x03}, {"SATA Port Selector", 0x04}, {"SMP Reset in Progress", 0x05}, {"1.5Gbps", 0x08}, {"3.0Gbps", 0x09}, {"6.0Gbps", 0x0a}, {"12.0Gbps", 0x0b}, {NULL, 0}, {"LinkRate Unknown", 0x00} }; struct mpr_table_lookup mpr_sasdev0_devtype[] = { {"End Device", 0x01}, {"Edge Expander", 0x02}, {"Fanout Expander", 0x03}, {NULL, 0}, {"No Device", 0x00} }; struct mpr_table_lookup mpr_phyinfo_reason_names[] = { {"Power On", 0x01}, {"Hard Reset", 0x02}, {"SMP Phy Control Link Reset", 0x03}, {"Loss DWORD Sync", 0x04}, {"Multiplex Sequence", 0x05}, {"I-T Nexus Loss Timer", 0x06}, {"Break Timeout Timer", 0x07}, {"PHY Test Function", 0x08}, {NULL, 0}, {"Unknown Reason", 0x00} }; struct mpr_table_lookup mpr_whoinit_names[] = { {"System BIOS", 0x01}, {"ROM BIOS", 0x02}, {"PCI Peer", 0x03}, {"Host Driver", 0x04}, {"Manufacturing", 0x05}, {NULL, 0}, {"Not Initialized", 0x00} }; struct mpr_table_lookup mpr_sasdisc_reason[] = { {"Discovery Started", 0x01}, {"Discovery Complete", 0x02}, {NULL, 0}, {"Unknown", 0x00} }; struct mpr_table_lookup mpr_sastopo_exp[] = { {"Added", 0x01}, {"Not Responding", 0x02}, {"Responding", 0x03}, {"Delay Not Responding", 0x04}, {NULL, 0}, {"Unknown", 0x00} }; struct mpr_table_lookup mpr_sasdev_reason[] = { {"SMART Data", 0x05}, {"Unsupported", 0x07}, {"Internal Device Reset", 0x08}, {"Task Abort Internal", 0x09}, {"Abort Task Set Internal", 0x0a}, {"Clear Task Set Internal", 0x0b}, {"Query Task Internal", 0x0c}, {"Async Notification", 0x0d}, {"Cmp Internal Device Reset", 0x0e}, {"Cmp Task Abort Internal", 0x0f}, {"Sata Init Failure", 0x10}, {NULL, 0}, {"Unknown", 0x00} }; void mpr_describe_devinfo(uint32_t devinfo, char *string, int len) { snprintf(string, len, "%b,%s", devinfo, "\20" "\4SataHost" "\5SmpInit" "\6StpInit" "\7SspInit" "\10SataDev" "\11SmpTarg" "\12StpTarg" "\13SspTarg" "\14Direct" "\15LsiDev" "\16AtapiDev" "\17SepDev", mpr_describe_table(mpr_sasdev0_devtype, devinfo & 0x03)); } void mpr_print_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY 
*facts) { MPR_PRINTFIELD_START(sc, "IOCFacts"); MPR_PRINTFIELD(sc, facts, MsgVersion, 0x%x); MPR_PRINTFIELD(sc, facts, HeaderVersion, 0x%x); MPR_PRINTFIELD(sc, facts, IOCNumber, %d); MPR_PRINTFIELD(sc, facts, IOCExceptions, 0x%x); MPR_PRINTFIELD(sc, facts, MaxChainDepth, %d); mpr_dprint_field(sc, MPR_XINFO, "WhoInit: %s\n", mpr_describe_table(mpr_whoinit_names, facts->WhoInit)); MPR_PRINTFIELD(sc, facts, NumberOfPorts, %d); MPR_PRINTFIELD(sc, facts, RequestCredit, %d); MPR_PRINTFIELD(sc, facts, ProductID, 0x%x); mpr_dprint_field(sc, MPR_XINFO, "IOCCapabilities: %b\n", facts->IOCCapabilities, "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"); mpr_dprint_field(sc, MPR_XINFO, "FWVersion= %d-%d-%d-%d\n", facts->FWVersion.Struct.Major, facts->FWVersion.Struct.Minor, facts->FWVersion.Struct.Unit, facts->FWVersion.Struct.Dev); MPR_PRINTFIELD(sc, facts, IOCRequestFrameSize, %d); MPR_PRINTFIELD(sc, facts, MaxInitiators, %d); MPR_PRINTFIELD(sc, facts, MaxTargets, %d); MPR_PRINTFIELD(sc, facts, MaxSasExpanders, %d); MPR_PRINTFIELD(sc, facts, MaxEnclosures, %d); mpr_dprint_field(sc, MPR_XINFO, "ProtocolFlags: %b\n", facts->ProtocolFlags, "\20" "\1ScsiTarg" "\2ScsiInit"); MPR_PRINTFIELD(sc, facts, HighPriorityCredit, %d); MPR_PRINTFIELD(sc, facts, MaxReplyDescriptorPostQueueDepth, %d); MPR_PRINTFIELD(sc, facts, ReplyFrameSize, %d); MPR_PRINTFIELD(sc, facts, MaxVolumes, %d); MPR_PRINTFIELD(sc, facts, MaxDevHandle, %d); MPR_PRINTFIELD(sc, facts, MaxPersistentEntries, %d); } void mpr_print_portfacts(struct mpr_softc *sc, MPI2_PORT_FACTS_REPLY *facts) { MPR_PRINTFIELD_START(sc, "PortFacts"); MPR_PRINTFIELD(sc, facts, PortNumber, %d); MPR_PRINTFIELD(sc, facts, PortType, 0x%x); MPR_PRINTFIELD(sc, facts, MaxPostedCmdBuffers, %d); } void mpr_print_event(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event) { MPR_EVENTFIELD_START(sc, "EventReply"); MPR_EVENTFIELD(sc, event, EventDataLength, %d); MPR_EVENTFIELD(sc, event, AckRequired, %d); mpr_dprint_field(sc, MPR_EVENT, "Event: %s (0x%x)\n", mpr_describe_table(mpr_event_names, event->Event), event->Event); MPR_EVENTFIELD(sc, event, EventContext, 0x%x); } void mpr_print_sasdev0(struct mpr_softc *sc, MPI2_CONFIG_PAGE_SAS_DEV_0 *buf) { MPR_PRINTFIELD_START(sc, "SAS Device Page 0"); MPR_PRINTFIELD(sc, buf, Slot, %d); MPR_PRINTFIELD(sc, buf, EnclosureHandle, 0x%x); mpr_dprint_field(sc, MPR_XINFO, "SASAddress: 0x%jx\n", mpr_to_u64(&buf->SASAddress)); MPR_PRINTFIELD(sc, buf, ParentDevHandle, 0x%x); MPR_PRINTFIELD(sc, buf, PhyNum, %d); MPR_PRINTFIELD(sc, buf, AccessStatus, 0x%x); MPR_PRINTFIELD(sc, buf, DevHandle, 0x%x); MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, 0x%x); MPR_PRINTFIELD(sc, buf, ZoneGroup, %d); mpr_dprint_field(sc, MPR_XINFO, "DeviceInfo: %b,%s\n", buf->DeviceInfo, "\20" "\4SataHost" "\5SmpInit" "\6StpInit" "\7SspInit" "\10SataDev" "\11SmpTarg" "\12StpTarg" "\13SspTarg" "\14Direct" "\15LsiDev" "\16AtapiDev" "\17SepDev", mpr_describe_table(mpr_sasdev0_devtype, buf->DeviceInfo & 0x03)); MPR_PRINTFIELD(sc, buf, Flags, 0x%x); MPR_PRINTFIELD(sc, buf, PhysicalPort, %d); MPR_PRINTFIELD(sc, buf, MaxPortConnections, %d); mpr_dprint_field(sc, MPR_XINFO, "DeviceName: 0x%jx\n", mpr_to_u64(&buf->DeviceName)); MPR_PRINTFIELD(sc, buf, PortGroups, %d); MPR_PRINTFIELD(sc, buf, DmaGroup, %d); MPR_PRINTFIELD(sc, buf, ControlGroup, %d); } void mpr_print_evt_sas(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event) { 
mpr_print_event(sc, event); switch(event->Event) { case MPI2_EVENT_SAS_DISCOVERY: { MPI2_EVENT_DATA_SAS_DISCOVERY *data; data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)&event->EventData; mpr_dprint_field(sc, MPR_EVENT, "Flags: %b\n", data->Flags, "\20" "\1InProgress" "\2DeviceChange"); mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n", mpr_describe_table(mpr_sasdisc_reason, data->ReasonCode)); MPR_EVENTFIELD(sc, data, PhysicalPort, %d); mpr_dprint_field(sc, MPR_EVENT, "DiscoveryStatus: %b\n", data->DiscoveryStatus, "\20" "\1Loop" "\2UnaddressableDev" "\3DupSasAddr" "\5SmpTimeout" "\6ExpRouteFull" "\7RouteIndexError" "\10SmpFailed" "\11SmpCrcError" "\12SubSubLink" "\13TableTableLink" "\14UnsupDevice" "\15TableSubLink" "\16MultiDomain" "\17MultiSub" "\20MultiSubSub" "\34DownstreamInit" "\35MaxPhys" "\36MaxTargs" "\37MaxExpanders" "\40MaxEnclosures"); break; } case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: { MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data; MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy; int i, phynum; data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *) &event->EventData; MPR_EVENTFIELD(sc, data, EnclosureHandle, 0x%x); MPR_EVENTFIELD(sc, data, ExpanderDevHandle, 0x%x); MPR_EVENTFIELD(sc, data, NumPhys, %d); MPR_EVENTFIELD(sc, data, NumEntries, %d); MPR_EVENTFIELD(sc, data, StartPhyNum, %d); mpr_dprint_field(sc, MPR_EVENT, "ExpStatus: %s (0x%x)\n", mpr_describe_table(mpr_sastopo_exp, data->ExpStatus), data->ExpStatus); MPR_EVENTFIELD(sc, data, PhysicalPort, %d); for (i = 0; i < data->NumEntries; i++) { phy = &data->PHY[i]; phynum = data->StartPhyNum + i; mpr_dprint_field(sc, MPR_EVENT, "PHY[%d].AttachedDevHandle: 0x%04x\n", phynum, phy->AttachedDevHandle); mpr_dprint_field(sc, MPR_EVENT, "PHY[%d].LinkRate: %s (0x%x)\n", phynum, mpr_describe_table(mpr_linkrate_names, (phy->LinkRate >> 4) & 0xf), phy->LinkRate); - mpr_dprint_field(sc,MPR_EVENT,"PHY[%d].PhyStatus: " - "%s\n", phynum, - mpr_describe_table(mpr_phystatus_names, + mpr_dprint_field(sc,MPR_EVENT,"PHY[%d].PhyStatus: %s\n", + phynum, mpr_describe_table(mpr_phystatus_names, phy->PhyStatus)); } break; } case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: { MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE *data; data = (MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE *) &event->EventData; MPR_EVENTFIELD(sc, data, EnclosureHandle, 0x%x); mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n", mpr_describe_table(mpr_sastopo_exp, data->ReasonCode)); MPR_EVENTFIELD(sc, data, PhysicalPort, %d); MPR_EVENTFIELD(sc, data, NumSlots, %d); MPR_EVENTFIELD(sc, data, StartSlot, %d); MPR_EVENTFIELD(sc, data, PhyBits, 0x%x); break; } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: { MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *data; data = (MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *) &event->EventData; MPR_EVENTFIELD(sc, data, TaskTag, 0x%x); mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n", mpr_describe_table(mpr_sasdev_reason, data->ReasonCode)); MPR_EVENTFIELD(sc, data, ASC, 0x%x); MPR_EVENTFIELD(sc, data, ASCQ, 0x%x); MPR_EVENTFIELD(sc, data, DevHandle, 0x%x); mpr_dprint_field(sc, MPR_EVENT, "SASAddress: 0x%jx\n", mpr_to_u64(&data->SASAddress)); } default: break; } } void mpr_print_expander1(struct mpr_softc *sc, MPI2_CONFIG_PAGE_EXPANDER_1 *buf) { MPR_PRINTFIELD_START(sc, "SAS Expander Page 1 #%d", buf->Phy); MPR_PRINTFIELD(sc, buf, PhysicalPort, %d); MPR_PRINTFIELD(sc, buf, NumPhys, %d); MPR_PRINTFIELD(sc, buf, Phy, %d); MPR_PRINTFIELD(sc, buf, NumTableEntriesProgrammed, %d); mpr_dprint_field(sc, MPR_XINFO, "ProgrammedLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, 
(buf->ProgrammedLinkRate >> 4) & 0xf), buf->ProgrammedLinkRate); mpr_dprint_field(sc, MPR_XINFO, "HwLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, (buf->HwLinkRate >> 4) & 0xf), buf->HwLinkRate); MPR_PRINTFIELD(sc, buf, AttachedDevHandle, 0x%04x); mpr_dprint_field(sc, MPR_XINFO, "PhyInfo Reason: %s (0x%x)\n", mpr_describe_table(mpr_phyinfo_reason_names, (buf->PhyInfo >> 16) & 0xf), buf->PhyInfo); mpr_dprint_field(sc, MPR_XINFO, "AttachedDeviceInfo: %b,%s\n", buf->AttachedDeviceInfo, "\20" "\4SATAhost" "\5SMPinit" "\6STPinit" "\7SSPinit" "\10SATAdev" "\11SMPtarg" "\12STPtarg" "\13SSPtarg" "\14Direct" "\15LSIdev" "\16ATAPIdev" "\17SEPdev", mpr_describe_table(mpr_sasdev0_devtype, buf->AttachedDeviceInfo & 0x03)); MPR_PRINTFIELD(sc, buf, ExpanderDevHandle, 0x%04x); MPR_PRINTFIELD(sc, buf, ChangeCount, %d); mpr_dprint_field(sc, MPR_XINFO, "NegotiatedLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, buf->NegotiatedLinkRate & 0xf), buf->NegotiatedLinkRate); MPR_PRINTFIELD(sc, buf, PhyIdentifier, %d); MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, %d); MPR_PRINTFIELD(sc, buf, DiscoveryInfo, 0x%x); MPR_PRINTFIELD(sc, buf, AttachedPhyInfo, 0x%x); mpr_dprint_field(sc, MPR_XINFO, "AttachedPhyInfo Reason: %s (0x%x)\n", mpr_describe_table(mpr_phyinfo_reason_names, buf->AttachedPhyInfo & 0xf), buf->AttachedPhyInfo); MPR_PRINTFIELD(sc, buf, ZoneGroup, %d); MPR_PRINTFIELD(sc, buf, SelfConfigStatus, 0x%x); } void mpr_print_sasphy0(struct mpr_softc *sc, MPI2_CONFIG_PAGE_SAS_PHY_0 *buf) { MPR_PRINTFIELD_START(sc, "SAS PHY Page 0"); MPR_PRINTFIELD(sc, buf, OwnerDevHandle, 0x%04x); MPR_PRINTFIELD(sc, buf, AttachedDevHandle, 0x%04x); MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, %d); mpr_dprint_field(sc, MPR_XINFO, "AttachedPhyInfo Reason: %s (0x%x)\n", mpr_describe_table(mpr_phyinfo_reason_names, buf->AttachedPhyInfo & 0xf), buf->AttachedPhyInfo); mpr_dprint_field(sc, MPR_XINFO, "ProgrammedLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, (buf->ProgrammedLinkRate >> 4) & 0xf), buf->ProgrammedLinkRate); mpr_dprint_field(sc, MPR_XINFO, "HwLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, (buf->HwLinkRate >> 4) & 0xf), buf->HwLinkRate); MPR_PRINTFIELD(sc, buf, ChangeCount, %d); MPR_PRINTFIELD(sc, buf, Flags, 0x%x); mpr_dprint_field(sc, MPR_XINFO, "PhyInfo Reason: %s (0x%x)\n", mpr_describe_table(mpr_phyinfo_reason_names, (buf->PhyInfo >> 16) & 0xf), buf->PhyInfo); mpr_dprint_field(sc, MPR_XINFO, "NegotiatedLinkRate: %s (0x%x)\n", mpr_describe_table(mpr_linkrate_names, buf->NegotiatedLinkRate & 0xf), buf->NegotiatedLinkRate); } void mpr_print_sgl(struct mpr_softc *sc, struct mpr_command *cm, int offset) { MPI2_IEEE_SGE_SIMPLE64 *ieee_sge; MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; MPI2_SGE_SIMPLE64 *sge; MPI2_REQUEST_HEADER *req; struct mpr_chain *chain = NULL; char *frame; u_int i = 0, flags, length; req = (MPI2_REQUEST_HEADER *)cm->cm_req; frame = (char *)cm->cm_req; ieee_sge = (MPI2_IEEE_SGE_SIMPLE64 *)&frame[offset * 4]; sge = (MPI2_SGE_SIMPLE64 *)&frame[offset * 4]; printf("SGL for command %p\n", cm); hexdump(frame, 128, NULL, 0); while ((frame != NULL) && (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE))) { flags = ieee_sge->Flags; length = le32toh(ieee_sge->Length); printf("IEEE seg%d flags=0x%02x len=0x%08x addr=0x%016jx\n", i, flags, length, mpr_to_u64(&ieee_sge->Address)); if (flags & MPI25_IEEE_SGE_FLAGS_END_OF_LIST) break; ieee_sge++; i++; if (flags & MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT) { ieee_sgc = (MPI25_IEEE_SGE_CHAIN64 *)ieee_sge; printf("IEEE chain flags=0x%x 
len=0x%x Offset=0x%x " "Address=0x%016jx\n", ieee_sgc->Flags, le32toh(ieee_sgc->Length), ieee_sgc->NextChainOffset, mpr_to_u64(&ieee_sgc->Address)); if (chain == NULL) chain = TAILQ_FIRST(&cm->cm_chain_list); else chain = TAILQ_NEXT(chain, chain_link); frame = (char *)chain->chain; ieee_sge = (MPI2_IEEE_SGE_SIMPLE64 *)frame; hexdump(frame, 128, NULL, 0); } } while ((frame != NULL) && (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { flags = le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT; printf("seg%d flags=0x%02x len=0x%06x addr=0x%016jx\n", i, flags, le32toh(sge->FlagsLength) & 0xffffff, mpr_to_u64(&sge->Address)); if (flags & (MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_END_OF_BUFFER)) break; sge++; i++; } } void mpr_print_scsiio_cmd(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_SCSI_IO_REQUEST *req; req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; mpr_print_sgl(sc, cm, req->SGLOffset0); } Index: head/sys/dev/mpr/mpr_user.c =================================================================== --- head/sys/dev/mpr/mpr_user.c (revision 299264) +++ head/sys/dev/mpr/mpr_user.c (revision 299265) @@ -1,2470 +1,2468 @@ /*- * Copyright (c) 2008 Yahoo!, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD userland interface */ /*- * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static d_open_t mpr_open; static d_close_t mpr_close; static d_ioctl_t mpr_ioctl_devsw; static struct cdevsw mpr_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = mpr_open, .d_close = mpr_close, .d_ioctl = mpr_ioctl_devsw, .d_name = "mpr", }; typedef int (mpr_user_f)(struct mpr_command *, struct mpr_usr_command *); static mpr_user_f mpi_pre_ioc_facts; static mpr_user_f mpi_pre_port_facts; static mpr_user_f mpi_pre_fw_download; static mpr_user_f mpi_pre_fw_upload; static mpr_user_f mpi_pre_sata_passthrough; static mpr_user_f mpi_pre_smp_passthrough; static mpr_user_f mpi_pre_config; static mpr_user_f mpi_pre_sas_io_unit_control; static int mpr_user_read_cfg_header(struct mpr_softc *, - struct mpr_cfg_page_req *); + struct mpr_cfg_page_req *); static int mpr_user_read_cfg_page(struct mpr_softc *, - struct mpr_cfg_page_req *, void *); + struct mpr_cfg_page_req *, void *); static int mpr_user_read_extcfg_header(struct mpr_softc *, - struct mpr_ext_cfg_page_req *); + struct mpr_ext_cfg_page_req *); static int mpr_user_read_extcfg_page(struct mpr_softc *, - struct mpr_ext_cfg_page_req *, void *); + struct mpr_ext_cfg_page_req *, void *); static int mpr_user_write_cfg_page(struct mpr_softc *, - struct mpr_cfg_page_req *, void *); + struct mpr_cfg_page_req *, void *); static int mpr_user_setup_request(struct mpr_command *, - struct mpr_usr_command *); + struct mpr_usr_command *); static int mpr_user_command(struct mpr_softc *, struct mpr_usr_command *); static int mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data); static void mpr_user_get_adapter_data(struct mpr_softc *sc, mpr_adapter_data_t *data); -static void mpr_user_read_pci_info(struct mpr_softc *sc, - mpr_pci_info_t *data); +static void mpr_user_read_pci_info(struct mpr_softc *sc, mpr_pci_info_t *data); static uint8_t mpr_get_fw_diag_buffer_number(struct mpr_softc *sc, uint32_t unique_id); static int mpr_post_fw_diag_buffer(struct mpr_softc *sc, mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code); static int mpr_release_fw_diag_buffer(struct mpr_softc *sc, mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, uint32_t diag_type); static int mpr_diag_register(struct mpr_softc *sc, mpr_fw_diag_register_t *diag_register, uint32_t *return_code); static int 
mpr_diag_unregister(struct mpr_softc *sc, mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code); -static int mpr_diag_query(struct mpr_softc *sc, - mpr_fw_diag_query_t *diag_query, uint32_t *return_code); +static int mpr_diag_query(struct mpr_softc *sc, mpr_fw_diag_query_t *diag_query, + uint32_t *return_code); static int mpr_diag_read_buffer(struct mpr_softc *sc, mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, uint32_t *return_code); static int mpr_diag_release(struct mpr_softc *sc, mpr_fw_diag_release_t *diag_release, uint32_t *return_code); static int mpr_do_diag_action(struct mpr_softc *sc, uint32_t action, uint8_t *diag_action, uint32_t length, uint32_t *return_code); -static int mpr_user_diag_action(struct mpr_softc *sc, - mpr_diag_action_t *data); -static void mpr_user_event_query(struct mpr_softc *sc, - mpr_event_query_t *data); +static int mpr_user_diag_action(struct mpr_softc *sc, mpr_diag_action_t *data); +static void mpr_user_event_query(struct mpr_softc *sc, mpr_event_query_t *data); static void mpr_user_event_enable(struct mpr_softc *sc, mpr_event_enable_t *data); static int mpr_user_event_report(struct mpr_softc *sc, mpr_event_report_t *data); static int mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data); static int mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data); static MALLOC_DEFINE(M_MPRUSER, "mpr_user", "Buffers for mpr(4) ioctls"); /* Macros from compat/freebsd32/freebsd32.h */ #define PTRIN(v) (void *)(uintptr_t)(v) #define PTROUT(v) (uint32_t)(uintptr_t)(v) #define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0) #define PTRIN_CP(src,dst,fld) \ do { (dst).fld = PTRIN((src).fld); } while (0) #define PTROUT_CP(src,dst,fld) \ do { (dst).fld = PTROUT((src).fld); } while (0) /* * MPI functions that support IEEE SGLs for SAS3. */ static uint8_t ieee_sgl_func_list[] = { MPI2_FUNCTION_SCSI_IO_REQUEST, MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, MPI2_FUNCTION_SMP_PASSTHROUGH, MPI2_FUNCTION_SATA_PASSTHROUGH, MPI2_FUNCTION_FW_UPLOAD, MPI2_FUNCTION_FW_DOWNLOAD, MPI2_FUNCTION_TARGET_ASSIST, MPI2_FUNCTION_TARGET_STATUS_SEND, MPI2_FUNCTION_TOOLBOX }; int mpr_attach_user(struct mpr_softc *sc) { int unit; unit = device_get_unit(sc->mpr_dev); - sc->mpr_cdev = make_dev(&mpr_cdevsw, unit, UID_ROOT, GID_OPERATOR, - 0640, "mpr%d", unit); - if (sc->mpr_cdev == NULL) { + sc->mpr_cdev = make_dev(&mpr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, + "mpr%d", unit); + + if (sc->mpr_cdev == NULL) return (ENOMEM); - } + sc->mpr_cdev->si_drv1 = sc; return (0); } void mpr_detach_user(struct mpr_softc *sc) { /* XXX: do a purge of pending requests? */ if (sc->mpr_cdev != NULL) destroy_dev(sc->mpr_cdev); } static int mpr_open(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int mpr_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int mpr_user_read_cfg_header(struct mpr_softc *sc, struct mpr_cfg_page_req *page_req) { MPI2_CONFIG_PAGE_HEADER *hdr; struct mpr_config_params params; int error; hdr = ¶ms.hdr.Struct; params.action = MPI2_CONFIG_ACTION_PAGE_HEADER; params.page_address = le32toh(page_req->page_address); hdr->PageVersion = 0; hdr->PageLength = 0; hdr->PageNumber = page_req->header.PageNumber; hdr->PageType = page_req->header.PageType; params.buffer = NULL; params.length = 0; params.callback = NULL; if ((error = mpr_read_config_page(sc, ¶ms)) != 0) { /* * Leave the request. Without resetting the chip, it's * still owned by it and we'll just get into trouble * freeing it now. 
Mark it as abandoned so that if it * shows up later it can be freed. */ mpr_printf(sc, "read_cfg_header timed out\n"); return (ETIMEDOUT); } page_req->ioc_status = htole16(params.status); if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { bcopy(hdr, &page_req->header, sizeof(page_req->header)); } return (0); } static int -mpr_user_read_cfg_page(struct mpr_softc *sc, - struct mpr_cfg_page_req *page_req, void *buf) +mpr_user_read_cfg_page(struct mpr_softc *sc, struct mpr_cfg_page_req *page_req, + void *buf) { MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr; struct mpr_config_params params; int error; reqhdr = buf; hdr = ¶ms.hdr.Struct; hdr->PageVersion = reqhdr->PageVersion; hdr->PageLength = reqhdr->PageLength; hdr->PageNumber = reqhdr->PageNumber; hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK; params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; params.page_address = le32toh(page_req->page_address); params.buffer = buf; params.length = le32toh(page_req->len); params.callback = NULL; if ((error = mpr_read_config_page(sc, ¶ms)) != 0) { mpr_printf(sc, "mpr_user_read_cfg_page timed out\n"); return (ETIMEDOUT); } page_req->ioc_status = htole16(params.status); return (0); } static int mpr_user_read_extcfg_header(struct mpr_softc *sc, struct mpr_ext_cfg_page_req *ext_page_req) { MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; struct mpr_config_params params; int error; hdr = ¶ms.hdr.Ext; params.action = MPI2_CONFIG_ACTION_PAGE_HEADER; hdr->PageVersion = ext_page_req->header.PageVersion; hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; hdr->ExtPageLength = 0; hdr->PageNumber = ext_page_req->header.PageNumber; hdr->ExtPageType = ext_page_req->header.ExtPageType; params.page_address = le32toh(ext_page_req->page_address); params.buffer = NULL; params.length = 0; params.callback = NULL; if ((error = mpr_read_config_page(sc, ¶ms)) != 0) { /* * Leave the request. Without resetting the chip, it's * still owned by it and we'll just get into trouble * freeing it now. Mark it as abandoned so that if it * shows up later it can be freed. 
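 * (The hazard: the IOC can still DMA a reply into the abandoned
 * frame at any time, so recycling the frame early could corrupt an
 * unrelated command.)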
*/ mpr_printf(sc, "mpr_user_read_extcfg_header timed out\n"); return (ETIMEDOUT); } ext_page_req->ioc_status = htole16(params.status); if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) { ext_page_req->header.PageVersion = hdr->PageVersion; ext_page_req->header.PageNumber = hdr->PageNumber; ext_page_req->header.PageType = hdr->PageType; ext_page_req->header.ExtPageLength = hdr->ExtPageLength; ext_page_req->header.ExtPageType = hdr->ExtPageType; } return (0); } static int mpr_user_read_extcfg_page(struct mpr_softc *sc, struct mpr_ext_cfg_page_req *ext_page_req, void *buf) { MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr; struct mpr_config_params params; int error; reqhdr = buf; hdr = ¶ms.hdr.Ext; params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; params.page_address = le32toh(ext_page_req->page_address); hdr->PageVersion = reqhdr->PageVersion; hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; hdr->PageNumber = reqhdr->PageNumber; hdr->ExtPageType = reqhdr->ExtPageType; hdr->ExtPageLength = reqhdr->ExtPageLength; params.buffer = buf; params.length = le32toh(ext_page_req->len); params.callback = NULL; if ((error = mpr_read_config_page(sc, ¶ms)) != 0) { mpr_printf(sc, "mpr_user_read_extcfg_page timed out\n"); return (ETIMEDOUT); } ext_page_req->ioc_status = htole16(params.status); return (0); } static int mpr_user_write_cfg_page(struct mpr_softc *sc, struct mpr_cfg_page_req *page_req, void *buf) { MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr; struct mpr_config_params params; u_int hdr_attr; int error; reqhdr = buf; hdr = ¶ms.hdr.Struct; hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK; if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE && hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) { mpr_printf(sc, "page type 0x%x not changeable\n", reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK); return (EINVAL); } /* * There isn't any point in restoring stripped out attributes * if you then mask them going down to issue the request. */ hdr->PageVersion = reqhdr->PageVersion; hdr->PageLength = reqhdr->PageLength; hdr->PageNumber = reqhdr->PageNumber; hdr->PageType = reqhdr->PageType; params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; params.page_address = le32toh(page_req->page_address); params.buffer = buf; params.length = le32toh(page_req->len); params.callback = NULL; if ((error = mpr_write_config_page(sc, ¶ms)) != 0) { mpr_printf(sc, "mpr_write_cfg_page timed out\n"); return (ETIMEDOUT); } page_req->ioc_status = htole16(params.status); return (0); } void mpr_init_sge(struct mpr_command *cm, void *req, void *sge) { int off, space; space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; off = (uintptr_t)sge - (uintptr_t)req; KASSERT(off < space, ("bad pointers %p %p, off %d, space %d", req, sge, off, space)); cm->cm_sge = sge; cm->cm_sglsize = space - off; } /* * Prepare the mpr_command for an IOC_FACTS request. */ static int mpi_pre_ioc_facts(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req; MPI2_IOC_FACTS_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); cm->cm_sge = NULL; cm->cm_sglsize = 0; return (0); } /* * Prepare the mpr_command for a PORT_FACTS request. 
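 *
 * As with the other mpi_pre_* handlers, the only work here is to check
 * that the user-supplied request/reply lengths match the MPI structures
 * and to clear the SGL bookkeeping; a sketch of the recurring pattern:
 *
 *	if (cmd->req_len != sizeof(MPI2_PORT_FACTS_REQUEST))
 *		return (EINVAL);
 *	if (cmd->rpl_len != sizeof(MPI2_PORT_FACTS_REPLY))
 *		return (EINVAL);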
*/ static int mpi_pre_port_facts(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req; MPI2_PORT_FACTS_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); cm->cm_sge = NULL; cm->cm_sglsize = 0; return (0); } /* * Prepare the mpr_command for a FW_DOWNLOAD request. */ static int mpi_pre_fw_download(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI25_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req; MPI2_FW_DOWNLOAD_REPLY *rpl; int error; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); if (cmd->len == 0) return (EINVAL); error = copyin(cmd->buf, cm->cm_data, cmd->len); if (error != 0) return (error); mpr_init_sge(cm, req, &req->SGL); /* * For now, the F/W image must be provided in a single request. */ if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0) return (EINVAL); if (req->TotalImageSize != cmd->len) return (EINVAL); req->ImageOffset = 0; req->ImageSize = cmd->len; cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; return (mpr_push_ieee_sge(cm, &req->SGL, 0)); } /* * Prepare the mpr_command for a FW_UPLOAD request. */ static int mpi_pre_fw_upload(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI25_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req; MPI2_FW_UPLOAD_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); mpr_init_sge(cm, req, &req->SGL); if (cmd->len == 0) { /* Perhaps just asking what the size of the fw is? */ return (0); } req->ImageOffset = 0; req->ImageSize = cmd->len; cm->cm_flags |= MPR_CM_FLAGS_DATAIN; return (mpr_push_ieee_sge(cm, &req->SGL, 0)); } /* * Prepare the mpr_command for a SATA_PASSTHROUGH request. */ static int mpi_pre_sata_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req; MPI2_SATA_PASSTHROUGH_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); mpr_init_sge(cm, req, &req->SGL); return (0); } /* * Prepare the mpr_command for a SMP_PASSTHROUGH request. */ static int mpi_pre_smp_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req; MPI2_SMP_PASSTHROUGH_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); mpr_init_sge(cm, req, &req->SGL); return (0); } /* * Prepare the mpr_command for a CONFIG request. */ static int mpi_pre_config(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req; MPI2_CONFIG_REPLY *rpl; if (cmd->req_len != sizeof *req) return (EINVAL); if (cmd->rpl_len != sizeof *rpl) return (EINVAL); mpr_init_sge(cm, req, &req->PageBufferSGE); return (0); } /* * Prepare the mpr_command for a SAS_IO_UNIT_CONTROL request. */ static int mpi_pre_sas_io_unit_control(struct mpr_command *cm, struct mpr_usr_command *cmd) { cm->cm_sge = NULL; cm->cm_sglsize = 0; return (0); } /* * A set of functions to prepare an mpr_command for the various * supported requests. 
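 *
 * mpr_user_setup_request() scans this table for a matching MPI Function
 * code, stopping at the sentinel entry whose handler is NULL. Supporting
 * another function is one new table entry before the sentinel, e.g.
 * (hypothetical handler name):
 *
 *	{ MPI2_FUNCTION_RAID_ACTION, mpi_pre_raid_action },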
*/ struct mpr_user_func { U8 Function; mpr_user_f *f_pre; } mpr_user_func_list[] = { { MPI2_FUNCTION_IOC_FACTS, mpi_pre_ioc_facts }, { MPI2_FUNCTION_PORT_FACTS, mpi_pre_port_facts }, { MPI2_FUNCTION_FW_DOWNLOAD, mpi_pre_fw_download }, { MPI2_FUNCTION_FW_UPLOAD, mpi_pre_fw_upload }, { MPI2_FUNCTION_SATA_PASSTHROUGH, mpi_pre_sata_passthrough }, { MPI2_FUNCTION_SMP_PASSTHROUGH, mpi_pre_smp_passthrough}, { MPI2_FUNCTION_CONFIG, mpi_pre_config}, { MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, mpi_pre_sas_io_unit_control }, { 0xFF, NULL } /* list end */ }; static int mpr_user_setup_request(struct mpr_command *cm, struct mpr_usr_command *cmd) { MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; struct mpr_user_func *f; for (f = mpr_user_func_list; f->f_pre != NULL; f++) { if (hdr->Function == f->Function) return (f->f_pre(cm, cmd)); } return (EINVAL); } static int mpr_user_command(struct mpr_softc *sc, struct mpr_usr_command *cmd) { MPI2_REQUEST_HEADER *hdr; MPI2_DEFAULT_REPLY *rpl; void *buf = NULL; struct mpr_command *cm = NULL; int err = 0; int sz; mpr_lock(sc); cm = mpr_alloc_command(sc); if (cm == NULL) { mpr_printf(sc, "%s: no mpr requests\n", __func__); err = ENOMEM; goto Ret; } mpr_unlock(sc); hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; mpr_dprint(sc, MPR_USER, "%s: req %p %d rpl %p %d\n", __func__, cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len); if (cmd->req_len > (int)sc->facts->IOCRequestFrameSize * 4) { err = EINVAL; goto RetFreeUnlocked; } err = copyin(cmd->req, hdr, cmd->req_len); if (err != 0) goto RetFreeUnlocked; mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__, hdr->Function, hdr->MsgFlags); if (cmd->len > 0) { buf = malloc(cmd->len, M_MPRUSER, M_WAITOK|M_ZERO); if (!buf) { mpr_printf(sc, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } cm->cm_data = buf; cm->cm_length = cmd->len; } else { cm->cm_data = NULL; cm->cm_length = 0; } cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; err = mpr_user_setup_request(cm, cmd); if (err == EINVAL) { mpr_printf(sc, "%s: unsupported parameter or unsupported " "function in request (function = 0x%X)\n", __func__, hdr->Function); } if (err != 0) goto RetFreeUnlocked; mpr_lock(sc); err = mpr_wait_command(sc, cm, 30, CAN_SLEEP); if (err) { mpr_printf(sc, "%s: invalid request: error %d\n", __func__, err); goto Ret; } rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; if (rpl != NULL) sz = rpl->MsgLength * 4; else sz = 0; if (sz > cmd->rpl_len) { mpr_printf(sc, "%s: user reply buffer (%d) smaller than " "returned buffer (%d)\n", __func__, cmd->rpl_len, sz); sz = cmd->rpl_len; } mpr_unlock(sc); copyout(rpl, cmd->rpl, sz); if (buf != NULL) copyout(buf, cmd->buf, cmd->len); mpr_dprint(sc, MPR_USER, "%s: reply size %d\n", __func__, sz); RetFreeUnlocked: mpr_lock(sc); if (cm != NULL) mpr_free_command(sc, cm); Ret: mpr_unlock(sc); if (buf != NULL) free(buf, M_MPRUSER); return (err); } static int mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data) { MPI2_REQUEST_HEADER *hdr, tmphdr; MPI2_DEFAULT_REPLY *rpl; struct mpr_command *cm = NULL; int i, err = 0, dir = 0, sz; uint8_t tool, function = 0; u_int sense_len; struct mprsas_target *targ = NULL; /* * Only allow one passthru command at a time. Use the MPR_FLAGS_BUSY * bit to denote that a passthru is being processed. 
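 *
 * Userland sees EBUSY when it loses this race, so a tool that issues
 * several passthru requests must serialize or retry them; an
 * illustrative userland fragment:
 *
 *	mpr_pass_thru_t pt = { ... filled in by the caller ... };
 *
 *	while (ioctl(fd, MPTIOCTL_PASS_THRU, &pt) == -1 && errno == EBUSY)
 *		usleep(1000);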
*/ mpr_lock(sc); if (sc->mpr_flags & MPR_FLAGS_BUSY) { mpr_dprint(sc, MPR_USER, "%s: Only one passthru command " "allowed at a time.", __func__); mpr_unlock(sc); return (EBUSY); } sc->mpr_flags |= MPR_FLAGS_BUSY; mpr_unlock(sc); /* * Do some validation on data direction. Valid cases are: * 1) DataSize is 0 and direction is NONE * 2) DataSize is non-zero and one of: * a) direction is READ or * b) direction is WRITE or * c) direction is BOTH and DataOutSize is non-zero * If valid and the direction is BOTH, change the direction to READ. * If valid and the direction is not BOTH, make sure DataOutSize is 0. */ if (((data->DataSize == 0) && (data->DataDirection == MPR_PASS_THRU_DIRECTION_NONE)) || ((data->DataSize != 0) && ((data->DataDirection == MPR_PASS_THRU_DIRECTION_READ) || (data->DataDirection == MPR_PASS_THRU_DIRECTION_WRITE) || ((data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH) && (data->DataOutSize != 0))))) { if (data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH) data->DataDirection = MPR_PASS_THRU_DIRECTION_READ; else data->DataOutSize = 0; } else return (EINVAL); mpr_dprint(sc, MPR_USER, "%s: req 0x%jx %d rpl 0x%jx %d " "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__, data->PtrRequest, data->RequestSize, data->PtrReply, data->ReplySize, data->PtrData, data->DataSize, data->PtrDataOut, data->DataOutSize, data->DataDirection); /* * Copy in the header so we know what we're dealing with before we * commit to allocating a command for it. */ err = copyin(PTRIN(data->PtrRequest), &tmphdr, data->RequestSize); if (err != 0) goto RetFreeUnlocked; if (data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4) { err = EINVAL; goto RetFreeUnlocked; } function = tmphdr.Function; mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__, function, tmphdr.MsgFlags); /* * Handle a passthru TM request. */ if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) { MPI2_SCSI_TASK_MANAGE_REQUEST *task; mpr_lock(sc); cm = mprsas_alloc_tm(sc); if (cm == NULL) { err = EINVAL; goto Ret; } /* Copy the header in. Only a small fixup is needed. */ task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; bcopy(&tmphdr, task, data->RequestSize); task->TaskMID = cm->cm_desc.Default.SMID; cm->cm_data = NULL; cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; cm->cm_complete = NULL; cm->cm_complete_data = NULL; targ = mprsas_find_target_by_handle(sc->sassc, 0, task->DevHandle); if (targ == NULL) { mpr_dprint(sc, MPR_INFO, "%s %d: invalid handle for requested TM 0x%x\n", __func__, __LINE__, task->DevHandle); err = 1; } else { mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); err = mpr_wait_command(sc, cm, 30, CAN_SLEEP); } if (err != 0) { err = EIO; mpr_dprint(sc, MPR_FAULT, "%s: task management failed\n", __func__); } /* * Copy the reply data and sense data to user space. */ if (cm->cm_reply != NULL) { rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; sz = rpl->MsgLength * 4; if (sz > data->ReplySize) { mpr_printf(sc, "%s: user reply buffer (%d) " "smaller than returned buffer (%d)\n", __func__, data->ReplySize, sz); } mpr_unlock(sc); copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize); mpr_lock(sc); } mprsas_free_tm(sc, cm); goto Ret; } mpr_lock(sc); cm = mpr_alloc_command(sc); if (cm == NULL) { mpr_printf(sc, "%s: no mpr requests\n", __func__); err = ENOMEM; goto Ret; } mpr_unlock(sc); hdr = (MPI2_REQUEST_HEADER *)cm->cm_req; bcopy(&tmphdr, hdr, data->RequestSize); /* * Do some checking to make sure the IOCTL request contains a valid * request. 
Then set the SGL info. */ mpr_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize)); /* * Set up for read, write or both. From check above, DataOutSize will * be 0 if direction is READ or WRITE, but it will have some non-zero * value if the direction is BOTH. So, just use the biggest size to get * the cm_data buffer size. If direction is BOTH, 2 SGLs need to be set * up; the first is for the request and the second will contain the * response data. cm_out_len needs to be set here and this will be used * when the SGLs are set up. */ cm->cm_data = NULL; cm->cm_length = MAX(data->DataSize, data->DataOutSize); cm->cm_out_len = data->DataOutSize; cm->cm_flags = 0; if (cm->cm_length != 0) { cm->cm_data = malloc(cm->cm_length, M_MPRUSER, M_WAITOK | M_ZERO); if (cm->cm_data == NULL) { mpr_dprint(sc, MPR_FAULT, "%s: alloc failed for IOCTL " "passthru length %d\n", __func__, cm->cm_length); } else { cm->cm_flags = MPR_CM_FLAGS_DATAIN; if (data->DataOutSize) { cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; err = copyin(PTRIN(data->PtrDataOut), cm->cm_data, data->DataOutSize); } else if (data->DataDirection == MPR_PASS_THRU_DIRECTION_WRITE) { cm->cm_flags = MPR_CM_FLAGS_DATAOUT; err = copyin(PTRIN(data->PtrData), cm->cm_data, data->DataSize); } if (err != 0) mpr_dprint(sc, MPR_FAULT, "%s: failed to copy " "IOCTL data from user space\n", __func__); } } /* * Set this flag only if processing a command that does not need an * IEEE SGL. The CLI Tool within the Toolbox uses IEEE SGLs, so clear * the flag only for that tool if processing a Toolbox function. */ cm->cm_flags |= MPR_CM_FLAGS_SGE_SIMPLE; for (i = 0; i < sizeof (ieee_sgl_func_list); i++) { if (function == ieee_sgl_func_list[i]) { if (function == MPI2_FUNCTION_TOOLBOX) { tool = (uint8_t)hdr->FunctionDependent1; if (tool != MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) break; } cm->cm_flags &= ~MPR_CM_FLAGS_SGE_SIMPLE; break; } } cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; /* * Set up Sense buffer and SGL offset for IO passthru. SCSI IO request * uses SCSI IO or Fast Path SCSI IO descriptor. */ if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) || (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { MPI2_SCSI_IO_REQUEST *scsi_io_req; scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr; /* * Put SGE for data and data_out buffer at the end of * scsi_io_request message header (64 bytes in total). * Following above SGEs, the residual space will be used by * sense data. */ scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize - 64); scsi_io_req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); /* * Set SGLOffset0 value. This is the number of dwords that SGL * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct. */ scsi_io_req->SGLOffset0 = 24; /* * Setup descriptor info. RAID passthrough must use the * default request descriptor which is already set, so if this * is a SCSI IO request, change the descriptor to SCSI IO or * Fast Path SCSI IO. Also, if this is a SCSI IO request, * handle the reply in the mprsas_scsio_complete function. 
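 *
 * The resulting descriptor choice is:
 *
 *	RAID passthrough                   -> default descriptor (set above)
 *	SCSI IO, fast-path capable target  -> Fast Path SCSI IO descriptor
 *	SCSI IO, any other target          -> SCSI IO descriptor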
*/ if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) { targ = mprsas_find_target_by_handle(sc->sassc, 0, scsi_io_req->DevHandle); if (!targ) { printf("No Target found for handle %d\n", scsi_io_req->DevHandle); err = EINVAL; goto RetFreeUnlocked; } if (targ->scsi_req_desc_type == MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { cm->cm_desc.FastPathSCSIIO.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; cm->cm_desc.FastPathSCSIIO.DevHandle = scsi_io_req->DevHandle; scsi_io_req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; } else { cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; cm->cm_desc.SCSIIO.DevHandle = scsi_io_req->DevHandle; } /* * Make sure the DevHandle is not 0 because this is a * likely error. */ if (scsi_io_req->DevHandle == 0) { err = EINVAL; goto RetFreeUnlocked; } } } mpr_lock(sc); err = mpr_wait_command(sc, cm, 30, CAN_SLEEP); if (err) { mpr_printf(sc, "%s: invalid request: error %d\n", __func__, err); mpr_unlock(sc); goto RetFreeUnlocked; } /* * Sync the DMA data, if any. Then copy the data to user space. */ if (cm->cm_data != NULL) { if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) dir = BUS_DMASYNC_POSTREAD; else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) dir = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { mpr_unlock(sc); err = copyout(cm->cm_data, PTRIN(data->PtrData), data->DataSize); mpr_lock(sc); if (err != 0) mpr_dprint(sc, MPR_FAULT, "%s: failed to copy " "IOCTL data to user space\n", __func__); } } /* * Copy the reply data and sense data to user space. */ if (cm->cm_reply != NULL) { rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply; sz = rpl->MsgLength * 4; if (sz > data->ReplySize) { mpr_printf(sc, "%s: user reply buffer (%d) smaller " "than returned buffer (%d)\n", __func__, data->ReplySize, sz); } mpr_unlock(sc); copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize); mpr_lock(sc); if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) || (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { sense_len = MIN((le32toh(((MPI2_SCSI_IO_REPLY *)rpl)-> SenseCount)), sizeof(struct scsi_sense_data)); mpr_unlock(sc); copyout(cm->cm_sense, cm->cm_req + 64, sense_len); mpr_lock(sc); } } } mpr_unlock(sc); RetFreeUnlocked: mpr_lock(sc); if (cm != NULL) { if (cm->cm_data) free(cm->cm_data, M_MPRUSER); mpr_free_command(sc, cm); } Ret: sc->mpr_flags &= ~MPR_FLAGS_BUSY; mpr_unlock(sc); return (err); } static void mpr_user_get_adapter_data(struct mpr_softc *sc, mpr_adapter_data_t *data) { Mpi2ConfigReply_t mpi_reply; Mpi2BiosPage3_t config_page; /* * Use the PCI interface functions to get the Bus, Device, and Function * information. */ data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mpr_dev); data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mpr_dev); data->PciInformation.u.bits.FunctionNumber = pci_get_function(sc->mpr_dev); /* * Get the FW version that should already be saved in IOC Facts. */ data->MpiFirmwareVersion = sc->facts->FWVersion.Word; /* * General device info. */ data->AdapterType = MPRIOCTL_ADAPTER_TYPE_SAS3; data->PCIDeviceHwId = pci_get_device(sc->mpr_dev); data->PCIDeviceHwRev = pci_read_config(sc->mpr_dev, PCIR_REVID, 1); data->SubSystemId = pci_get_subdevice(sc->mpr_dev); data->SubsystemVendorId = pci_get_subvendor(sc->mpr_dev); /* * Get the driver version. 
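 * MPR_DRIVER_VERSION is the driver's compile-time version string (from
 * mprvar.h).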
*/ strcpy((char *)&data->DriverVersion[0], MPR_DRIVER_VERSION); /* * Need to get BIOS Config Page 3 for the BIOS Version. */ data->BiosVersion = 0; mpr_lock(sc); if (mpr_config_get_bios_pg3(sc, &mpi_reply, &config_page)) printf("%s: Error while retrieving BIOS Version\n", __func__); else data->BiosVersion = config_page.BiosVersion; mpr_unlock(sc); } static void mpr_user_read_pci_info(struct mpr_softc *sc, mpr_pci_info_t *data) { int i; /* * Use the PCI interface functions to get the Bus, Device, and Function * information. */ data->BusNumber = pci_get_bus(sc->mpr_dev); data->DeviceNumber = pci_get_slot(sc->mpr_dev); data->FunctionNumber = pci_get_function(sc->mpr_dev); /* * Now get the interrupt vector and the pci header. The vector can * only be 0 right now. The header is the first 256 bytes of config * space. */ data->InterruptVector = 0; for (i = 0; i < sizeof (data->PciHeader); i++) { data->PciHeader[i] = pci_read_config(sc->mpr_dev, i, 1); } } static uint8_t mpr_get_fw_diag_buffer_number(struct mpr_softc *sc, uint32_t unique_id) { uint8_t index; for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { if (sc->fw_diag_buffer_list[index].unique_id == unique_id) { return (index); } } return (MPR_FW_DIAGNOSTIC_UID_NOT_FOUND); } static int mpr_post_fw_diag_buffer(struct mpr_softc *sc, mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code) { MPI2_DIAG_BUFFER_POST_REQUEST *req; MPI2_DIAG_BUFFER_POST_REPLY *reply; struct mpr_command *cm = NULL; int i, status; /* * If buffer is not enabled, just leave. */ *return_code = MPR_FW_DIAG_ERROR_POST_FAILED; if (!pBuffer->enabled) { return (MPR_DIAG_FAILURE); } /* * Clear some flags initially. */ pBuffer->force_release = FALSE; pBuffer->valid_data = FALSE; pBuffer->owned_by_firmware = FALSE; /* * Get a command. */ cm = mpr_alloc_command(sc); if (cm == NULL) { mpr_printf(sc, "%s: no mpr requests\n", __func__); return (MPR_DIAG_FAILURE); } /* * Build the request for releasing the FW Diag Buffer and send it. */ req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req; req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; req->BufferType = pBuffer->buffer_type; req->ExtendedType = pBuffer->extended_type; req->BufferLength = pBuffer->size; for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++) req->ProductSpecific[i] = pBuffer->product_specific[i]; mpr_from_u64(sc->fw_diag_busaddr, &req->BufferAddress); cm->cm_data = NULL; cm->cm_length = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete_data = NULL; /* * Send command synchronously. */ status = mpr_wait_command(sc, cm, 30, CAN_SLEEP); if (status) { mpr_printf(sc, "%s: invalid request: error %d\n", __func__, status); status = MPR_DIAG_FAILURE; goto done; } /* * Process POST reply. */ reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply; if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { status = MPR_DIAG_FAILURE; mpr_dprint(sc, MPR_FAULT, "%s: post of FW Diag Buffer failed " "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and " "TransferLength = 0x%x\n", __func__, reply->IOCStatus, reply->IOCLogInfo, reply->TransferLength); goto done; } /* * Post was successful. 
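 * From now until the buffer is released, the firmware owns the posted
 * DMA buffer and may write diagnostic data into it at any time.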
*/ pBuffer->valid_data = TRUE; pBuffer->owned_by_firmware = TRUE; *return_code = MPR_FW_DIAG_ERROR_SUCCESS; status = MPR_DIAG_SUCCESS; done: mpr_free_command(sc, cm); return (status); } static int mpr_release_fw_diag_buffer(struct mpr_softc *sc, mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code, uint32_t diag_type) { MPI2_DIAG_RELEASE_REQUEST *req; MPI2_DIAG_RELEASE_REPLY *reply; struct mpr_command *cm = NULL; int status; /* * If buffer is not enabled, just leave. */ *return_code = MPR_FW_DIAG_ERROR_RELEASE_FAILED; if (!pBuffer->enabled) { mpr_dprint(sc, MPR_USER, "%s: This buffer type is not " "supported by the IOC", __func__); return (MPR_DIAG_FAILURE); } /* * Clear some flags initially. */ pBuffer->force_release = FALSE; pBuffer->valid_data = FALSE; pBuffer->owned_by_firmware = FALSE; /* * Get a command. */ cm = mpr_alloc_command(sc); if (cm == NULL) { mpr_printf(sc, "%s: no mpr requests\n", __func__); return (MPR_DIAG_FAILURE); } /* * Build the request for releasing the FW Diag Buffer and send it. */ req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req; req->Function = MPI2_FUNCTION_DIAG_RELEASE; req->BufferType = pBuffer->buffer_type; cm->cm_data = NULL; cm->cm_length = 0; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete_data = NULL; /* * Send command synchronously. */ status = mpr_wait_command(sc, cm, 30, CAN_SLEEP); if (status) { mpr_printf(sc, "%s: invalid request: error %d\n", __func__, status); status = MPR_DIAG_FAILURE; goto done; } /* * Process RELEASE reply. */ reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply; if ((reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) || pBuffer->owned_by_firmware) { status = MPR_DIAG_FAILURE; mpr_dprint(sc, MPR_FAULT, "%s: release of FW Diag Buffer " "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n", __func__, reply->IOCStatus, reply->IOCLogInfo); goto done; } /* * Release was successful. */ *return_code = MPR_FW_DIAG_ERROR_SUCCESS; status = MPR_DIAG_SUCCESS; /* * If this was for an UNREGISTER diag type command, clear the unique ID. */ if (diag_type == MPR_FW_DIAG_TYPE_UNREGISTER) { pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID; } done: return (status); } static int mpr_diag_register(struct mpr_softc *sc, mpr_fw_diag_register_t *diag_register, uint32_t *return_code) { mpr_fw_diagnostic_buffer_t *pBuffer; uint8_t extended_type, buffer_type, i; uint32_t buffer_size; uint32_t unique_id; int status; extended_type = diag_register->ExtendedType; buffer_type = diag_register->BufferType; buffer_size = diag_register->RequestedBufferSize; unique_id = diag_register->UniqueId; /* * Check for valid buffer type */ if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; return (MPR_DIAG_FAILURE); } /* * Get the current buffer and look up the unique ID. The unique ID * should not be found. If it is, the ID is already in use. */ i = mpr_get_fw_diag_buffer_number(sc, unique_id); pBuffer = &sc->fw_diag_buffer_list[buffer_type]; if (i != MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } /* * The buffer's unique ID should not be registered yet, and the given * unique ID cannot be 0. */ if ((pBuffer->unique_id != MPR_FW_DIAG_INVALID_UID) || (unique_id == MPR_FW_DIAG_INVALID_UID)) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } /* * If this buffer is already posted as immediate, just change owner. 
*/ if (pBuffer->immediate && pBuffer->owned_by_firmware && (pBuffer->unique_id == MPR_FW_DIAG_INVALID_UID)) { pBuffer->immediate = FALSE; pBuffer->unique_id = unique_id; return (MPR_DIAG_SUCCESS); } /* * Post a new buffer after checking if it's enabled. The DMA buffer * that is allocated will be contiguous (nsegments = 1). */ if (!pBuffer->enabled) { *return_code = MPR_FW_DIAG_ERROR_NO_BUFFER; return (MPR_DIAG_FAILURE); } if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ buffer_size, /* maxsize */ 1, /* nsegments */ buffer_size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->fw_diag_dmat)) { device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer DMA " "tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer, BUS_DMA_NOWAIT, &sc->fw_diag_map)) { device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer " "memory\n"); return (ENOMEM); } bzero(sc->fw_diag_buffer, buffer_size); bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer, buffer_size, mpr_memaddr_cb, &sc->fw_diag_busaddr, 0); pBuffer->size = buffer_size; /* * Copy the given info to the diag buffer and post the buffer. */ pBuffer->buffer_type = buffer_type; pBuffer->immediate = FALSE; if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) { for (i = 0; i < (sizeof (pBuffer->product_specific) / 4); i++) { pBuffer->product_specific[i] = diag_register->ProductSpecific[i]; } } pBuffer->extended_type = extended_type; pBuffer->unique_id = unique_id; status = mpr_post_fw_diag_buffer(sc, pBuffer, return_code); /* * In case there was a failure, free the DMA buffer. */ if (status == MPR_DIAG_FAILURE) { if (sc->fw_diag_busaddr != 0) bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); if (sc->fw_diag_buffer != NULL) bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, sc->fw_diag_map); if (sc->fw_diag_dmat != NULL) bus_dma_tag_destroy(sc->fw_diag_dmat); } return (status); } static int mpr_diag_unregister(struct mpr_softc *sc, mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code) { mpr_fw_diagnostic_buffer_t *pBuffer; uint8_t i; uint32_t unique_id; int status; unique_id = diag_unregister->UniqueId; /* * Get the current buffer and look up the unique ID. The unique ID * should be there. */ i = mpr_get_fw_diag_buffer_number(sc, unique_id); if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } pBuffer = &sc->fw_diag_buffer_list[i]; /* * Try to release the buffer from FW before freeing it. If release * fails, don't free the DMA buffer in case FW tries to access it * later. If buffer is not owned by firmware, can't release it. */ if (!pBuffer->owned_by_firmware) { status = MPR_DIAG_SUCCESS; } else { status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code, MPR_FW_DIAG_TYPE_UNREGISTER); } /* * At this point, return the current status no matter what happens with * the DMA buffer. 
*/ pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID; if (status == MPR_DIAG_SUCCESS) { if (sc->fw_diag_busaddr != 0) bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map); if (sc->fw_diag_buffer != NULL) bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer, sc->fw_diag_map); if (sc->fw_diag_dmat != NULL) bus_dma_tag_destroy(sc->fw_diag_dmat); } return (status); } static int mpr_diag_query(struct mpr_softc *sc, mpr_fw_diag_query_t *diag_query, uint32_t *return_code) { mpr_fw_diagnostic_buffer_t *pBuffer; uint8_t i; uint32_t unique_id; unique_id = diag_query->UniqueId; /* * If ID is valid, query on ID. * If ID is invalid, query on buffer type. */ if (unique_id == MPR_FW_DIAG_INVALID_UID) { i = diag_query->BufferType; if (i >= MPI2_DIAG_BUF_TYPE_COUNT) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } } else { i = mpr_get_fw_diag_buffer_number(sc, unique_id); if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } } /* * Fill query structure with the diag buffer info. */ pBuffer = &sc->fw_diag_buffer_list[i]; diag_query->BufferType = pBuffer->buffer_type; diag_query->ExtendedType = pBuffer->extended_type; if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) { for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4); i++) { diag_query->ProductSpecific[i] = pBuffer->product_specific[i]; } } diag_query->TotalBufferSize = pBuffer->size; diag_query->DriverAddedBufferSize = 0; diag_query->UniqueId = pBuffer->unique_id; diag_query->ApplicationFlags = 0; diag_query->DiagnosticFlags = 0; /* * Set/Clear application flags */ if (pBuffer->immediate) { diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_APP_OWNED; } else { diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_APP_OWNED; } if (pBuffer->valid_data || pBuffer->owned_by_firmware) { diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_BUFFER_VALID; } else { diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_BUFFER_VALID; } if (pBuffer->owned_by_firmware) { diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS; } else { diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS; } return (MPR_DIAG_SUCCESS); } static int mpr_diag_read_buffer(struct mpr_softc *sc, mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf, uint32_t *return_code) { mpr_fw_diagnostic_buffer_t *pBuffer; uint8_t i, *pData; uint32_t unique_id; int status; unique_id = diag_read_buffer->UniqueId; /* * Get the current buffer and look up the unique ID. The unique ID * should be there. */ i = mpr_get_fw_diag_buffer_number(sc, unique_id); if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } pBuffer = &sc->fw_diag_buffer_list[i]; /* * Make sure requested read is within limits */ if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead > pBuffer->size) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; return (MPR_DIAG_FAILURE); } /* * Copy the requested data from DMA to the diag_read_buffer. The DMA * buffer that was allocated is one contiguous buffer. */ pData = (uint8_t *)(sc->fw_diag_buffer + diag_read_buffer->StartingOffset); if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0) return (MPR_DIAG_FAILURE); diag_read_buffer->Status = 0; /* * Set or clear the Force Release flag. 
*/ if (pBuffer->force_release) { diag_read_buffer->Flags |= MPR_FW_DIAG_FLAG_FORCE_RELEASE; } else { diag_read_buffer->Flags &= ~MPR_FW_DIAG_FLAG_FORCE_RELEASE; } /* * If buffer is to be reregistered, make sure it's not already owned by * firmware first. */ status = MPR_DIAG_SUCCESS; if (!pBuffer->owned_by_firmware) { if (diag_read_buffer->Flags & MPR_FW_DIAG_FLAG_REREGISTER) { status = mpr_post_fw_diag_buffer(sc, pBuffer, return_code); } } return (status); } static int mpr_diag_release(struct mpr_softc *sc, mpr_fw_diag_release_t *diag_release, uint32_t *return_code) { mpr_fw_diagnostic_buffer_t *pBuffer; uint8_t i; uint32_t unique_id; int status; unique_id = diag_release->UniqueId; /* * Get the current buffer and look up the unique ID. The unique ID * should be there. */ i = mpr_get_fw_diag_buffer_number(sc, unique_id); if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) { *return_code = MPR_FW_DIAG_ERROR_INVALID_UID; return (MPR_DIAG_FAILURE); } pBuffer = &sc->fw_diag_buffer_list[i]; /* * If buffer is not owned by firmware, it's already been released. */ if (!pBuffer->owned_by_firmware) { *return_code = MPR_FW_DIAG_ERROR_ALREADY_RELEASED; return (MPR_DIAG_FAILURE); } /* * Release the buffer. */ status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code, MPR_FW_DIAG_TYPE_RELEASE); return (status); } static int -mpr_do_diag_action(struct mpr_softc *sc, uint32_t action, - uint8_t *diag_action, uint32_t length, uint32_t *return_code) +mpr_do_diag_action(struct mpr_softc *sc, uint32_t action, uint8_t *diag_action, + uint32_t length, uint32_t *return_code) { mpr_fw_diag_register_t diag_register; mpr_fw_diag_unregister_t diag_unregister; mpr_fw_diag_query_t diag_query; mpr_diag_read_buffer_t diag_read_buffer; mpr_fw_diag_release_t diag_release; int status = MPR_DIAG_SUCCESS; uint32_t original_return_code; original_return_code = *return_code; *return_code = MPR_FW_DIAG_ERROR_SUCCESS; switch (action) { case MPR_FW_DIAG_TYPE_REGISTER: if (!length) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } if (copyin(diag_action, &diag_register, sizeof(diag_register)) != 0) return (MPR_DIAG_FAILURE); status = mpr_diag_register(sc, &diag_register, return_code); break; case MPR_FW_DIAG_TYPE_UNREGISTER: if (length < sizeof(diag_unregister)) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } if (copyin(diag_action, &diag_unregister, sizeof(diag_unregister)) != 0) return (MPR_DIAG_FAILURE); status = mpr_diag_unregister(sc, &diag_unregister, return_code); break; case MPR_FW_DIAG_TYPE_QUERY: if (length < sizeof (diag_query)) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } if (copyin(diag_action, &diag_query, sizeof(diag_query)) != 0) return (MPR_DIAG_FAILURE); status = mpr_diag_query(sc, &diag_query, return_code); if (status == MPR_DIAG_SUCCESS) if (copyout(&diag_query, diag_action, sizeof (diag_query)) != 0) return (MPR_DIAG_FAILURE); break; case MPR_FW_DIAG_TYPE_READ_BUFFER: if (copyin(diag_action, &diag_read_buffer, sizeof(diag_read_buffer)) != 0) return (MPR_DIAG_FAILURE); if (length < diag_read_buffer.BytesToRead) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } status = mpr_diag_read_buffer(sc, &diag_read_buffer, PTRIN(diag_read_buffer.PtrDataBuffer), return_code); if (status == MPR_DIAG_SUCCESS) { if (copyout(&diag_read_buffer, diag_action, sizeof(diag_read_buffer) - sizeof(diag_read_buffer.PtrDataBuffer)) != 0) return (MPR_DIAG_FAILURE); } break; case 
MPR_FW_DIAG_TYPE_RELEASE: if (length < sizeof(diag_release)) { *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } if (copyin(diag_action, &diag_release, sizeof(diag_release)) != 0) return (MPR_DIAG_FAILURE); status = mpr_diag_release(sc, &diag_release, return_code); break; default: *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER; status = MPR_DIAG_FAILURE; break; } if ((status == MPR_DIAG_FAILURE) && (original_return_code == MPR_FW_DIAG_NEW) && (*return_code != MPR_FW_DIAG_ERROR_SUCCESS)) status = MPR_DIAG_SUCCESS; return (status); } static int mpr_user_diag_action(struct mpr_softc *sc, mpr_diag_action_t *data) { int status; /* * Only allow one diag action at a time. */ if (sc->mpr_flags & MPR_FLAGS_BUSY) { mpr_dprint(sc, MPR_USER, "%s: Only one FW diag command " "allowed at a time.", __func__); return (EBUSY); } sc->mpr_flags |= MPR_FLAGS_BUSY; /* * Send the diag action request. */ if (data->Action == MPR_FW_DIAG_TYPE_REGISTER || data->Action == MPR_FW_DIAG_TYPE_UNREGISTER || data->Action == MPR_FW_DIAG_TYPE_QUERY || data->Action == MPR_FW_DIAG_TYPE_READ_BUFFER || data->Action == MPR_FW_DIAG_TYPE_RELEASE) { status = mpr_do_diag_action(sc, data->Action, PTRIN(data->PtrDiagAction), data->Length, &data->ReturnCode); } else status = EINVAL; sc->mpr_flags &= ~MPR_FLAGS_BUSY; return (status); } /* * Copy the event recording mask and the event queue size out. For * clarification, the event recording mask (events_to_record) is not the same * thing as the event mask (event_mask). events_to_record has a bit set for * every event type that is to be recorded by the driver, and event_mask has a * bit cleared for every event that is allowed into the driver from the IOC. * They really have nothing to do with each other. */ static void mpr_user_event_query(struct mpr_softc *sc, mpr_event_query_t *data) { uint8_t i; mpr_lock(sc); data->Entries = MPR_EVENT_QUEUE_SIZE; for (i = 0; i < 4; i++) { data->Types[i] = sc->events_to_record[i]; } mpr_unlock(sc); } /* * Set the driver's event mask according to what's been given. See * mpr_user_event_query for explanation of the event recording mask and the IOC * event mask. It's the app's responsibility to enable event logging by setting * the bits in events_to_record. Initially, no events will be logged. */ static void mpr_user_event_enable(struct mpr_softc *sc, mpr_event_enable_t *data) { uint8_t i; mpr_lock(sc); for (i = 0; i < 4; i++) { sc->events_to_record[i] = data->Types[i]; } mpr_unlock(sc); } /* * Copy out the events that have been recorded, up to the max events allowed. */ static int mpr_user_event_report(struct mpr_softc *sc, mpr_event_report_t *data) { int status = 0; uint32_t size; mpr_lock(sc); size = data->Size; if ((size >= sizeof(sc->recorded_events)) && (status == 0)) { mpr_unlock(sc); if (copyout((void *)sc->recorded_events, PTRIN(data->PtrEvents), size) != 0) status = EFAULT; mpr_lock(sc); } else { /* * data->Size value is not large enough to copy event data. */ status = EFAULT; } /* * Change size value to match the number of bytes that were copied. */ if (status == 0) data->Size = sizeof(sc->recorded_events); mpr_unlock(sc); return (status); } /* * Record events into the driver from the IOC if they are not masked. 
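 *
 * events_to_record[] is treated as a 128-bit bitmap: an event is
 * recorded when bit (event % 32) of word (event / 32) is set. For
 * example, MPI2_EVENT_LOG_ENTRY_ADDED (0x21), special-cased in the
 * function below, maps to word 1, bit 1.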
*/ void mprsas_record_event(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event_reply) { uint32_t event; int i, j; uint16_t event_data_len; boolean_t sendAEN = FALSE; event = event_reply->Event; /* * Generate a system event to let anyone who cares know that a * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the * event mask is set to. */ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { sendAEN = TRUE; } /* * Record the event only if its corresponding bit is set in * events_to_record. event_index is the index into recorded_events and * event_number is the overall number of an event being recorded since * start-of-day. event_index will roll over; event_number will never * roll over. */ i = (uint8_t)(event / 32); j = (uint8_t)(event % 32); if ((i < 4) && ((1 << j) & sc->events_to_record[i])) { i = sc->event_index; sc->recorded_events[i].Type = event; sc->recorded_events[i].Number = ++sc->event_number; bzero(sc->recorded_events[i].Data, MPR_MAX_EVENT_DATA_LENGTH * 4); event_data_len = event_reply->EventDataLength; if (event_data_len > 0) { /* * Limit data to size in m_event entry */ if (event_data_len > MPR_MAX_EVENT_DATA_LENGTH) { event_data_len = MPR_MAX_EVENT_DATA_LENGTH; } for (j = 0; j < event_data_len; j++) { sc->recorded_events[i].Data[j] = event_reply->EventData[j]; } /* * check for index wrap-around */ if (++i == MPR_EVENT_QUEUE_SIZE) { i = 0; } sc->event_index = (uint8_t)i; /* * Set flag to send the event. */ sendAEN = TRUE; } } /* * Generate a system event if flag is set to let anyone who cares know * that an event has occurred. */ if (sendAEN) { //SLM-how to send a system event (see kqueue, kevent) // (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS", // "SAS", NULL, NULL, DDI_NOSLEEP); } } static int mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data) { int status = 0; switch (data->Command) { /* * IO access is not supported. */ case REG_IO_READ: case REG_IO_WRITE: mpr_dprint(sc, MPR_USER, "IO access is not supported. " "Use memory access."); status = EINVAL; break; case REG_MEM_READ: data->RegData = mpr_regread(sc, data->RegOffset); break; case REG_MEM_WRITE: mpr_regwrite(sc, data->RegOffset, data->RegData); break; default: status = EINVAL; break; } return (status); } static int mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data) { uint8_t bt2dh = FALSE; uint8_t dh2bt = FALSE; uint16_t dev_handle, bus, target; bus = data->Bus; target = data->TargetID; dev_handle = data->DevHandle; /* * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/ * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is * invalid. */ if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF)) dh2bt = TRUE; if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF)) bt2dh = TRUE; if (!dh2bt && !bt2dh) return (EINVAL); /* * Only handle bus of 0. Make sure target is within range. 
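 *
 * An illustrative userland request for a Bus/Target -> DevHandle
 * translation (field values hypothetical):
 *
 *	mpr_btdh_mapping_t map;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.Bus = 0;
 *	map.TargetID = 5;
 *	map.DevHandle = 0xFFFF;		-- 0xFFFF selects B/T -> DevHandle
 *	if (ioctl(fd, MPTIOCTL_BTDH_MAPPING, &map) == 0)
 *		printf("DevHandle 0x%04x\n", map.DevHandle);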
if (bt2dh) { if (bus != 0) return (EINVAL); if (target > sc->max_devices) { mpr_dprint(sc, MPR_FAULT, "Target ID is out of range " "for Bus/Target to DevHandle mapping."); return (EINVAL); } dev_handle = sc->mapping_table[target].dev_handle; if (dev_handle) data->DevHandle = dev_handle; } else { bus = 0; target = mpr_mapping_get_sas_id_from_handle(sc, dev_handle); data->Bus = bus; data->TargetID = target; } return (0); } static int mpr_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag, struct thread *td) { struct mpr_softc *sc; struct mpr_cfg_page_req *page_req; struct mpr_ext_cfg_page_req *ext_page_req; void *mpr_page; int error, msleep_ret; mpr_page = NULL; sc = dev->si_drv1; page_req = (void *)arg; ext_page_req = (void *)arg; switch (cmd) { case MPRIO_READ_CFG_HEADER: mpr_lock(sc); error = mpr_user_read_cfg_header(sc, page_req); mpr_unlock(sc); break; case MPRIO_READ_CFG_PAGE: mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK | M_ZERO); if (!mpr_page) { mpr_printf(sc, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } error = copyin(page_req->buf, mpr_page, sizeof(MPI2_CONFIG_PAGE_HEADER)); if (error) break; mpr_lock(sc); error = mpr_user_read_cfg_page(sc, page_req, mpr_page); mpr_unlock(sc); if (error) break; error = copyout(mpr_page, page_req->buf, page_req->len); break; case MPRIO_READ_EXT_CFG_HEADER: mpr_lock(sc); error = mpr_user_read_extcfg_header(sc, ext_page_req); mpr_unlock(sc); break; case MPRIO_READ_EXT_CFG_PAGE: mpr_page = malloc(ext_page_req->len, M_MPRUSER, M_WAITOK | M_ZERO); if (!mpr_page) { mpr_printf(sc, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } error = copyin(ext_page_req->buf, mpr_page, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); if (error) break; mpr_lock(sc); error = mpr_user_read_extcfg_page(sc, ext_page_req, mpr_page); mpr_unlock(sc); if (error) break; error = copyout(mpr_page, ext_page_req->buf, ext_page_req->len); break; case MPRIO_WRITE_CFG_PAGE: mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK|M_ZERO); if (!mpr_page) { mpr_printf(sc, "Cannot allocate memory %s %d\n", __func__, __LINE__); return (ENOMEM); } error = copyin(page_req->buf, mpr_page, page_req->len); if (error) break; mpr_lock(sc); error = mpr_user_write_cfg_page(sc, page_req, mpr_page); mpr_unlock(sc); break; case MPRIO_MPR_COMMAND: error = mpr_user_command(sc, (struct mpr_usr_command *)arg); break; case MPTIOCTL_PASS_THRU: /* * The user has requested to pass through a command to be * executed by the MPT firmware. Call our routine which does * this. Only allow one passthru IOCTL at a time. */ error = mpr_user_pass_thru(sc, (mpr_pass_thru_t *)arg); break; case MPTIOCTL_GET_ADAPTER_DATA: /* * The user has requested to read adapter data. Call our * routine which does this. */ error = 0; mpr_user_get_adapter_data(sc, (mpr_adapter_data_t *)arg); break; case MPTIOCTL_GET_PCI_INFO: /* * The user has requested to read PCI info. Call * our routine which does this. */ mpr_lock(sc); error = 0; mpr_user_read_pci_info(sc, (mpr_pci_info_t *)arg); mpr_unlock(sc); break; case MPTIOCTL_RESET_ADAPTER: mpr_lock(sc); sc->port_enable_complete = 0; uint32_t reinit_start = time_uptime; error = mpr_reinit(sc); /* Sleep for up to 300 seconds. 
*/ msleep_ret = msleep(&sc->port_enable_complete, &sc->mpr_mtx, PRIBIO, "mpr_porten", 300 * hz); mpr_unlock(sc); if (msleep_ret) printf("Port Enable did not complete after Diag " "Reset, msleep error %d.\n", msleep_ret); else mpr_dprint(sc, MPR_USER, "Hard Reset with Port Enable " "completed in %d seconds.\n", (uint32_t)(time_uptime - reinit_start)); break; case MPTIOCTL_DIAG_ACTION: /* * The user has done a diag buffer action. Call our routine * which does this. Only allow one diag action at a time. */ mpr_lock(sc); error = mpr_user_diag_action(sc, (mpr_diag_action_t *)arg); mpr_unlock(sc); break; case MPTIOCTL_EVENT_QUERY: /* * The user has done an event query. Call our routine which does * this. */ error = 0; mpr_user_event_query(sc, (mpr_event_query_t *)arg); break; case MPTIOCTL_EVENT_ENABLE: /* * The user has done an event enable. Call our routine which * does this. */ error = 0; mpr_user_event_enable(sc, (mpr_event_enable_t *)arg); break; case MPTIOCTL_EVENT_REPORT: /* * The user has done an event report. Call our routine which * does this. */ error = mpr_user_event_report(sc, (mpr_event_report_t *)arg); break; case MPTIOCTL_REG_ACCESS: /* * The user has requested register access. Call our routine * which does this. */ mpr_lock(sc); error = mpr_user_reg_access(sc, (mpr_reg_access_t *)arg); mpr_unlock(sc); break; case MPTIOCTL_BTDH_MAPPING: /* * The user has requested to translate a bus/target to a * DevHandle or a DevHandle to a bus/target. Call our routine * which does this. */ error = mpr_user_btdh(sc, (mpr_btdh_mapping_t *)arg); break; default: error = ENOIOCTL; break; } if (mpr_page != NULL) free(mpr_page, M_MPRUSER); return (error); } #ifdef COMPAT_FREEBSD32 struct mpr_cfg_page_req32 { MPI2_CONFIG_PAGE_HEADER header; uint32_t page_address; uint32_t buf; int len; uint16_t ioc_status; }; struct mpr_ext_cfg_page_req32 { MPI2_CONFIG_EXTENDED_PAGE_HEADER header; uint32_t page_address; uint32_t buf; int len; uint16_t ioc_status; }; struct mpr_raid_action32 { uint8_t action; uint8_t volume_bus; uint8_t volume_id; uint8_t phys_disk_num; uint32_t action_data_word; uint32_t buf; int len; uint32_t volume_status; uint32_t action_data[4]; uint16_t action_status; uint16_t ioc_status; uint8_t write; }; struct mpr_usr_command32 { uint32_t req; uint32_t req_len; uint32_t rpl; uint32_t rpl_len; uint32_t buf; int len; uint32_t flags; }; #define MPRIO_READ_CFG_HEADER32 _IOWR('M', 200, struct mpr_cfg_page_req32) #define MPRIO_READ_CFG_PAGE32 _IOWR('M', 201, struct mpr_cfg_page_req32) #define MPRIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mpr_ext_cfg_page_req32) #define MPRIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mpr_ext_cfg_page_req32) #define MPRIO_WRITE_CFG_PAGE32 _IOWR('M', 204, struct mpr_cfg_page_req32) #define MPRIO_RAID_ACTION32 _IOWR('M', 205, struct mpr_raid_action32) #define MPRIO_MPR_COMMAND32 _IOWR('M', 210, struct mpr_usr_command32) static int mpr_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag, struct thread *td) { struct mpr_cfg_page_req32 *page32 = _arg; struct mpr_ext_cfg_page_req32 *ext32 = _arg; struct mpr_raid_action32 *raid32 = _arg; struct mpr_usr_command32 *user32 = _arg; union { struct mpr_cfg_page_req page; struct mpr_ext_cfg_page_req ext; struct mpr_raid_action raid; struct mpr_usr_command user; } arg; u_long cmd; int error; switch (cmd32) { case MPRIO_READ_CFG_HEADER32: case MPRIO_READ_CFG_PAGE32: case MPRIO_WRITE_CFG_PAGE32: if (cmd32 == MPRIO_READ_CFG_HEADER32) cmd = MPRIO_READ_CFG_HEADER; else if (cmd32 == MPRIO_READ_CFG_PAGE32) cmd =
MPRIO_READ_CFG_PAGE; else cmd = MPRIO_WRITE_CFG_PAGE; CP(*page32, arg.page, header); CP(*page32, arg.page, page_address); PTRIN_CP(*page32, arg.page, buf); CP(*page32, arg.page, len); CP(*page32, arg.page, ioc_status); break; case MPRIO_READ_EXT_CFG_HEADER32: case MPRIO_READ_EXT_CFG_PAGE32: if (cmd32 == MPRIO_READ_EXT_CFG_HEADER32) cmd = MPRIO_READ_EXT_CFG_HEADER; else cmd = MPRIO_READ_EXT_CFG_PAGE; CP(*ext32, arg.ext, header); CP(*ext32, arg.ext, page_address); PTRIN_CP(*ext32, arg.ext, buf); CP(*ext32, arg.ext, len); CP(*ext32, arg.ext, ioc_status); break; case MPRIO_RAID_ACTION32: cmd = MPRIO_RAID_ACTION; CP(*raid32, arg.raid, action); CP(*raid32, arg.raid, volume_bus); CP(*raid32, arg.raid, volume_id); CP(*raid32, arg.raid, phys_disk_num); CP(*raid32, arg.raid, action_data_word); PTRIN_CP(*raid32, arg.raid, buf); CP(*raid32, arg.raid, len); CP(*raid32, arg.raid, volume_status); bcopy(raid32->action_data, arg.raid.action_data, sizeof arg.raid.action_data); CP(*raid32, arg.raid, ioc_status); CP(*raid32, arg.raid, write); break; case MPRIO_MPR_COMMAND32: cmd = MPRIO_MPR_COMMAND; PTRIN_CP(*user32, arg.user, req); CP(*user32, arg.user, req_len); PTRIN_CP(*user32, arg.user, rpl); CP(*user32, arg.user, rpl_len); PTRIN_CP(*user32, arg.user, buf); CP(*user32, arg.user, len); CP(*user32, arg.user, flags); break; default: return (ENOIOCTL); } error = mpr_ioctl(dev, cmd, &arg, flag, td); if (error == 0 && (cmd32 & IOC_OUT) != 0) { switch (cmd32) { case MPRIO_READ_CFG_HEADER32: case MPRIO_READ_CFG_PAGE32: case MPRIO_WRITE_CFG_PAGE32: CP(arg.page, *page32, header); CP(arg.page, *page32, page_address); PTROUT_CP(arg.page, *page32, buf); CP(arg.page, *page32, len); CP(arg.page, *page32, ioc_status); break; case MPRIO_READ_EXT_CFG_HEADER32: case MPRIO_READ_EXT_CFG_PAGE32: CP(arg.ext, *ext32, header); CP(arg.ext, *ext32, page_address); PTROUT_CP(arg.ext, *ext32, buf); CP(arg.ext, *ext32, len); CP(arg.ext, *ext32, ioc_status); break; case MPRIO_RAID_ACTION32: CP(arg.raid, *raid32, action); CP(arg.raid, *raid32, volume_bus); CP(arg.raid, *raid32, volume_id); CP(arg.raid, *raid32, phys_disk_num); CP(arg.raid, *raid32, action_data_word); PTROUT_CP(arg.raid, *raid32, buf); CP(arg.raid, *raid32, len); CP(arg.raid, *raid32, volume_status); bcopy(arg.raid.action_data, raid32->action_data, sizeof arg.raid.action_data); CP(arg.raid, *raid32, ioc_status); CP(arg.raid, *raid32, write); break; case MPRIO_MPR_COMMAND32: PTROUT_CP(arg.user, *user32, req); CP(arg.user, *user32, req_len); PTROUT_CP(arg.user, *user32, rpl); CP(arg.user, *user32, rpl_len); PTROUT_CP(arg.user, *user32, buf); CP(arg.user, *user32, len); CP(arg.user, *user32, flags); break; } } return (error); } #endif /* COMPAT_FREEBSD32 */ static int mpr_ioctl_devsw(struct cdev *dev, u_long com, caddr_t arg, int flag, struct thread *td) { #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return (mpr_ioctl32(dev, com, arg, flag, td)); #endif return (mpr_ioctl(dev, com, arg, flag, td)); } Index: head/sys/dev/mpr/mprvar.h =================================================================== --- head/sys/dev/mpr/mprvar.h (revision 299264) +++ head/sys/dev/mpr/mprvar.h (revision 299265) @@ -1,777 +1,775 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. - * Copyright (c) 2013-2015 Avago Technologies + * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #ifndef _MPRVAR_H #define _MPRVAR_H #define MPR_DRIVER_VERSION "09.255.01.00-fbsd" #define MPR_DB_MAX_WAIT 2500 #define MPR_REQ_FRAMES 1024 #define MPR_EVT_REPLY_FRAMES 32 #define MPR_REPLY_FRAMES MPR_REQ_FRAMES #define MPR_CHAIN_FRAMES 2048 #define MPR_SENSE_LEN SSD_FULL_SIZE #define MPR_MSI_COUNT 1 #define MPR_SGE64_SIZE 12 #define MPR_SGE32_SIZE 8 #define MPR_SGC_SIZE 8 #define MPR_FUNCTRACE(sc) \ mpr_dprint((sc), MPR_TRACE, "%s\n", __func__) #define CAN_SLEEP 1 #define NO_SLEEP 0 #define MPR_PERIODIC_DELAY 1 /* 1 second heartbeat/watchdog check */ #define MPR_ATA_ID_TIMEOUT 5 /* 5 second timeout for SATA ID cmd */ #define IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED 0x2810 #define MPR_SCSI_RI_INVALID_FRAME (0x00000002) #define MPR_STRING_LENGTH 64 #define DEFAULT_SPINUP_WAIT 3 /* seconds to wait for spinup */ #include /* * host mapping related macro definitions */ #define MPR_MAPTABLE_BAD_IDX 0xFFFFFFFF #define MPR_DPM_BAD_IDX 0xFFFF #define MPR_ENCTABLE_BAD_IDX 0xFF #define MPR_MAX_MISSING_COUNT 0x0F #define MPR_DEV_RESERVED 0x20000000 #define MPR_MAP_IN_USE 0x10000000 #define MPR_RAID_CHANNEL 1 #define MPR_MAP_BAD_ID 0xFFFFFFFF typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; /** * struct dev_mapping_table - device mapping information * @physical_id: SAS address for drives or WWID for RAID volumes * @device_info: bitfield provides detailed info about the device * @phy_bits: bitfields indicating controller phys * @dpm_entry_num: index of this device in device persistent map table * @dev_handle: device handle for the device pointed by this entry * @channel: target channel * @id: target id * @missing_count: number of times the device not detected by driver * @hide_flag: Hide this physical disk/not (foreign configuration) * @init_complete: Whether the start of the day checks completed or not * @TLR_bits: Turn TLR support on or off */ struct dev_mapping_table { u64 physical_id; u32 device_info; u32 phy_bits; u16 dpm_entry_num; u16 dev_handle; u8 reserved1; u8 channel; u16 id; u8 missing_count; u8 init_complete; u8 TLR_bits; u8 reserved2; }; /** * struct enc_mapping_table - mapping information about an enclosure * @enclosure_id: Logical ID of this enclosure * @start_index: index to the entry in dev_mapping_table * @phy_bits: bitfields indicating controller phys * @dpm_entry_num: index of this enclosure 
in device persistent map table * @enc_handle: device handle for the enclosure pointed by this entry * @num_slots: number of slots in the enclosure * @start_slot: Starting slot id * @missing_count: number of times the device not detected by driver * @removal_flag: used to mark the device for removal * @skip_search: used as a flag to include/exclude enclosure for search * @init_complete: Whether the start of the day checks completed or not */ struct enc_mapping_table { u64 enclosure_id; u32 start_index; u32 phy_bits; u16 dpm_entry_num; u16 enc_handle; u16 num_slots; u16 start_slot; u8 missing_count; u8 removal_flag; u8 skip_search; u8 init_complete; }; /** * struct map_removal_table - entries to be removed from mapping table * @dpm_entry_num: index of this device in device persistent map table * @dev_handle: device handle for the device pointed by this entry */ struct map_removal_table{ u16 dpm_entry_num; u16 dev_handle; }; typedef struct mpr_fw_diagnostic_buffer { size_t size; uint8_t extended_type; uint8_t buffer_type; uint8_t force_release; uint32_t product_specific[23]; uint8_t immediate; uint8_t enabled; uint8_t valid_data; uint8_t owned_by_firmware; uint32_t unique_id; } mpr_fw_diagnostic_buffer_t; struct mpr_softc; struct mpr_command; struct mprsas_softc; union ccb; struct mprsas_target; struct mpr_column_map; MALLOC_DECLARE(M_MPR); typedef void mpr_evt_callback_t(struct mpr_softc *, uintptr_t, MPI2_EVENT_NOTIFICATION_REPLY *reply); typedef void mpr_command_callback_t(struct mpr_softc *, struct mpr_command *cm); struct mpr_chain { TAILQ_ENTRY(mpr_chain) chain_link; void *chain; uint64_t chain_busaddr; }; /* * This needs to be at least 2 to support SMP passthrough. */ #define MPR_IOVEC_COUNT 2 struct mpr_command { TAILQ_ENTRY(mpr_command) cm_link; TAILQ_ENTRY(mpr_command) cm_recovery; struct mpr_softc *cm_sc; union ccb *cm_ccb; void *cm_data; u_int cm_length; u_int cm_out_len; struct uio cm_uio; struct iovec cm_iovec[MPR_IOVEC_COUNT]; u_int cm_max_segs; u_int cm_sglsize; void *cm_sge; uint8_t *cm_req; uint8_t *cm_reply; uint32_t cm_reply_data; mpr_command_callback_t *cm_complete; void *cm_complete_data; struct mprsas_target *cm_targ; MPI2_REQUEST_DESCRIPTOR_UNION cm_desc; u_int cm_lun; u_int cm_flags; #define MPR_CM_FLAGS_POLLED (1 << 0) #define MPR_CM_FLAGS_COMPLETE (1 << 1) #define MPR_CM_FLAGS_SGE_SIMPLE (1 << 2) #define MPR_CM_FLAGS_DATAOUT (1 << 3) #define MPR_CM_FLAGS_DATAIN (1 << 4) #define MPR_CM_FLAGS_WAKEUP (1 << 5) #define MPR_CM_FLAGS_USE_UIO (1 << 6) #define MPR_CM_FLAGS_SMP_PASS (1 << 7) #define MPR_CM_FLAGS_CHAIN_FAILED (1 << 8) #define MPR_CM_FLAGS_ERROR_MASK MPR_CM_FLAGS_CHAIN_FAILED #define MPR_CM_FLAGS_USE_CCB (1 << 9) #define MPR_CM_FLAGS_SATA_ID_TIMEOUT (1 << 10) u_int cm_state; #define MPR_CM_STATE_FREE 0 #define MPR_CM_STATE_BUSY 1 #define MPR_CM_STATE_TIMEDOUT 2 bus_dmamap_t cm_dmamap; struct scsi_sense_data *cm_sense; TAILQ_HEAD(, mpr_chain) cm_chain_list; uint32_t cm_req_busaddr; uint32_t cm_sense_busaddr; struct callout cm_callout; }; struct mpr_column_map { uint16_t dev_handle; uint8_t phys_disk_num; }; struct mpr_event_handle { TAILQ_ENTRY(mpr_event_handle) eh_list; mpr_evt_callback_t *callback; void *data; uint8_t mask[16]; }; struct mpr_softc { device_t mpr_dev; struct cdev *mpr_cdev; u_int mpr_flags; #define MPR_FLAGS_INTX (1 << 0) #define MPR_FLAGS_MSI (1 << 1) #define MPR_FLAGS_BUSY (1 << 2) #define MPR_FLAGS_SHUTDOWN (1 << 3) #define MPR_FLAGS_DIAGRESET (1 << 4) #define MPR_FLAGS_ATTACH_DONE (1 << 5) u_int mpr_debug; u_int disable_msix; u_int 
disable_msi; int tm_cmds_active; int io_cmds_active; int io_cmds_highwater; int chain_free; int max_chains; int chain_free_lowwater; u_int enable_ssu; int spinup_wait_time; uint64_t chain_alloc_fail; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; char fw_version[16]; struct mpr_command *commands; struct mpr_chain *chains; struct callout periodic; struct mprsas_softc *sassc; char tmp_string[MPR_STRING_LENGTH]; TAILQ_HEAD(, mpr_command) req_list; TAILQ_HEAD(, mpr_command) high_priority_req_list; TAILQ_HEAD(, mpr_chain) chain_list; TAILQ_HEAD(, mpr_command) tm_list; int replypostindex; int replyfreeindex; struct resource *mpr_regs_resource; bus_space_handle_t mpr_bhandle; bus_space_tag_t mpr_btag; int mpr_regs_rid; bus_dma_tag_t mpr_parent_dmat; bus_dma_tag_t buffer_dmat; MPI2_IOC_FACTS_REPLY *facts; int num_reqs; int num_replies; int fqdepth; /* Free queue */ int pqdepth; /* Post queue */ uint8_t event_mask[16]; TAILQ_HEAD(, mpr_event_handle) event_list; struct mpr_event_handle *mpr_log_eh; struct mtx mpr_mtx; struct intr_config_hook mpr_ich; struct resource *mpr_irq[MPR_MSI_COUNT]; void *mpr_intrhand[MPR_MSI_COUNT]; int mpr_irq_rid[MPR_MSI_COUNT]; uint8_t *req_frames; bus_addr_t req_busaddr; bus_dma_tag_t req_dmat; bus_dmamap_t req_map; uint8_t *reply_frames; bus_addr_t reply_busaddr; bus_dma_tag_t reply_dmat; bus_dmamap_t reply_map; struct scsi_sense_data *sense_frames; bus_addr_t sense_busaddr; bus_dma_tag_t sense_dmat; bus_dmamap_t sense_map; uint8_t *chain_frames; bus_addr_t chain_busaddr; bus_dma_tag_t chain_dmat; bus_dmamap_t chain_map; MPI2_REPLY_DESCRIPTORS_UNION *post_queue; bus_addr_t post_busaddr; uint32_t *free_queue; bus_addr_t free_busaddr; bus_dma_tag_t queues_dmat; bus_dmamap_t queues_map; uint8_t *fw_diag_buffer; bus_addr_t fw_diag_busaddr; bus_dma_tag_t fw_diag_dmat; bus_dmamap_t fw_diag_map; uint8_t ir_firmware; /* static config pages */ Mpi2IOCPage8_t ioc_pg8; Mpi2IOUnitPage8_t iounit_pg8; /* host mapping support */ struct dev_mapping_table *mapping_table; struct enc_mapping_table *enclosure_table; struct map_removal_table *removal_table; uint8_t *dpm_entry_used; uint8_t *dpm_flush_entry; Mpi2DriverMappingPage0_t *dpm_pg0; uint16_t max_devices; uint16_t max_enclosures; uint16_t max_expanders; uint8_t max_volumes; uint8_t num_enc_table_entries; uint8_t num_rsvd_entries; uint8_t num_channels; uint16_t max_dpm_entries; uint8_t is_dpm_enable; uint8_t track_mapping_events; uint32_t pending_map_events; uint8_t mt_full_retry; uint8_t mt_add_device_failed; /* FW diag Buffer List */ mpr_fw_diagnostic_buffer_t fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_COUNT]; /* Event Recording IOCTL support */ uint32_t events_to_record[4]; mpr_event_entry_t recorded_events[MPR_EVENT_QUEUE_SIZE]; uint8_t event_index; uint32_t event_number; /* EEDP and TLR support */ uint8_t eedp_enabled; uint8_t control_TLR; /* Shutdown Event Handler */ eventhandler_tag shutdown_eh; /* To track topo events during reset */ #define MPR_DIAG_RESET_TIMEOUT 300000 uint8_t wait_for_port_enable; uint8_t port_enable_complete; uint8_t msleep_fake_chan; /* StartStopUnit command handling at shutdown */ uint32_t SSU_refcount; uint8_t SSU_started; char exclude_ids[80]; struct timeval lastfail; }; struct mpr_config_params { MPI2_CONFIG_EXT_PAGE_HEADER_UNION hdr; u_int action; u_int page_address; /* Attributes, not a phys address */ u_int status; void *buffer; u_int length; int timeout; void (*callback)(struct mpr_softc *, struct mpr_config_params *); void *cbdata; }; struct scsi_read_capacity_eedp { uint8_t 
addr[8]; uint8_t length[4]; uint8_t protect; }; static __inline uint32_t mpr_regread(struct mpr_softc *sc, uint32_t offset) { return (bus_space_read_4(sc->mpr_btag, sc->mpr_bhandle, offset)); } static __inline void mpr_regwrite(struct mpr_softc *sc, uint32_t offset, uint32_t val) { bus_space_write_4(sc->mpr_btag, sc->mpr_bhandle, offset, val); } /* free_queue must have Little Endian address * TODO- cm_reply_data is unwanted. We can remove it. * */ static __inline void mpr_free_reply(struct mpr_softc *sc, uint32_t busaddr) { if (++sc->replyfreeindex >= sc->fqdepth) sc->replyfreeindex = 0; sc->free_queue[sc->replyfreeindex] = htole32(busaddr); mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); } static __inline struct mpr_chain * mpr_alloc_chain(struct mpr_softc *sc) { struct mpr_chain *chain; if ((chain = TAILQ_FIRST(&sc->chain_list)) != NULL) { TAILQ_REMOVE(&sc->chain_list, chain, chain_link); sc->chain_free--; if (sc->chain_free < sc->chain_free_lowwater) sc->chain_free_lowwater = sc->chain_free; } else sc->chain_alloc_fail++; return (chain); } static __inline void mpr_free_chain(struct mpr_softc *sc, struct mpr_chain *chain) { #if 0 bzero(chain->chain, 128); #endif sc->chain_free++; TAILQ_INSERT_TAIL(&sc->chain_list, chain, chain_link); } static __inline void mpr_free_command(struct mpr_softc *sc, struct mpr_command *cm) { struct mpr_chain *chain, *chain_temp; if (cm->cm_reply != NULL) mpr_free_reply(sc, cm->cm_reply_data); cm->cm_reply = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_complete_data = NULL; cm->cm_ccb = NULL; cm->cm_targ = NULL; cm->cm_max_segs = 0; cm->cm_lun = 0; cm->cm_state = MPR_CM_STATE_FREE; cm->cm_data = NULL; cm->cm_length = 0; cm->cm_out_len = 0; cm->cm_sglsize = 0; cm->cm_sge = NULL; TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) { TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link); mpr_free_chain(sc, chain); } TAILQ_INSERT_TAIL(&sc->req_list, cm, cm_link); } static __inline struct mpr_command * mpr_alloc_command(struct mpr_softc *sc) { struct mpr_command *cm; cm = TAILQ_FIRST(&sc->req_list); if (cm == NULL) return (NULL); TAILQ_REMOVE(&sc->req_list, cm, cm_link); KASSERT(cm->cm_state == MPR_CM_STATE_FREE, ("mpr: Allocating busy command\n")); cm->cm_state = MPR_CM_STATE_BUSY; return (cm); } static __inline void mpr_free_high_priority_command(struct mpr_softc *sc, struct mpr_command *cm) { struct mpr_chain *chain, *chain_temp; if (cm->cm_reply != NULL) mpr_free_reply(sc, cm->cm_reply_data); cm->cm_reply = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_complete_data = NULL; cm->cm_ccb = NULL; cm->cm_targ = NULL; cm->cm_lun = 0; cm->cm_state = MPR_CM_STATE_FREE; TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) { TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link); mpr_free_chain(sc, chain); } TAILQ_INSERT_TAIL(&sc->high_priority_req_list, cm, cm_link); } static __inline struct mpr_command * mpr_alloc_high_priority_command(struct mpr_softc *sc) { struct mpr_command *cm; cm = TAILQ_FIRST(&sc->high_priority_req_list); if (cm == NULL) return (NULL); TAILQ_REMOVE(&sc->high_priority_req_list, cm, cm_link); KASSERT(cm->cm_state == MPR_CM_STATE_FREE, ("mpr: Allocating busy command\n")); cm->cm_state = MPR_CM_STATE_BUSY; return (cm); } static __inline void mpr_lock(struct mpr_softc *sc) { mtx_lock(&sc->mpr_mtx); } static __inline void mpr_unlock(struct mpr_softc *sc) { mtx_unlock(&sc->mpr_mtx); } #define MPR_INFO (1 << 0) /* Basic info */ #define MPR_FAULT (1 << 1) /* Hardware faults */ 
#define MPR_EVENT (1 << 2) /* Event data from the controller */ #define MPR_LOG (1 << 3) /* Log data from the controller */ #define MPR_RECOVERY (1 << 4) /* Command error recovery tracing */ #define MPR_ERROR (1 << 5) /* Parameter errors, programming bugs */ #define MPR_INIT (1 << 6) /* Things related to system init */ #define MPR_XINFO (1 << 7) /* More detailed/noisy info */ #define MPR_USER (1 << 8) /* Trace user-generated commands */ #define MPR_MAPPING (1 << 9) /* Trace device mappings */ #define MPR_TRACE (1 << 10) /* Function-by-function trace */ #define MPR_SSU_DISABLE_SSD_DISABLE_HDD 0 #define MPR_SSU_ENABLE_SSD_DISABLE_HDD 1 #define MPR_SSU_DISABLE_SSD_ENABLE_HDD 2 #define MPR_SSU_ENABLE_SSD_ENABLE_HDD 3 #define mpr_printf(sc, args...) \ device_printf((sc)->mpr_dev, ##args) #define mpr_vprintf(sc, args...) \ do { \ if (bootverbose) \ mpr_printf(sc, ##args); \ } while (0) #define mpr_dprint(sc, level, msg, args...) \ do { \ - if ((sc)->mpr_debug & level) \ + if ((sc)->mpr_debug & (level)) \ device_printf((sc)->mpr_dev, msg, ##args); \ } while (0) #define mpr_dprint_field(sc, level, msg, args...) \ do { \ - if ((sc)->mpr_debug & level) \ + if ((sc)->mpr_debug & (level)) \ printf("\t" msg, ##args); \ } while (0) #define MPR_PRINTFIELD_START(sc, tag...) \ mpr_dprint((sc), MPR_INFO, ##tag); \ mpr_dprint_field((sc), MPR_INFO, ":\n") #define MPR_PRINTFIELD_END(sc, tag) \ mpr_dprint((sc), MPR_INFO, tag "\n") #define MPR_PRINTFIELD(sc, facts, attr, fmt) \ mpr_dprint_field((sc), MPR_INFO, #attr ": " #fmt "\n", (facts)->attr) #define MPR_EVENTFIELD_START(sc, tag...) \ mpr_dprint((sc), MPR_EVENT, ##tag); \ mpr_dprint_field((sc), MPR_EVENT, ":\n") #define MPR_EVENTFIELD(sc, facts, attr, fmt) \ mpr_dprint_field((sc), MPR_EVENT, #attr ": " #fmt "\n", (facts)->attr) static __inline void mpr_from_u64(uint64_t data, U64 *mpr) { (mpr)->High = htole32((uint32_t)((data) >> 32)); (mpr)->Low = htole32((uint32_t)((data) & 0xffffffff)); } static __inline uint64_t mpr_to_u64(U64 *data) { return (((uint64_t)le32toh(data->High) << 32) | le32toh(data->Low)); } static __inline void mpr_mask_intr(struct mpr_softc *sc) { uint32_t mask; mask = mpr_regread(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET); mask |= MPI2_HIM_REPLY_INT_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET, mask); } static __inline void mpr_unmask_intr(struct mpr_softc *sc) { uint32_t mask; mask = mpr_regread(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET); mask &= ~MPI2_HIM_REPLY_INT_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET, mask); } int mpr_pci_setup_interrupts(struct mpr_softc *sc); int mpr_pci_restore(struct mpr_softc *sc); int mpr_attach(struct mpr_softc *sc); int mpr_free(struct mpr_softc *sc); void mpr_intr(void *); void mpr_intr_msi(void *); void mpr_intr_locked(void *); int mpr_register_events(struct mpr_softc *, uint8_t *, mpr_evt_callback_t *, void *, struct mpr_event_handle **); int mpr_restart(struct mpr_softc *); -int mpr_update_events(struct mpr_softc *, struct mpr_event_handle *, - uint8_t *); +int mpr_update_events(struct mpr_softc *, struct mpr_event_handle *, uint8_t *); int mpr_deregister_events(struct mpr_softc *, struct mpr_event_handle *); int mpr_push_sge(struct mpr_command *, MPI2_SGE_SIMPLE64 *, size_t, int); int mpr_push_ieee_sge(struct mpr_command *, void *, int); int mpr_add_dmaseg(struct mpr_command *, vm_paddr_t, size_t, u_int, int); int mpr_attach_sas(struct mpr_softc *sc); int mpr_detach_sas(struct mpr_softc *sc); int mpr_read_config_page(struct mpr_softc *, struct mpr_config_params *); int 
mpr_write_config_page(struct mpr_softc *, struct mpr_config_params *); void mpr_memaddr_cb(void *, bus_dma_segment_t *, int , int ); void mpr_init_sge(struct mpr_command *cm, void *req, void *sge); int mpr_attach_user(struct mpr_softc *); void mpr_detach_user(struct mpr_softc *); void mprsas_record_event(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event_reply); int mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm); -int mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, - int timeout, int sleep_flag); +int mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, int timeout, + int sleep_flag); int mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm); int mpr_config_get_bios_pg3(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage3_t *config_page); int mpr_config_get_raid_volume_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address); int mpr_config_get_ioc_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *, Mpi2IOCPage8_t *); int mpr_config_get_iounit_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page); int mpr_config_get_sas_device_pg0(struct mpr_softc *, Mpi2ConfigReply_t *, Mpi2SasDevicePage0_t *, u32 , u16 ); int mpr_config_get_dpm_pg0(struct mpr_softc *, Mpi2ConfigReply_t *, Mpi2DriverMappingPage0_t *, u16 ); int mpr_config_get_raid_volume_pg1(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u16 handle); int mpr_config_get_volume_wwid(struct mpr_softc *sc, u16 volume_handle, u64 *wwid); int mpr_config_get_raid_pd_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 page_address); void mprsas_ir_shutdown(struct mpr_softc *sc); int mpr_reinit(struct mpr_softc *sc); void mprsas_handle_reinit(struct mpr_softc *sc); void mpr_base_static_config_pages(struct mpr_softc *sc); int mpr_mapping_initialize(struct mpr_softc *); void mpr_mapping_topology_change_event(struct mpr_softc *, Mpi2EventDataSasTopologyChangeList_t *); int mpr_mapping_is_reinit_required(struct mpr_softc *); void mpr_mapping_free_memory(struct mpr_softc *sc); int mpr_config_set_dpm_pg0(struct mpr_softc *, Mpi2ConfigReply_t *, Mpi2DriverMappingPage0_t *, u16 ); void mpr_mapping_exit(struct mpr_softc *); void mpr_mapping_check_devices(struct mpr_softc *, int); int mpr_mapping_allocate_memory(struct mpr_softc *sc); unsigned int mpr_mapping_get_sas_id(struct mpr_softc *, uint64_t , u16); unsigned int mpr_mapping_get_sas_id_from_handle(struct mpr_softc *sc, u16 handle); unsigned int mpr_mapping_get_raid_id(struct mpr_softc *sc, u64 wwid, u16 handle); unsigned int mpr_mapping_get_raid_id_from_handle(struct mpr_softc *sc, u16 volHandle); void mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *, Mpi2EventDataSasEnclDevStatusChange_t *event_data); void mpr_mapping_ir_config_change_event(struct mpr_softc *sc, Mpi2EventDataIrConfigChangeList_t *event_data); void mprsas_evt_handler(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *event); void mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle); -void mprsas_prepare_volume_remove(struct mprsas_softc *sassc, - uint16_t handle); +void mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle); int mprsas_startup(struct mpr_softc *sc); -struct mprsas_target * mprsas_find_target_by_handle(struct mprsas_softc *, - int, uint16_t); +struct mprsas_target * mprsas_find_target_by_handle(struct 
mprsas_softc *, int, + uint16_t); void mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets); struct mpr_command * mprsas_alloc_tm(struct mpr_softc *sc); void mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm); void mprsas_release_simq_reinit(struct mprsas_softc *sassc); int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type); SYSCTL_DECL(_hw_mpr); /* Compatibility shims for different OS versions */ #if __FreeBSD_version >= 800001 #define mpr_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define mpr_kproc_exit(arg) kproc_exit(arg) #else #define mpr_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define mpr_kproc_exit(arg) kthread_exit(arg) #endif #if defined(CAM_PRIORITY_XPT) #define MPR_PRIORITY_XPT CAM_PRIORITY_XPT #else #define MPR_PRIORITY_XPT 5 #endif #if __FreeBSD_version < 800107 // Prior to FreeBSD-8.0, spc3_flags was not defined. #define spc3_flags reserved #define SPC3_SID_PROTECT 0x01 #define SPC3_SID_3PC 0x08 #define SPC3_SID_TPGS_MASK 0x30 #define SPC3_SID_TPGS_IMPLICIT 0x10 #define SPC3_SID_TPGS_EXPLICIT 0x20 #define SPC3_SID_ACC 0x40 #define SPC3_SID_SCCS 0x80 #define CAM_PRIORITY_NORMAL CAM_PRIORITY_NONE #endif #endif
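As an illustration of the event-recording scheme in mprsas_record_event above (a fixed ring of MPR_EVENT_QUEUE_SIZE entries gated by the 128-bit events_to_record mask, with a wrapping event_index and a monotonically increasing event_number), here is a minimal userland sketch. The queue size and all names below are illustrative stand-ins, not the driver's own:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE	50		/* stand-in for MPR_EVENT_QUEUE_SIZE */

static uint32_t	events_to_record[4];	/* 128-bit event mask */
static uint16_t	recorded[QUEUE_SIZE];	/* ring of event types only */
static uint8_t	event_index;		/* wraps at QUEUE_SIZE */
static uint32_t	event_number;		/* never wraps */

/* Mirror of the driver's bit test: word = event / 32, bit = event % 32. */
static int
event_is_recorded(uint16_t event)
{
	int i = event / 32, j = event % 32;

	return (i < 4 && (events_to_record[i] & (1u << j)) != 0);
}

static void
record_event(uint16_t event)
{
	if (!event_is_recorded(event))
		return;
	recorded[event_index] = event;
	event_number++;
	if (++event_index == QUEUE_SIZE)	/* index wrap-around */
		event_index = 0;
}

int
main(void)
{
	events_to_record[0] = 1u << 2;	/* record only event 0x0002 */
	for (int n = 0; n < 120; n++)
		record_event(0x0002);
	/* 120 recorded; index has wrapped twice and now sits at 20. */
	printf("%u events recorded, index now %u\n", event_number,
	    (unsigned)event_index);
	return (0);
}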
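The MPRIO_READ_CFG_HEADER/MPRIO_READ_CFG_PAGE pair in mpr_ioctl above implies a two-step userland sequence: fetch the page header, size a buffer from header.PageLength (counted in 4-byte units), seed the buffer with that header (the handler copies a header's worth back in from buf before issuing the request), then read the full page. The sketch below is hedged, not a verified client: the /dev/mpr0 node, the include paths, the use of Manufacturing Page 0, and the assumption that the native struct mpr_cfg_page_req mirrors the 32-bit layout shown above (with buf widened to a pointer) are all assumptions.

#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Assumed include paths; the ioctl structs and MPRIO_* commands live in the
 * driver's ioctl header in the kernel source tree. */

int
main(void)
{
	struct mpr_cfg_page_req req;
	void *buf;
	int fd;

	if ((fd = open("/dev/mpr0", O_RDWR)) == -1)
		err(1, "open");

	/* Step 1: fetch the page header to learn the page length. */
	memset(&req, 0, sizeof(req));
	req.header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	req.header.PageNumber = 0;
	if (ioctl(fd, MPRIO_READ_CFG_HEADER, &req) == -1)
		err(1, "MPRIO_READ_CFG_HEADER");

	/* Step 2: size the buffer from PageLength (4-byte units), seed it
	 * with the header returned above, and read the whole page. */
	req.len = req.header.PageLength * 4;
	if ((buf = calloc(1, req.len)) == NULL)
		err(1, "calloc");
	memcpy(buf, &req.header, sizeof(req.header));
	req.buf = buf;
	if (ioctl(fd, MPRIO_READ_CFG_PAGE, &req) == -1)
		err(1, "MPRIO_READ_CFG_PAGE");

	printf("ioc_status 0x%04x, %d bytes\n", req.ioc_status, req.len);
	free(buf);
	close(fd);
	return (0);
}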