Index: stable/10/sys/dev/mrsas/mrsas.c =================================================================== --- stable/10/sys/dev/mrsas/mrsas.c (revision 300735) +++ stable/10/sys/dev/mrsas/mrsas.c (revision 300736) @@ -1,4148 +1,4492 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
* * Send feedback to: Mail to: AVAGO TECHNOLOGIES 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include #include #include /* * Function prototypes */ static d_open_t mrsas_open; static d_close_t mrsas_close; static d_read_t mrsas_read; static d_write_t mrsas_write; static d_ioctl_t mrsas_ioctl; static d_poll_t mrsas_poll; +static void mrsas_ich_startup(void *arg); static struct mrsas_mgmt_info mrsas_mgmt_info; static struct mrsas_ident *mrsas_find_ident(device_t); static int mrsas_setup_msix(struct mrsas_softc *sc); static int mrsas_allocate_msix(struct mrsas_softc *sc); static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); static void mrsas_flush_cache(struct mrsas_softc *sc); static void mrsas_reset_reply_desc(struct mrsas_softc *sc); static void mrsas_ocr_thread(void *arg); static int mrsas_get_map_info(struct mrsas_softc *sc); static int mrsas_get_ld_map_info(struct mrsas_softc *sc); static int mrsas_sync_map_info(struct mrsas_softc *sc); static int mrsas_get_pd_list(struct mrsas_softc *sc); static int mrsas_get_ld_list(struct mrsas_softc *sc); static int mrsas_setup_irq(struct mrsas_softc *sc); static int mrsas_alloc_mem(struct mrsas_softc *sc); static int mrsas_init_fw(struct mrsas_softc *sc); static int mrsas_setup_raidmap(struct mrsas_softc *sc); -static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); +static void megasas_setup_jbod_map(struct mrsas_softc *sc); +static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); static int mrsas_clear_intr(struct mrsas_softc *sc); static int mrsas_get_ctrl_info(struct mrsas_softc *sc); static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort); static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg); u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd); void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); int mrsas_init_adapter(struct mrsas_softc *sc); int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); int mrsas_ioc_init(struct mrsas_softc *sc); int mrsas_bus_scan(struct mrsas_softc *sc); int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); -int mrsas_reset_ctrl(struct mrsas_softc *sc); -int mrsas_wait_for_outstanding(struct mrsas_softc *sc); +int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); +int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); +int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size); void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_disable_intr(struct mrsas_softc *sc); void 
mrsas_enable_intr(struct mrsas_softc *sc); void mrsas_free_ioc_cmd(struct mrsas_softc *sc); void mrsas_free_mem(struct mrsas_softc *sc); void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); void mrsas_isr(void *arg); void mrsas_teardown_intr(struct mrsas_softc *sc); void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); void mrsas_kill_hba(struct mrsas_softc *sc); void mrsas_aen_handler(struct mrsas_softc *sc); void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value); void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi); void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status); void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus); struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_cam_attach(struct mrsas_softc *sc); extern void mrsas_cam_detach(struct mrsas_softc *sc); extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern void mrsas_xpt_freeze(struct mrsas_softc *sc); extern void mrsas_xpt_release(struct mrsas_softc *sc); extern MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); /* * PCI device struct and table * */ typedef struct mrsas_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; const char *desc; } MRSAS_CTLR_ID; MRSAS_CTLR_ID device_table[] = { {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, + {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"}, + {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"}, {0, 0, 0, 0, NULL} }; /* * Character device entry points * */ static struct cdevsw mrsas_cdevsw = { .d_version = D_VERSION, .d_open = mrsas_open, .d_close = mrsas_close, .d_read = mrsas_read, .d_write = mrsas_write, .d_ioctl = mrsas_ioctl, .d_poll = mrsas_poll, .d_name = "mrsas", }; MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); /* * In the cdevsw routines, we find our softc by using the si_drv1 member of * struct cdev. We set this variable to point to our softc in our attach * routine when we create the /dev entry. 
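/*
 * Aside: mrsas_find_ident() (below) walks device_table and treats a
 * subvendor/subdevice of 0xffff as a wildcard. A minimal userspace sketch of
 * that match; the table entry and IDs here are illustrative, not the driver's:
 */
#include <stdio.h>
#include <stdint.h>

struct pci_ident {
	uint16_t vendor, device, subvendor, subdevice;
	const char *desc;
};

static const struct pci_ident table[] = {
	{0x1000, 0x005b, 0xffff, 0xffff, "wildcard subsystem match"},
	{0, 0, 0, 0, NULL}
};

static const struct pci_ident *
find_ident(uint16_t ven, uint16_t dev, uint16_t sven, uint16_t sdev)
{
	for (const struct pci_ident *p = table; p->vendor != 0; p++) {
		if (p->vendor == ven && p->device == dev &&
		    (p->subvendor == sven || p->subvendor == 0xffff) &&
		    (p->subdevice == sdev || p->subdevice == 0xffff))
			return (p);
	}
	return (NULL);
}

int
main(void)
{
	/* Any subsystem IDs match because the table uses 0xffff wildcards. */
	const struct pci_ident *id = find_ident(0x1000, 0x005b, 0x1234, 0x5678);
	printf("%s\n", id ? id->desc : "no match");
	return (0);
}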
*/ int mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } /* * Register Read/Write Functions * */ void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; bus_space_write_4(bus_tag, bus_handle, offset, value); } u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); } /* * Interrupt Disable/Enable/Clear Functions * */ void mrsas_disable_intr(struct mrsas_softc *sc) { u_int32_t mask = 0xFFFFFFFF; u_int32_t status; sc->mask_interrupts = 1; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); /* Dummy read to force pci flush */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } void mrsas_enable_intr(struct mrsas_softc *sc) { u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; u_int32_t status; sc->mask_interrupts = 0; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } static int mrsas_clear_intr(struct mrsas_softc *sc) { u_int32_t status, fw_status, fw_state; /* Read received interrupt */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); /* * If FW state change interrupt is received, write to it again to * clear */ if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) { fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = fw_status & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { device_printf(sc->mrsas_dev, "FW is in FAULT state!\n"); if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); } mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status); mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); return (1); } /* Not our interrupt, so just return */ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return (0); /* We got a reply interrupt */ return (1); } /* * PCI Support Functions * */ static struct mrsas_ident * mrsas_find_ident(device_t dev) { struct mrsas_ident *pci_device; for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { if ((pci_device->vendor == pci_get_vendor(dev)) && (pci_device->device == pci_get_device(dev)) && ((pci_device->subvendor == pci_get_subvendor(dev)) || (pci_device->subvendor == 0xffff)) && ((pci_device->subdevice == pci_get_subdevice(dev)) || (pci_device->subdevice == 0xffff))) return (pci_device); } return (NULL); } static int mrsas_probe(device_t dev) { static u_int8_t first_ctrl = 1; struct mrsas_ident *id; if ((id = mrsas_find_ident(dev)) != NULL) { if (first_ctrl) { printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION); first_ctrl = 0; } device_set_desc(dev, id->desc); /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ return 
(-30); } return (ENXIO); } /* * mrsas_setup_sysctl: setup sysctl values for mrsas * input: Adapter instance soft state * * Setup sysctl entries for mrsas driver. */ static void mrsas_setup_sysctl(struct mrsas_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. */ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", device_get_unit(sc->mrsas_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, "Disable the use of OCR"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, strlen(MRSAS_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_count", CTLFLAG_RD, &sc->reset_count, 0, "number of OCRs from start of the day"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "fw_outstanding", CTLFLAG_RD, &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, "Driver debug level"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 0, "Driver IO timeout value in milliseconds"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, &sc->mrsas_fw_fault_check_delay, 0, "FW fault check thread delay in seconds"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_in_progress", CTLFLAG_RD, &sc->reset_in_progress, 0, "OCR in progress status"); } /* * mrsas_get_tunables: get tunable parameters. * input: Adapter instance soft state * * Get tunable parameters. These help to debug the driver at boot time. */ static void mrsas_get_tunables(struct mrsas_softc *sc) { char tmpstr[80]; /* XXX default to some debugging for now */ sc->mrsas_debug = MRSAS_FAULT; sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; sc->mrsas_fw_fault_check_delay = 1; sc->reset_count = 0; sc->reset_in_progress = 0; /* * Grab the global variables. */ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); /* * Grab the global variables. */ TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", device_get_unit(sc->mrsas_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); } /* * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. * Used to get sequence number at driver load time. * input: Adapter soft state * * Allocates DMAable memory for the event log info internal command.
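/*
 * Aside on mrsas_get_tunables() above: the per-unit tunable is fetched after
 * the global one, so a dev.mrsas.N.debug_level loader setting overrides
 * hw.mrsas.debug_level, which in turn overrides the compiled-in default.
 * A minimal kernel-context sketch of that precedence; this helper is
 * hypothetical, not a function in the driver:
 */
static int
fetch_debug_level(int unit)
{
	char tmpstr[80];
	int level = MRSAS_FAULT;	/* compiled-in default */

	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &level);	/* global knob */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", unit);
	TUNABLE_INT_FETCH(tmpstr, &level);	/* per-unit knob wins if set */
	return (level);
}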
*/ int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) { int el_info_size; /* Allocate get event log info command */ el_info_size = sizeof(struct mrsas_evt_log_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, el_info_size, 1, el_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->el_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, sc->el_info_mem, el_info_size, mrsas_addr_cb, &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); return (ENOMEM); } memset(sc->el_info_mem, 0, el_info_size); return (0); } /* * mrsas_free_evt_info_cmd: Free memory for Event log info command * input: Adapter soft state * * Deallocates memory for the event log info internal command. */ void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) { if (sc->el_info_phys_addr) bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); if (sc->el_info_mem != NULL) bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); if (sc->el_info_tag != NULL) bus_dma_tag_destroy(sc->el_info_tag); } /* * mrsas_get_seq_num: Get latest event sequence number * @sc: Adapter soft state * @eli: Firmware event log sequence number information. * * Firmware maintains a log of all events in a non-volatile area. * Driver get the sequence number using DCMD * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. */ static int mrsas_get_seq_num(struct mrsas_softc *sc, struct mrsas_evt_log_info *eli) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; + u_int8_t do_ocr = 1, retcode = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); - mrsas_issue_blocked_cmd(sc, cmd); + retcode = mrsas_issue_blocked_cmd(sc, cmd); + if (retcode == ETIMEDOUT) + goto dcmd_timeout; + do_ocr = 0; /* * Copy the data back into callers buffer */ memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); mrsas_free_evt_log_info_cmd(sc); - mrsas_release_mfi_cmd(cmd); - return 0; +dcmd_timeout: + if (do_ocr) + sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); + + return retcode; } /* * mrsas_register_aen: Register for asynchronous event notification * @sc: Adapter soft state * @seq_num: Starting sequence number * @class_locale: Class of the event * * This function subscribes for events beyond the @seq_num * and type @class_locale. 
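/*
 * Aside on the error path this diff adds to mrsas_get_seq_num() above: on
 * ETIMEDOUT the MFI command is deliberately NOT released (firmware may still
 * own its frame) and a deferred controller reset is requested instead. The
 * helper below is a sketch of that idiom, not a function in the driver:
 */
static int
issue_dcmd_with_ocr(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t do_ocr = 1;
	int retcode;

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	do_ocr = 0;
	/* ... copy DCMD results back to the caller here ... */
dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;	/* OCR thread acts on this */
	else
		mrsas_release_mfi_cmd(cmd);	/* safe: FW has completed it */
	return (retcode);
}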
* */ static int mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, u_int32_t class_locale_word) { int ret_val; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; union mrsas_evt_class_locale curr_aen; union mrsas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new AEN * request we currently have. If it is, then we don't have to do * anything. In other words, whichever events the current AEN request * is subscribing to, have already been subscribed to. If the old_cmd * is _not_ inclusive, then we have to abort that command, form a * class_locale that is superset of both old and current and re-issue * to the FW */ curr_aen.word = class_locale_word; if (sc->aen_cmd) { prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. Locale numbers don't have such hierarchy. They * are bitmap values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. */ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; sc->aen_cmd->abort_aen = 1; ret_val = mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (ret_val) { printf("mrsas: Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = mrsas_get_mfi_cmd(sc); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->mbox.w[0] = seq_num; sc->last_seq_num = seq_num; dcmd->mbox.w[1] = curr_aen.word; dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); if (sc->aen_cmd != NULL) { mrsas_release_mfi_cmd(cmd); return 0; } /* * Store reference to the cmd used to register for AEN. 
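/*
 * Aside: the class/locale merge in mrsas_register_aen() above is worth a
 * standalone demo. Lower class values are supersets (PROGRESS is -1, the
 * widest); locales are independent bitmap bits. Userspace sketch; the union
 * layout below is one plausible layout, not copied from the driver headers:
 */
#include <stdio.h>
#include <stdint.h>

union evt_class_locale {
	struct {
		uint16_t locale;
		uint8_t reserved;
		int8_t class;	/* signed: -1 (PROGRESS) includes everything */
	} members;
	uint32_t word;
};

static int
prev_covers_new(union evt_class_locale prev, union evt_class_locale cur)
{
	/* prev covers cur if its class is at least as wide and its locale
	 * bitmap is a superset of cur's. */
	return (prev.members.class <= cur.members.class) &&
	    !((prev.members.locale & cur.members.locale) ^ cur.members.locale);
}

int
main(void)
{
	union evt_class_locale prev = {.members = {.locale = 0x0001, .class = 0}};
	union evt_class_locale cur = {.members = {.locale = 0x0003, .class = 1}};

	if (prev_covers_new(prev, cur)) {
		printf("already covered, nothing to do\n");
	} else {
		/* Build the superset registration, as the driver does. */
		cur.members.locale |= prev.members.locale;
		if (prev.members.class < cur.members.class)
			cur.members.class = prev.members.class;
		printf("re-register with locale 0x%04x class %d\n",
		    cur.members.locale, cur.members.class);
	}
	return (0);
}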
When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ sc->aen_cmd = cmd; /* * Issue the aen registration frame */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); return (1); } return 0; } /* * mrsas_start_aen: Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int mrsas_start_aen(struct mrsas_softc *sc) { struct mrsas_evt_log_info eli; union mrsas_evt_class_locale class_locale; /* Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (mrsas_get_seq_num(sc, &eli)) return -1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return mrsas_register_aen(sc, eli.newest_seq_num + 1, class_locale.word); } /* * mrsas_setup_msix: Allocate MSI-x vectors * @sc: adapter soft state */ static int mrsas_setup_msix(struct mrsas_softc *sc) { int i; for (i = 0; i < sc->msix_vectors; i++) { sc->irq_context[i].sc = sc; sc->irq_context[i].MSIxIndex = i; sc->irq_id[i] = i + 1; sc->mrsas_irq[i] = bus_alloc_resource_any (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] ,RF_ACTIVE); if (sc->mrsas_irq[i] == NULL) { device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); goto irq_alloc_failed; } if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[i], INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, &sc->irq_context[i], &sc->intr_handle[i])) { device_printf(sc->mrsas_dev, "Cannot set up MSI-x interrupt handler\n"); goto irq_alloc_failed; } } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_allocate_msix: Setup MSI-x vectors * @sc: adapter soft state */ static int mrsas_allocate_msix(struct mrsas_softc *sc) { if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { device_printf(sc->mrsas_dev, "Using MSI-X with %d number" " of vectors\n", sc->msix_vectors); } else { device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); goto irq_alloc_failed; } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_attach: PCI entry point * input: pointer to device struct * * Performs setup of PCI and registers, initializes mutexes and linked lists, * registers interrupts and CAM, and initializes the adapter/controller to * its proper state. */ static int mrsas_attach(device_t dev) { struct mrsas_softc *sc = device_get_softc(dev); uint32_t cmd, bar, error; - struct cdev *linux_dev; + memset(sc, 0, sizeof(struct mrsas_softc)); + /* Look up our softc and initialize its fields. */ sc->mrsas_dev = dev; sc->device_id = pci_get_device(dev); mrsas_get_tunables(sc); /* * Set up PCI and registers */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_PORTEN) == 0) { return (ENXIO); } /* Force the busmaster enable bit on. 
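/*
 * Aside on mrsas_setup_msix() above: the same ISR is registered once per
 * MSI-X vector, and a small per-vector context tells it which reply queue
 * (MSIxIndex) to service. Userspace sketch of that fan-out pattern; names
 * are illustrative:
 */
#include <stdio.h>

struct irq_context {
	void *sc;		/* shared adapter state */
	unsigned MSIxIndex;	/* which reply queue this vector owns */
};

static void
isr(void *arg)
{
	struct irq_context *ctx = arg;
	printf("servicing reply queue %u\n", ctx->MSIxIndex);
}

int
main(void)
{
	struct irq_context ctx[4];
	for (unsigned i = 0; i < 4; i++) {
		ctx[i] = (struct irq_context){.sc = NULL, .MSIxIndex = i};
		isr(&ctx[i]);	/* in the kernel, bus_setup_intr() wires this up */
	}
	return (0);
}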
*/ cmd |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */ if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate PCI registers\n"); goto attach_fail; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); /* Initialize mutexes */ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); - /* - * Intialize a counting Semaphore to take care no. of concurrent - * IOCTLs - */ - sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION); - /* Initialize linked list */ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); mrsas_atomic_set(&sc->fw_outstanding, 0); sc->io_cmds_highwater = 0; - /* Create a /dev entry for this device. */ - sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT, - GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", - device_get_unit(dev)); - if (device_get_unit(dev) == 0) - make_dev_alias_p(MAKEDEV_CHECKNAME, &linux_dev, sc->mrsas_cdev, - "megaraid_sas_ioctl_node"); - if (sc->mrsas_cdev) - sc->mrsas_cdev->si_drv1 = sc; - sc->adprecovery = MRSAS_HBA_OPERATIONAL; sc->UnevenSpanSupport = 0; sc->msix_enable = 0; /* Initialize Firmware */ if (mrsas_init_fw(sc) != SUCCESS) { goto attach_fail_fw; } - /* Register SCSI mid-layer */ + /* Register mrsas to CAM layer */ if ((mrsas_cam_attach(sc) != SUCCESS)) { goto attach_fail_cam; } /* Register IRQs */ if (mrsas_setup_irq(sc) != SUCCESS) { goto attach_fail_irq; } - /* Enable Interrupts */ - mrsas_enable_intr(sc); - error = mrsas_kproc_create(mrsas_ocr_thread, sc, &sc->ocr_thread, 0, 0, "mrsas_ocr%d", device_get_unit(sc->mrsas_dev)); if (error) { - printf("Error %d starting rescan thread\n", error); - goto attach_fail_irq; + device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error); + goto attach_fail_ocr_thread; } - mrsas_setup_sysctl(sc); - - /* Initiate AEN (Asynchronous Event Notification) */ - - if (mrsas_start_aen(sc)) { - printf("Error: start aen failed\n"); - goto fail_start_aen; - } /* - * Add this controller to mrsas_mgmt_info structure so that it can be - * exported to management applications + * After FW initialization and OCR thread creation, + * we defer the cdev creation and AEN setup to the ICH callback */ - if (device_get_unit(dev) == 0) - memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); + sc->mrsas_ich.ich_func = mrsas_ich_startup; + sc->mrsas_ich.ich_arg = sc; + if (config_intrhook_establish(&sc->mrsas_ich) != 0) { + device_printf(sc->mrsas_dev, "Config hook is already established\n"); + } + mrsas_setup_sysctl(sc); + return SUCCESS; - mrsas_mgmt_info.count++; - mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; - mrsas_mgmt_info.max_index++; - - return (0); - -fail_start_aen: +attach_fail_ocr_thread: + if (sc->ocr_thread_active) + wakeup(&sc->ocr_chan); attach_fail_irq: mrsas_teardown_intr(sc); attach_fail_cam:
mrsas_cam_detach(sc); attach_fail_fw: /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ if (sc->msix_enable == 1) pci_release_msi(sc->mrsas_dev); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); - /* Destroy the counting semaphore created for Ioctl */ - sema_destroy(&sc->ioctl_count_sema); attach_fail: - destroy_dev(sc->mrsas_cdev); if (sc->reg_res) { bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } return (ENXIO); } /* + * Interrupt config hook + */ +static void +mrsas_ich_startup(void *arg) +{ + struct mrsas_softc *sc = (struct mrsas_softc *)arg; + + /* + * Initialize a counting semaphore to limit the number of concurrent IOCTLs + */ + sema_init(&sc->ioctl_count_sema, + MRSAS_MAX_MFI_CMDS - 5, + IOCTL_SEMA_DESCRIPTION); + + /* Create a /dev entry for mrsas controller. */ + sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT, + GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", + device_get_unit(sc->mrsas_dev)); + + if (device_get_unit(sc->mrsas_dev) == 0) { + make_dev_alias_p(MAKEDEV_CHECKNAME, + &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev, + "megaraid_sas_ioctl_node"); + } + if (sc->mrsas_cdev) + sc->mrsas_cdev->si_drv1 = sc; + + /* + * Add this controller to mrsas_mgmt_info structure so that it can be + * exported to management applications + */ + if (device_get_unit(sc->mrsas_dev) == 0) + memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); + + mrsas_mgmt_info.count++; + mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; + mrsas_mgmt_info.max_index++; + + /* Enable Interrupts */ + mrsas_enable_intr(sc); + + /* Initiate AEN (Asynchronous Event Notification) */ + if (mrsas_start_aen(sc)) { + device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! " + "Further events from the controller will not be communicated.\n" + "Either there is some problem in the controller " + "or the controller does not support AEN.\n" + "Please contact the SUPPORT TEAM if the problem persists\n"); + } + if (sc->mrsas_ich.ich_arg != NULL) { + device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n"); + config_intrhook_disestablish(&sc->mrsas_ich); + sc->mrsas_ich.ich_arg = NULL; + } +} + +/* * mrsas_detach: De-allocates and tears down resources * input: pointer to device struct * * This function is the entry point for device disconnect and detach. * It performs memory de-allocations, shutdown of the controller and various * teardown and destroy resource functions. */ static int mrsas_detach(device_t dev) { struct mrsas_softc *sc; int i = 0; sc = device_get_softc(dev); sc->remove_in_progress = 1; /* Destroy the character device so no other IOCTL will be handled */ + if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev) + destroy_dev(sc->mrsas_linux_emulator_cdev); destroy_dev(sc->mrsas_cdev); /* * Take the instance off the instance array. Note that we will not * decrement the max_index.
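/*
 * Aside on the ICH (interrupt config hook) pattern introduced above: work
 * that needs working interrupts or may sleep (AEN registration, cdev
 * creation) is deferred out of attach into a callback the kernel invokes
 * once interrupts are live, then the hook is disestablished so boot can
 * proceed. A minimal kernel-context skeleton with a hypothetical foo driver,
 * not the mrsas code itself:
 */
struct foo_softc {
	device_t dev;
	struct intr_config_hook ich;
};

static void
foo_ich_startup(void *arg)
{
	struct foo_softc *sc = arg;

	/* ... late setup that needed interrupts goes here ... */
	config_intrhook_disestablish(&sc->ich);	/* runs exactly once */
}

static int
foo_defer_startup(struct foo_softc *sc)
{
	sc->ich.ich_func = foo_ich_startup;
	sc->ich.ich_arg = sc;
	return (config_intrhook_establish(&sc->ich));	/* 0 on success */
}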
We let this array be a sparse array */ for (i = 0; i < mrsas_mgmt_info.max_index; i++) { if (mrsas_mgmt_info.sc_ptr[i] == sc) { mrsas_mgmt_info.count--; mrsas_mgmt_info.sc_ptr[i] = NULL; break; } } if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, - "[%2d]waiting for ocr to be finished\n", i); + "[%2d]waiting for OCR to be finished from %s\n", i, __func__); } pause("mr_shutdown", hz); } i = 0; while (sc->ocr_thread_active) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for " "mrsas_ocr thread to quit ocr %d\n", i, sc->ocr_thread_active); } pause("mr_shutdown", hz); } mrsas_flush_cache(sc); mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); mrsas_disable_intr(sc); mrsas_cam_detach(sc); mrsas_teardown_intr(sc); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); /* Wait for all the semaphores to be released */ while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5)) pause("mr_shutdown", hz); /* Destroy the counting semaphore created for Ioctl */ sema_destroy(&sc->ioctl_count_sema); if (sc->reg_res) { bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); return (0); } /* * mrsas_free_mem: Frees allocated memory * input: Adapter instance soft state * * This function is called from mrsas_detach() to free previously allocated * memory. */ void mrsas_free_mem(struct mrsas_softc *sc) { int i; u_int32_t max_cmd; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; /* * Free RAID map memory */ for (i = 0; i < 2; i++) { if (sc->raidmap_phys_addr[i]) bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]); if (sc->raidmap_mem[i] != NULL) bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]); if (sc->raidmap_tag[i] != NULL) bus_dma_tag_destroy(sc->raidmap_tag[i]); if (sc->ld_drv_map[i] != NULL) free(sc->ld_drv_map[i], M_MRSAS); } - + for (i = 0; i < 2; i++) { + if (sc->jbodmap_phys_addr[i]) + bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]); + if (sc->jbodmap_mem[i] != NULL) + bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]); + if (sc->jbodmap_tag[i] != NULL) + bus_dma_tag_destroy(sc->jbodmap_tag[i]); + } /* * Free version buffer memory */ if (sc->verbuf_phys_addr) bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap); if (sc->verbuf_mem != NULL) bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap); if (sc->verbuf_tag != NULL) bus_dma_tag_destroy(sc->verbuf_tag); /* * Free sense buffer memory */ if (sc->sense_phys_addr) bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap); if (sc->sense_mem != NULL) bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap); if (sc->sense_tag != NULL) bus_dma_tag_destroy(sc->sense_tag); /* * Free chain frame memory */ if (sc->chain_frame_phys_addr) bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap); if (sc->chain_frame_mem != NULL) bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap); if (sc->chain_frame_tag != NULL) bus_dma_tag_destroy(sc->chain_frame_tag); /* * Free IO Request memory */ if (sc->io_request_phys_addr) bus_dmamap_unload(sc->io_request_tag,
sc->io_request_dmamap); if (sc->io_request_mem != NULL) bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap); if (sc->io_request_tag != NULL) bus_dma_tag_destroy(sc->io_request_tag); /* * Free Reply Descriptor memory */ if (sc->reply_desc_phys_addr) bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap); if (sc->reply_desc_mem != NULL) bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap); if (sc->reply_desc_tag != NULL) bus_dma_tag_destroy(sc->reply_desc_tag); /* * Free event detail memory */ if (sc->evt_detail_phys_addr) bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap); if (sc->evt_detail_mem != NULL) bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap); if (sc->evt_detail_tag != NULL) bus_dma_tag_destroy(sc->evt_detail_tag); /* * Free MFI frames */ if (sc->mfi_cmd_list) { for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { mfi_cmd = sc->mfi_cmd_list[i]; mrsas_free_frame(sc, mfi_cmd); } } if (sc->mficmd_frame_tag != NULL) bus_dma_tag_destroy(sc->mficmd_frame_tag); /* * Free MPT internal command list */ max_cmd = sc->max_fw_cmds; if (sc->mpt_cmd_list) { for (i = 0; i < max_cmd; i++) { mpt_cmd = sc->mpt_cmd_list[i]; bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap); free(sc->mpt_cmd_list[i], M_MRSAS); } free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; } /* * Free MFI internal command list */ if (sc->mfi_cmd_list) { for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { free(sc->mfi_cmd_list[i], M_MRSAS); } free(sc->mfi_cmd_list, M_MRSAS); sc->mfi_cmd_list = NULL; } /* * Free request descriptor memory */ free(sc->req_desc, M_MRSAS); sc->req_desc = NULL; /* * Destroy parent tag */ if (sc->mrsas_parent_tag != NULL) bus_dma_tag_destroy(sc->mrsas_parent_tag); /* * Free ctrl_info memory */ if (sc->ctrl_info != NULL) free(sc->ctrl_info, M_MRSAS); } /* * mrsas_teardown_intr: Teardown interrupt * input: Adapter instance soft state * * This function is called from mrsas_detach() to teardown and release bus * interrupt resourse. */ void mrsas_teardown_intr(struct mrsas_softc *sc) { int i; if (!sc->msix_enable) { if (sc->intr_handle[0]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); if (sc->mrsas_irq[0] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[0], sc->mrsas_irq[0]); sc->intr_handle[0] = NULL; } else { for (i = 0; i < sc->msix_vectors; i++) { if (sc->intr_handle[i]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], sc->intr_handle[i]); if (sc->mrsas_irq[i] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[i], sc->mrsas_irq[i]); sc->intr_handle[i] = NULL; } pci_release_msi(sc->mrsas_dev); } } /* * mrsas_suspend: Suspend entry point * input: Device struct pointer * * This function is the entry point for system suspend from the OS. */ static int mrsas_suspend(device_t dev) { - struct mrsas_softc *sc; - - sc = device_get_softc(dev); + /* This will be filled when the driver will have hibernation support */ return (0); } /* * mrsas_resume: Resume entry point * input: Device struct pointer * * This function is the entry point for system resume from the OS. */ static int mrsas_resume(device_t dev) { - struct mrsas_softc *sc; - - sc = device_get_softc(dev); + /* This will be filled when the driver will have hibernation support */ return (0); } /** * mrsas_get_softc_instance: Find softc instance based on cmd type * * This function will return softc instance based on cmd type. 
* In some case, application fire ioctl on required management instance and * do not provide host_no. Use cdev->si_drv1 to get softc instance for those * case, else get the softc instance from host_no provided by application in * user data. */ static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) { struct mrsas_softc *sc = NULL; struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; if (cmd == MRSAS_IOC_GET_PCI_INFO) { sc = dev->si_drv1; } else { /* * get the Host number & the softc from data sent by the * Application */ sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; if (sc == NULL) printf("There is no Controller number %d\n", user_ioc->host_no); else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) mrsas_dprint(sc, MRSAS_FAULT, "Invalid Controller number %d\n", user_ioc->host_no); } return sc; } /* * mrsas_ioctl: IOCtl commands entry point. * * This function is the entry point for IOCtls from the OS. It calls the * appropriate function for processing depending on the command received. */ static int mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) { struct mrsas_softc *sc; int ret = 0, i = 0; MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; sc = mrsas_get_softc_instance(dev, cmd, arg); if (!sc) return ENOENT; if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_INFO, "Driver remove or shutdown called.\n"); return ENOENT; } mtx_lock_spin(&sc->ioctl_lock); if (!sc->reset_in_progress) { mtx_unlock_spin(&sc->ioctl_lock); goto do_ioctl; } mtx_unlock_spin(&sc->ioctl_lock); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, - "[%2d]waiting for " - "OCR to be finished %d\n", i, - sc->ocr_thread_active); + "[%2d]waiting for OCR to be finished from %s\n", i, __func__); } pause("mr_ioctl", hz); } do_ioctl: switch (cmd) { case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: #ifdef COMPAT_FREEBSD32 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: #endif /* * Decrement the Ioctl counting Semaphore before getting an * mfi command */ sema_wait(&sc->ioctl_count_sema); ret = mrsas_passthru(sc, (void *)arg, cmd); /* Increment the Ioctl counting semaphore value */ sema_post(&sc->ioctl_count_sema); break; case MRSAS_IOC_SCAN_BUS: ret = mrsas_bus_scan(sc); break; case MRSAS_IOC_GET_PCI_INFO: pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," "pci device no: %d, pci function no: %d," "pci domain ID: %d\n", pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, pciDrvInfo->functionNumber, pciDrvInfo->domainID); ret = 0; break; default: mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); ret = ENOENT; } return (ret); } /* * mrsas_poll: poll entry point for mrsas driver fd * * This function is the entry point for poll from the OS. It waits for some AEN * events to be triggered from the controller and notifies back. 
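/*
 * Aside on the passthrough path above: a counting semaphore sized to the MFI
 * command pool minus a reserve (the driver uses MRSAS_MAX_MFI_CMDS - 5)
 * guarantees concurrent ioctls can never exhaust internal commands. A
 * runnable userspace sketch with POSIX semaphores; the pool size here is
 * illustrative:
 */
#include <semaphore.h>
#include <stdio.h>

#define MAX_MFI_CMDS 16	/* illustrative; the driver's pool is larger */

static sem_t ioctl_sema;

static int
do_passthru(void)
{
	sem_wait(&ioctl_sema);	/* take one slot; blocks when pool is full */
	/* ... issue the firmware passthrough command here ... */
	sem_post(&ioctl_sema);	/* give the slot back */
	return (0);
}

int
main(void)
{
	sem_init(&ioctl_sema, 0, MAX_MFI_CMDS - 5);
	do_passthru();
	sem_destroy(&ioctl_sema);
	return (0);
}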
*/ static int mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) { struct mrsas_softc *sc; int revents = 0; sc = dev->si_drv1; if (poll_events & (POLLIN | POLLRDNORM)) { if (sc->mrsas_aen_triggered) { revents |= poll_events & (POLLIN | POLLRDNORM); } } if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) { mtx_lock(&sc->aen_lock); sc->mrsas_poll_waiting = 1; selrecord(td, &sc->mrsas_select); mtx_unlock(&sc->aen_lock); } } return revents; } /* * mrsas_setup_irq: Set up interrupt * input: Adapter instance soft state * * This function sets up interrupts as a bus resource, with flags indicating * resource permitting contemporaneous sharing and for resource to activate * atomically. */ static int mrsas_setup_irq(struct mrsas_softc *sc) { if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); else { device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); sc->irq_context[0].sc = sc; sc->irq_context[0].MSIxIndex = 0; sc->irq_id[0] = 0; sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); if (sc->mrsas_irq[0] == NULL) { device_printf(sc->mrsas_dev, "Cannot allocate legacy " "interrupt\n"); return (FAIL); } if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, &sc->irq_context[0], &sc->intr_handle[0])) { device_printf(sc->mrsas_dev, "Cannot set up legacy " "interrupt\n"); return (FAIL); } } return (0); } /* * mrsas_isr: ISR entry point * input: argument pointer * * This function is the interrupt service routine entry point. There are two * types of interrupts, state change interrupt and response interrupt. If an * interrupt is not ours, we just return. */ void mrsas_isr(void *arg) { struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg; struct mrsas_softc *sc = irq_context->sc; int status = 0; if (sc->mask_interrupts) return; if (!sc->msix_vectors) { status = mrsas_clear_intr(sc); if (!status) return; } /* If we are resetting, bail */ if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { printf(" Entered into ISR when OCR is going active. \n"); mrsas_clear_intr(sc); return; } /* Process for reply request and clear response interrupt */ if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS) mrsas_clear_intr(sc); return; } /* * mrsas_complete_cmd: Process reply request * input: Adapter instance soft state * * This function is called from mrsas_isr() to process reply request and clear * response interrupt. Processing of the reply request entails walking * through the reply descriptor array for the command request pended from * Firmware. We look at the Function field to determine the command type and * perform the appropriate action. Before we return, we clear the response * interrupt.
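/*
 * Aside: a runnable userspace sketch of the reply-ring walk that
 * mrsas_complete_cmd() below performs. Consumed entries are re-poisoned to
 * all 0xFF; since the producer (firmware) overwrites slots with real
 * descriptors, "all ones" marks the end of new completions. Values are
 * illustrative:
 */
#include <stdio.h>
#include <stdint.h>

#define RING_DEPTH 8

int
main(void)
{
	uint64_t ring[RING_DEPTH];
	unsigned last_idx = 0;

	for (int i = 0; i < RING_DEPTH; i++)
		ring[i] = ~(uint64_t)0;	/* empty slots are all 0xFF */

	ring[0] = 0x1001;	/* pretend firmware posted two replies */
	ring[1] = 0x1002;

	while (ring[last_idx] != ~(uint64_t)0) {
		printf("completing smid 0x%llx at slot %u\n",
		    (unsigned long long)ring[last_idx], last_idx);
		ring[last_idx] = ~(uint64_t)0;	/* re-poison the consumed slot */
		if (++last_idx == RING_DEPTH)	/* wrap like last_reply_idx */
			last_idx = 0;
	}
	return (0);
}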
*/ -static int +int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) { Mpi2ReplyDescriptorsUnion_t *desc; MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int8_t reply_descript_type; u_int16_t smid, num_completed; u_int8_t status, extStatus; union desc_value desc_val; PLD_LOAD_BALANCE_INFO lbinfo; u_int32_t device_id; int threshold_reply_count = 0; /* If we have a hardware error, not need to continue */ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return (DONE); desc = sc->reply_desc_mem; desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) + sc->last_reply_idx[MSIxIndex]; reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; desc_val.word = desc->Words; num_completed = 0; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; /* Find our reply descriptor for the command and process */ while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { smid = reply_desc->SMID; cmd_mpt = sc->mpt_cmd_list[smid - 1]; scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; status = scsi_io_req->RaidContext.status; extStatus = scsi_io_req->RaidContext.exStatus; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; lbinfo = &sc->load_balance_info[device_id]; if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; } /* Fall thru and complete IO */ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus); mrsas_cmd_done(sc, cmd_mpt); scsi_io_req->RaidContext.status = 0; scsi_io_req->RaidContext.exStatus = 0; mrsas_atomic_dec(&sc->fw_outstanding); break; case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); cmd_mpt->flags = 0; mrsas_release_mpt_cmd(cmd_mpt); break; } sc->last_reply_idx[MSIxIndex]++; if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) sc->last_reply_idx[MSIxIndex] = 0; desc->Words = ~((uint64_t)0x00); /* set it back to all * 0xFFFFFFFFs */ num_completed++; threshold_reply_count++; /* Get the next reply descriptor */ if (!sc->last_reply_idx[MSIxIndex]) { desc = sc->reply_desc_mem; desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); } else desc++; reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; desc_val.word = desc->Words; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) break; /* * Write to reply post index after completing threshold reply * count and still there are more replies in reply queue * pending to be completed. 
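/*
 * Aside on the threshold logic just below: the reply-post host index register
 * is written only every THRESHOLD_REPLY_COUNT completions (and once after the
 * loop), trading a little latency for far fewer MMIO writes on busy queues.
 * Runnable userspace sketch of the accounting:
 */
#include <stdio.h>

#define THRESHOLD 4

int
main(void)
{
	unsigned pending = 0, doorbell_writes = 0;

	for (unsigned completed = 1; completed <= 10; completed++) {
		if (++pending >= THRESHOLD) {
			doorbell_writes++;	/* mrsas_write_reg(...) in the driver */
			pending = 0;
		}
	}
	if (pending)
		doorbell_writes++;	/* final flush after the loop */
	printf("10 completions -> %u doorbell writes\n", doorbell_writes);
	return (0);
}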
*/ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { if (sc->msix_enable) { if ((sc->device_id == MRSAS_INVADER) || - (sc->device_id == MRSAS_FURY)) + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], ((MSIxIndex & 0x7) << 24) | sc->last_reply_idx[MSIxIndex]); else mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx[0]); threshold_reply_count = 0; } } /* No match, just return */ if (num_completed == 0) return (DONE); /* Clear response interrupt */ if (sc->msix_enable) { if ((sc->device_id == MRSAS_INVADER) || - (sc->device_id == MRSAS_FURY)) { + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], ((MSIxIndex & 0x7) << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx[0]); return (0); } /* * mrsas_map_mpt_cmd_status: Allocate DMAable memory. * input: Adapter instance soft state * * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. * It checks the command status and maps the appropriate CAM status for the * CCB. */ void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus) { struct mrsas_softc *sc = cmd->sc; u_int8_t *sense_data; switch (status) { case MFI_STAT_OK: cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_SCSI_DONE_WITH_ERROR: cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data; if (sense_data) { /* For now just copy 18 bytes back */ memcpy(sense_data, cmd->sense, 18); cmd->ccb_ptr->csio.sense_len = 18; cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: if (cmd->ccb_ptr->ccb_h.target_lun) cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; else cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; break; case MFI_STAT_CONFIG_SEQ_MISMATCH: cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; break; default: device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; cmd->ccb_ptr->csio.scsi_status = status; } return; } /* * mrsas_alloc_mem: Allocate DMAable memory * input: Adapter instance soft state * * This function creates the parent DMA tag and allocates DMAable memory. DMA * tag describes constraints of DMA mapping. Memory allocated is mapped into * Kernel virtual address. Callback argument is physical memory address. 
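/*
 * Aside on the reply-post writes above: on Invader-class (and, per this diff,
 * Intruder-class) chips each group of 8 MSI-X vectors has its own reply-post
 * host index register, and the vector's low 3 bits select the queue within
 * that register. Runnable sketch of the encoding; values are illustrative:
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned msix_index = 11, last_reply_idx = 42;
	unsigned reg_group = msix_index / 8;	/* which register to write */
	uint32_t value = ((msix_index & 0x7) << 24) | last_reply_idx;

	printf("write 0x%08x to reply-post register %u\n", value, reg_group);
	return (0);
}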
*/ static int mrsas_alloc_mem(struct mrsas_softc *sc) { u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, evt_detail_size, count; /* * Allocate parent DMA tag */ if (bus_dma_tag_create(NULL, /* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - MRSAS_MAX_IO_SIZE, /* maxsize */ - MRSAS_MAX_SGL, /* nsegments */ - MRSAS_MAX_IO_SIZE, /* maxsegsize */ + MAXPHYS, /* maxsize */ + sc->max_num_sge, /* nsegments */ + MAXPHYS, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mrsas_parent_tag /* tag */ )) { device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); return (ENOMEM); } /* * Allocate for version buffer */ verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, verbuf_size, 1, verbuf_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->verbuf_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); return (ENOMEM); } bzero(sc->verbuf_mem, verbuf_size); if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); return (ENOMEM); } /* * Allocate IO Request Frames */ io_req_size = sc->io_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, io_req_size, 1, io_req_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->io_request_tag)) { device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); return (ENOMEM); } bzero(sc->io_request_mem, io_req_size); if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, sc->io_request_mem, io_req_size, mrsas_addr_cb, &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); return (ENOMEM); } /* * Allocate Chain Frames */ chain_frame_size = sc->chain_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, chain_frame_size, 1, chain_frame_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->chain_frame_tag)) { device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); return (ENOMEM); } bzero(sc->chain_frame_mem, chain_frame_size); if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); return (ENOMEM); } count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; /* * Allocate Reply Descriptor Array */ reply_desc_size = sc->reply_alloc_sz * count; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, reply_desc_size, 1, reply_desc_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->reply_desc_tag)) { device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); return (ENOMEM); } /* * Allocate Sense Buffer Array. Keep in lower 4GB */ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; if (bus_dma_tag_create(sc->mrsas_parent_tag, 64, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sense_size, 1, sense_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sense_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, BUS_DMA_NOWAIT, &sc->sense_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); return (ENOMEM); } /* * Allocate for Event detail structure */ evt_detail_size = sizeof(struct mrsas_evt_detail); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, evt_detail_size, 1, evt_detail_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->evt_detail_tag)) { device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); return (ENOMEM); } bzero(sc->evt_detail_mem, evt_detail_size); if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); return (ENOMEM); } /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (280kB). */ if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - MRSAS_MAX_IO_SIZE, - MRSAS_MAX_SGL, - MRSAS_MAX_IO_SIZE, + MAXPHYS, + sc->max_num_sge, /* nsegments */ + MAXPHYS, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->io_lock, &sc->data_tag)) { device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); return (ENOMEM); } return (0); } /* * mrsas_addr_cb: Callback function of bus_dmamap_load() * input: callback argument, machine dependent type * that describes DMA segments, number of segments, error code * * This function is for the driver to receive mapping information resultant of * the bus_dmamap_load(). The information is actually not being used, but the * address is saved anyway. */ void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } /* * mrsas_setup_raidmap: Set up RAID map. 
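/*
 * Aside: every buffer above follows the same three-step busdma idiom, which
 * the driver inlines per buffer: create a tag describing the constraints,
 * allocate DMA-able memory against it, then load the map to learn the bus
 * address via mrsas_addr_cb(). A sketch of that idiom as a hypothetical
 * helper; error unwinding is left to the caller, as in mrsas_free_mem():
 */
static int
alloc_dma_buf(struct mrsas_softc *sc, bus_size_t size, int align,
    bus_dma_tag_t *tag, bus_dmamap_t *map, void **vaddr, bus_addr_t *paddr)
{
	if (bus_dma_tag_create(sc->mrsas_parent_tag, align, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW,
	    NULL, NULL, tag))
		return (ENOMEM);	/* tag describes the constraints */
	if (bus_dmamem_alloc(*tag, vaddr, BUS_DMA_NOWAIT, map))
		return (ENOMEM);	/* kernel virtual memory obtained */
	if (bus_dmamap_load(*tag, *map, *vaddr, size,
	    mrsas_addr_cb, paddr, BUS_DMA_NOWAIT))
		return (ENOMEM);	/* callback stored the bus address */
	return (0);
}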
* input: Adapter instance soft state * * Allocate DMA memory for the RAID maps and perform setup. */ static int
mrsas_setup_raidmap(struct mrsas_softc *sc) { int i; for (i = 0; i < 2; i++) { sc->ld_drv_map[i] =
(void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); /* Do error handling */ if (!sc->ld_drv_map[i]) {
device_printf(sc->mrsas_dev, "Could not allocate memory for local map\n"); if (i == 1)
free(sc->ld_drv_map[0], M_MRSAS); /* ABORT driver initialization */ goto ABORT; } } for (i = 0; i < 2; i++) {
if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
sc->max_map_sz, 1, sc->max_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->raidmap_tag[i])) {
device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); return (ENOMEM); } if
(bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); return (ENOMEM); }
bzero(sc->raidmap_mem[i], sc->max_map_sz); if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
sc->raidmap_mem[i], sc->max_map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); return (ENOMEM); } if (!sc->raidmap_mem[i]) {
device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); return (ENOMEM); } } if
(!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); return (0); ABORT: return (1); } +/** +
* megasas_setup_jbod_map - setup jbod map for FP seq_number. + * @sc: Adapter soft state + * + * Sets
sc->use_seqnum_jbod_fp to 1 if the JBOD map is usable, 0 otherwise. + */ +void
+megasas_setup_jbod_map(struct mrsas_softc *sc) +{ + int i; + uint32_t pd_seq_map_sz; + + pd_seq_map_sz =
sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); + + if
(!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { + sc->use_seqnum_jbod_fp = 0; + return; + } + if
(sc->jbodmap_mem[0]) + goto skip_alloc; + + for (i = 0; i < 2; i++) { + if
(bus_dma_tag_create(sc->mrsas_parent_tag, + 4, 0, + BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, + NULL,
NULL, + pd_seq_map_sz, + 1, + pd_seq_map_sz, + BUS_DMA_ALLOCNOW, + NULL, NULL, + &sc->jbodmap_tag[i])) {
+ device_printf(sc->mrsas_dev, + "Cannot allocate jbod map tag.\n"); + return; + } + if
(bus_dmamem_alloc(sc->jbodmap_tag[i], + (void **)&sc->jbodmap_mem[i], + BUS_DMA_NOWAIT,
&sc->jbodmap_dmamap[i])) { + device_printf(sc->mrsas_dev, + "Cannot allocate jbod map memory.\n"); + return;
+ } + bzero(sc->jbodmap_mem[i], pd_seq_map_sz); + + if (bus_dmamap_load(sc->jbodmap_tag[i],
sc->jbodmap_dmamap[i], + sc->jbodmap_mem[i], pd_seq_map_sz, + mrsas_addr_cb, &sc->jbodmap_phys_addr[i], +
BUS_DMA_NOWAIT)) { + device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); + return; + } + if
(!sc->jbodmap_mem[i]) { + device_printf(sc->mrsas_dev, + "Cannot allocate memory for jbod map.\n"); +
sc->use_seqnum_jbod_fp = 0; + return; + } + } + +skip_alloc: + if (!megasas_sync_pd_seq_num(sc, false) && +
!megasas_sync_pd_seq_num(sc, true)) + sc->use_seqnum_jbod_fp = 1; + else + sc->use_seqnum_jbod_fp = 0; + + if
(sc->use_seqnum_jbod_fp) + device_printf(sc->mrsas_dev, "Jbod map is supported\n"); +} + /* * mrsas_init_fw:
Initialize Firmware * input: Adapter soft state * * Calls transition_to_ready() to make sure Firmware is in
operational state and * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It * issues internal
commands to get the controller info after the IOC_INIT * command response is received by Firmware.
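 *
 * As a worked example of the scratch pad decoding below, the MSI-X
 * capability check isolates bit 26 of outbound_scratch_pad, since
 * 0x4000000 == (1 << 0x1a):
 *
 *	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
 *	    outbound_scratch_pad)) & 0x4000000) >> 0x1a;
 *
 * so msix_enable is 1 exactly when the FW advertises MSI-X support while
 * in the ready state.
 *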
Note: the code relating to * get_pdlist, get_ld_list and max_sectors is currently not being used; it * is
left here as a placeholder. */ static int mrsas_init_fw(struct mrsas_softc *sc) { int ret, loop, ocr = 0;
u_int32_t max_sectors_1; u_int32_t max_sectors_2; u_int32_t tmp_sectors; u_int32_t scratch_pad_2; int
msix_enable = 0; int fw_msix_count = 0; /* Make sure Firmware is ready */ ret =
mrsas_transition_to_ready(sc, ocr); if (ret != SUCCESS) { return (ret); } /* MSI-x index 0- reply post host
index register */ sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; /* Check if MSI-X is supported
while in ready state */ msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) &
0x4000000) >> 0x1a; if (msix_enable) { scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad_2)); /* Check max MSI-X vectors */ if (sc->device_id == MRSAS_TBOLT) { sc->msix_vectors
= (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = sc->msix_vectors; } else { /*
Invader/Fury supports 96 MSI-X vectors */ sc->msix_vectors = ((scratch_pad_2 &
MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; fw_msix_count =
sc->msix_vectors; for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { sc->msix_reg_offset[loop] =
MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10); } } /* Don't bother allocating more MSI-X vectors
than cpus */ sc->msix_vectors = min(sc->msix_vectors, mp_ncpus); /* Allocate MSI-x vectors */ if
(mrsas_allocate_msix(sc) == SUCCESS) sc->msix_enable = 1; else sc->msix_enable = 0;
device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vectors, " "Online CPU %d Current MSIX <%d>\n",
fw_msix_count, mp_ncpus, sc->msix_vectors); } if (mrsas_init_adapter(sc) != SUCCESS) {
device_printf(sc->mrsas_dev, "Adapter initialization failed.\n"); return (1); } /* Allocate internal
commands for pass-thru */ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "MFI cmd
allocation failed.\n"); return (1); } sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS,
M_NOWAIT); if (!sc->ctrl_info) { device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); return (1);
} /* * Get the controller info from FW, so that the MAX VD support * availability can be decided. */ if
(mrsas_get_ctrl_info(sc)) { device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); return (1); }
sc->secure_jbod_support = (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; if
(sc->secure_jbod_support) device_printf(sc->mrsas_dev, "FW supports SED\n"); + if (sc->use_seqnum_jbod_fp) +
device_printf(sc->mrsas_dev, "FW supports JBOD Map\n"); + if (mrsas_setup_raidmap(sc) != SUCCESS) { -
device_printf(sc->mrsas_dev, "Set up RAID map failed.\n"); - return (1); + device_printf(sc->mrsas_dev,
"Error: RAID map setup FAILED!!! " + "There seems to be some problem in the controller\n" + "Please contact
the SUPPORT TEAM if the problem persists\n"); } + megasas_setup_jbod_map(sc); + /* For pass-thru, get PD/LD
list and controller info */ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); -
mrsas_get_pd_list(sc); - + if (mrsas_get_pd_list(sc) != SUCCESS) { + device_printf(sc->mrsas_dev, "Get PD
list failed.\n"); + return (1); + } memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); - mrsas_get_ld_list(sc); - +
if (mrsas_get_ld_list(sc) != SUCCESS) { + device_printf(sc->mrsas_dev, "Get LD list failed.\n"); + return
(1); + } /* * Compute the max allowed sectors per IO: The controller info has * two limits on max sectors.
The driver should use the minimum of these * two. * * 1 << stripe_sz_ops.min = max sectors per strip * *
Note that older firmware (< FW ver 30) didn't report the information needed * to calculate max_sectors_1, so
that number always ended up as zero. */ tmp_sectors = 0; max_sectors_1 = (1 <<
sc->ctrl_info->stripe_sz_ops.min) * sc->ctrl_info->max_strips_per_io; max_sectors_2 =
sc->ctrl_info->max_request_size; tmp_sectors = min(max_sectors_1, max_sectors_2); sc->max_sectors_per_req =
sc->max_num_sge * MRSAS_PAGE_SIZE / 512; if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
sc->max_sectors_per_req = tmp_sectors; sc->disableOnlineCtrlReset =
sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; sc->UnevenSpanSupport =
sc->ctrl_info->adapterOperations2.supportUnevenSpans; if (sc->UnevenSpanSupport) {
device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", sc->UnevenSpanSupport); if
(MR_ValidateMapInfo(sc)) sc->fast_path_io = 1; else sc->fast_path_io = 0; } return (0); } /* *
mrsas_init_adapter: Initializes the adapter/controller * input: Adapter soft state * * Prepares for the
issuing of the IOC Init cmd to FW for initializing the * ROC/controller. The FW register is read to
determine the number of * commands that are supported. All memory allocations for IO are based on *
max_cmd. Appropriate calculations are performed in this function. */ int mrsas_init_adapter(struct
mrsas_softc *sc) { uint32_t status; - u_int32_t max_cmd; + u_int32_t max_cmd, scratch_pad_2; int ret; int i =
0; /* Read FW status register */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
outbound_scratch_pad)); /* Get operational params from status register */ sc->max_fw_cmds = status &
MRSAS_FWSTATE_MAXCMD_MASK; /* Decrement the max supported by 1, to correlate with FW */ sc->max_fw_cmds =
sc->max_fw_cmds - 1; max_cmd = sc->max_fw_cmds; /* Determine allocation size of command frames */
sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2; sc->request_alloc_sz =
sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd; sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION)
* (sc->reply_q_depth); sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1)); - sc->chain_frames_alloc_sz = 1024 * max_cmd; +
scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, + outbound_scratch_pad_2)); + /* + * If
scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the + * Firmware supports an extended IO chain
frame which is 4 times larger + * than on legacy Firmware: legacy Firmware frame size is (8 * 128) = 1K, +
* 1M IO Firmware frame size is (8 * 128 * 4) = 4K.
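+ *
+ * As a worked example with stand-in values (the real mask macros live in
+ * mrsas.h; the shift of 5 matches the code below): if scratch_pad_2 reads
+ * 0x4A0 and MEGASAS_MAX_CHAIN_SIZE_MASK is taken as 0x3E0, then
+ *
+ *	(0x4A0 & 0x3E0) >> 5 = 5 chain frames,
+ *
+ * giving 5 * 4K = 20K with the UNITS bit set, or 5 * 1K = 5K without it.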
+ */ + if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) + sc->max_chain_frame_sz = + ((scratch_pad_2 &
MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) + * MEGASAS_1MB_IO; + else + sc->max_chain_frame_sz = + ((scratch_pad_2 &
MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) + * MEGASAS_256K_IO; + + sc->chain_frames_alloc_sz =
sc->max_chain_frame_sz * max_cmd; sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; - sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME /
sizeof(MPI2_SGE_IO_UNION); + sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; + mrsas_dprint(sc, MRSAS_INFO, "Avago
Debug: MAX sge 0x%X MAX chain frame size 0x%X\n", + sc->max_num_sge, sc->max_chain_frame_sz); + /* Used for
pass thru MFI frame (DCMD) */ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(MPI2_SGE_IO_UNION)) / 16; int
count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0;
ret = mrsas_alloc_mem(sc); if (ret != SUCCESS) return (ret); ret = mrsas_alloc_mpt_cmds(sc); if (ret !=
SUCCESS) return (ret); ret = mrsas_ioc_init(sc); if (ret != SUCCESS) return (ret); return (0); } /* *
mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Allocates DMA
memory for the IOC Init cmd sent to FW to initialize the * ROC/controller. */ int mrsas_alloc_ioc_cmd(struct
mrsas_softc *sc) { int ioc_init_size; /* Allocate IOC INIT command */ ioc_init_size = 1024 +
sizeof(MPI2_IOC_INIT_REQUEST); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, ioc_init_size, 1, ioc_init_size, BUS_DMA_ALLOCNOW, NULL, NULL,
&sc->ioc_init_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); return (ENOMEM); } if
(bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); return (ENOMEM); }
bzero(sc->ioc_init_mem, ioc_init_size); if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); return (ENOMEM); } return (0); } /* *
mrsas_free_ioc_cmd: Frees memory of the IOC Init command * input: Adapter soft state * * Deallocates memory
of the IOC Init cmd. */ void mrsas_free_ioc_cmd(struct mrsas_softc *sc) { if (sc->ioc_init_phys_mem)
bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); if (sc->ioc_init_mem != NULL)
bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); if (sc->ioc_init_tag != NULL)
bus_dma_tag_destroy(sc->ioc_init_tag); } /* * mrsas_ioc_init: Sends IOC Init command to FW * input: Adapter
soft state * * Issues the IOC Init cmd to FW to initialize the ROC/controller.
*/ int mrsas_ioc_init(struct mrsas_softc *sc) { struct mrsas_init_frame *init_frame; pMpi2IOCInitRequest_t IOCInitMsg; MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME; bus_addr_t phys_addr; int i, retcode = 0; /* Allocate memory for the IOC INIT command */ if (mrsas_alloc_ioc_cmd(sc)) { device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); return (1); } IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; IOCInitMsg->MsgVersion = MPI2_VERSION; IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION; IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth; IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr; IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr; IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* driver support Extended MSIX */ if ((sc->device_id == MRSAS_INVADER) || - (sc->device_id == MRSAS_FURY)) { + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { init_frame->driver_operations. mfi_capabilities.support_additional_msix = 1; } if (sc->verbuf_mem) { snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", MRSAS_VERSION); init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; init_frame->driver_ver_hi = 0; } init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; + if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) + init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; init_frame->queue_info_new_phys_addr_lo = phys_addr; init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; req_desc.MFAIo.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); mrsas_disable_intr(sc); mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high); /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (init_frame->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (init_frame->cmd_status == 0xFF) DELAY(1000); else break; } } if (init_frame->cmd_status == 0) mrsas_dprint(sc, MRSAS_OCR, "IOC INIT response received from FW.\n"); else { if (init_frame->cmd_status == 0xFF) device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); else device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); retcode = 1; } mrsas_free_ioc_cmd(sc); return (retcode); } /* * mrsas_alloc_mpt_cmds: Allocates the command packets * input: Adapter instance soft state * * This function allocates the internal commands for IOs. Each command that is * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An * array is allocated with mrsas_mpt_cmd context. 
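 *
 * Each command takes fixed-size slices out of the flat DMA regions allocated
 * in mrsas_alloc_mem(); for command i the loop below computes
 *
 *	offset       = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
 *	chain_offset = sc->max_chain_frame_sz * i;
 *	sense_offset = MRSAS_SENSE_LEN * i;
 *
 * so each command maps back to its frames by simple arithmetic.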
The free commands are * maintained in a linked list (cmd pool). SMID value range is from 1 to * max_fw_cmds. */ int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) { int i, j; u_int32_t max_cmd, count; struct mrsas_mpt_cmd *cmd; pMpi2ReplyDescriptorsUnion_t reply_desc; u_int32_t offset, chain_offset, sense_offset; bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; u_int8_t *io_req_base, *chain_frame_base, *sense_base; max_cmd = sc->max_fw_cmds; sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); if (!sc->req_desc) { device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); return (ENOMEM); } memset(sc->req_desc, 0, sc->request_alloc_sz); /* * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. */ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list) { device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); return (ENOMEM); } memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd); for (i = 0; i < max_cmd; i++) { sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list[i]) { for (j = 0; j < i; j++) free(sc->mpt_cmd_list[j], M_MRSAS); free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; return (ENOMEM); } } io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; chain_frame_base = (u_int8_t *)sc->chain_frame_mem; chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; sense_base = (u_int8_t *)sc->sense_mem; sense_base_phys = (bus_addr_t)sc->sense_phys_addr; for (i = 0; i < max_cmd; i++) { cmd = sc->mpt_cmd_list[i]; offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; - chain_offset = 1024 * i; + chain_offset = sc->max_chain_frame_sz * i; sense_offset = MRSAS_SENSE_LEN * i; memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); cmd->index = i + 1; cmd->ccb_ptr = NULL; callout_init(&cmd->cm_callout, 0); cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; cmd->sc = sc; cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; cmd->sense = sense_base + sense_offset; cmd->sense_phys_addr = sense_base_phys + sense_offset; if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { return (FAIL); } TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); } /* Initialize reply descriptor array to 0xFFFFFFFF */ reply_desc = sc->reply_desc_mem; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } return (0); } /* * mrsas_fire_cmd: Sends command to FW * input: Adapter softstate * request descriptor address low * request descriptor address high * * This functions fires the command to Firmware by writing to the * inbound_low_queue_port and inbound_high_queue_port. 
*/ void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi) {
mtx_lock(&sc->pci_lock); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), req_desc_lo);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), req_desc_hi);
mtx_unlock(&sc->pci_lock); } /* * mrsas_transition_to_ready: Move FW to Ready state input: * Adapter
instance soft state * * During initialization, the FW can potentially be in any one of several * possible
states. If the FW is in the operational or waiting-for-handshake * state, the driver must take steps to
bring it to the ready state; * otherwise, it simply waits for the FW to reach the ready state. */ int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) { int i; u_int8_t max_wait; u_int32_t val,
fw_state; u_int32_t cur_state; u_int32_t abs_state, curr_abs_state; val = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = val & MFI_STATE_MASK; max_wait =
MRSAS_RESET_WAIT_TIME; if (fw_state != MFI_STATE_READY) device_printf(sc->mrsas_dev, "Waiting for FW to come
to ready state\n"); while (fw_state != MFI_STATE_READY) { abs_state = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad)); switch (fw_state) { case MFI_STATE_FAULT:
device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); if (ocr) { cur_state = MFI_STATE_FAULT; break; }
else return -ENODEV; case MFI_STATE_WAIT_HANDSHAKE: /* Set the CLR bit in inbound doorbell */
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: mrsas_write_reg(sc,
offsetof(mrsas_reg_set, doorbell), MFI_INIT_HOTPLUG); cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break;
case MFI_STATE_OPERATIONAL: /* * Bring it to READY state; assuming max wait 10 * secs */
mrsas_disable_intr(sc); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); for (i = 0;
i < max_wait * 1000; i++) { if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1) DELAY(1000); else
break; } cur_state = MFI_STATE_OPERATIONAL; break; case MFI_STATE_UNDEFINED: /* * This state should not last
for more than 2 * seconds */ cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: cur_state =
MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: cur_state = MFI_STATE_FW_INIT; break; case
MFI_STATE_FW_INIT_2: cur_state = MFI_STATE_FW_INIT_2; break; case MFI_STATE_DEVICE_SCAN: cur_state =
MFI_STATE_DEVICE_SCAN; break; case MFI_STATE_FLUSH_CACHE: cur_state = MFI_STATE_FLUSH_CACHE; break; default:
device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); return -ENODEV; } /* * The cur_state should
not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { fw_state =
(mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK); curr_abs_state =
mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); if (abs_state == curr_abs_state)
DELAY(1000); else break; } /* * Return error if fw_state hasn't changed after max_wait */ if (curr_abs_state
== abs_state) { device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " "in %d secs\n", fw_state,
max_wait); return -ENODEV; } } mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); return 0; } /* *
mrsas_get_mfi_cmd: Get a cmd from free command pool * input: Adapter soft state * * This function removes an
MFI command from the command list.
*/ struct mrsas_mfi_cmd * mrsas_get_mfi_cmd(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd = NULL;
mtx_lock(&sc->mfi_cmd_pool_lock); if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { cmd =
TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); }
mtx_unlock(&sc->mfi_cmd_pool_lock); return cmd; } /* * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
* input: Adapter Context. * * This function checks the FW status register and the do_timedout_reset flag. *
It performs OCR/Kill adapter if the FW is in fault state or an IO timeout * has triggered a reset. */ static
void mrsas_ocr_thread(void *arg) { struct mrsas_softc *sc; u_int32_t fw_status, fw_state; sc = (struct
mrsas_softc *)arg; mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); sc->ocr_thread_active = 1;
mtx_lock(&sc->sim_lock); for (;;) { /* Sleep for 1 second and check the queue status */
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); - if
(sc->remove_in_progress) { + if (sc->remove_in_progress || + sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
mrsas_dprint(sc, MRSAS_OCR, - "Exit due to shutdown from %s\n", __func__); + "Exit due to %s from %s\n", +
sc->remove_in_progress ? "Shutdown" : + "Hardware critical error", __func__); break; } fw_status =
mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = fw_status & MFI_STATE_MASK; if
(fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) { - device_printf(sc->mrsas_dev, "OCR started due to
%s!\n", + device_printf(sc->mrsas_dev, "%s started due to %s!\n", + sc->disableOnlineCtrlReset ? "Kill
Adapter" : "OCR", sc->do_timedout_reset ? "IO Timeout" : "FW fault detected"); mtx_lock_spin(&sc->ioctl_lock);
sc->reset_in_progress = 1; sc->reset_count++; mtx_unlock_spin(&sc->ioctl_lock); mrsas_xpt_freeze(sc); -
mrsas_reset_ctrl(sc); + mrsas_reset_ctrl(sc, sc->do_timedout_reset); mrsas_xpt_release(sc);
sc->reset_in_progress = 0; sc->do_timedout_reset = 0; } } mtx_unlock(&sc->sim_lock); sc->ocr_thread_active =
0; mrsas_kproc_exit(0); } /* * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. * input:
Adapter Context. * * This function clears the reply descriptors so that, post OCR, the driver * and FW do
not see stale history. */ void mrsas_reset_reply_desc(struct mrsas_softc *sc) { int i, count;
pMpi2ReplyDescriptorsUnion_t reply_desc; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i <
count; i++) sc->last_reply_idx[i] = 0; reply_desc = sc->reply_desc_mem; for (i = 0; i < sc->reply_q_depth;
i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } } /* * mrsas_reset_ctrl: Core function to
OCR/Kill adapter. * input: Adapter Context. * * This function will run from thread context so that it can
sleep. 1. Do not * handle OCR if the FW is in HW critical error. 2. Wait up to 180 seconds for * outstanding
commands to complete. 3. If #2 does not find any outstanding * command, the controller is in a working
state, so skip OCR. Otherwise, do * OCR/kill adapter based on the flag disableOnlineCtrlReset. 4. At the
start * of the OCR, return all SCSI commands that have a ccb_ptr back to the CAM * layer. 5. Post OCR,
re-fire management commands and move the controller to * the operational state.
*/ int -mrsas_reset_ctrl(struct mrsas_softc *sc) +mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) { int retval = SUCCESS, i, j, retry = 0; u_int32_t host_diag, abs_state, status_reg, reset_adapter; union ccb *ccb; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; - MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; + union mrsas_evt_class_locale class_locale; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { device_printf(sc->mrsas_dev, "mrsas: Hardware critical error, returning FAIL.\n"); return FAIL; } mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; mrsas_disable_intr(sc); - DELAY(1000 * 1000); + msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", + sc->mrsas_fw_fault_check_delay * hz); /* First try waiting for commands to complete */ - if (mrsas_wait_for_outstanding(sc)) { + if (mrsas_wait_for_outstanding(sc, reset_reason)) { mrsas_dprint(sc, MRSAS_OCR, "resetting adapter from %s.\n", __func__); /* Now return commands back to the CAM layer */ mtx_unlock(&sc->sim_lock); for (i = 0; i < sc->max_fw_cmds; i++) { mpt_cmd = sc->mpt_cmd_list[i]; if (mpt_cmd->ccb_ptr) { ccb = (union ccb *)(mpt_cmd->ccb_ptr); ccb->ccb_h.status = CAM_SCSI_BUS_RESET; mrsas_cmd_done(sc, mpt_cmd); mrsas_atomic_dec(&sc->fw_outstanding); } } mtx_lock(&sc->sim_lock); status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (sc->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; goto out; } /* Now try to reset the chip */ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_FLUSH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_1ST_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_2ND_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_3RD_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_4TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_5TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_6TH_KEY_VALUE); /* Check that the diag write enable (DRWE) bit is on */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 100) { mrsas_dprint(sc, MRSAS_OCR, "Host diag unlock failed!\n"); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) continue; /* Send chip reset command */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), host_diag | HOST_DIAG_RESET_ADAPTER); DELAY(3000 * 1000); /* Make sure reset adapter bit is cleared */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 1000) { mrsas_dprint(sc, MRSAS_OCR, "Diag reset adapter never cleared!\n"); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) continue; abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 
outbound_scratch_pad)) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ <
1000)) { DELAY(100 * 1000); abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) &
MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { mrsas_dprint(sc, MRSAS_OCR, "firmware state <
MFI_STATE_FW_INIT," " state = 0x%x\n", abs_state); continue; } /* Wait for FW to become ready */ if
(mrsas_transition_to_ready(sc, 1)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas: Failed to transition controller to
ready.\n"); continue; } mrsas_reset_reply_desc(sc); if (mrsas_ioc_init(sc)) { mrsas_dprint(sc, MRSAS_OCR,
"mrsas_ioc_init() failed!\n"); continue; } - /* Re-fire management commands */ for (j = 0; j <
sc->max_fw_cmds; j++) { mpt_cmd = sc->mpt_cmd_list[j]; if (mpt_cmd->sync_cmd_idx !=
(u_int32_t)MRSAS_ULONG_MAX) { mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; - if
(mfi_cmd->frame->dcmd.opcode == - MR_DCMD_LD_MAP_GET_INFO) { - mrsas_release_mfi_cmd(mfi_cmd); -
mrsas_release_mpt_cmd(mpt_cmd); - } else { - req_desc = mrsas_get_request_desc(sc, -
mfi_cmd->cmd_id.context.smid - 1); - mrsas_dprint(sc, MRSAS_OCR, - "Re-fire command DCMD opcode 0x%x index
%d\n ", - mfi_cmd->frame->dcmd.opcode, j); - if (!req_desc) - device_printf(sc->mrsas_dev, - "Cannot build
MPT cmd.\n"); - else - mrsas_fire_cmd(sc, req_desc->addr.u.low, - req_desc->addr.u.high); - } +
mrsas_release_mfi_cmd(mfi_cmd); + mrsas_release_mpt_cmd(mpt_cmd); } } + sc->aen_cmd = NULL; + /* Reset load
balance info */ memset(sc->load_balance_info, 0, sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); if
(mrsas_get_ctrl_info(sc)) { mrsas_kill_hba(sc); retval = FAIL; goto out; } if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc); + megasas_setup_jbod_map(sc); + + memset(sc->pd_list, 0, + MRSAS_MAX_PD *
sizeof(struct mrsas_pd_list)); + if (mrsas_get_pd_list(sc) != SUCCESS) { + device_printf(sc->mrsas_dev, "Get
PD list failed from OCR.\n" + "Will get the latest PD LIST after OCR on event.\n"); + } + memset(sc->ld_ids,
0xff, MRSAS_MAX_LD_IDS); + if (mrsas_get_ld_list(sc) != SUCCESS) { + device_printf(sc->mrsas_dev, "Get LD
list failed from OCR.\n" + "Will get the latest LD LIST after OCR on event.\n"); + }
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery =
MRSAS_HBA_OPERATIONAL; + /* Register AEN with FW for last sequence number */ + class_locale.members.reserved
= 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; +
+ if (mrsas_register_aen(sc, sc->last_seq_num, + class_locale.word)) { + device_printf(sc->mrsas_dev, +
"ERROR: AEN registration FAILED from OCR!!! " + "Further events from the controller cannot be notified. " +
"Either there is some problem in the controller " + "or the controller does not support AEN.\n" + "Please
contact the SUPPORT TEAM if the problem persists\n"); + } /* Adapter reset completed successfully */
device_printf(sc->mrsas_dev, "Reset successful\n"); retval = SUCCESS; goto out; } /* Reset failed, kill the
adapter */ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); mrsas_kill_hba(sc); retval =
FAIL; } else { mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL; } out: mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_dprint(sc, MRSAS_OCR, "Reset Exit with %d.\n", retval); return retval; } /* * mrsas_kill_hba: Kill HBA
when OCR is not supported * input: Adapter Context.
* * This function will kill HBA when OCR is not supported. */ void mrsas_kill_hba(struct mrsas_softc *sc) { sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; - pause("mrsas_kill_hba", 1000); + DELAY(1000 * 1000); mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_STOP_ADP); /* Flush */ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); mrsas_complete_outstanding_ioctls(sc); } /** * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba * input: Controller softc * * Returns void */ void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) { int i; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int32_t count, MSIxIndex; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->max_fw_cmds; i++) { cmd_mpt = sc->mpt_cmd_list[i]; if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_mptmfi_passthru(sc, cmd_mfi, cmd_mpt->io_request->RaidContext.status); } } } } /* * mrsas_wait_for_outstanding: Wait for outstanding commands * input: Adapter Context. * * This function will wait for 180 seconds for outstanding commands to be * completed. */ int -mrsas_wait_for_outstanding(struct mrsas_softc *sc) +mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) { int i, outstanding, retval = 0; u_int32_t fw_state, count, MSIxIndex; for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_OCR, "Driver remove or shutdown called.\n"); retval = 1; goto out; } /* Check if firmware is in fault state */ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { mrsas_dprint(sc, MRSAS_OCR, "Found FW in FAULT state, will reset adapter.\n"); retval = 1; goto out; } + if (check_reason == MFI_DCMD_TIMEOUT_OCR) { + mrsas_dprint(sc, MRSAS_OCR, + "DCMD IO TIMEOUT detected, will reset adapter.\n"); + retval = 1; + goto out; + } outstanding = mrsas_atomic_read(&sc->fw_outstanding); if (!outstanding) goto out; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " "commands to complete\n", i, outstanding); count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); } DELAY(1000 * 1000); } if (mrsas_atomic_read(&sc->fw_outstanding)) { mrsas_dprint(sc, MRSAS_OCR, " pending commands remain after waiting," " will reset adapter.\n"); retval = 1; } out: return retval; } /* * mrsas_release_mfi_cmd: Return a cmd to free command pool * input: Command packet for return to free cmd pool * * This function returns the MFI command to the command list. */ void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd) { struct mrsas_softc *sc = cmd->sc; mtx_lock(&sc->mfi_cmd_pool_lock); cmd->ccb_ptr = NULL; cmd->cmd_id.frame_count = 0; TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next); mtx_unlock(&sc->mfi_cmd_pool_lock); return; } /* * mrsas_get_controller_info: Returns FW's controller structure * input: Adapter soft state * Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. This * information is mainly used to find out the maximum IO transfer per command * supported by the FW. 
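 *
 * Note the common shape the DCMD timeout handling below takes: on ETIMEDOUT
 * the MFI command is deliberately not released (the FW may still own it) and
 * a DCMD-timeout OCR is requested instead, i.e.:
 *
 *	retcode = mrsas_issue_polled(sc, cmd);
 *	if (retcode == ETIMEDOUT)
 *		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
 *	else
 *		mrsas_release_mfi_cmd(cmd);
 *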
*/ static int mrsas_get_ctrl_info(struct mrsas_softc *sc) { int retcode = 0; + u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); dcmd->opcode = MR_DCMD_CTRL_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); - if (!mrsas_issue_polled(sc, cmd)) - memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); + retcode = mrsas_issue_polled(sc, cmd); + if (retcode == ETIMEDOUT) + goto dcmd_timeout; else - retcode = 1; + memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); + do_ocr = 0; mrsas_update_ext_vd_details(sc); + sc->use_seqnum_jbod_fp = + sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; + +dcmd_timeout: mrsas_free_ctlr_info_cmd(sc); - mrsas_release_mfi_cmd(cmd); + + if (do_ocr) + sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); + return (retcode); } /* * mrsas_update_ext_vd_details : Update details w.r.t Extended VD * input: * sc - Controller's softc */ static void mrsas_update_ext_vd_details(struct mrsas_softc *sc) { sc->max256vdSupport = sc->ctrl_info->adapterOperations3.supportMaxExtLDs; /* Below is additional check to address future FW enhancement */ if (sc->ctrl_info->max_lds > 64) sc->max256vdSupport = 1; sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; if (sc->max256vdSupport) { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } else { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count - 1)); sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); if (sc->max256vdSupport) sc->current_map_sz = sc->new_map_sz; else sc->current_map_sz = sc->old_map_sz; } /* * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command * input: Adapter soft state * * Allocates DMAable memory for the controller info internal command. 
*/ int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) { int ctlr_info_size; /* Allocate get controller
info command */ ctlr_info_size = sizeof(struct mrsas_ctrl_info); if
(bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
ctlr_info_size, 1, ctlr_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ctlr_info_tag)) {
device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); return (ENOMEM); } if
(bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); return (ENOMEM); } if
(bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
&sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd
mem\n"); return (ENOMEM); } memset(sc->ctlr_info_mem, 0, ctlr_info_size); return (0); } /* *
mrsas_free_ctlr_info_cmd: Free memory for controller info command * input: Adapter soft state * *
Deallocates memory of the get controller info cmd. */ void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{ if (sc->ctlr_info_phys_addr) bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); if
(sc->ctlr_info_mem != NULL) bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); if
(sc->ctlr_info_tag != NULL) bus_dma_tag_destroy(sc->ctlr_info_tag); } /* * mrsas_issue_polled: Issues a
polling command * inputs: Adapter soft state * Command packet to be issued * * This function is used to post
internal commands to Firmware. MFI requires * the cmd_status to be set to 0xFF before posting. The maximum
wait time of * the poll response timer is 180 seconds. */ int mrsas_issue_polled(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd) { struct mrsas_header *frame_hdr = &cmd->frame->hdr; u_int8_t max_wait =
MRSAS_INTERNAL_CMD_WAIT_TIME; - int i, retcode = 0; + int i, retcode = SUCCESS; frame_hdr->cmd_status = 0xFF;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* Issue the frame using inbound queue port */ if
(mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return
(1); } /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could
block CPU, the time interval for * this is only 1 millisecond. */ if (frame_hdr->cmd_status == 0xFF) { for
(i = 0; i < (max_wait * 1000); i++) { if (frame_hdr->cmd_status == 0xFF) DELAY(1000); else break; } } - if
(frame_hdr->cmd_status != 0) { - if (frame_hdr->cmd_status == 0xFF) - device_printf(sc->mrsas_dev, "DCMD
timed out after %d seconds.\n", max_wait); - else - device_printf(sc->mrsas_dev, "DCMD failed, status =
0x%x\n", frame_hdr->cmd_status); - retcode = 1; + if (frame_hdr->cmd_status == 0xFF) { +
device_printf(sc->mrsas_dev, "DCMD timed out after %d " + "seconds from %s\n", max_wait, __func__); +
device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", + cmd->frame->dcmd.opcode); + retcode = ETIMEDOUT; }
return (retcode); } /* * mrsas_issue_dcmd: Issues an MFI Pass thru cmd * input: Adapter soft state mfi cmd
pointer * * This function is called by mrsas_issue_blocked_cmd() and * mrsas_issue_polled(), to build the
MPT command and then fire the command * to Firmware.
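 *
 * The polled path above reduces to a status-byte handshake: the driver
 * writes 0xFF into cmd_status, fires the frame, and the FW overwrites that
 * byte on completion, so the wait boils down to:
 *
 *	for (i = 0; i < max_wait * 1000; i++) {
 *		if (frame_hdr->cmd_status == 0xFF)
 *			DELAY(1000);
 *		else
 *			break;
 *	}
 *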
*/ int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = mrsas_build_mpt_cmd(sc, cmd); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); return (1); } mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return (0); } /* * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd * input: Adapter soft state mfi cmd to build * * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru * command and prepares the MPT command to send to Firmware. */ MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; u_int16_t index; if (mrsas_build_mptmfi_passthru(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); return NULL; } index = cmd->cmd_id.context.smid; req_desc = mrsas_get_request_desc(sc, index - 1); if (!req_desc) return NULL; req_desc->addr.Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->SCSIIO.SMID = index; return (req_desc); } /* * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command * input: Adapter soft state mfi cmd pointer * * The MPT command and the io_request are setup as a passthru command. The SGE * chain address is set to frame_phys_addr of the MFI command. */ u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) { MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; struct mrsas_mpt_cmd *mpt_cmd; struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; mpt_cmd = mrsas_get_mpt_cmd(sc); if (!mpt_cmd) return (1); /* Save the smid. To be used for returning the cmd */ mfi_cmd->cmd_id.context.smid = mpt_cmd->index; mpt_cmd->sync_cmd_idx = mfi_cmd->index; /* * For cmds where the flag is set, store the flag and check on * completion. For cmds with this flag, don't call * mrsas_complete_cmd. */ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; io_req = mpt_cmd->io_request; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; sgl_ptr_end += sc->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = sc->chain_offset_mfi_pthru; mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; - mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME; + mpi25_ieee_chain->Length = sc->max_chain_frame_sz; return (0); } /* * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds * input: Adapter soft state Command to be issued * * This function waits on an event for the command to be returned from the ISR. * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing * internal and ioctl commands. 
*/ int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; unsigned long total_time = 0; - int retcode = 0; + int retcode = SUCCESS; /* Initialize cmd_status */ - cmd->cmd_status = ECONNREFUSED; + cmd->cmd_status = 0xFF; /* Build MPT-MFI command for issue to FW */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } sc->chan = (void *)&cmd; while (1) { - if (cmd->cmd_status == ECONNREFUSED) { + if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; - total_time++; - if (total_time >= max_wait) { - device_printf(sc->mrsas_dev, - "Internal command timed out after %d seconds.\n", max_wait); - retcode = 1; - break; + + if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL + * command */ + total_time++; + if (total_time >= max_wait) { + device_printf(sc->mrsas_dev, + "Internal command timed out after %d seconds.\n", max_wait); + retcode = 1; + break; + } } } + + if (cmd->cmd_status == 0xFF) { + device_printf(sc->mrsas_dev, "DCMD timed out after %d " + "seconds from %s\n", max_wait, __func__); + device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", + cmd->frame->dcmd.opcode); + retcode = ETIMEDOUT; + } return (retcode); } /* * mrsas_complete_mptmfi_passthru: Completes a command * input: @sc: Adapter soft state * @cmd: Command to be completed * @status: cmd completion status * * This function is called from mrsas_complete_cmd() after an interrupt is * received from Firmware, and io_request->Function is * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. */ void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status) { struct mrsas_header *hdr = &cmd->frame->hdr; u_int8_t cmd_status = cmd->frame->hdr.cmd_status; /* Reset the retry counter for future re-tries */ cmd->retry_for_fw_reset = 0; if (cmd->ccb_ptr) cmd->ccb_ptr = NULL; switch (hdr->cmd) { case MFI_CMD_INVALID: device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: /* * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been * issued either through an IO path or an IOCTL path. If it * was via IOCTL, we will send it to internal completion. 
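 *
 * (The wake-up side of that handshake lives in mrsas_wakeup(), paired with
 * the tsleep() in mrsas_issue_blocked_cmd(), both keyed on the same channel
 * address:
 *
 *	cmd->cmd_status = cmd->frame->io.cmd_status;
 *	wakeup_one((void *)&sc->chan);
 *
 * while the issuing thread sleeps in one-second ticks via
 *	tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
 * until cmd_status changes from 0xFF.)
 *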
*/ if (cmd->sync_cmd) { cmd->sync_cmd = 0; mrsas_wakeup(sc, cmd); break; } case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: /* Check for LD map update */ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[1] == 1)) { sc->fast_path_io = 0; mtx_lock(&sc->raidmap_lock); + sc->map_update_cmd = NULL; if (cmd_status != 0) { if (cmd_status != MFI_STAT_NOT_FOUND) device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); else { mrsas_release_mfi_cmd(cmd); mtx_unlock(&sc->raidmap_lock); break; } } else sc->map_id++; mrsas_release_mfi_cmd(cmd); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 0; else sc->fast_path_io = 1; mrsas_sync_map_info(sc); mtx_unlock(&sc->raidmap_lock); break; } if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { sc->mrsas_aen_triggered = 0; } + /* FW has an updated PD sequence */ + if ((cmd->frame->dcmd.opcode == + MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && + (cmd->frame->dcmd.mbox.b[0] == 1)) { + + mtx_lock(&sc->raidmap_lock); + sc->jbod_seq_cmd = NULL; + mrsas_release_mfi_cmd(cmd); + + if (cmd_status == MFI_STAT_OK) { + sc->pd_seq_map_id++; + /* Re-register a pd sync seq num cmd */ + if (megasas_sync_pd_seq_num(sc, true)) + sc->use_seqnum_jbod_fp = 0; + } else { + sc->use_seqnum_jbod_fp = 0; + device_printf(sc->mrsas_dev, + "Jbod map sync failed, status=%x\n", cmd_status); + } + mtx_unlock(&sc->raidmap_lock); + break; + } /* See if got an event notification */ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) mrsas_complete_aen(sc, cmd); else mrsas_wakeup(sc, cmd); break; case MFI_CMD_ABORT: /* Command issued to abort another cmd return */ mrsas_complete_abort(sc, cmd); break; default: device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); break; } } /* * mrsas_wakeup: Completes an internal command * input: Adapter soft state * Command to be completed * * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait * timer is started. This function is called from * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up * from the command wait. */ void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { cmd->cmd_status = cmd->frame->io.cmd_status; - if (cmd->cmd_status == ECONNREFUSED) + if (cmd->cmd_status == 0xFF) cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); return; } /* * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: * Adapter soft state Shutdown/Hibernate * * This function issues a DCMD internal command to Firmware to initiate shutdown * of the controller. 
*/ static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); return; } if (sc->aen_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); - if (sc->map_update_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); + if (sc->jbod_seq_cmd) + mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } /* * mrsas_flush_cache: Requests FW to flush all its caches input: * Adapter soft state * * This function is issues a DCMD internal command to Firmware to initiate * flushing of all caches. */ static void mrsas_flush_cache(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); return; } dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } +int +megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) +{ + int retcode = 0; + u_int8_t do_ocr = 1; + struct mrsas_mfi_cmd *cmd; + struct mrsas_dcmd_frame *dcmd; + uint32_t pd_seq_map_sz; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + bus_addr_t pd_seq_h; + + pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + + (sizeof(struct MR_PD_CFG_SEQ) * + (MAX_PHYSICAL_DEVICES - 1)); + + cmd = mrsas_get_mfi_cmd(sc); + if (!cmd) { + device_printf(sc->mrsas_dev, + "Cannot alloc for ld map info cmd.\n"); + return 1; + } + dcmd = &cmd->frame->dcmd; + + pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; + pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; + if (!pd_sync) { + device_printf(sc->mrsas_dev, + "Failed to alloc mem for jbod map info.\n"); + mrsas_release_mfi_cmd(cmd); + return (ENOMEM); + } + memset(pd_sync, 0, pd_seq_map_sz); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = (pd_seq_map_sz); + dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = (pd_seq_h); + dcmd->sgl.sge32[0].length = (pd_seq_map_sz); + + if (pend) { + dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; + dcmd->flags = (MFI_FRAME_DIR_WRITE); + sc->jbod_seq_cmd = cmd; + if (mrsas_issue_dcmd(sc, cmd)) { + device_printf(sc->mrsas_dev, + "Fail to send sync map info command.\n"); + return 1; + } else + return 0; + } else + dcmd->flags = MFI_FRAME_DIR_READ; + + retcode = mrsas_issue_polled(sc, cmd); + if (retcode == ETIMEDOUT) + goto dcmd_timeout; + + if (pd_sync->count > MAX_PHYSICAL_DEVICES) { + device_printf(sc->mrsas_dev, + "driver supports max %d 
JBOD, but FW reports %d\n", + MAX_PHYSICAL_DEVICES, pd_sync->count); + retcode = -EINVAL; + } + if
(!retcode) + sc->pd_seq_map_id++; + do_ocr = 0; + +dcmd_timeout: + if (do_ocr) + sc->do_timedout_reset =
MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); + + return (retcode); +} + /* *
mrsas_get_map_info: Load and validate RAID map input: * Adapter instance soft state * * This function calls
mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load * and validate the RAID map. It returns 0 if
successful, 1 otherwise. */ static int mrsas_get_map_info(struct mrsas_softc *sc) { uint8_t retcode = 0;
sc->fast_path_io = 0; if (!mrsas_get_ld_map_info(sc)) { retcode = MR_ValidateMapInfo(sc); if (retcode == 0)
{ sc->fast_path_io = 1; return 0; } } return 1; } /* * mrsas_get_ld_map_info: Get FW's ld_map structure
input: * Adapter instance soft state * * Issues an internal command (DCMD) to get the FW's LD map *
structure. */ static int mrsas_get_ld_map_info(struct mrsas_softc *sc) { int retcode = 0; struct
mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; void *map; bus_addr_t map_phys_addr = 0; cmd =
mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
return 1; } dcmd = &cmd->frame->dcmd; map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; map_phys_addr =
sc->raidmap_phys_addr[(sc->map_id & 1)]; if (!map) { device_printf(sc->mrsas_dev, "Failed to alloc mem for
ld map info.\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } memset(map, 0, sc->max_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count =
1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len =
sc->current_map_sz; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
dcmd->sgl.sge32[0].length = sc->current_map_sz; - if (!mrsas_issue_polled(sc, cmd)) - retcode = 0; - else {
- device_printf(sc->mrsas_dev, - "Fail to send get LD map info cmd.\n"); - retcode = 1; - } -
mrsas_release_mfi_cmd(cmd); + retcode = mrsas_issue_polled(sc, cmd); + if (retcode == ETIMEDOUT) +
sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); return (retcode); } /* *
mrsas_sync_map_info: Sync the driver's LD map with FW input: * Adapter instance soft state * * Issues an
internal command (DCMD) to write the driver's LD target map * back to Firmware.
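 *
 * The two raidmap buffers ping-pong on the map generation: generation N is
 * loaded into raidmap_mem[N & 1], so the sync below targets the other
 * buffer, raidmap_mem[(N - 1) & 1]. A small worked sketch of the indexing:
 *
 *	map_id = 3: current buffer = 3 & 1 = 1, sync target = (3 - 1) & 1 = 0
 *	map_id = 4: current buffer = 4 & 1 = 0, sync target = (4 - 1) & 1 = 1
 *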
*/ static int mrsas_sync_map_info(struct mrsas_softc *sc) { int retcode = 0, i; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t size_sync_info, num_lds; MR_LD_TARGET_SYNC *target_map = NULL; MR_DRV_RAID_MAP_ALL *map; MR_LD_RAID *raid; MR_LD_TARGET_SYNC *ld_sync; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); return 1; } map = sc->ld_drv_map[sc->map_id & 1]; num_lds = map->raidMap.ldCount; dcmd = &cmd->frame->dcmd; size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; memset(target_map, 0, sc->max_map_sz); map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; ld_sync = (MR_LD_TARGET_SYNC *) target_map; for (i = 0; i < num_lds; i++, ld_sync++) { raid = MR_LdRaidGet(i, map); ld_sync->targetId = MR_GetLDTgtId(i, map); ld_sync->seqNum = raid->seqNum; } dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_WRITE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sc->current_map_sz; dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = map_phys_addr; dcmd->sgl.sge32[0].length = sc->current_map_sz; sc->map_update_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); return (1); } return (retcode); } /* * mrsas_get_pd_list: Returns FW's PD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD list * structure. This information is mainly used to find out the system PDs * supported by Firmware.
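 *
 * A hedged usage sketch: after a successful call the cached copy in
 * sc->pd_list can be walked directly, e.g.
 *
 *	for (i = 0; i < MRSAS_MAX_PD; i++)
 *		if (sc->pd_list[i].driveState == MR_PD_STATE_SYSTEM)
 *			device_printf(sc->mrsas_dev, "system PD, tid %d\n",
 *			    sc->pd_list[i].tid);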
*/ static int mrsas_get_pd_list(struct mrsas_softc *sc) { int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; + u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_PD_LIST *pd_list_mem; struct MR_PD_ADDRESS *pd_addr; bus_addr_t pd_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); mrsas_release_mfi_cmd(cmd); + mrsas_free_tmp_dcmd(tcmd); + free(tcmd, M_MRSAS); return (ENOMEM); } else { pd_list_mem = tcmd->tmp_dcmd_mem; pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); dcmd->opcode = MR_DCMD_PD_LIST_QUERY; dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); - if (!mrsas_issue_polled(sc, cmd)) - retcode = 0; - else - retcode = 1; + retcode = mrsas_issue_polled(sc, cmd); + if (retcode == ETIMEDOUT) + goto dcmd_timeout; /* Get the instance PD list */ pd_count = MRSAS_MAX_PD; pd_addr = pd_list_mem->addr; - if (retcode == 0 && pd_list_mem->count < pd_count) { + if (pd_list_mem->count < pd_count) { memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType; sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM; pd_addr++; } + /* + * Use a mutex/spinlock if the pd_list component size increases + * beyond 32 bits. + */ + memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); + do_ocr = 0; } - /* - * Use mutext/spinlock if pd_list component size increase more than - * 32 bit. - */ - memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); +dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); - mrsas_release_mfi_cmd(cmd); free(tcmd, M_MRSAS); + + if (do_ocr) + sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); + return (retcode); } /* * mrsas_get_ld_list: Returns FW's LD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller LD list * structure. This information is mainly used to find out the LDs supported by * the FW.
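 *
 * A hedged usage sketch: sc->ld_ids is reset to 0xff and each present LD
 * stores its own target id in its slot, so existence can be tested as
 *
 *	if (sc->ld_ids[tgt_id] != 0xff)
 *		... an LD with this target id is configured ...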
*/ static int mrsas_get_ld_list(struct mrsas_softc *sc) { int ld_list_size, retcode = 0, ld_index = 0, ids = 0; + u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_LD_LIST *ld_list_mem; bus_addr_t ld_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); ld_list_size = sizeof(struct MR_LD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); mrsas_release_mfi_cmd(cmd); + mrsas_free_tmp_dcmd(tcmd); + free(tcmd, M_MRSAS); return (ENOMEM); } else { ld_list_mem = tcmd->tmp_dcmd_mem; ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (sc->max256vdSupport) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); dcmd->opcode = MR_DCMD_LD_GET_LIST; dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); dcmd->pad_0 = 0; - if (!mrsas_issue_polled(sc, cmd)) - retcode = 0; - else - retcode = 1; + retcode = mrsas_issue_polled(sc, cmd); + if (retcode == ETIMEDOUT) + goto dcmd_timeout; #if VD_EXT_DEBUG printf("Number of LDs %d\n", ld_list_mem->ldCount); #endif /* Get the instance LD list */ - if ((retcode == 0) && - (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) { + if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { sc->CurLdCount = ld_list_mem->ldCount; memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { if (ld_list_mem->ldList[ld_index].state != 0) { ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; } } + do_ocr = 0; } +dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); - mrsas_release_mfi_cmd(cmd); free(tcmd, M_MRSAS); + + if (do_ocr) + sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; + else + mrsas_release_mfi_cmd(cmd); + return (retcode); } /* * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: * Adapter soft state Temp command Size of allocation * * Allocates DMAable memory for a temporary internal command. The allocated * memory is initialized to all zeros upon successful loading of the dma * mapped memory.
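 *
 * A hedged sketch of the expected lifecycle, mirroring the callers above
 * (mrsas_get_pd_list()/mrsas_get_ld_list()):
 *
 *	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
 *	if (mrsas_alloc_tmp_dcmd(sc, tcmd, size) != SUCCESS)
 *		... bail out ...
 *	dcmd->sgl.sge32[0].phys_addr = tcmd->tmp_dcmd_phys_addr;
 *	... issue the DCMD ...
 *	mrsas_free_tmp_dcmd(tcmd);
 *	free(tcmd, M_MRSAS);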
*/ int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &tcmd->tmp_dcmd_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); return (ENOMEM); } memset(tcmd->tmp_dcmd_mem, 0, size); return (0); } /* * mrsas_free_tmp_dcmd: Free memory for temporary command input: * temporary dcmd pointer * * Deallocates memory of the temporary command for use in the construction of * the internal DCMD. */ void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) { if (tmp->tmp_dcmd_phys_addr) bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_mem != NULL) bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_tag != NULL) bus_dma_tag_destroy(tmp->tmp_dcmd_tag); } /* * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: * Adapter soft state Previously issued cmd to be aborted * * This function is used to abort previously issued commands, such as AEN and * RAID map sync map commands. The abort command is sent as a DCMD internal * command and subsequently the driver will wait for a return status. The * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. */ static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort) { struct mrsas_mfi_cmd *cmd; struct mrsas_abort_frame *abort_fr; u_int8_t retcode = 0; unsigned long total_time = 0; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); return (1); } abort_fr = &cmd->frame->abort; /* Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); return (1); } /* Wait for this cmd to complete */ sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); retcode = 1; break; } } cmd->sync_cmd = 0; mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_complete_abort: Completes aborting a command input: * Adapter soft state Cmd that was issued to abort another cmd * * The mrsas_issue_blocked_abort_cmd() function waits for the command status to * change after sending the command. This function is called from * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
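 *
 * A hedged sketch of the handshake: the issuing side parks on sc->chan
 * while cmd_status still holds the 0xFF poll sentinel,
 *
 *	while (cmd->cmd_status == 0xFF)
 *		tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
 *
 * and this completion path clears cmd_status and calls wakeup_one() on
 * the same channel.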
*/ void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); } return; } /* * mrsas_aen_handler: AEN processing callback function from thread context * input: Adapter soft state * * Asynchronous event handler */ void mrsas_aen_handler(struct mrsas_softc *sc) { union mrsas_evt_class_locale class_locale; int doscan = 0; u_int32_t seq_num; - int error; + int error, fail_aen = 0; if (sc == NULL) { printf("invalid instance!\n"); return; } if (sc->evt_detail_mem) { switch (sc->evt_detail_mem->code) { case MR_EVT_PD_INSERTED: - mrsas_get_pd_list(sc); - mrsas_bus_scan_sim(sc, sc->sim_1); + fail_aen = mrsas_get_pd_list(sc); + if (!fail_aen) + mrsas_bus_scan_sim(sc, sc->sim_1); + else + goto skip_register_aen; doscan = 0; break; case MR_EVT_PD_REMOVED: - mrsas_get_pd_list(sc); - mrsas_bus_scan_sim(sc, sc->sim_1); + fail_aen = mrsas_get_pd_list(sc); + if (!fail_aen) + mrsas_bus_scan_sim(sc, sc->sim_1); + else + goto skip_register_aen; doscan = 0; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: mrsas_bus_scan_sim(sc, sc->sim_0); doscan = 0; break; case MR_EVT_LD_CREATED: - mrsas_get_ld_list(sc); - mrsas_bus_scan_sim(sc, sc->sim_0); + fail_aen = mrsas_get_ld_list(sc); + if (!fail_aen) + mrsas_bus_scan_sim(sc, sc->sim_0); + else + goto skip_register_aen; doscan = 0; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: doscan = 1; break; default: doscan = 0; break; } } else { device_printf(sc->mrsas_dev, "invalid evt_detail\n"); return; } if (doscan) { - mrsas_get_pd_list(sc); - mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); - mrsas_bus_scan_sim(sc, sc->sim_1); - mrsas_get_ld_list(sc); - mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); - mrsas_bus_scan_sim(sc, sc->sim_0); + fail_aen = mrsas_get_pd_list(sc); + if (!fail_aen) { + mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); + mrsas_bus_scan_sim(sc, sc->sim_1); + } else + goto skip_register_aen; + + fail_aen = mrsas_get_ld_list(sc); + if (!fail_aen) { + mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); + mrsas_bus_scan_sim(sc, sc->sim_0); + } else + goto skip_register_aen; } seq_num = sc->evt_detail_mem->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (sc->aen_cmd != NULL) return; mtx_lock(&sc->aen_lock); error = mrsas_register_aen(sc, seq_num, class_locale.word); mtx_unlock(&sc->aen_lock); if (error) device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); + +skip_register_aen: + return; } /* * mrsas_complete_aen: Completes AEN command * input: Adapter soft state * Cmd that was issued to abort another cmd * * This function will be called from ISR and will continue event processing from * thread context by enqueuing task in ev_tq (callback function * "mrsas_aen_handler"). 
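 *
 * A hedged sketch of the deferral: the ISR-side completion only queues
 * work,
 *
 *	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
 *
 * and the taskqueue later runs mrsas_aen_handler() in thread context,
 * where blocking DCMDs and bus rescans are safe.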
*/ void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { /* * Don't signal app if it is just an aborted previously registered * aen */ if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { sc->mrsas_aen_triggered = 1; mtx_lock(&sc->aen_lock); if (sc->mrsas_poll_waiting) { sc->mrsas_poll_waiting = 0; selwakeup(&sc->mrsas_select); } mtx_unlock(&sc->aen_lock); } else cmd->abort_aen = 0; sc->aen_cmd = NULL; mrsas_release_mfi_cmd(cmd); if (!sc->remove_in_progress) taskqueue_enqueue(sc->ev_tq, &sc->ev_task); return; } static device_method_t mrsas_methods[] = { DEVMETHOD(device_probe, mrsas_probe), DEVMETHOD(device_attach, mrsas_attach), DEVMETHOD(device_detach, mrsas_detach), DEVMETHOD(device_suspend, mrsas_suspend), DEVMETHOD(device_resume, mrsas_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), {0, 0} }; static driver_t mrsas_driver = { "mrsas", mrsas_methods, sizeof(struct mrsas_softc) }; static devclass_t mrsas_devclass; DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); MODULE_DEPEND(mrsas, cam, 1, 1, 1); Index: stable/10/sys/dev/mrsas/mrsas.h =================================================================== --- stable/10/sys/dev/mrsas/mrsas.h (revision 300735) +++ stable/10/sys/dev/mrsas/mrsas.h (revision 300736) @@ -1,2794 +1,2843 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Authors: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Authors: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
* * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #ifndef MRSAS_H #define MRSAS_H #include /* defines used in kernel.h */ #include #include #include #include #include /* types used in module initialization */ #include /* cdevsw struct */ #include /* uio struct */ #include #include /* structs, prototypes for pci bus * stuff */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* For pci_get macros! */ #include #define IOCTL_SEMA_DESCRIPTION "mrsas semaphore for MFI pool" /* * Device IDs and PCI */ #define MRSAS_TBOLT 0x005b #define MRSAS_INVADER 0x005d #define MRSAS_FURY 0x005f +#define MRSAS_INTRUDER 0x00ce +#define MRSAS_INTRUDER_24 0x00cf #define MRSAS_PCI_BAR0 0x10 #define MRSAS_PCI_BAR1 0x14 #define MRSAS_PCI_BAR2 0x1C /* * Firmware State Defines */ #define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF #define MRSAS_FWSTATE_SGE_MASK 0x00FF0000 #define MRSAS_FW_STATE_CHNG_INTERRUPT 1 /* * Message Frame Defines */ #define MRSAS_SENSE_LEN 96 #define MRSAS_FUSION_MAX_RESET_TRIES 3 /* * Miscellaneous Defines */ #define BYTE_ALIGNMENT 1 #define MRSAS_MAX_NAME_LENGTH 32 -#define MRSAS_VERSION "06.707.05.00-fbsd" +#define MRSAS_VERSION "06.709.07.00-fbsd" #define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF #define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */ #define DONE 0 #define MRSAS_PAGE_SIZE 4096 #define MRSAS_RESET_NOTICE_INTERVAL 5 #define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */ #define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */ #define THRESHOLD_REPLY_COUNT 50 #define MAX_MSIX_COUNT 128 /* * Boolean types */ #if (__FreeBSD_version < 901000) typedef enum _boolean { false, true } boolean; #endif enum err { SUCCESS, FAIL }; MALLOC_DECLARE(M_MRSAS); SYSCTL_DECL(_hw_mrsas); #define MRSAS_INFO (1 << 0) #define MRSAS_TRACE (1 << 1) #define MRSAS_FAULT (1 << 2) #define MRSAS_OCR (1 << 3) #define MRSAS_TOUT MRSAS_OCR #define MRSAS_AEN (1 << 4) #define MRSAS_PRL11 (1 << 5) #define mrsas_dprint(sc, level, msg, args...) 
\ do { \ if (sc->mrsas_debug & level) \ device_printf(sc->mrsas_dev, msg, ##args); \ } while (0) /**************************************************************************** * Raid Context structure which describes MegaRAID specific IO Parameters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames ****************************************************************************/ typedef struct _RAID_CONTEXT { u_int8_t Type:4; u_int8_t nseg:4; u_int8_t resvd0; u_int16_t timeoutValue; u_int8_t regLockFlags; u_int8_t resvd1; u_int16_t VirtualDiskTgtId; u_int64_t regLockRowLBA; u_int32_t regLockLength; u_int16_t nextLMId; u_int8_t exStatus; u_int8_t status; u_int8_t RAIDFlags; u_int8_t numSGE; u_int16_t configSeqNum; u_int8_t spanArm; - u_int8_t resvd2[3]; + u_int8_t priority; /* 0x1D MR_PRIORITY_RANGE */ + u_int8_t numSGEExt; /* 0x1E 1M IO support */ + u_int8_t resvd2; /* 0x1F */ } RAID_CONTEXT; /************************************************************************* * MPI2 Defines ************************************************************************/ #define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ #define MPI2_WHOINIT_HOST_DRIVER (0x04) #define MPI2_VERSION_MAJOR (0x02) #define MPI2_VERSION_MINOR (0x00) #define MPI2_VERSION_MAJOR_MASK (0xFF00) #define MPI2_VERSION_MAJOR_SHIFT (8) #define MPI2_VERSION_MINOR_MASK (0x00FF) #define MPI2_VERSION_MINOR_SHIFT (0) #define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ MPI2_VERSION_MINOR) #define MPI2_HEADER_VERSION_UNIT (0x10) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) #define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) #define MPI2_HEADER_VERSION_DEV_SHIFT (0) #define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV) #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) #define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) #define MPI2_SCSIIO_CONTROL_READ (0x02000000) #define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) #define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) #define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) #define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) #define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) #define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) #define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) #define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) #define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) #define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) #define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) #ifndef MPI2_POINTER #define MPI2_POINTER * #endif /*************************************** * MPI2 Structures ***************************************/ typedef struct _MPI25_IEEE_SGE_CHAIN64 { u_int64_t Address; u_int32_t Length; u_int16_t Reserved1; u_int8_t NextChainOffset; u_int8_t Flags; } MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64, Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t; typedef struct _MPI2_SGE_SIMPLE_UNION { u_int32_t FlagsLength; union { u_int32_t Address32; u_int64_t
Address64; } u; } MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION, Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t; typedef struct { u_int8_t CDB[20]; /* 0x00 */ u_int32_t PrimaryReferenceTag; /* 0x14 */ u_int16_t PrimaryApplicationTag;/* 0x18 */ u_int16_t PrimaryApplicationTagMask; /* 0x1A */ u_int32_t TransferLength; /* 0x1C */ } MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32, Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t; typedef struct _MPI2_SGE_CHAIN_UNION { u_int16_t Length; u_int8_t NextChainOffset; u_int8_t Flags; union { u_int32_t Address32; u_int64_t Address64; } u; } MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION, Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t; typedef struct _MPI2_IEEE_SGE_SIMPLE32 { u_int32_t Address; u_int32_t FlagsLength; } MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32, Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t; typedef struct _MPI2_IEEE_SGE_SIMPLE64 { u_int64_t Address; u_int32_t Length; u_int16_t Reserved1; u_int8_t Reserved2; u_int8_t Flags; } MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64, Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t; typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { MPI2_IEEE_SGE_SIMPLE32 Simple32; MPI2_IEEE_SGE_SIMPLE64 Simple64; } MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION, Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t; typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; typedef union _MPI2_IEEE_SGE_CHAIN_UNION { MPI2_IEEE_SGE_CHAIN32 Chain32; MPI2_IEEE_SGE_CHAIN64 Chain64; } MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION, Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t; typedef union _MPI2_SGE_IO_UNION { MPI2_SGE_SIMPLE_UNION MpiSimple; MPI2_SGE_CHAIN_UNION MpiChain; MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; } MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION, Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t; typedef union { u_int8_t CDB32[32]; MPI2_SCSI_IO_CDB_EEDP32 EEDP32; MPI2_SGE_SIMPLE_UNION SGE; } MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION, Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t; /* * RAID SCSI IO Request Message Total SGE count will be one less than * _MPI2_SCSI_IO_REQUEST */ typedef struct _MPI2_RAID_SCSI_IO_REQUEST { u_int16_t DevHandle; /* 0x00 */ u_int8_t ChainOffset; /* 0x02 */ u_int8_t Function; /* 0x03 */ u_int16_t Reserved1; /* 0x04 */ u_int8_t Reserved2; /* 0x06 */ u_int8_t MsgFlags; /* 0x07 */ u_int8_t VP_ID; /* 0x08 */ u_int8_t VF_ID; /* 0x09 */ u_int16_t Reserved3; /* 0x0A */ u_int32_t SenseBufferLowAddress;/* 0x0C */ u_int16_t SGLFlags; /* 0x10 */ u_int8_t SenseBufferLength; /* 0x12 */ u_int8_t Reserved4; /* 0x13 */ u_int8_t SGLOffset0; /* 0x14 */ u_int8_t SGLOffset1; /* 0x15 */ u_int8_t SGLOffset2; /* 0x16 */ u_int8_t SGLOffset3; /* 0x17 */ u_int32_t SkipCount; /* 0x18 */ u_int32_t DataLength; /* 0x1C */ u_int32_t BidirectionalDataLength; /* 0x20 */ u_int16_t IoFlags; /* 0x24 */ u_int16_t EEDPFlags; /* 0x26 */ u_int32_t EEDPBlockSize; /* 0x28 */ u_int32_t SecondaryReferenceTag;/* 0x2C */ u_int16_t SecondaryApplicationTag; /* 0x30 */ u_int16_t ApplicationTagTranslationMask; /* 0x32 */ u_int8_t LUN[8]; /* 0x34 */ u_int32_t Control; /* 0x3C */ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ RAID_CONTEXT RaidContext; /* 0x60 */ MPI2_SGE_IO_UNION SGL; /* 0x80 */ } 
MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST, MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t; /* * MPT RAID MFA IO Descriptor. */ typedef struct _MRSAS_RAID_MFA_IO_DESCRIPTOR { u_int32_t RequestFlags:8; u_int32_t MessageAddress1:24; /* bits 31:8 */ u_int32_t MessageAddress2; /* bits 61:32 */ } MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR, *PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR; /* Default Request Descriptor */ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t DescriptorTypeDependent; /* 0x06 */ } MPI2_DEFAULT_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t; /* High Priority Request Descriptor */ typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t Reserved1; /* 0x06 */ } MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, Mpi2HighPriorityRequestDescriptor_t, MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t; /* SCSI IO Request Descriptor */ typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t DevHandle; /* 0x06 */ } MPI2_SCSI_IO_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t; /* SCSI Target Request Descriptor */ typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, Mpi2SCSITargetRequestDescriptor_t, MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t; /* RAID Accelerator Request Descriptor */ typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { u_int8_t RequestFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t LMID; /* 0x04 */ u_int16_t Reserved; /* 0x06 */ } MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, Mpi2RAIDAcceleratorRequestDescriptor_t, MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t; /* union of Request Descriptors */ typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION { MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo; union { struct { u_int32_t low; u_int32_t high; } u; u_int64_t Words; } addr; } MRSAS_REQUEST_DESCRIPTOR_UNION; /* Default Reply Descriptor */ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t DescriptorTypeDependent1; /* 0x02 */ u_int32_t DescriptorTypeDependent2; /* 0x04 */ } MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t; /* Address Reply Descriptor */ typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int32_t 
ReplyFrameAddress; /* 0x04 */ } MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t; /* SCSI IO Success Reply Descriptor */ typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int16_t TaskTag; /* 0x04 */ u_int16_t Reserved1; /* 0x06 */ } MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, Mpi2SCSIIOSuccessReplyDescriptor_t, MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t; /* TargetAssist Success Reply Descriptor */ typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int8_t SequenceNumber; /* 0x04 */ u_int8_t Reserved1; /* 0x05 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, Mpi2TargetAssistSuccessReplyDescriptor_t, MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t; /* Target Command Buffer Reply Descriptor */ typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int8_t VP_ID; /* 0x02 */ u_int8_t Flags; /* 0x03 */ u_int16_t InitiatorDevHandle; /* 0x04 */ u_int16_t IoIndex; /* 0x06 */ } MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, Mpi2TargetCommandBufferReplyDescriptor_t, MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t; /* RAID Accelerator Success Reply Descriptor */ typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { u_int8_t ReplyFlags; /* 0x00 */ u_int8_t MSIxIndex; /* 0x01 */ u_int16_t SMID; /* 0x02 */ u_int32_t Reserved; /* 0x04 */ } MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; /* union of Reply Descriptors */ typedef union _MPI2_REPLY_DESCRIPTORS_UNION { MPI2_DEFAULT_REPLY_DESCRIPTOR Default; MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; u_int64_t Words; } MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION, Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t; typedef union { volatile unsigned int val; unsigned int val_rdonly; } mrsas_atomic_t; #define mrsas_atomic_read(v) atomic_load_acq_int(&(v)->val) #define mrsas_atomic_set(v,i) atomic_store_rel_int(&(v)->val, i) #define mrsas_atomic_dec(v) atomic_fetchadd_int(&(v)->val, -1) #define mrsas_atomic_inc(v) atomic_fetchadd_int(&(v)->val, 1) /* IOCInit Request message */ typedef struct _MPI2_IOC_INIT_REQUEST { u_int8_t WhoInit; /* 0x00 */ u_int8_t Reserved1; /* 0x01 */ u_int8_t ChainOffset; /* 0x02 */ u_int8_t Function; /* 0x03 */ u_int16_t Reserved2; /* 0x04 */ u_int8_t Reserved3; /* 0x06 */ u_int8_t MsgFlags; /* 0x07 */ u_int8_t VP_ID; /* 0x08 */ u_int8_t VF_ID; /* 0x09 */ u_int16_t Reserved4; /* 0x0A */ u_int16_t MsgVersion; /* 0x0C */ u_int16_t HeaderVersion; /* 0x0E */ u_int32_t Reserved5; /* 0x10 */ u_int16_t Reserved6; /* 0x14 */ u_int8_t Reserved7; /* 0x16 */ u_int8_t HostMSIxVectors; /* 
0x17 */ u_int16_t Reserved8; /* 0x18 */ u_int16_t SystemRequestFrameSize; /* 0x1A */ u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */ u_int16_t ReplyFreeQueueDepth; /* 0x1E */ u_int32_t SenseBufferAddressHigh; /* 0x20 */ u_int32_t SystemReplyAddressHigh; /* 0x24 */ u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */ u_int64_t ReplyDescriptorPostQueueAddress; /* 0x30 */ u_int64_t ReplyFreeQueueAddress;/* 0x38 */ u_int64_t TimeStamp; /* 0x40 */ } MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST, Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t; /* * MR private defines */ #define MR_PD_INVALID 0xFFFF #define MAX_SPAN_DEPTH 8 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) #define MAX_ROW_SIZE 32 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) #define MAX_LOGICAL_DRIVES 64 #define MAX_LOGICAL_DRIVES_EXT 256 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) #define MAX_ARRAYS 128 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) #define MAX_ARRAYS_EXT 256 #define MAX_API_ARRAYS_EXT MAX_ARRAYS_EXT #define MAX_PHYSICAL_DEVICES 256 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 +#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 #define MRSAS_MAX_PD_CHANNELS 1 #define MRSAS_MAX_LD_CHANNELS 1 #define MRSAS_MAX_DEV_PER_CHANNEL 256 #define MRSAS_DEFAULT_INIT_ID -1 #define MRSAS_MAX_LUN 8 #define MRSAS_DEFAULT_CMD_PER_LUN 256 #define MRSAS_MAX_PD (MRSAS_MAX_PD_CHANNELS * \ MRSAS_MAX_DEV_PER_CHANNEL) #define MRSAS_MAX_LD_IDS (MRSAS_MAX_LD_CHANNELS * \ MRSAS_MAX_DEV_PER_CHANNEL) #define VD_EXT_DEBUG 0 /******************************************************************* * RAID map related structures ********************************************************************/ #pragma pack(1) typedef struct _MR_DEV_HANDLE_INFO { u_int16_t curDevHdl; u_int8_t validHandles; u_int8_t reserved; u_int16_t devHandle[2]; } MR_DEV_HANDLE_INFO; #pragma pack() typedef struct _MR_ARRAY_INFO { u_int16_t pd[MAX_RAIDMAP_ROW_SIZE]; } MR_ARRAY_INFO; typedef struct _MR_QUAD_ELEMENT { u_int64_t logStart; u_int64_t logEnd; u_int64_t offsetInSpan; u_int32_t diff; u_int32_t reserved1; } MR_QUAD_ELEMENT; typedef struct _MR_SPAN_INFO { u_int32_t noElements; u_int32_t reserved1; MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; } MR_SPAN_INFO; typedef struct _MR_LD_SPAN_ { u_int64_t startBlk; u_int64_t numBlks; u_int16_t arrayRef; u_int8_t spanRowSize; u_int8_t spanRowDataSize; u_int8_t reserved[4]; } MR_LD_SPAN; typedef struct _MR_SPAN_BLOCK_INFO { u_int64_t num_rows; MR_LD_SPAN span; MR_SPAN_INFO block_span_info; } MR_SPAN_BLOCK_INFO; typedef struct _MR_LD_RAID { struct { u_int32_t fpCapable:1; u_int32_t reserved5:3; u_int32_t ldPiMode:4; u_int32_t pdPiMode:4; u_int32_t encryptionType:8; u_int32_t fpWriteCapable:1; u_int32_t fpReadCapable:1; u_int32_t fpWriteAcrossStripe:1; u_int32_t fpReadAcrossStripe:1; u_int32_t fpNonRWCapable:1; u_int32_t reserved4:7; } capability; u_int32_t reserved6; u_int64_t size; u_int8_t spanDepth; u_int8_t level; u_int8_t stripeShift; u_int8_t rowSize; u_int8_t rowDataSize; u_int8_t writeMode; u_int8_t PRL; u_int8_t SRL; u_int16_t targetId; u_int8_t ldState; u_int8_t regTypeReqOnWrite; u_int8_t modFactor; u_int8_t regTypeReqOnRead; u_int16_t seqNum; struct { u_int32_t ldSyncRequired:1; u_int32_t regTypeReqOnReadLsValid:1; u_int32_t reserved:30; } flags; u_int8_t LUN[8]; u_int8_t fpIoTimeoutForLd; u_int8_t reserved2[3]; u_int32_t logicalBlockLength; struct { 
u_int32_t LdPiExp:4; u_int32_t LdLogicalBlockExp:4; u_int32_t reserved1:24; } exponent; u_int8_t reserved3[0x80 - 0x38]; } MR_LD_RAID; typedef struct _MR_LD_SPAN_MAP { MR_LD_RAID ldRaid; u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE]; MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; } MR_LD_SPAN_MAP; typedef struct _MR_FW_RAID_MAP { u_int32_t totalSize; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } raid_desc; u_int32_t ldCount; u_int32_t Reserved1; /* * This array maps an LD target Id to an LD index; slots for absent * targets hold 0xFF. For example: if tgt Id is 4 and FW LD is 2, and * there is only one LD, FW will populate the array like this: [0xFF, * 0xFF, 0xFF, 0xFF, 0x0,.....]. This is to help reduce the entire * structure size if there are few LDs or the driver is looking up * info for 1 LD only. */ u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS]; u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; MR_LD_SPAN_MAP ldSpanMap[1]; } MR_FW_RAID_MAP; typedef struct _MR_FW_RAID_MAP_EXT { /* Not used in new map */ u_int32_t reserved; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } fw_raid_desc; u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; u_int16_t ldCount; u_int16_t arCount; u_int16_t spanCount; u_int16_t reserve3; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT]; } MR_FW_RAID_MAP_EXT; typedef struct _MR_DRV_RAID_MAP { /* * Total size of this structure, including this field. This field * will be manipulated by the driver for the ext raid map, else pick * the value from the firmware raid map. */ u_int32_t totalSize; union { struct { u_int32_t maxLd; u_int32_t maxSpanDepth; u_int32_t maxRowSize; u_int32_t maxPdCount; u_int32_t maxArrays; } validationInfo; u_int32_t version[5]; u_int32_t reserved1[5]; } drv_raid_desc; /* timeout value used by driver in FP IOs */ u_int8_t fpPdIoTimeoutSec; u_int8_t reserved2[7]; u_int16_t ldCount; u_int16_t arCount; u_int16_t spanCount; u_int16_t reserve3; MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; u_int8_t ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; MR_LD_SPAN_MAP ldSpanMap[1]; } MR_DRV_RAID_MAP; /* * Driver raid map size is the same as the ext raid map; MR_DRV_RAID_MAP_ALL * is created to stay in sync with the old raid map, mainly for code re-use.
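 *
 * A hedged note on how the driver consumes these maps: two copies are
 * kept and ping-ponged via the low bit of the map sequence counter
 * (illustrative variable names):
 *
 *	newmap = sc->ld_drv_map[sc->map_id & 1];
 *	oldmap = sc->ld_drv_map[(sc->map_id - 1) & 1];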
*/ #pragma pack(1) typedef struct _MR_DRV_RAID_MAP_ALL { MR_DRV_RAID_MAP raidMap; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1]; } MR_DRV_RAID_MAP_ALL; #pragma pack() typedef struct _LD_LOAD_BALANCE_INFO { u_int8_t loadBalanceFlag; u_int8_t reserved1; mrsas_atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES]; u_int64_t last_accessed_block[MAX_PHYSICAL_DEVICES]; } LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO; /* SPAN_SET is info calculated from the span info in the RAID map, per LD */ typedef struct _LD_SPAN_SET { u_int64_t log_start_lba; u_int64_t log_end_lba; u_int64_t span_row_start; u_int64_t span_row_end; u_int64_t data_strip_start; u_int64_t data_strip_end; u_int64_t data_row_start; u_int64_t data_row_end; u_int8_t strip_offset[MAX_SPAN_DEPTH]; u_int32_t span_row_data_width; u_int32_t diff; u_int32_t reserved[2]; } LD_SPAN_SET, *PLD_SPAN_SET; typedef struct LOG_BLOCK_SPAN_INFO { LD_SPAN_SET span_set[MAX_SPAN_DEPTH]; } LD_SPAN_INFO, *PLD_SPAN_INFO; #pragma pack(1) typedef struct _MR_FW_RAID_MAP_ALL { MR_FW_RAID_MAP raidMap; MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; } MR_FW_RAID_MAP_ALL; #pragma pack() struct IO_REQUEST_INFO { u_int64_t ldStartBlock; u_int32_t numBlocks; u_int16_t ldTgtId; u_int8_t isRead; u_int16_t devHandle; u_int64_t pdBlock; u_int8_t fpOkForIo; u_int8_t IoforUnevenSpan; u_int8_t start_span; u_int8_t reserved; u_int64_t start_row; /* span[7:5], arm[4:0] */ u_int8_t span_arm; u_int8_t pd_after_lb; }; +/* + * define MR_PD_CFG_SEQ structure for system PDs + */ +struct MR_PD_CFG_SEQ { + u_int16_t seqNum; + u_int16_t devHandle; + u_int8_t reserved[4]; +} __packed; + +struct MR_PD_CFG_SEQ_NUM_SYNC { + u_int32_t size; + u_int32_t count; + struct MR_PD_CFG_SEQ seq[1]; +} __packed; + + typedef struct _MR_LD_TARGET_SYNC { u_int8_t targetId; u_int8_t reserved; u_int16_t seqNum; } MR_LD_TARGET_SYNC; #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) #define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) #define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) #define IEEE_SGE_FLAGS_END_OF_LIST (0x40) union desc_value { u_int64_t word; struct { u_int32_t low; u_int32_t high; } u; }; /******************************************************************* * Temporary command ********************************************************************/ struct mrsas_tmp_dcmd { bus_dma_tag_t tmp_dcmd_tag; bus_dmamap_t tmp_dcmd_dmamap; void *tmp_dcmd_mem; bus_addr_t tmp_dcmd_phys_addr; }; /******************************************************************* * Register set, including legacy controllers 1068 and 1078; * structure extended for 1078 registers *******************************************************************/ #pragma pack(1) typedef struct _mrsas_register_set { u_int32_t doorbell; /* 0000h */ u_int32_t fusion_seq_offset; /* 0004h */ u_int32_t fusion_host_diag; /* 0008h */ u_int32_t reserved_01; /* 000Ch */ u_int32_t inbound_msg_0; /* 0010h */ u_int32_t inbound_msg_1; /* 0014h */ u_int32_t outbound_msg_0; /* 0018h */ u_int32_t outbound_msg_1; /* 001Ch */ u_int32_t inbound_doorbell; /* 0020h */ u_int32_t inbound_intr_status; /* 0024h */ u_int32_t inbound_intr_mask; /* 0028h */ u_int32_t outbound_doorbell; /* 002Ch */ u_int32_t outbound_intr_status; /* 0030h */ u_int32_t outbound_intr_mask; /* 0034h */ u_int32_t reserved_1[2]; /* 0038h */ u_int32_t inbound_queue_port; /* 0040h */ u_int32_t outbound_queue_port; /* 0044h */ u_int32_t reserved_2[9]; /* 0048h */ u_int32_t reply_post_host_index;/* 
006Ch */ u_int32_t reserved_2_2[12]; /* 0070h */ u_int32_t outbound_doorbell_clear; /* 00A0h */ u_int32_t reserved_3[3]; /* 00A4h */ u_int32_t outbound_scratch_pad; /* 00B0h */ u_int32_t outbound_scratch_pad_2; /* 00B4h */ u_int32_t reserved_4[2]; /* 00B8h */ u_int32_t inbound_low_queue_port; /* 00C0h */ u_int32_t inbound_high_queue_port; /* 00C4h */ u_int32_t reserved_5; /* 00C8h */ u_int32_t res_6[11]; /* CCh */ u_int32_t host_diag; u_int32_t seq_offset; u_int32_t index_registers[807]; /* 00CCh */ } mrsas_reg_set; #pragma pack() /******************************************************************* * Firmware Interface Defines ******************************************************************* * MFI stands for MegaRAID SAS FW Interface. This is just a moniker * for protocol between the software and firmware. Commands are * issued using "message frames". ******************************************************************/ /* * FW posts its state in upper 4 bits of outbound_msg_0 register */ #define MFI_STATE_MASK 0xF0000000 #define MFI_STATE_UNDEFINED 0x00000000 #define MFI_STATE_BB_INIT 0x10000000 #define MFI_STATE_FW_INIT 0x40000000 #define MFI_STATE_WAIT_HANDSHAKE 0x60000000 #define MFI_STATE_FW_INIT_2 0x70000000 #define MFI_STATE_DEVICE_SCAN 0x80000000 #define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000 #define MFI_STATE_FLUSH_CACHE 0xA0000000 #define MFI_STATE_READY 0xB0000000 #define MFI_STATE_OPERATIONAL 0xC0000000 #define MFI_STATE_FAULT 0xF0000000 #define MFI_RESET_REQUIRED 0x00000001 #define MFI_RESET_ADAPTER 0x00000002 #define MEGAMFI_FRAME_SIZE 64 #define MRSAS_MFI_FRAME_SIZE 1024 #define MRSAS_MFI_SENSE_SIZE 128 /* * During FW init, clear pending cmds & reset state using inbound_msg_0 * * ABORT : Abort all pending cmds READY : Move from OPERATIONAL to * READY state; discard queue info MFIMODE : Discard (possible) low MFA * posted in 64-bit mode (??) 
CLR_HANDSHAKE: FW is waiting for HANDSHAKE from * BIOS or Driver HOTPLUG : Resume from Hotplug MFI_STOP_ADP : Send * signal to FW to stop processing */ #define WRITE_SEQUENCE_OFFSET (0x0000000FC) #define HOST_DIAGNOSTIC_OFFSET (0x000000F8) #define DIAG_WRITE_ENABLE (0x00000080) #define DIAG_RESET_ADAPTER (0x00000004) #define MFI_ADP_RESET 0x00000040 #define MFI_INIT_ABORT 0x00000001 #define MFI_INIT_READY 0x00000002 #define MFI_INIT_MFIMODE 0x00000004 #define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 #define MFI_INIT_HOTPLUG 0x00000010 #define MFI_STOP_ADP 0x00000020 #define MFI_RESET_FLAGS MFI_INIT_READY| \ MFI_INIT_MFIMODE| \ MFI_INIT_ABORT /* * MFI frame flags */ #define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 #define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 #define MFI_FRAME_SGL32 0x0000 #define MFI_FRAME_SGL64 0x0002 #define MFI_FRAME_SENSE32 0x0000 #define MFI_FRAME_SENSE64 0x0004 #define MFI_FRAME_DIR_NONE 0x0000 #define MFI_FRAME_DIR_WRITE 0x0008 #define MFI_FRAME_DIR_READ 0x0010 #define MFI_FRAME_DIR_BOTH 0x0018 #define MFI_FRAME_IEEE 0x0020 /* * Definition for cmd_status */ #define MFI_CMD_STATUS_POLL_MODE 0xFF /* * MFI command opcodes */ #define MFI_CMD_INIT 0x00 #define MFI_CMD_LD_READ 0x01 #define MFI_CMD_LD_WRITE 0x02 #define MFI_CMD_LD_SCSI_IO 0x03 #define MFI_CMD_PD_SCSI_IO 0x04 #define MFI_CMD_DCMD 0x05 #define MFI_CMD_ABORT 0x06 #define MFI_CMD_SMP 0x07 #define MFI_CMD_STP 0x08 #define MFI_CMD_INVALID 0xff #define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_LD_GET_LIST 0x03010000 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 #define MR_FLUSH_CTRL_CACHE 0x01 #define MR_FLUSH_DISK_CACHE 0x02 #define MR_DCMD_CTRL_SHUTDOWN 0x01050000 #define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000 #define MR_ENABLE_DRIVE_SPINDOWN 0x01 #define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 #define MR_DCMD_CTRL_EVENT_GET 0x01040300 #define MR_DCMD_CTRL_EVENT_WAIT 0x01040500 #define MR_DCMD_LD_GET_PROPERTIES 0x03030000 #define MR_DCMD_CLUSTER 0x08000000 #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 #define MR_DCMD_CLUSTER_RESET_LD 0x08010200 #define MR_DCMD_PD_LIST_QUERY 0x02010100 #define MR_DCMD_CTRL_MISC_CPX 0x0100e200 #define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201 #define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202 #define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203 #define MAX_MR_ROW_SIZE 32 #define MR_CPX_DIR_WRITE 1 #define MR_CPX_DIR_READ 0 #define MR_CPX_VERSION 1 #define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200 #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_PD_INSERTED 0x005b #define MR_EVT_PD_REMOVED 0x0070 #define MR_EVT_LD_CREATED 0x008a #define MR_EVT_LD_DELETED 0x008b #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db #define MR_EVT_LD_OFFLINE 0x00fc #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 #define MR_EVT_CTRL_PERF_COLLECTION 0x017e /* * MFI command completion codes */ enum MFI_STAT { MFI_STAT_OK = 0x00, MFI_STAT_INVALID_CMD = 0x01, MFI_STAT_INVALID_DCMD = 0x02, MFI_STAT_INVALID_PARAMETER = 0x03, MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04, MFI_STAT_ABORT_NOT_POSSIBLE = 0x05, MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06, MFI_STAT_APP_IN_USE = 0x07, MFI_STAT_APP_NOT_INITIALIZED = 0x08, MFI_STAT_ARRAY_INDEX_INVALID = 0x09, MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a, MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b, MFI_STAT_DEVICE_NOT_FOUND = 0x0c, MFI_STAT_DRIVE_TOO_SMALL = 0x0d, MFI_STAT_FLASH_ALLOC_FAIL = 0x0e, MFI_STAT_FLASH_BUSY = 0x0f, MFI_STAT_FLASH_ERROR = 0x10, MFI_STAT_FLASH_IMAGE_BAD = 0x11, MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12, MFI_STAT_FLASH_NOT_OPEN = 0x13, 
MFI_STAT_FLASH_NOT_STARTED = 0x14, MFI_STAT_FLUSH_FAILED = 0x15, MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16, MFI_STAT_LD_CC_IN_PROGRESS = 0x17, MFI_STAT_LD_INIT_IN_PROGRESS = 0x18, MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19, MFI_STAT_LD_MAX_CONFIGURED = 0x1a, MFI_STAT_LD_NOT_OPTIMAL = 0x1b, MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c, MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d, MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e, MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f, MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, MFI_STAT_MFC_HW_ERROR = 0x21, MFI_STAT_NO_HW_PRESENT = 0x22, MFI_STAT_NOT_FOUND = 0x23, MFI_STAT_NOT_IN_ENCL = 0x24, MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25, MFI_STAT_PD_TYPE_WRONG = 0x26, MFI_STAT_PR_DISABLED = 0x27, MFI_STAT_ROW_INDEX_INVALID = 0x28, MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29, MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a, MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b, MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c, MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d, MFI_STAT_SCSI_IO_FAILED = 0x2e, MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f, MFI_STAT_SHUTDOWN_FAILED = 0x30, MFI_STAT_TIME_NOT_SET = 0x31, MFI_STAT_WRONG_STATE = 0x32, MFI_STAT_LD_OFFLINE = 0x33, MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34, MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35, MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, MFI_STAT_I2C_ERRORS_DETECTED = 0x37, MFI_STAT_PCI_ERRORS_DETECTED = 0x38, MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67, MFI_STAT_INVALID_STATUS = 0xFF }; /* * Number of mailbox bytes in DCMD message frame */ #define MFI_MBOX_SIZE 12 enum MR_EVT_CLASS { MR_EVT_CLASS_DEBUG = -2, MR_EVT_CLASS_PROGRESS = -1, MR_EVT_CLASS_INFO = 0, MR_EVT_CLASS_WARNING = 1, MR_EVT_CLASS_CRITICAL = 2, MR_EVT_CLASS_FATAL = 3, MR_EVT_CLASS_DEAD = 4, }; enum MR_EVT_LOCALE { MR_EVT_LOCALE_LD = 0x0001, MR_EVT_LOCALE_PD = 0x0002, MR_EVT_LOCALE_ENCL = 0x0004, MR_EVT_LOCALE_BBU = 0x0008, MR_EVT_LOCALE_SAS = 0x0010, MR_EVT_LOCALE_CTRL = 0x0020, MR_EVT_LOCALE_CONFIG = 0x0040, MR_EVT_LOCALE_CLUSTER = 0x0080, MR_EVT_LOCALE_ALL = 0xffff, }; enum MR_EVT_ARGS { MR_EVT_ARGS_NONE, MR_EVT_ARGS_CDB_SENSE, MR_EVT_ARGS_LD, MR_EVT_ARGS_LD_COUNT, MR_EVT_ARGS_LD_LBA, MR_EVT_ARGS_LD_OWNER, MR_EVT_ARGS_LD_LBA_PD_LBA, MR_EVT_ARGS_LD_PROG, MR_EVT_ARGS_LD_STATE, MR_EVT_ARGS_LD_STRIP, MR_EVT_ARGS_PD, MR_EVT_ARGS_PD_ERR, MR_EVT_ARGS_PD_LBA, MR_EVT_ARGS_PD_LBA_LD, MR_EVT_ARGS_PD_PROG, MR_EVT_ARGS_PD_STATE, MR_EVT_ARGS_PCI, MR_EVT_ARGS_RATE, MR_EVT_ARGS_STR, MR_EVT_ARGS_TIME, MR_EVT_ARGS_ECC, MR_EVT_ARGS_LD_PROP, MR_EVT_ARGS_PD_SPARE, MR_EVT_ARGS_PD_INDEX, MR_EVT_ARGS_DIAG_PASS, MR_EVT_ARGS_DIAG_FAIL, MR_EVT_ARGS_PD_LBA_LBA, MR_EVT_ARGS_PORT_PHY, MR_EVT_ARGS_PD_MISSING, MR_EVT_ARGS_PD_ADDRESS, MR_EVT_ARGS_BITMAP, MR_EVT_ARGS_CONNECTOR, MR_EVT_ARGS_PD_PD, MR_EVT_ARGS_PD_FRU, MR_EVT_ARGS_PD_PATHINFO, MR_EVT_ARGS_PD_POWER_STATE, MR_EVT_ARGS_GENERIC, }; /* * Thunderbolt (and later) Defines */ -#define MRSAS_MAX_SZ_CHAIN_FRAME 1024 +#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009) #define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256 #define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0 #define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1 #define MRSAS_LOAD_BALANCE_FLAG 0x1 #define MRSAS_DCMD_MBOX_PEND_FLAG 0x1 #define HOST_DIAG_WRITE_ENABLE 0x80 #define HOST_DIAG_RESET_ADAPTER 0x4 #define MRSAS_TBOLT_MAX_RESET_TRIES 3 #define MRSAS_MAX_MFI_CMDS 32 /* * Invader Defines */ #define MPI2_TYPE_CUDA 0x2 #define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00 #define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 #define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 #define 
MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 /* * T10 PI defines */ #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 #define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f #define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9 #define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB #define MRSAS_SCSI_ADDL_CDB_LEN 0x18 #define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20 #define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60 #define MRSAS_SCSIBLOCKSIZE 512 /* * Raid context flags */ #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4 #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30 typedef enum MR_RAID_FLAGS_IO_SUB_TYPE { MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, } MR_RAID_FLAGS_IO_SUB_TYPE; /* * Request descriptor types */ #define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 #define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1 #define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2 #define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 #define MRSAS_FP_CMD_LEN 16 #define MRSAS_FUSION_IN_RESET 0 #define RAID_CTX_SPANARM_ARM_SHIFT (0) #define RAID_CTX_SPANARM_ARM_MASK (0x1f) #define RAID_CTX_SPANARM_SPAN_SHIFT (5) #define RAID_CTX_SPANARM_SPAN_MASK (0xE0) /* * Define region lock types */ typedef enum _REGION_TYPE { REGION_TYPE_UNUSED = 0, REGION_TYPE_SHARED_READ = 1, REGION_TYPE_SHARED_WRITE = 2, REGION_TYPE_EXCLUSIVE = 3, } REGION_TYPE; /* * SCSI-CAM Related Defines */ #define MRSAS_SCSI_MAX_LUNS 0 #define MRSAS_SCSI_INITIATOR_ID 255 #define MRSAS_SCSI_MAX_CMDS 8 #define MRSAS_SCSI_MAX_CDB_LEN 16 #define MRSAS_SCSI_SENSE_BUFFERSIZE 96 -#define MRSAS_MAX_SGL 70 -#define MRSAS_MAX_IO_SIZE (256 * 1024) #define MRSAS_INTERNAL_CMDS 32 +#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000 +#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0 +#define MEGASAS_256K_IO 128 +#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4) + /* Request types */ #define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0 #define MRSAS_REQ_TYPE_AEN_FETCH 0x1 #define MRSAS_REQ_TYPE_PASSTHRU 0x2 #define MRSAS_REQ_TYPE_GETSET_PARAM 0x3 #define MRSAS_REQ_TYPE_SCSI_IO 0x4 /* Request states */ #define MRSAS_REQ_STATE_FREE 0 #define MRSAS_REQ_STATE_BUSY 1 #define MRSAS_REQ_STATE_TRAN 2 #define MRSAS_REQ_STATE_COMPLETE 3 typedef enum _MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, NON_READ_WRITE_LDIO = 1, READ_WRITE_SYSPDIO = 2, NON_READ_WRITE_SYSPDIO = 3, } MR_SCSI_CMD_TYPE; enum mrsas_req_flags { MRSAS_DIR_UNKNOWN = 0x1, MRSAS_DIR_IN = 0x2, MRSAS_DIR_OUT = 0x4, MRSAS_DIR_NONE = 0x8, }; /* * Adapter Reset States */ enum { MRSAS_HBA_OPERATIONAL = 0, MRSAS_ADPRESET_SM_INFAULT = 1, MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, MRSAS_ADPRESET_SM_OPERATIONAL = 3, MRSAS_HW_CRITICAL_ERROR = 4, MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, }; /* * MPT Command Structure */ struct mrsas_mpt_cmd { MRSAS_RAID_SCSI_IO_REQUEST *io_request; bus_addr_t io_request_phys_addr; MPI2_SGE_IO_UNION *chain_frame; bus_addr_t chain_frame_phys_addr; u_int32_t sge_count; u_int8_t *sense; bus_addr_t sense_phys_addr; u_int8_t retry_for_fw_reset; MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc; u_int32_t sync_cmd_idx; u_int32_t index; u_int8_t flags; u_int8_t pd_r1_lb; u_int8_t load_balance; bus_size_t length; u_int32_t error_code; bus_dmamap_t data_dmamap; void *data; union ccb *ccb_ptr; struct callout cm_callout; struct mrsas_softc *sc; TAILQ_ENTRY(mrsas_mpt_cmd) next; }; /* * MFI Command Structure */ struct mrsas_mfi_cmd { union mrsas_frame *frame; bus_dmamap_t frame_dmamap; void *frame_mem; bus_addr_t frame_phys_addr; u_int8_t *sense; bus_dmamap_t sense_dmamap; void *sense_mem; bus_addr_t sense_phys_addr; u_int32_t index; u_int8_t sync_cmd; u_int8_t cmd_status; u_int8_t abort_aen; 
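/* * sync_cmd marks a blocked internal command; cmd_status holds the 0xFF * poll sentinel until the completion path clears it and wakes the * sleeper (see mrsas_complete_abort() in mrsas.c). */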
u_int8_t retry_for_fw_reset; struct mrsas_softc *sc; union ccb *ccb_ptr; union { struct { u_int16_t smid; u_int16_t resvd; } context; u_int32_t frame_count; } cmd_id; TAILQ_ENTRY(mrsas_mfi_cmd) next; }; /* * define constants for device list query options */ enum MR_PD_QUERY_TYPE { MR_PD_QUERY_TYPE_ALL = 0, MR_PD_QUERY_TYPE_STATE = 1, MR_PD_QUERY_TYPE_POWER_STATE = 2, MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, MR_PD_QUERY_TYPE_SPEED = 4, MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_PD_INSERTED 0x005b #define MR_EVT_PD_REMOVED 0x0070 #define MR_EVT_LD_CREATED 0x008a #define MR_EVT_LD_DELETED 0x008b #define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db #define MR_EVT_LD_OFFLINE 0x00fc #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 enum MR_PD_STATE { MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, MR_PD_STATE_UNCONFIGURED_BAD = 0x01, MR_PD_STATE_HOT_SPARE = 0x02, MR_PD_STATE_OFFLINE = 0x10, MR_PD_STATE_FAILED = 0x11, MR_PD_STATE_REBUILD = 0x14, MR_PD_STATE_ONLINE = 0x18, MR_PD_STATE_COPYBACK = 0x20, MR_PD_STATE_SYSTEM = 0x40, }; /* * defines the physical drive address structure */ #pragma pack(1) struct MR_PD_ADDRESS { u_int16_t deviceId; u_int16_t enclDeviceId; union { struct { u_int8_t enclIndex; u_int8_t slotNumber; } mrPdAddress; struct { u_int8_t enclPosition; u_int8_t enclConnectorIndex; } mrEnclAddress; } u1; u_int8_t scsiDevType; union { u_int8_t connectedPortBitmap; u_int8_t connectedPortNumbers; } u2; u_int64_t sasAddr[2]; }; #pragma pack() /* * defines the physical drive list structure */ #pragma pack(1) struct MR_PD_LIST { u_int32_t size; u_int32_t count; struct MR_PD_ADDRESS addr[1]; }; #pragma pack() #pragma pack(1) struct mrsas_pd_list { u_int16_t tid; u_int8_t driveType; u_int8_t driveState; }; #pragma pack() /* * defines the logical drive reference structure */ typedef union _MR_LD_REF { struct { u_int8_t targetId; u_int8_t reserved; u_int16_t seqNum; } ld_context; u_int32_t ref; } MR_LD_REF; /* * defines the logical drive list structure */ #pragma pack(1) struct MR_LD_LIST { u_int32_t ldCount; u_int32_t reserved; struct { MR_LD_REF ref; u_int8_t state; u_int8_t reserved[3]; u_int64_t size; } ldList[MAX_LOGICAL_DRIVES_EXT]; }; #pragma pack() /* * SAS controller properties */ #pragma pack(1) struct mrsas_ctrl_prop { u_int16_t seq_num; u_int16_t pred_fail_poll_interval; u_int16_t intr_throttle_count; u_int16_t intr_throttle_timeouts; u_int8_t rebuild_rate; u_int8_t patrol_read_rate; u_int8_t bgi_rate; u_int8_t cc_rate; u_int8_t recon_rate; u_int8_t cache_flush_interval; u_int8_t spinup_drv_count; u_int8_t spinup_delay; u_int8_t cluster_enable; u_int8_t coercion_mode; u_int8_t alarm_enable; u_int8_t disable_auto_rebuild; u_int8_t disable_battery_warn; u_int8_t ecc_bucket_size; u_int16_t ecc_bucket_leak_rate; u_int8_t restore_hotspare_on_insertion; u_int8_t expose_encl_devices; u_int8_t maintainPdFailHistory; u_int8_t disallowHostRequestReordering; u_int8_t abortCCOnError; u_int8_t loadBalanceMode; u_int8_t disableAutoDetectBackplane; u_int8_t snapVDSpace; /* * Add properties that can be controlled by a bit in the following * structure. 
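 *
 * A hedged usage sketch (ctrl_prop being a hypothetical pointer to this
 * structure): individual features are tested bit-wise, e.g.
 *
 *	if (ctrl_prop->OnOffProperties.enableJBOD)
 *		... controller exposes unconfigured-good disks as JBOD ...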
*/ struct { u_int32_t copyBackDisabled:1; u_int32_t SMARTerEnabled:1; u_int32_t prCorrectUnconfiguredAreas:1; u_int32_t useFdeOnly:1; u_int32_t disableNCQ:1; u_int32_t SSDSMARTerEnabled:1; u_int32_t SSDPatrolReadEnabled:1; u_int32_t enableSpinDownUnconfigured:1; u_int32_t autoEnhancedImport:1; u_int32_t enableSecretKeyControl:1; u_int32_t disableOnlineCtrlReset:1; u_int32_t allowBootWithPinnedCache:1; u_int32_t disableSpinDownHS:1; u_int32_t enableJBOD:1; u_int32_t disableCacheBypass:1; u_int32_t useDiskActivityForLocate:1; u_int32_t enablePI:1; u_int32_t preventPIImport:1; u_int32_t useGlobalSparesForEmergency:1; u_int32_t useUnconfGoodForEmergency:1; u_int32_t useEmergencySparesforSMARTer:1; u_int32_t forceSGPIOForQuadOnly:1; u_int32_t enableConfigAutoBalance:1; u_int32_t enableVirtualCache:1; u_int32_t enableAutoLockRecovery:1; u_int32_t disableImmediateIO:1; u_int32_t disableT10RebuildAssist:1; u_int32_t ignore64ldRestriction:1; u_int32_t enableSwZone:1; u_int32_t limitMaxRateSATA3G:1; u_int32_t reserved:2; } OnOffProperties; u_int8_t autoSnapVDSpace; u_int8_t viewSpace; u_int16_t spinDownTime; u_int8_t reserved[24]; }; #pragma pack() /* * SAS controller information */ struct mrsas_ctrl_info { /* * PCI device information */ struct { u_int16_t vendor_id; u_int16_t device_id; u_int16_t sub_vendor_id; u_int16_t sub_device_id; u_int8_t reserved[24]; } __packed pci; /* * Host interface information */ struct { u_int8_t PCIX:1; u_int8_t PCIE:1; u_int8_t iSCSI:1; u_int8_t SAS_3G:1; u_int8_t reserved_0:4; u_int8_t reserved_1[6]; u_int8_t port_count; u_int64_t port_addr[8]; } __packed host_interface; /* * Device (backend) interface information */ struct { u_int8_t SPI:1; u_int8_t SAS_3G:1; u_int8_t SATA_1_5G:1; u_int8_t SATA_3G:1; u_int8_t reserved_0:4; u_int8_t reserved_1[6]; u_int8_t port_count; u_int64_t port_addr[8]; } __packed device_interface; u_int32_t image_check_word; u_int32_t image_component_count; struct { char name[8]; char version[32]; char build_date[16]; char built_time[16]; } __packed image_component[8]; u_int32_t pending_image_component_count; struct { char name[8]; char version[32]; char build_date[16]; char build_time[16]; } __packed pending_image_component[8]; u_int8_t max_arms; u_int8_t max_spans; u_int8_t max_arrays; u_int8_t max_lds; char product_name[80]; char serial_no[32]; /* * Other physical/controller/operation information. 
Indicates the * presence of the hardware */ struct { u_int32_t bbu:1; u_int32_t alarm:1; u_int32_t nvram:1; u_int32_t uart:1; u_int32_t reserved:28; } __packed hw_present; u_int32_t current_fw_time; /* * Maximum data transfer sizes */ u_int16_t max_concurrent_cmds; u_int16_t max_sge_count; u_int32_t max_request_size; /* * Logical and physical device counts */ u_int16_t ld_present_count; u_int16_t ld_degraded_count; u_int16_t ld_offline_count; u_int16_t pd_present_count; u_int16_t pd_disk_present_count; u_int16_t pd_disk_pred_failure_count; u_int16_t pd_disk_failed_count; /* * Memory size information */ u_int16_t nvram_size; u_int16_t memory_size; u_int16_t flash_size; /* * Error counters */ u_int16_t mem_correctable_error_count; u_int16_t mem_uncorrectable_error_count; /* * Cluster information */ u_int8_t cluster_permitted; u_int8_t cluster_active; /* * Additional max data transfer sizes */ u_int16_t max_strips_per_io; /* * Controller capabilities structures */ struct { u_int32_t raid_level_0:1; u_int32_t raid_level_1:1; u_int32_t raid_level_5:1; u_int32_t raid_level_1E:1; u_int32_t raid_level_6:1; u_int32_t reserved:27; } __packed raid_levels; struct { u_int32_t rbld_rate:1; u_int32_t cc_rate:1; u_int32_t bgi_rate:1; u_int32_t recon_rate:1; u_int32_t patrol_rate:1; u_int32_t alarm_control:1; u_int32_t cluster_supported:1; u_int32_t bbu:1; u_int32_t spanning_allowed:1; u_int32_t dedicated_hotspares:1; u_int32_t revertible_hotspares:1; u_int32_t foreign_config_import:1; u_int32_t self_diagnostic:1; u_int32_t mixed_redundancy_arr:1; u_int32_t global_hot_spares:1; u_int32_t reserved:17; } __packed adapter_operations; struct { u_int32_t read_policy:1; u_int32_t write_policy:1; u_int32_t io_policy:1; u_int32_t access_policy:1; u_int32_t disk_cache_policy:1; u_int32_t reserved:27; } __packed ld_operations; struct { u_int8_t min; u_int8_t max; u_int8_t reserved[2]; } __packed stripe_sz_ops; struct { u_int32_t force_online:1; u_int32_t force_offline:1; u_int32_t force_rebuild:1; u_int32_t reserved:29; } __packed pd_operations; struct { u_int32_t ctrl_supports_sas:1; u_int32_t ctrl_supports_sata:1; u_int32_t allow_mix_in_encl:1; u_int32_t allow_mix_in_ld:1; u_int32_t allow_sata_in_cluster:1; u_int32_t reserved:27; } __packed pd_mix_support; /* * Define ECC single-bit-error bucket information */ u_int8_t ecc_bucket_count; u_int8_t reserved_2[11]; /* * Include the controller properties (changeable items) */ struct mrsas_ctrl_prop properties; /* * Define FW pkg version (set in envt v'bles on OEM basis) */ char package_version[0x60]; u_int64_t deviceInterfacePortAddr2[8]; u_int8_t reserved3[128]; struct { u_int16_t minPdRaidLevel_0:4; u_int16_t maxPdRaidLevel_0:12; u_int16_t minPdRaidLevel_1:4; u_int16_t maxPdRaidLevel_1:12; u_int16_t minPdRaidLevel_5:4; u_int16_t maxPdRaidLevel_5:12; u_int16_t minPdRaidLevel_1E:4; u_int16_t maxPdRaidLevel_1E:12; u_int16_t minPdRaidLevel_6:4; u_int16_t maxPdRaidLevel_6:12; u_int16_t minPdRaidLevel_10:4; u_int16_t maxPdRaidLevel_10:12; u_int16_t minPdRaidLevel_50:4; u_int16_t maxPdRaidLevel_50:12; u_int16_t minPdRaidLevel_60:4; u_int16_t maxPdRaidLevel_60:12; u_int16_t minPdRaidLevel_1E_RLQ0:4; u_int16_t maxPdRaidLevel_1E_RLQ0:12; u_int16_t minPdRaidLevel_1E0_RLQ0:4; u_int16_t maxPdRaidLevel_1E0_RLQ0:12; u_int16_t reserved[6]; } pdsForRaidLevels; u_int16_t maxPds; /* 0x780 */ u_int16_t maxDedHSPs; /* 0x782 */ u_int16_t maxGlobalHSPs; /* 0x784 */ u_int16_t ddfSize; /* 0x786 */ u_int8_t maxLdsPerArray; /* 0x788 */ u_int8_t partitionsInDDF; /* 0x789 */ u_int8_t 
lockKeyBinding; /* 0x78a */ u_int8_t maxPITsPerLd; /* 0x78b */ u_int8_t maxViewsPerLd; /* 0x78c */ u_int8_t maxTargetId; /* 0x78d */ u_int16_t maxBvlVdSize; /* 0x78e */ u_int16_t maxConfigurableSSCSize; /* 0x790 */ u_int16_t currentSSCsize; /* 0x792 */ char expanderFwVersion[12]; /* 0x794 */ u_int16_t PFKTrialTimeRemaining;/* 0x7A0 */ u_int16_t cacheMemorySize; /* 0x7A2 */ struct { /* 0x7A4 */ u_int32_t supportPIcontroller:1; u_int32_t supportLdPIType1:1; u_int32_t supportLdPIType2:1; u_int32_t supportLdPIType3:1; u_int32_t supportLdBBMInfo:1; u_int32_t supportShieldState:1; u_int32_t blockSSDWriteCacheChange:1; u_int32_t supportSuspendResumeBGops:1; u_int32_t supportEmergencySpares:1; u_int32_t supportSetLinkSpeed:1; u_int32_t supportBootTimePFKChange:1; u_int32_t supportJBOD:1; u_int32_t disableOnlinePFKChange:1; u_int32_t supportPerfTuning:1; u_int32_t supportSSDPatrolRead:1; u_int32_t realTimeScheduler:1; u_int32_t supportResetNow:1; u_int32_t supportEmulatedDrives:1; u_int32_t headlessMode:1; u_int32_t dedicatedHotSparesLimited:1; u_int32_t supportUnevenSpans:1; u_int32_t reserved:11; } adapterOperations2; u_int8_t driverVersion[32]; /* 0x7A8 */ u_int8_t maxDAPdCountSpinup60; /* 0x7C8 */ u_int8_t temperatureROC; /* 0x7C9 */ u_int8_t temperatureCtrl; /* 0x7CA */ u_int8_t reserved4; /* 0x7CB */ u_int16_t maxConfigurablePds; /* 0x7CC */ u_int8_t reserved5[2]; /* 0x7CD reserved */ struct { u_int32_t peerIsPresent:1; u_int32_t peerIsIncompatible:1; u_int32_t hwIncompatible:1; u_int32_t fwVersionMismatch:1; u_int32_t ctrlPropIncompatible:1; u_int32_t premiumFeatureMismatch:1; u_int32_t reserved:26; } cluster; char clusterId[16]; /* 0x7D4 */ char reserved6[4]; /* 0x7E4 RESERVED FOR IOV */ struct { /* 0x7E8 */ u_int32_t supportPersonalityChange:2; u_int32_t supportThermalPollInterval:1; u_int32_t supportDisableImmediateIO:1; u_int32_t supportT10RebuildAssist:1; u_int32_t supportMaxExtLDs:1; u_int32_t supportCrashDump:1; u_int32_t supportSwZone:1; u_int32_t supportDebugQueue:1; u_int32_t supportNVCacheErase:1; u_int32_t supportForceTo512e:1; u_int32_t supportHOQRebuild:1; u_int32_t supportAllowedOpsforDrvRemoval:1; u_int32_t supportDrvActivityLEDSetting:1; u_int32_t supportNVDRAM:1; u_int32_t supportForceFlash:1; u_int32_t supportDisableSESMonitoring:1; u_int32_t supportCacheBypassModes:1; u_int32_t supportSecurityonJBOD:1; u_int32_t discardCacheDuringLDDelete:1; - u_int32_t reserved:12; + u_int32_t supportTTYLogCompression:1; + u_int32_t supportCPLDUpdate:1; + u_int32_t supportDiskCacheSettingForSysPDs:1; + u_int32_t supportExtendedSSCSize:1; + u_int32_t useSeqNumJbodFP:1; + u_int32_t reserved:7; } adapterOperations3; u_int8_t pad[0x800 - 0x7EC]; /* 0x7EC */ } __packed; /* * When SCSI mid-layer calls driver's reset routine, driver waits for * MRSAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note * that the driver cannot _actually_ abort or reset pending commands. 
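Editor's note: the new useSeqNumJbodFP bit in adapterOperations3 above is what gates the JBOD sequence-number fast path added by this change. A hedged sketch of the probe (the exact wiring lives in mrsas.c; the assignment below is representative, not verbatim):

	/* after the controller-info DCMD has completed into sc->ctrl_info */
	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	if (sc->use_seqnum_jbod_fp)
		megasas_setup_jbod_map(sc);	/* allocate and sync the PD seq map */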
While * it is waiting for the commands to complete, it prints a diagnostic message * every MRSAS_RESET_NOTICE_INTERVAL seconds */ #define MRSAS_RESET_WAIT_TIME 180 #define MRSAS_INTERNAL_CMD_WAIT_TIME 180 #define MRSAS_IOC_INIT_WAIT_TIME 60 #define MRSAS_RESET_NOTICE_INTERVAL 5 #define MRSAS_IOCTL_CMD 0 #define MRSAS_DEFAULT_CMD_TIMEOUT 90 #define MRSAS_THROTTLE_QUEUE_DEPTH 16 /* * MSI-x register offset defines */ #define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) #define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) #define MR_MAX_REPLY_QUEUES_OFFSET (0x0000001F) #define MR_MAX_REPLY_QUEUES_EXT_OFFSET (0x003FC000) #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 /* * FW reports the maximum number of commands that it can accept (maximum * commands that can be outstanding) at any time. The driver must report a * lower number to the mid layer because it can issue a few internal commands * itself (e.g., AEN, abort cmd, IOCTLs, etc). The number of commands it needs * is shown below */ #define MRSAS_INT_CMDS 32 #define MRSAS_SKINNY_INT_CMDS 5 #define MRSAS_MAX_MSIX_QUEUES 128 /* * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit SGLs * based on the size of bus_addr_t */ #define IS_DMA64 (sizeof(bus_addr_t) == 8) #define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 #define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001 #define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002 #define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 #define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001 #define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 #define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) #define MFI_1068_PCSR_OFFSET 0x84 #define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 #define MFI_1068_FW_READY 0xDDDD0000 typedef union _MFI_CAPABILITIES { struct { u_int32_t support_fp_remote_lun:1; u_int32_t support_additional_msix:1; u_int32_t support_fastpath_wb:1; u_int32_t support_max_255lds:1; u_int32_t support_ndrive_r1_lb:1; u_int32_t support_core_affinity:1; u_int32_t security_protocol_cmds_fw:1; - u_int32_t reserved:25; + u_int32_t support_ext_queue_depth:1; + u_int32_t support_ext_io_size:1; + u_int32_t reserved:23; } mfi_capabilities; u_int32_t reg; } MFI_CAPABILITIES; #pragma pack(1) struct mrsas_sge32 { u_int32_t phys_addr; u_int32_t length; }; #pragma pack() #pragma pack(1) struct mrsas_sge64 { u_int64_t phys_addr; u_int32_t length; }; #pragma pack() #pragma pack() union mrsas_sgl { struct mrsas_sge32 sge32[1]; struct mrsas_sge64 sge64[1]; }; #pragma pack() #pragma pack(1) struct mrsas_header { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t lun; /* 05h */ u_int8_t cdb_len; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xferlen; /* 14h */ }; #pragma pack() #pragma pack(1) struct mrsas_init_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1; /* 03h */ MFI_CAPABILITIES driver_operations; /* 04h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t reserved_3; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t queue_info_new_phys_addr_lo; /* 18h */
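Editor's sketch of how the MR_MAX_REPLY_QUEUES_EXT_* values above are applied: newer firmware advertises its MSI-x vector count in a scratch-pad register, encoded as (count - 1) in bits 14..21. The register name below is an assumption based on the usual mrsas_setup_msix() pattern:

	u_int32_t scratch_pad;
	int msix_count;

	scratch_pad = mrsas_read_reg(sc,
	    offsetof(mrsas_reg_set, outbound_scratch_pad_2));
	msix_count = ((scratch_pad & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >>
	    MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;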
u_int32_t queue_info_new_phys_addr_hi; /* 1Ch */ u_int32_t queue_info_old_phys_addr_lo; /* 20h */ u_int32_t queue_info_old_phys_addr_hi; /* 24h */ u_int32_t driver_ver_lo; /* 28h */ u_int32_t driver_ver_hi; /* 2Ch */ u_int32_t reserved_4[4]; /* 30h */ }; #pragma pack() #pragma pack(1) struct mrsas_io_frame { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t access_byte; /* 05h */ u_int8_t reserved_0; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t lba_count; /* 14h */ u_int32_t sense_buf_phys_addr_lo; /* 18h */ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */ u_int32_t start_lba_lo; /* 20h */ u_int32_t start_lba_hi; /* 24h */ union mrsas_sgl sgl; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_pthru_frame { u_int8_t cmd; /* 00h */ u_int8_t sense_len; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t scsi_status; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t lun; /* 05h */ u_int8_t cdb_len; /* 06h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t sense_buf_phys_addr_lo; /* 18h */ u_int32_t sense_buf_phys_addr_hi; /* 1Ch */ u_int8_t cdb[16]; /* 20h */ union mrsas_sgl sgl; /* 30h */ }; #pragma pack() #pragma pack(1) struct mrsas_dcmd_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1[4]; /* 03h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int32_t opcode; /* 18h */ union { /* 1Ch */ u_int8_t b[12]; u_int16_t s[6]; u_int32_t w[3]; } mbox; union mrsas_sgl sgl; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_abort_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_0; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_1; /* 03h */ MFI_CAPABILITIES driver_operations; /* 04h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t reserved_3; /* 12h */ u_int32_t reserved_4; /* 14h */ u_int32_t abort_context; /* 18h */ u_int32_t pad_1; /* 1Ch */ u_int32_t abort_mfi_phys_addr_lo; /* 20h */ u_int32_t abort_mfi_phys_addr_hi; /* 24h */ u_int32_t reserved_5[6]; /* 28h */ }; #pragma pack() #pragma pack(1) struct mrsas_smp_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_1; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t connection_status; /* 03h */ u_int8_t reserved_2[3]; /* 04h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int64_t sas_addr; /* 18h */ union { struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */ } sgl; }; #pragma pack() #pragma pack(1) struct mrsas_stp_frame { u_int8_t cmd; /* 00h */ u_int8_t reserved_1; /* 01h */ u_int8_t cmd_status; /* 02h */ u_int8_t reserved_2; /* 03h */ u_int8_t target_id; /* 04h */ u_int8_t reserved_3[2]; /* 05h */ u_int8_t sge_count; /* 07h */ u_int32_t context; /* 08h */ u_int32_t pad_0; /* 0Ch */ u_int16_t flags; /* 10h */ u_int16_t timeout; /* 12h */ u_int32_t data_xfer_len; /* 14h */ u_int16_t fis[10]; /* 18h */ u_int32_t stp_flags; union { struct mrsas_sge32 
sge32[2]; /* [0]: resp [1]: data */ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */ } sgl; }; #pragma pack() union mrsas_frame { struct mrsas_header hdr; struct mrsas_init_frame init; struct mrsas_io_frame io; struct mrsas_pthru_frame pthru; struct mrsas_dcmd_frame dcmd; struct mrsas_abort_frame abort; struct mrsas_smp_frame smp; struct mrsas_stp_frame stp; u_int8_t raw_bytes[64]; }; #pragma pack(1) union mrsas_evt_class_locale { struct { u_int16_t locale; u_int8_t reserved; int8_t class; } __packed members; u_int32_t word; } __packed; #pragma pack() #pragma pack(1) struct mrsas_evt_log_info { u_int32_t newest_seq_num; u_int32_t oldest_seq_num; u_int32_t clear_seq_num; u_int32_t shutdown_seq_num; u_int32_t boot_seq_num; } __packed; #pragma pack() struct mrsas_progress { u_int16_t progress; u_int16_t elapsed_seconds; } __packed; struct mrsas_evtarg_ld { u_int16_t target_id; u_int8_t ld_index; u_int8_t reserved; } __packed; struct mrsas_evtarg_pd { u_int16_t device_id; u_int8_t encl_index; u_int8_t slot_number; } __packed; struct mrsas_evt_detail { u_int32_t seq_num; u_int32_t time_stamp; u_int32_t code; union mrsas_evt_class_locale cl; u_int8_t arg_type; u_int8_t reserved1[15]; union { struct { struct mrsas_evtarg_pd pd; u_int8_t cdb_length; u_int8_t sense_length; u_int8_t reserved[2]; u_int8_t cdb[16]; u_int8_t sense[64]; } __packed cdbSense; struct mrsas_evtarg_ld ld; struct { struct mrsas_evtarg_ld ld; u_int64_t count; } __packed ld_count; struct { u_int64_t lba; struct mrsas_evtarg_ld ld; } __packed ld_lba; struct { struct mrsas_evtarg_ld ld; u_int32_t prevOwner; u_int32_t newOwner; } __packed ld_owner; struct { u_int64_t ld_lba; u_int64_t pd_lba; struct mrsas_evtarg_ld ld; struct mrsas_evtarg_pd pd; } __packed ld_lba_pd_lba; struct { struct mrsas_evtarg_ld ld; struct mrsas_progress prog; } __packed ld_prog; struct { struct mrsas_evtarg_ld ld; u_int32_t prev_state; u_int32_t new_state; } __packed ld_state; struct { u_int64_t strip; struct mrsas_evtarg_ld ld; } __packed ld_strip; struct mrsas_evtarg_pd pd; struct { struct mrsas_evtarg_pd pd; u_int32_t err; } __packed pd_err; struct { u_int64_t lba; struct mrsas_evtarg_pd pd; } __packed pd_lba; struct { u_int64_t lba; struct mrsas_evtarg_pd pd; struct mrsas_evtarg_ld ld; } __packed pd_lba_ld; struct { struct mrsas_evtarg_pd pd; struct mrsas_progress prog; } __packed pd_prog; struct { struct mrsas_evtarg_pd pd; u_int32_t prevState; u_int32_t newState; } __packed pd_state; struct { u_int16_t vendorId; u_int16_t deviceId; u_int16_t subVendorId; u_int16_t subDeviceId; } __packed pci; u_int32_t rate; char str[96]; struct { u_int32_t rtc; u_int32_t elapsedSeconds; } __packed time; struct { u_int32_t ecar; u_int32_t elog; char str[64]; } __packed ecc; u_int8_t b[96]; u_int16_t s[48]; u_int32_t w[24]; u_int64_t d[12]; } args; char description[128]; } __packed; struct mrsas_irq_context { struct mrsas_softc *sc; uint32_t MSIxIndex; }; +enum MEGASAS_OCR_REASON { + FW_FAULT_OCR = 0, + SCSIIO_TIMEOUT_OCR = 1, + MFI_DCMD_TIMEOUT_OCR = 2, +}; + /* Controller management info added to support Linux Emulator */ #define MAX_MGMT_ADAPTERS 1024 struct mrsas_mgmt_info { u_int16_t count; struct mrsas_softc *sc_ptr[MAX_MGMT_ADAPTERS]; int max_index; }; #define PCI_TYPE0_ADDRESSES 6 #define PCI_TYPE1_ADDRESSES 2 #define PCI_TYPE2_ADDRESSES 5 typedef struct _MRSAS_DRV_PCI_COMMON_HEADER { u_int16_t vendorID; //(ro) u_int16_t deviceID; //(ro) u_int16_t command; //Device control u_int16_t status; u_int8_t revisionID; //(ro) u_int8_t progIf; //(ro) u_int8_t 
subClass; //(ro) u_int8_t baseClass; //(ro) u_int8_t cacheLineSize; //(ro +) u_int8_t latencyTimer; //(ro +) u_int8_t headerType; //(ro) u_int8_t bist; //Built in self test union { struct _MRSAS_DRV_PCI_HEADER_TYPE_0 { u_int32_t baseAddresses[PCI_TYPE0_ADDRESSES]; u_int32_t cis; u_int16_t subVendorID; u_int16_t subSystemID; u_int32_t romBaseAddress; u_int8_t capabilitiesPtr; u_int8_t reserved1[3]; u_int32_t reserved2; u_int8_t interruptLine; u_int8_t interruptPin; //(ro) u_int8_t minimumGrant; //(ro) u_int8_t maximumLatency; //(ro) } type0; /* * PCI to PCI Bridge */ struct _MRSAS_DRV_PCI_HEADER_TYPE_1 { u_int32_t baseAddresses[PCI_TYPE1_ADDRESSES]; u_int8_t primaryBus; u_int8_t secondaryBus; u_int8_t subordinateBus; u_int8_t secondaryLatency; u_int8_t ioBase; u_int8_t ioLimit; u_int16_t secondaryStatus; u_int16_t memoryBase; u_int16_t memoryLimit; u_int16_t prefetchBase; u_int16_t prefetchLimit; u_int32_t prefetchBaseUpper32; u_int32_t prefetchLimitUpper32; u_int16_t ioBaseUpper16; u_int16_t ioLimitUpper16; u_int8_t capabilitiesPtr; u_int8_t reserved1[3]; u_int32_t romBaseAddress; u_int8_t interruptLine; u_int8_t interruptPin; u_int16_t bridgeControl; } type1; /* * PCI to CARDBUS Bridge */ struct _MRSAS_DRV_PCI_HEADER_TYPE_2 { u_int32_t socketRegistersBaseAddress; u_int8_t capabilitiesPtr; u_int8_t reserved; u_int16_t secondaryStatus; u_int8_t primaryBus; u_int8_t secondaryBus; u_int8_t subordinateBus; u_int8_t secondaryLatency; struct { u_int32_t base; u_int32_t limit; } range [PCI_TYPE2_ADDRESSES - 1]; u_int8_t interruptLine; u_int8_t interruptPin; u_int16_t bridgeControl; } type2; } u; } MRSAS_DRV_PCI_COMMON_HEADER, *PMRSAS_DRV_PCI_COMMON_HEADER; #define MRSAS_DRV_PCI_COMMON_HEADER_SIZE sizeof(MRSAS_DRV_PCI_COMMON_HEADER) //64 bytes typedef struct _MRSAS_DRV_PCI_LINK_CAPABILITY { union { struct { u_int32_t linkSpeed:4; u_int32_t linkWidth:6; u_int32_t aspmSupport:2; u_int32_t losExitLatency:3; u_int32_t l1ExitLatency:3; u_int32_t rsvdp:6; u_int32_t portNumber:8; } bits; u_int32_t asUlong; } u; } MRSAS_DRV_PCI_LINK_CAPABILITY, *PMRSAS_DRV_PCI_LINK_CAPABILITY; #define MRSAS_DRV_PCI_LINK_CAPABILITY_SIZE sizeof(MRSAS_DRV_PCI_LINK_CAPABILITY) typedef struct _MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY { union { struct { u_int16_t linkSpeed:4; u_int16_t negotiatedLinkWidth:6; u_int16_t linkTrainingError:1; u_int16_t linkTraning:1; u_int16_t slotClockConfig:1; u_int16_t rsvdZ:3; } bits; u_int16_t asUshort; } u; u_int16_t reserved; } MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY, *PMRSAS_DRV_PCI_LINK_STATUS_CAPABILITY; #define MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY_SIZE sizeof(MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY) typedef struct _MRSAS_DRV_PCI_CAPABILITIES { MRSAS_DRV_PCI_LINK_CAPABILITY linkCapability; MRSAS_DRV_PCI_LINK_STATUS_CAPABILITY linkStatusCapability; } MRSAS_DRV_PCI_CAPABILITIES, *PMRSAS_DRV_PCI_CAPABILITIES; #define MRSAS_DRV_PCI_CAPABILITIES_SIZE sizeof(MRSAS_DRV_PCI_CAPABILITIES) /* PCI information */ typedef struct _MRSAS_DRV_PCI_INFORMATION { u_int32_t busNumber; u_int8_t deviceNumber; u_int8_t functionNumber; u_int8_t interruptVector; u_int8_t reserved1; MRSAS_DRV_PCI_COMMON_HEADER pciHeaderInfo; MRSAS_DRV_PCI_CAPABILITIES capability; u_int32_t domainID; u_int8_t reserved2[28]; } MRSAS_DRV_PCI_INFORMATION, *PMRSAS_DRV_PCI_INFORMATION; /******************************************************************* * per-instance data ********************************************************************/ struct mrsas_softc { device_t mrsas_dev; struct cdev *mrsas_cdev; + struct intr_config_hook mrsas_ich; + 
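Editor's note on the intr_config_hook member added to the softc above: it lets attach defer firmware-dependent setup until interrupts are live. A minimal registration sketch (assuming the hook routine is the mrsas_ich_startup() introduced by this change):

	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0)
		device_printf(sc->mrsas_dev,
		    "config_intrhook_establish failed\n");
	/* the hook itself calls config_intrhook_disestablish(&sc->mrsas_ich)
	 * once its startup work is done */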
struct cdev *mrsas_linux_emulator_cdev; uint16_t device_id; struct resource *reg_res; int reg_res_id; bus_space_tag_t bus_tag; bus_space_handle_t bus_handle; bus_dma_tag_t mrsas_parent_tag; bus_dma_tag_t verbuf_tag; bus_dmamap_t verbuf_dmamap; void *verbuf_mem; bus_addr_t verbuf_phys_addr; bus_dma_tag_t sense_tag; bus_dmamap_t sense_dmamap; void *sense_mem; bus_addr_t sense_phys_addr; bus_dma_tag_t io_request_tag; bus_dmamap_t io_request_dmamap; void *io_request_mem; bus_addr_t io_request_phys_addr; bus_dma_tag_t chain_frame_tag; bus_dmamap_t chain_frame_dmamap; void *chain_frame_mem; bus_addr_t chain_frame_phys_addr; bus_dma_tag_t reply_desc_tag; bus_dmamap_t reply_desc_dmamap; void *reply_desc_mem; bus_addr_t reply_desc_phys_addr; bus_dma_tag_t ioc_init_tag; bus_dmamap_t ioc_init_dmamap; void *ioc_init_mem; bus_addr_t ioc_init_phys_mem; bus_dma_tag_t data_tag; struct cam_sim *sim_0; struct cam_sim *sim_1; struct cam_path *path_0; struct cam_path *path_1; struct mtx sim_lock; struct mtx pci_lock; struct mtx io_lock; struct mtx ioctl_lock; struct mtx mpt_cmd_pool_lock; struct mtx mfi_cmd_pool_lock; struct mtx raidmap_lock; struct mtx aen_lock; struct selinfo mrsas_select; uint32_t mrsas_aen_triggered; uint32_t mrsas_poll_waiting; struct sema ioctl_count_sema; uint32_t max_fw_cmds; uint32_t max_num_sge; struct resource *mrsas_irq[MAX_MSIX_COUNT]; void *intr_handle[MAX_MSIX_COUNT]; int irq_id[MAX_MSIX_COUNT]; struct mrsas_irq_context irq_context[MAX_MSIX_COUNT]; int msix_vectors; int msix_enable; uint32_t msix_reg_offset[16]; uint8_t mask_interrupts; + uint16_t max_chain_frame_sz; struct mrsas_mpt_cmd **mpt_cmd_list; struct mrsas_mfi_cmd **mfi_cmd_list; TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head; TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head; bus_addr_t req_frames_desc_phys; u_int8_t *req_frames_desc; u_int8_t *req_desc; bus_addr_t io_request_frames_phys; u_int8_t *io_request_frames; bus_addr_t reply_frames_desc_phys; u_int16_t last_reply_idx[MAX_MSIX_COUNT]; u_int32_t reply_q_depth; u_int32_t request_alloc_sz; u_int32_t reply_alloc_sz; u_int32_t io_frames_alloc_sz; u_int32_t chain_frames_alloc_sz; u_int16_t max_sge_in_main_msg; u_int16_t max_sge_in_chain; u_int8_t chain_offset_io_request; u_int8_t chain_offset_mfi_pthru; u_int32_t map_sz; u_int64_t map_id; + u_int64_t pd_seq_map_id; struct mrsas_mfi_cmd *map_update_cmd; + struct mrsas_mfi_cmd *jbod_seq_cmd; struct mrsas_mfi_cmd *aen_cmd; u_int8_t fast_path_io; void *chan; void *ocr_chan; u_int8_t adprecovery; u_int8_t remove_in_progress; u_int8_t ocr_thread_active; u_int8_t do_timedout_reset; u_int32_t reset_in_progress; u_int32_t reset_count; + + bus_dma_tag_t jbodmap_tag[2]; + bus_dmamap_t jbodmap_dmamap[2]; + void *jbodmap_mem[2]; + bus_addr_t jbodmap_phys_addr[2]; + bus_dma_tag_t raidmap_tag[2]; bus_dmamap_t raidmap_dmamap[2]; void *raidmap_mem[2]; bus_addr_t raidmap_phys_addr[2]; bus_dma_tag_t mficmd_frame_tag; bus_dma_tag_t mficmd_sense_tag; bus_dma_tag_t evt_detail_tag; bus_dmamap_t evt_detail_dmamap; struct mrsas_evt_detail *evt_detail_mem; bus_addr_t evt_detail_phys_addr; struct mrsas_ctrl_info *ctrl_info; bus_dma_tag_t ctlr_info_tag; bus_dmamap_t ctlr_info_dmamap; void *ctlr_info_mem; bus_addr_t ctlr_info_phys_addr; u_int32_t max_sectors_per_req; u_int32_t disableOnlineCtrlReset; mrsas_atomic_t fw_outstanding; u_int32_t mrsas_debug; u_int32_t mrsas_io_timeout; u_int32_t mrsas_fw_fault_check_delay; u_int32_t io_cmds_highwater; u_int8_t UnevenSpanSupport; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid 
*sysctl_tree; struct proc *ocr_thread; u_int32_t last_seq_num; bus_dma_tag_t el_info_tag; bus_dmamap_t el_info_dmamap; void *el_info_mem; bus_addr_t el_info_phys_addr; struct mrsas_pd_list pd_list[MRSAS_MAX_PD]; struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD]; u_int8_t ld_ids[MRSAS_MAX_LD_IDS]; struct taskqueue *ev_tq; struct task ev_task; u_int32_t CurLdCount; u_int64_t reset_flags; int lb_pending_cmds; LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; u_int8_t secure_jbod_support; + u_int8_t use_seqnum_jbod_fp; u_int8_t max256vdSupport; u_int16_t fw_supported_vd_count; u_int16_t fw_supported_pd_count; u_int16_t drv_supported_vd_count; u_int16_t drv_supported_pd_count; u_int32_t max_map_sz; u_int32_t current_map_sz; u_int32_t old_map_sz; u_int32_t new_map_sz; u_int32_t drv_map_sz; /* Non dma-able memory. Driver local copy. */ MR_DRV_RAID_MAP_ALL *ld_drv_map[2]; }; /* Compatibility shims for different OS versions */ #if __FreeBSD_version >= 800001 #define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define mrsas_kproc_exit(arg) kproc_exit(arg) #else #define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define mrsas_kproc_exit(arg) kthread_exit(arg) #endif static __inline void mrsas_clear_bit(int b, volatile void *p) { atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f)); } static __inline void mrsas_set_bit(int b, volatile void *p) { atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f)); } static __inline int mrsas_test_bit(int b, volatile void *p) { return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f)); } #endif /* MRSAS_H */ Index: stable/10/sys/dev/mrsas/mrsas_cam.c =================================================================== --- stable/10/sys/dev/mrsas/mrsas_cam.c (revision 300735) +++ stable/10/sys/dev/mrsas/mrsas_cam.c (revision 300736) @@ -1,1247 +1,1385 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "dev/mrsas/mrsas.h" #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for pcpu.h */ #include /* XXX for PCPU_GET */ #define smp_processor_id() PCPU_GET(cpuid) /* * Function prototypes */ int mrsas_cam_attach(struct mrsas_softc *sc); int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb); int mrsas_bus_scan(struct mrsas_softc *sc); int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb); int -mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, +mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb); int -mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, - union ccb *ccb, struct cam_sim *sim); +mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, + union ccb *ccb); int +mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, + union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible); +int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb, u_int32_t device_id, MRSAS_RAID_SCSI_IO_REQUEST * io_request); void mrsas_xpt_freeze(struct mrsas_softc *sc); void mrsas_xpt_release(struct mrsas_softc *sc); void mrsas_cam_detach(struct mrsas_softc *sc); void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi); void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, u_int32_t ld_block_size); static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim); static void mrsas_cam_poll(struct cam_sim *sim); static void mrsas_action(struct cam_sim *sim, union ccb *ccb); static void mrsas_scsiio_timeout(void *data); static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, union ccb *ccb); struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map, struct mrsas_softc *sc); extern void mrsas_isr(void *arg); extern void mrsas_aen_handler(struct mrsas_softc *sc); extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map); extern 
u_int16_t mrsas_get_updated_dev_handle(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm, u_int64_t block, u_int32_t count); +extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); /* * mrsas_cam_attach: Main entry to CAM subsystem * input: Adapter instance soft state * * This function is called from mrsas_attach() during initialization to perform * SIM allocations and XPT bus registration. If the kernel version is 7.4 or * earlier, it would also initiate a bus scan. */ int mrsas_cam_attach(struct mrsas_softc *sc) { struct cam_devq *devq; int mrsas_cam_depth; mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS; if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) { device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n"); return (ENOMEM); } /* * Create SIM for bus 0 and register, also create path */ sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc, device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth, mrsas_cam_depth, devq); if (sc->sim_0 == NULL) { cam_simq_free(devq); device_printf(sc->mrsas_dev, "Cannot register SIM\n"); return (ENXIO); } /* Initialize taskqueue for Event Handling */ TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc); sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->ev_tq); /* Run the task queue with lowest priority */ taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq", device_get_nameunit(sc->mrsas_dev)); mtx_lock(&sc->sim_lock); if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) { cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */ mtx_unlock(&sc->sim_lock); return (ENXIO); } if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->sim_0)); cam_sim_free(sc->sim_0, TRUE); /* passing true will free the * devq */ mtx_unlock(&sc->sim_lock); return (ENXIO); } mtx_unlock(&sc->sim_lock); /* * Create SIM for bus 1 and register, also create path */ sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc, device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth, mrsas_cam_depth, devq); if (sc->sim_1 == NULL) { cam_simq_free(devq); device_printf(sc->mrsas_dev, "Cannot register SIM\n"); return (ENXIO); } mtx_lock(&sc->sim_lock); if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) { cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */ mtx_unlock(&sc->sim_lock); return (ENXIO); } if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(sc->sim_1)); cam_sim_free(sc->sim_1, TRUE); mtx_unlock(&sc->sim_lock); return (ENXIO); } mtx_unlock(&sc->sim_lock); #if (__FreeBSD_version <= 704000) if (mrsas_bus_scan(sc)) { device_printf(sc->mrsas_dev, "Error in bus scan.\n"); return (1); } #endif return (0); } /* * mrsas_cam_detach: De-allocates and teardown CAM * input: Adapter instance soft state * * De-registers and frees the paths and SIMs. 
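Editor's worked example for the SIM queue sizing in mrsas_cam_attach() above (the firmware figure is hypothetical; max_fw_cmds comes from the controller):

	/* e.g. firmware reports max_fw_cmds = 1008 */
	mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;	/* 1008 - 32 = 976 */
	devq = cam_simq_alloc(mrsas_cam_depth);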
*/ void mrsas_cam_detach(struct mrsas_softc *sc) { if (sc->ev_tq != NULL) taskqueue_free(sc->ev_tq); mtx_lock(&sc->sim_lock); if (sc->path_0) xpt_free_path(sc->path_0); if (sc->sim_0) { xpt_bus_deregister(cam_sim_path(sc->sim_0)); cam_sim_free(sc->sim_0, FALSE); } if (sc->path_1) xpt_free_path(sc->path_1); if (sc->sim_1) { xpt_bus_deregister(cam_sim_path(sc->sim_1)); cam_sim_free(sc->sim_1, TRUE); } mtx_unlock(&sc->sim_lock); } /* * mrsas_action: SIM callback entry point * input: pointer to SIM pointer to CAM Control Block * * This function processes CAM subsystem requests. The type of request is stored * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier. */ static void mrsas_action(struct cam_sim *sim, union ccb *ccb) { struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim); struct ccb_hdr *ccb_h = &(ccb->ccb_h); u_int32_t device_id; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { device_id = ccb_h->target_id; /* * bus 0 is LD, bus 1 is for system-PD */ if (cam_sim_bus(sim) == 1 && sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) { ccb->ccb_h.status |= CAM_DEV_NOT_THERE; xpt_done(ccb); } else { if (mrsas_startio(sc, sim, ccb)) { ccb->ccb_h.status |= CAM_REQ_INVALID; xpt_done(ccb); } } break; } case XPT_ABORT: { ccb->ccb_h.status = CAM_UA_ABORT; xpt_done(ccb); break; } case XPT_RESET_BUS: { xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: { ccb->cts.protocol = PROTO_SCSI; ccb->cts.protocol_version = SCSI_REV_2; ccb->cts.transport = XPORT_SPI; ccb->cts.transport_version = 2; ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC; ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; } case XPT_PATH_INQ: { ccb->cpi.version_num = 1; ccb->cpi.hba_inquiry = 0; ccb->cpi.target_sprt = 0; #if (__FreeBSD_version >= 902001) ccb->cpi.hba_misc = PIM_UNMAPPED; #else ccb->cpi.hba_misc = 0; #endif ccb->cpi.hba_eng_cnt = 0; ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS; ccb->cpi.unit_number = cam_sim_unit(sim); ccb->cpi.bus_id = cam_sim_bus(sim); ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID; ccb->cpi.base_transfer_speed = 150000; strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN); strncpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN); strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN); ccb->cpi.transport = XPORT_SPI; ccb->cpi.transport_version = 2; ccb->cpi.protocol = PROTO_SCSI; ccb->cpi.protocol_version = SCSI_REV_2; if (ccb->cpi.bus_id == 0) ccb->cpi.max_target = MRSAS_MAX_PD - 1; else ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1; #if (__FreeBSD_version > 704000) - ccb->cpi.maxio = MRSAS_MAX_IO_SIZE; + ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE; #endif ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } } /* * mrsas_scsiio_timeout: Callback function for IO timed out * input: mpt command context * * This function executes after the timeout value provided by the ccb header * from the CAM layer, if the timer expires. The driver runs this timer for all * DCMD and LDIO commands coming from the CAM layer. This is the callback * function for IO timeout and it runs in no-sleep context.
Set do_timedout_reset in Adapter context * so that it will execute OCR/Kill adapter from ocr_thread context. */ static void mrsas_scsiio_timeout(void *data) { struct mrsas_mpt_cmd *cmd; struct mrsas_softc *sc; cmd = (struct mrsas_mpt_cmd *)data; sc = cmd->sc; if (cmd->ccb_ptr == NULL) { printf("command timeout with NULL ccb\n"); return; } /* * The callout below is a dummy entry so that it will be cancelled from * mrsas_cmd_done(). Now Controller will go to OCR/Kill Adapter based * on OCR enable/disable property of Controller from ocr_thread * context. */ #if (__FreeBSD_version >= 1000510) callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0, mrsas_scsiio_timeout, cmd, 0); #else callout_reset(&cmd->cm_callout, (600000 * hz) / 1000, mrsas_scsiio_timeout, cmd); #endif - sc->do_timedout_reset = 1; + sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR; if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); } /* * mrsas_startio: SCSI IO entry point * input: Adapter instance soft state * pointer to CAM Control Block * * This function is the SCSI IO entry point and it initiates IO processing. It * copies the IO and, depending on whether it is a read/write LDIO, a * non-read/write LDIO, or a system PD IO, calls mrsas_build_ldio_rw(), * mrsas_build_ldio_nonrw(), or mrsas_build_syspdio(), respectively. It returns 0 * if the command is sent to firmware successfully, otherwise it returns 1. */ static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, union ccb *ccb) { struct mrsas_mpt_cmd *cmd; struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; + u_int8_t cmd_type; if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return (0); } ccb_h->status |= CAM_SIM_QUEUED; cmd = mrsas_get_mpt_cmd(sc); if (!cmd) { ccb_h->status |= CAM_REQUEUE_REQ; xpt_done(ccb); return (0); } if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if (ccb_h->flags & CAM_DIR_IN) cmd->flags |= MRSAS_DIR_IN; if (ccb_h->flags & CAM_DIR_OUT) cmd->flags |= MRSAS_DIR_OUT; } else cmd->flags = MRSAS_DIR_NONE; /* no data */ /* For FreeBSD 9.2 and higher */ #if (__FreeBSD_version >= 902001) /* * XXX We don't yet support physical addresses here.
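Editor's note: the expression sc->max_num_sge * MRSAS_PAGE_SIZE, which replaces the fixed MRSAS_MAX_IO_SIZE cap throughout this change, ties the largest accepted transfer to the SGE budget negotiated with firmware. A worked example mirroring the XPT_PATH_INQ assignment (assuming MRSAS_PAGE_SIZE is 4 KiB):

	/* e.g. max_num_sge = 256 SGEs, one 4 KiB page each */
	ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;	/* 256 * 4096 = 1 MiB */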
*/ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n", __func__); mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; goto done; case CAM_DATA_SG: device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n", __func__); mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; goto done; case CAM_DATA_VADDR: - if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) { + if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_TOO_BIG; goto done; } cmd->length = csio->dxfer_len; if (cmd->length) cmd->data = csio->data_ptr; break; case CAM_DATA_BIO: + if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { + mrsas_release_mpt_cmd(cmd); + ccb_h->status = CAM_REQ_TOO_BIG; + goto done; + } cmd->length = csio->dxfer_len; if (cmd->length) cmd->data = csio->data_ptr; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; goto done; } #else if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */ if (!(ccb_h->flags & CAM_SCATTER_VALID)) { - if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) { + if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_TOO_BIG; goto done; } cmd->length = csio->dxfer_len; if (cmd->length) cmd->data = csio->data_ptr; } else { mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; goto done; } } else { /* Data addresses are physical. */ mrsas_release_mpt_cmd(cmd); ccb_h->status = CAM_REQ_INVALID; ccb_h->status &= ~CAM_SIM_QUEUED; goto done; } #endif /* save ccb ptr */ cmd->ccb_ptr = ccb; req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n"); return (FAIL); } memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION)); cmd->request_desc = req_desc; if (ccb_h->flags & CAM_CDB_POINTER) bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len); mtx_lock(&sc->raidmap_lock); /* Check for IO type READ-WRITE targeted for Logical Volume */ - if (mrsas_find_io_type(sim, ccb) == READ_WRITE_LDIO) { + cmd_type = mrsas_find_io_type(sim, ccb); + switch (cmd_type) { + case READ_WRITE_LDIO: /* Build READ-WRITE IO for Logical Volume */ - if (mrsas_build_ldio(sc, cmd, ccb)) { - device_printf(sc->mrsas_dev, "Build LDIO failed.\n"); + if (mrsas_build_ldio_rw(sc, cmd, ccb)) { + device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n"); mtx_unlock(&sc->raidmap_lock); return (1); } - } else { - if (mrsas_build_dcdb(sc, cmd, ccb, sim)) { - device_printf(sc->mrsas_dev, "Build DCDB failed.\n"); + break; + case NON_READ_WRITE_LDIO: + /* Build NON READ-WRITE IO for Logical Volume */ + if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) { + device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n"); mtx_unlock(&sc->raidmap_lock); return (1); } + break; + case READ_WRITE_SYSPDIO: + case NON_READ_WRITE_SYSPDIO: + if (sc->secure_jbod_support && + (cmd_type == NON_READ_WRITE_SYSPDIO)) { + /* Build NON-RW IO for JBOD */ + if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) { + device_printf(sc->mrsas_dev, + "Build SYSPDIO failed.\n"); + mtx_unlock(&sc->raidmap_lock); + return (1); + } + } else { + /* Build RW IO for JBOD */ + if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) { + device_printf(sc->mrsas_dev, + "Build SYSPDIO failed.\n"); + mtx_unlock(&sc->raidmap_lock); + return (1); + } 
+ } } mtx_unlock(&sc->raidmap_lock); if (cmd->flags == MRSAS_DIR_IN) /* from device */ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ; else if (cmd->flags == MRSAS_DIR_OUT) /* to device */ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr; cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE; req_desc = cmd->request_desc; req_desc->SCSIIO.SMID = cmd->index; /* * Start timer for IO timeout. The timer here is armed for 600 seconds. */ #if (__FreeBSD_version >= 1000510) callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0, mrsas_scsiio_timeout, cmd, 0); #else callout_reset(&cmd->cm_callout, (600000 * hz) / 1000, mrsas_scsiio_timeout, cmd); #endif mrsas_atomic_inc(&sc->fw_outstanding); if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater) sc->io_cmds_highwater++; mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return (0); done: xpt_done(ccb); return (0); } /* * mrsas_find_io_type: Determines the type of IO * input: pointer to CAM Control Block * * This function classifies the IO as read/write or non-read/write, for the * logical-drive (bus 0) or system PD (bus 1) bus, and returns the matching * MR_SCSI_CMD_TYPE value. */ int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb) { struct ccb_scsiio *csio = &(ccb->csio); switch (csio->cdb_io.cdb_bytes[0]) { case READ_10: case WRITE_10: case READ_12: case WRITE_12: case READ_6: case WRITE_6: case READ_16: case WRITE_16: return (cam_sim_bus(sim) ? READ_WRITE_SYSPDIO : READ_WRITE_LDIO); default: return (cam_sim_bus(sim) ? NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO); } } /* * mrsas_get_mpt_cmd: Get a cmd from free command pool * input: Adapter instance soft state * * This function removes an MPT command from the command free list and * initializes it. */ struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc) { struct mrsas_mpt_cmd *cmd = NULL; mtx_lock(&sc->mpt_cmd_pool_lock); if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) { cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head); TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next); + } else { + goto out; } + memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); cmd->data = NULL; cmd->length = 0; cmd->flags = 0; cmd->error_code = 0; cmd->load_balance = 0; cmd->ccb_ptr = NULL; - mtx_unlock(&sc->mpt_cmd_pool_lock); +out: + mtx_unlock(&sc->mpt_cmd_pool_lock); return cmd; } /* * mrsas_release_mpt_cmd: Return a cmd to free command pool * input: Command packet for return to free command pool * * This function returns an MPT command to the free command list. */ void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd) { struct mrsas_softc *sc = cmd->sc; mtx_lock(&sc->mpt_cmd_pool_lock); cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); mtx_unlock(&sc->mpt_cmd_pool_lock); return; } /* * mrsas_get_request_desc: Get request descriptor from array * input: Adapter instance soft state * SMID index * * This function returns a pointer to the request descriptor.
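Editor's sketch of the typical lifetime of an MPT command around the pool helpers above (error handling elided; in the IO path the release actually happens at completion time):

	struct mrsas_mpt_cmd *cmd;

	cmd = mrsas_get_mpt_cmd(sc);	/* NULL when the free list is empty */
	if (cmd == NULL)
		return (ENOMEM);	/* caller requeues the CCB */
	/* ... build cmd->io_request, fire it, wait for completion ... */
	mrsas_release_mpt_cmd(cmd);	/* back on the free list */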
*/ MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index) { u_int8_t *p; if (index >= sc->max_fw_cmds) { device_printf(sc->mrsas_dev, "Invalid SMID (0x%x) request for desc\n", index); return NULL; } p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index; return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p; } /* - * mrsas_build_ldio: Builds an LDIO command + * mrsas_build_ldio_rw: Builds an LDIO command * input: Adapter instance soft state * Pointer to command packet * Pointer to CCB * * This function builds the LDIO command packet. It returns 0 if the command is * built successfully, otherwise it returns a 1. */ int -mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, +mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb) { struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); u_int32_t device_id; MRSAS_RAID_SCSI_IO_REQUEST *io_request; device_id = ccb_h->target_id; io_request = cmd->io_request; io_request->RaidContext.VirtualDiskTgtId = device_id; io_request->RaidContext.status = 0; io_request->RaidContext.exStatus = 0; /* just the cdb len, other flags zero, and ORed-in later for FP */ io_request->IoFlags = csio->cdb_len; if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS) device_printf(sc->mrsas_dev, "Build ldio or fpio error\n"); io_request->DataLength = cmd->length; if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { - if (cmd->sge_count > MRSAS_MAX_SGL) { + if (cmd->sge_count > sc->max_num_sge) { device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); return (FAIL); } + /* + * numSGE stores the lower 8 bits of sge_count; numSGEExt stores + * the upper 8 bits of sge_count. + */ io_request->RaidContext.numSGE = cmd->sge_count; + io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8); + } else { device_printf(sc->mrsas_dev, "Data map/load failed.\n"); return (FAIL); } return (0); } /* * mrsas_setup_io: Set up data including Fast Path I/O * input: Adapter instance soft state * Pointer to command packet * Pointer to CCB * * This function sets up the IO request data, including the Fast Path I/O * decision. It returns 0 if the command is set up successfully, otherwise it * returns a 1.
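Editor's worked example of the numSGE/numSGEExt split introduced above, needed because the larger IO sizes can push the SGE count past what the original 8-bit RaidContext field holds:

	/*
	 * sge_count = 0x153 (339 SGEs)
	 *   RaidContext.numSGE    = 0x153 & 0xFF        = 0x53
	 *   RaidContext.numSGEExt = (0x153 >> 8) & 0xFF = 0x01
	 * firmware presumably recombines them as (numSGEExt << 8) | numSGE
	 */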
*/ int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb, u_int32_t device_id, MRSAS_RAID_SCSI_IO_REQUEST * io_request) { struct ccb_hdr *ccb_h = &(ccb->ccb_h); struct ccb_scsiio *csio = &(ccb->csio); struct IO_REQUEST_INFO io_info; MR_DRV_RAID_MAP_ALL *map_ptr; u_int8_t fp_possible; u_int32_t start_lba_hi, start_lba_lo, ld_block_size; u_int32_t datalength = 0; start_lba_lo = 0; start_lba_hi = 0; fp_possible = 0; /* * READ_6 (0x08) or WRITE_6 (0x0A) cdb */ if (csio->cdb_len == 6) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4]; start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) | (u_int32_t)csio->cdb_io.cdb_bytes[3]; start_lba_lo &= 0x1FFFFF; } /* * READ_10 (0x28) or WRITE_10 (0x2A) cdb */ else if (csio->cdb_len == 10) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8); start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } /* * READ_12 (0xA8) or WRITE_12 (0xAA) cdb */ else if (csio->cdb_len == 12) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) | ((u_int32_t)csio->cdb_io.cdb_bytes[9]); start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } /* * READ_16 (0x88) or WRITE_16 (0x8A) cdb */ else if (csio->cdb_len == 16) { datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 | ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) | ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) | ((u_int32_t)csio->cdb_io.cdb_bytes[13]); start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[9]); start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | ((u_int32_t)csio->cdb_io.cdb_bytes[5]); } memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; switch (ccb_h->flags & CAM_DIR_MASK) { case CAM_DIR_IN: io_info.isRead = 1; break; case CAM_DIR_OUT: io_info.isRead = 0; break; case CAM_DIR_NONE: default: mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK); break; } map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc); if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) { io_request->RaidContext.regLockFlags = 0; fp_possible = 0; } else { if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr)) fp_possible = io_info.fpOkForIo; } cmd->request_desc->SCSIIO.MSIxIndex = sc->msix_vectors ?
smp_processor_id() % sc->msix_vectors : 0; if (fp_possible) { mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr, start_lba_lo, ld_block_size); io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.Type = MPI2_TYPE_CUDA; io_request->RaidContext.nseg = 0x1; io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); } if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) { io_info.devHandle = mrsas_get_updated_dev_handle(sc, &sc->load_balance_info[device_id], &io_info); cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; } else cmd->load_balance = 0; cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; } else { /* Not FP IO */ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.Type = MPI2_TYPE_CUDA; io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); io_request->RaidContext.nseg = 0x1; } io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = device_id; } return (0); } /* - * mrsas_build_dcdb: Builds an DCDB command + * mrsas_build_ldio_nonrw: Builds an LDIO command * input: Adapter instance soft state * Pointer to command packet * Pointer to CCB * - * This function builds the DCDB inquiry command. It returns 0 if the command - * is built successfully, otherwise it returns a 1. + * This function builds the LDIO command packet. It returns 0 if the command is + * built successfully, otherwise it returns a 1. 
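Editor's worked example for the CDB decode in mrsas_setup_io() above, with hypothetical bytes: a READ_10 whose cdb_bytes[2..5] are 00 12 34 56 and whose cdb_bytes[7..8] are 00 08 yields

	start_lba_lo = (0x00 << 24) | (0x12 << 16) | (0x34 << 8) | 0x56;	/* 0x00123456 */
	datalength = (0x00 << 8) | 0x08;					/* 8 blocks */
	io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;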
*/ int -mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, - union ccb *ccb, struct cam_sim *sim) +mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, + union ccb *ccb) { struct ccb_hdr *ccb_h = &(ccb->ccb_h); u_int32_t device_id; - MR_DRV_RAID_MAP_ALL *map_ptr; MRSAS_RAID_SCSI_IO_REQUEST *io_request; io_request = cmd->io_request; device_id = ccb_h->target_id; - map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; - /* - * Check if this is RW for system PD or - * it's a NON RW for sys PD and there is NO secure jbod FW support - */ - if (cam_sim_bus(sim) == 1 && - sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) { + /* FW path for LD Non-RW (SCSI management commands) */ + io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; + io_request->DevHandle = device_id; + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->DevHandle = - map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; - io_request->RaidContext.RAIDFlags = - MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << - MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; - cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - cmd->request_desc->SCSIIO.MSIxIndex = - sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0; + io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->LUN[1] = ccb_h->target_lun & 0xF; + io_request->DataLength = cmd->length; - if (sc->secure_jbod_support && (mrsas_find_io_type(sim, ccb) == NON_READ_WRITE_SYSPDIO)) { - /* system pd firmware path */ - io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; - cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - } else { - /* system pd fast path */ - io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; - io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; - io_request->RaidContext.regLockFlags = 0; - io_request->RaidContext.regLockRowLBA = 0; - io_request->RaidContext.regLockLength = 0; + if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { + if (cmd->sge_count > sc->max_num_sge) { + device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds " "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); + return (1); + } + /* + * numSGE stores the lower 8 bits of sge_count; numSGEExt stores + * the higher 8 bits of sge_count. + */ + io_request->RaidContext.numSGE = cmd->sge_count; + io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8); + } else { + device_printf(sc->mrsas_dev, "Data map/load failed.\n"); + return (1); + } + return (0); +} - cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << - MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); +/* + * mrsas_build_syspdio: Builds a DCDB command + * input: Adapter instance soft state + * Pointer to command packet + * Pointer to CCB + * + * This function builds the DCDB inquiry command. It returns 0 if the command + * is built successfully, otherwise it returns a 1.
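 *
 * [Editor's note -- illustration only, not part of this commit.] Both the
 * driver RAID map and the new JBOD sequence-number map are double
 * buffered, and the low bit of a monotonically increasing map id selects
 * the copy that is currently valid, as the body below does:
 *
 *	local_map_ptr = sc->ld_drv_map[sc->map_id & 1];
 *	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
 *
 * pd_seq_map_id appears to be bumped after each successful sync (see
 * megasas_sync_pd_seq_num()), so the "- 1" selects the copy from the
 * most recently completed sync.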
+ */ +int +mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, + union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible) +{ + struct ccb_hdr *ccb_h = &(ccb->ccb_h); + u_int32_t device_id; + MR_DRV_RAID_MAP_ALL *local_map_ptr; + MRSAS_RAID_SCSI_IO_REQUEST *io_request; + struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; - /* - * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH - * Because the NON RW cmds will now go via FW Queue - * and not the Exception queue - */ - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) - io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; - } + pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1]; + + io_request = cmd->io_request; + device_id = ccb_h->target_id; + local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; + io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; + io_request->RaidContext.regLockFlags = 0; + io_request->RaidContext.regLockRowLBA = 0; + io_request->RaidContext.regLockLength = 0; + + /* If FW supports PD sequence number */ + if (sc->use_seqnum_jbod_fp && + sc->pd_list[device_id].driveType == 0x00) { + //printf("Using Drv seq num\n"); + io_request->RaidContext.VirtualDiskTgtId = device_id + 255; + io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum; + io_request->DevHandle = pd_sync->seq[device_id].devHandle; + io_request->RaidContext.regLockFlags |= + (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA); + io_request->RaidContext.Type = MPI2_TYPE_CUDA; + io_request->RaidContext.nseg = 0x1; + } else if (sc->fast_path_io) { + //printf("Using LD RAID map\n"); + io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->RaidContext.configSeqNum = 0; + local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; + io_request->DevHandle = + local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; } else { - /* FW path for SysPD or LD Non-RW (SCSI management commands) */ + //printf("Using FW PATH\n"); + /* Want to send all IO via FW path */ + io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->RaidContext.configSeqNum = 0; + io_request->DevHandle = 0xFFFF; + } + + cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; + cmd->request_desc->SCSIIO.MSIxIndex = + sc->msix_vectors ? 
smp_processor_id() % sc->msix_vectors : 0; + + if (!fp_possible) { + /* system pd firmware path */ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + io_request->RaidContext.timeoutValue = + local_map_ptr->raidMap.fpPdIoTimeoutSec; + io_request->RaidContext.VirtualDiskTgtId = device_id; + } else { + /* system pd fast path */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec; + + /* + * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH + * Because the NON RW cmds will now go via FW Queue + * and not the Exception queue + */ + io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } - io_request->RaidContext.VirtualDiskTgtId = device_id; io_request->LUN[1] = ccb_h->target_lun & 0xF; io_request->DataLength = cmd->length; if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { if (cmd->sge_count > sc->max_num_sge) { device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds " "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); return (1); } + /* + * numSGE stores the lower 8 bits of sge_count; numSGEExt stores + * the higher 8 bits of sge_count. + */ io_request->RaidContext.numSGE = cmd->sge_count; + io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8); } else { device_printf(sc->mrsas_dev, "Data map/load failed.\n"); return (1); } return (0); } /* * mrsas_map_request: Map and load data * input: Adapter instance soft state * Pointer to command packet * * For data from OS, map and load the data buffer into bus space. The SG list * is built in the callback. If the bus dmamap load is not successful, * cmd->error_code will contain the error code and a 1 is returned. */ int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, union ccb *ccb) { u_int32_t retcode = 0; struct cam_sim *sim; sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path); if (cmd->data != NULL) { /* Map data buffer into bus space */ mtx_lock(&sc->io_lock); #if (__FreeBSD_version >= 902001) retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb, mrsas_data_load_cb, cmd, 0); #else retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data, cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT); #endif mtx_unlock(&sc->io_lock); if (retcode) device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode); if (retcode == EINPROGRESS) { device_printf(sc->mrsas_dev, "request load in progress\n"); mrsas_freeze_simq(cmd, sim); } } if (cmd->error_code) return (1); return (retcode); } /* * mrsas_unmap_request: Unmap and unload data * input: Adapter instance soft state * Pointer to command packet * * This function unmaps and unloads data from the OS.
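 *
 * [Editor's note -- not part of this commit.] This is the teardown half
 * of the usual busdma protocol: mrsas_data_load_cb() below issues the
 * BUS_DMASYNC_PREREAD/PREWRITE syncs before the hardware touches the
 * buffer, and this routine issues the matching POSTREAD/POSTWRITE syncs
 * before the map is unloaded, e.g. for a read (device to host):
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);    before DMA
 *	... controller DMAs into the buffer ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);   after DMA
 *	bus_dmamap_unload(tag, map);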
*/ void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) { if (cmd->data != NULL) { if (cmd->flags & MRSAS_DIR_IN) bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD); if (cmd->flags & MRSAS_DIR_OUT) bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE); mtx_lock(&sc->io_lock); bus_dmamap_unload(sc->data_tag, cmd->data_dmamap); mtx_unlock(&sc->io_lock); } } /* * mrsas_data_load_cb: Callback entry point * input: Pointer to command packet as argument * Pointer to segment * Number of segments Error * * This is the callback function of the bus dma map load. It builds the SG * list. */ static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg; struct mrsas_softc *sc = cmd->sc; MRSAS_RAID_SCSI_IO_REQUEST *io_request; pMpi25IeeeSgeChain64_t sgl_ptr; int i = 0, sg_processed = 0; if (error) { cmd->error_code = error; device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error); if (error == EFBIG) { cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG; return; } } if (cmd->flags & MRSAS_DIR_IN) bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_PREREAD); if (cmd->flags & MRSAS_DIR_OUT) bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_PREWRITE); if (nseg > sc->max_num_sge) { device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n"); return; } io_request = cmd->io_request; sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr; sgl_ptr_end += sc->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } if (nseg != 0) { for (i = 0; i < nseg; i++) { sgl_ptr->Address = segs[i].ds_addr; sgl_ptr->Length = segs[i].ds_len; sgl_ptr->Flags = 0; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { if (i == nseg - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; } sgl_ptr++; sg_processed = i + 1; if ((sg_processed == (sc->max_sge_in_main_msg - 1)) && (nseg > sc->max_sge_in_main_msg)) { pMpi25IeeeSgeChain64_t sg_chain; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) { + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) { if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) cmd->io_request->ChainOffset = sc->chain_offset_io_request; else cmd->io_request->ChainOffset = 0; } else cmd->io_request->ChainOffset = sc->chain_offset_io_request; sg_chain = sgl_ptr; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)); sg_chain->Address = cmd->chain_frame_phys_addr; sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame; } } } 
cmd->sge_count = nseg; } /* * mrsas_freeze_simq: Freeze SIM queue * input: Pointer to command packet * Pointer to SIM * * This function freezes the sim queue. */ static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim) { union ccb *ccb = (union ccb *)(cmd->ccb_ptr); xpt_freeze_simq(sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ccb->ccb_h.status |= CAM_REQUEUE_REQ; } void mrsas_xpt_freeze(struct mrsas_softc *sc) { xpt_freeze_simq(sc->sim_0, 1); xpt_freeze_simq(sc->sim_1, 1); } void mrsas_xpt_release(struct mrsas_softc *sc) { xpt_release_simq(sc->sim_0, 1); xpt_release_simq(sc->sim_1, 1); } /* * mrsas_cmd_done: Perform remaining command completion * input: Adapter instance soft state Pointer to command packet * * This function calls unmap request and releases the MPT command. */ void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) { callout_stop(&cmd->cm_callout); mrsas_unmap_request(sc, cmd); mtx_lock(&sc->sim_lock); xpt_done(cmd->ccb_ptr); cmd->ccb_ptr = NULL; mtx_unlock(&sc->sim_lock); mrsas_release_mpt_cmd(cmd); } /* * mrsas_cam_poll: Polling entry point * input: Pointer to SIM * * This function polls every reply queue for completed commands. */ static void mrsas_cam_poll(struct cam_sim *sim) { + int i; struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim); - mrsas_isr((void *)sc); + if (sc->msix_vectors != 0){ + for (i = 0; i < sc->msix_vectors; i++){ + mrsas_complete_cmd(sc, i); + } + } else { + mrsas_complete_cmd(sc, 0); + } } /* * mrsas_bus_scan: Perform bus scan * input: Adapter instance soft state * * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not * be called in FreeBSD 8.x and later versions, where the bus scan is * automatic. */ int mrsas_bus_scan(struct mrsas_softc *sc) { union ccb *ccb_0; union ccb *ccb_1; if ((ccb_0 = xpt_alloc_ccb()) == NULL) { return (ENOMEM); } if ((ccb_1 = xpt_alloc_ccb()) == NULL) { xpt_free_ccb(ccb_0); return (ENOMEM); } mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); mtx_unlock(&sc->sim_lock); return (EIO); } if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb_0); xpt_free_ccb(ccb_1); mtx_unlock(&sc->sim_lock); return (EIO); } mtx_unlock(&sc->sim_lock); xpt_rescan(ccb_0); xpt_rescan(ccb_1); return (0); } /* * mrsas_bus_scan_sim: Perform bus scan per SIM * input: adapter instance soft state * * This function will be called from the event handler on LD creation/deletion * and JBOD on/off. */ int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) { return (ENOMEM); } mtx_lock(&sc->sim_lock); if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); mtx_unlock(&sc->sim_lock); return (EIO); } mtx_unlock(&sc->sim_lock); xpt_rescan(ccb); return (0); } Index: stable/10/sys/dev/mrsas/mrsas_fp.c =================================================================== --- stable/10/sys/dev/mrsas/mrsas_fp.c (revision 300735) +++ stable/10/sys/dev/mrsas/mrsas_fp.c (revision 300736) @@ -1,1554 +1,1557 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved.
Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. * * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include /* * Function prototypes */ u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc); u_int8_t mrsas_get_best_arm_pd(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); u_int16_t mrsas_get_updated_dev_handle(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor); u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error); u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor); void mrsas_update_load_balance_params(struct mrsas_softc *sc, MR_DRV_RAID_MAP_ALL * map, PLD_LOAD_BALANCE_INFO lbInfo); void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, u_int32_t ld_block_size); static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map); static u_int16_t MR_PdDevHandleGet(u_int32_t 
pd, MR_DRV_RAID_MAP_ALL * map); static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map); static MR_LD_SPAN * MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map); static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map); static MR_SPAN_BLOCK_INFO * MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); void MR_PopulateDrvRaidMap(struct mrsas_softc *sc); /* * Spanset related function prototypes Added for PRL11 configuration (Uneven * span support) */ void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo); static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map); static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map); static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row, u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error); static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map); /* * Spanset related defines Added for PRL11 configuration(Uneven span support) */ #define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize #define SPAN_ROW_DATA_SIZE(map_, ld, index_) \ MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize #define SPAN_INVALID 0xff #define SPAN_DEBUG 0 /* * Related Defines */ typedef u_int64_t REGION_KEY; typedef u_int32_t REGION_LEN; #define MR_LD_STATE_OPTIMAL 3 #define FALSE 0 #define TRUE 1 #define LB_PENDING_CMDS_DEFAULT 4 /* * Related Macros */ #define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) ) #define swap32(x) \ ((unsigned int)( \ (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \ (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \ (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \ (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) )) /* * In-line functions for mod and divide of 64-bit dividend and 32-bit * divisor. Assumes a check for a divisor of zero is not possible. * * @param dividend: Dividend * @param divisor: Divisor * @return remainder */ #define mega_mod64(dividend, divisor) ({ \ int remainder; \ remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \ remainder;}) #define mega_div64_32(dividend, divisor) ({ \ int quotient; \ quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \ quotient;}) /* * Various RAID map access functions. These functions access the various * parts of the RAID map and returns the appropriate parameters. 
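 *
 * [Editor's aside -- illustration only, not part of this commit.] The
 * mega_mod64()/mega_div64_32() macros above wrap the 64-bit-dividend /
 * 32-bit-divisor arithmetic that the span math below leans on, e.g.:
 *
 *	row    = mega_div64_32(stripRow, raid->rowDataSize);   quotient
 *	logArm = mega_mod64(stripRow, raid->rowDataSize);      remainder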
*/ MR_LD_RAID * MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map) { return (&map->raidMap.ldSpanMap[ld].ldRaid); } u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map) { return (map->raidMap.ldSpanMap[ld].ldRaid.targetId); } static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map) { return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; } static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_DRV_RAID_MAP_ALL * map) { return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; } static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map) { return map->raidMap.arMapInfo[ar].pd[arm]; } static MR_LD_SPAN * MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_DRV_RAID_MAP_ALL * map) { return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; } static MR_SPAN_BLOCK_INFO * MR_LdSpanInfoGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map) { return &map->raidMap.ldSpanMap[ld].spanBlock[0]; } u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid; u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE; ld = MR_TargetIdToLdGet(ldTgtId, map); /* * Check if logical drive was removed. */ if (ld >= MAX_LOGICAL_DRIVES) return ldBlockSize; raid = MR_LdRaidGet(ld, map); ldBlockSize = raid->logicalBlockLength; if (!ldBlockSize) ldBlockSize = MRSAS_SCSIBLOCKSIZE; return ldBlockSize; } /* * This function will Populate Driver Map using firmware raid map */ void MR_PopulateDrvRaidMap(struct mrsas_softc *sc) { MR_FW_RAID_MAP_ALL *fw_map_old = NULL; MR_FW_RAID_MAP *pFwRaidMap = NULL; unsigned int i; MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)]; MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; if (sc->max256vdSupport) { memcpy(sc->ld_drv_map[sc->map_id & 1], sc->raidmap_mem[sc->map_id & 1], sc->current_map_sz); /* * New Raid map will not set totalSize, so keep expected * value for legacy code in ValidateMapInfo */ pDrvRaidMap->totalSize = sizeof(MR_FW_RAID_MAP_EXT); } else { fw_map_old = (MR_FW_RAID_MAP_ALL *) sc->raidmap_mem[(sc->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; #if VD_EXT_DEBUG for (i = 0; i < pFwRaidMap->ldCount; i++) { device_printf(sc->mrsas_dev, "Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%lx\n", i, fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId, fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum, fw_map_old->raidMap.ldSpanMap[i].ldRaid.size); } #endif memset(drv_map, 0, sc->drv_map_sz); pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = pFwRaidMap->ldCount; pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) { pDrvRaidMap->ldTgtIdToLd[i] = (u_int8_t)pFwRaidMap->ldTgtIdToLd[i]; } for (i = 0; i < pDrvRaidMap->ldCount; i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; #if VD_EXT_DEBUG device_printf(sc->mrsas_dev, "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i, pFwRaidMap->ldSpanMap[i].ldRaid.targetId, pFwRaidMap->ldSpanMap[i].ldRaid.seqNum, (u_int32_t)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize); device_printf(sc->mrsas_dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x" "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n", i, i, 
pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum, (u_int32_t)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); device_printf(sc->mrsas_dev, "drv raid map all %p raid map %p LD RAID MAP %p/%p\n", drv_map, pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid, &pDrvRaidMap->ldSpanMap[i].ldRaid); #endif } memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, sizeof(MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo, sizeof(MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } } /* * MR_ValidateMapInfo: Validate RAID map * input: Adapter instance soft state * * This function checks and validates the loaded RAID map. It returns 0 if * successful, and 1 otherwise. */ u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc) { if (!sc) { return 1; } MR_PopulateDrvRaidMap(sc); MR_DRV_RAID_MAP_ALL *drv_map = sc->ld_drv_map[(sc->map_id & 1)]; MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; u_int32_t expected_map_size; drv_map = sc->ld_drv_map[(sc->map_id & 1)]; pDrvRaidMap = &drv_map->raidMap; PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) & sc->log_to_span; if (sc->max256vdSupport) expected_map_size = sizeof(MR_FW_RAID_MAP_EXT); else expected_map_size = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP)) + (sizeof(MR_LD_SPAN_MAP) * pDrvRaidMap->ldCount); if (pDrvRaidMap->totalSize != expected_map_size) { device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", expected_map_size); device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP)); device_printf(sc->mrsas_dev, "pDrvRaidMap->totalSize=%x\n", pDrvRaidMap->totalSize); return 1; } if (sc->UnevenSpanSupport) { mr_update_span_set(drv_map, ldSpanInfo); } mrsas_update_load_balance_params(sc, drv_map, sc->load_balance_info); return 0; } /* * * Function to print info about span set created in driver from FW raid map * * Inputs: map * ldSpanInfo: ld map span info per HBA instance * * */ #if SPAN_DEBUG static int getSpanInfo(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo) { u_int8_t span; u_int32_t element; MR_LD_RAID *raid; LD_SPAN_SET *span_set; MR_QUAD_ELEMENT *quad; int ldCount; u_int16_t ld; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); if (ld >= MAX_LOGICAL_DRIVES) { continue; } raid = MR_LdRaidGet(ld, map); printf("LD %x: span_depth=%x\n", ld, raid->spanDepth); for (span = 0; span < raid->spanDepth; span++) printf("Span=%x, number of quads=%x\n", span, map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements); for (element = 0; element < MAX_QUAD_DEPTH; element++) { span_set = &(ldSpanInfo[ld].span_set[element]); if (span_set->span_row_data_width == 0) break; printf("Span Set %x: width=%x, diff=%x\n", element, (unsigned int)span_set->span_row_data_width, (unsigned int)span_set->diff); printf("logical LBA start=0x%08lx, end=0x%08lx\n", (long unsigned int)span_set->log_start_lba, (long unsigned int)span_set->log_end_lba); printf("span row start=0x%08lx, end=0x%08lx\n", (long unsigned int)span_set->span_row_start, (long unsigned int)span_set->span_row_end); printf("data row start=0x%08lx, end=0x%08lx\n", (long unsigned int)span_set->data_row_start, (long unsigned int)span_set->data_row_end); printf("data strip start=0x%08lx, end=0x%08lx\n", (long unsigned int)span_set->data_strip_start, (long unsigned int)span_set->data_strip_end); for (span = 0; span < raid->spanDepth; span++) { if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 
block_span_info.noElements >= element + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info. quad[element]; printf("Span=%x, Quad=%x, diff=%x\n", span, element, quad->diff); printf("offset_in_span=0x%08lx\n", (long unsigned int)quad->offsetInSpan); printf("logical start=0x%08lx, end=0x%08lx\n", (long unsigned int)quad->logStart, (long unsigned int)quad->logEnd); } } } } return 0; } #endif /* * * This routine calculates the Span block for given row using spanset. * * Inputs : HBA instance * ld: Logical drive number * row: Row number * map: LD map * * Outputs : span - Span number block * - Absolute Block number in the physical disk * div_error - Divide error code. */ u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row, u_int64_t *span_blk, MR_DRV_RAID_MAP_ALL * map, int *div_error) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; MR_QUAD_ELEMENT *quad; u_int32_t span, info; PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (row > span_set->data_row_end) continue; for (span = 0; span < raid->spanDepth; span++) if (map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements >= info + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span]. block_span_info.quad[info]; if (quad->diff == 0) { *div_error = 1; return span; } if (quad->logStart <= row && row <= quad->logEnd && (mega_mod64(row - quad->logStart, quad->diff)) == 0) { if (span_blk != NULL) { u_int64_t blk; blk = mega_div64_32 ((row - quad->logStart), quad->diff); blk = (blk + quad->offsetInSpan) << raid->stripeShift; *span_blk = blk; } return span; } } } return SPAN_INVALID; } /* * * This routine calculates the row for given strip using spanset. * * Inputs : HBA instance * ld: Logical drive number * Strip: Strip * map: LD map * * Outputs : row - row associated with strip */ static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; u_int32_t info, strip_offset, span, span_offset; u_int64_t span_set_Strip, span_set_Row; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (strip > span_set->data_strip_end) continue; span_set_Strip = strip - span_set->data_strip_start; strip_offset = mega_mod64(span_set_Strip, span_set->span_row_data_width); span_set_Row = mega_div64_32(span_set_Strip, span_set->span_row_data_width) * span_set->diff; for (span = 0, span_offset = 0; span < raid->spanDepth; span++) if (map->raidMap.ldSpanMap[ld].spanBlock[span].
block_span_info.noElements >= info + 1) { if (strip_offset >= span_set->strip_offset[span]) span_offset++; else break; } mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx " "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip, (unsigned long long)span_set_Strip, (unsigned long long)span_set_Row, (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset); mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip, (unsigned long long)span_set->data_row_start + (unsigned long long)span_set_Row + (span_offset - 1)); return (span_set->data_row_start + span_set_Row + (span_offset - 1)); } return -1LLU; } /* * * This routine calculates the Start Strip for given row using spanset. * * Inputs: HBA instance * ld: Logical drive number * row: Row number * map: LD map * * Outputs : Strip - Start strip associated with row */ static u_int64_t get_strip_from_row(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; MR_QUAD_ELEMENT *quad; PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; u_int32_t span, info; u_int64_t strip; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (row > span_set->data_row_end) continue; for (span = 0; span < raid->spanDepth; span++) if (map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements >= info + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[info]; if (quad->logStart <= row && row <= quad->logEnd && mega_mod64((row - quad->logStart), quad->diff) == 0) { strip = mega_div64_32 (((row - span_set->data_row_start) - quad->logStart), quad->diff); strip *= span_set->span_row_data_width; strip += span_set->data_strip_start; strip += span_set->strip_offset[span]; return strip; } } } mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug - get_strip_from_row: returns invalid " "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row); return -1; } /* * ***************************************************************************** * * * This routine calculates the Physical Arm for given strip using spanset. * * Inputs : HBA instance * Logical drive number * Strip * LD map * * Outputs : Phys Arm - Phys Arm associated with strip */ static u_int32_t get_arm_from_strip(struct mrsas_softc *sc, u_int32_t ld, u_int64_t strip, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); LD_SPAN_SET *span_set; PLD_SPAN_INFO ldSpanInfo = sc->log_to_span; u_int32_t info, strip_offset, span, span_offset; for (info = 0; info < MAX_QUAD_DEPTH; info++) { span_set = &(ldSpanInfo[ld].span_set[info]); if (span_set->span_row_data_width == 0) break; if (strip > span_set->data_strip_end) continue; strip_offset = (u_int32_t)mega_mod64 ((strip - span_set->data_strip_start), span_set->span_row_data_width); for (span = 0, span_offset = 0; span < raid->spanDepth; span++) if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 
block_span_info.noElements >= info + 1) { if (strip_offset >= span_set->strip_offset[span]) span_offset = span_set->strip_offset[span]; else break; } mrsas_dprint(sc, MRSAS_PRL11, "AVAGO PRL11: get_arm_from_strip: " "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld, (long unsigned int)strip, (strip_offset - span_offset)); return (strip_offset - span_offset); } mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: - get_arm_from_strip: returns invalid arm" " for ld=%x strip=%lx\n", ld, (long unsigned int)strip); return -1; } /* This Function will return Phys arm */ u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); /* Need to check correct default value */ u_int32_t arm = 0; switch (raid->level) { case 0: case 5: case 6: arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); break; case 1: /* start with logical arm */ arm = get_arm_from_strip(sc, ld, stripe, map); arm *= 2; break; } return arm; } /* * * This routine calculates the arm, span and block for the specified stripe and * reference in stripe using spanset * * Inputs : * sc - HBA instance * ld - Logical drive number * stripRow: Stripe number * stripRef: Reference in stripe * * Outputs : span - Span number block - Absolute Block * number in the physical disk */ static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u_int32_t pd, arRef; u_int8_t physArm, span; u_int64_t row; u_int8_t retval = TRUE; u_int64_t *pdBlock = &io_info->pdBlock; u_int16_t *pDevHandle = &io_info->devHandle; u_int32_t logArm, rowMod, armQ, arm; u_int8_t do_invader = 0; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) do_invader = 1; /* Get row and span from io_info for Uneven Span IO. */ row = io_info->start_row; span = io_info->start_span; if (raid->level == 6) { logArm = get_arm_from_strip(sc, ld, stripRow, map); rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; arm = armQ + 1 + logArm; if (arm >= SPAN_ROW_SIZE(map, ld, span)) arm -= SPAN_ROW_SIZE(map, ld, span); physArm = (u_int8_t)arm; } else /* Calculate the arm */ physArm = get_arm(sc, ld, span, stripRow, map); arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); if (pd != MR_PD_INVALID) *pDevHandle = MR_PdDevHandleGet(pd, map); else { *pDevHandle = MR_PD_INVALID; if ((raid->level >= 5) && ((!do_invader) || (do_invader && raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { pd = MR_ArPdGet(arRef, physArm + 1, map); if (pd != MR_PD_INVALID) *pDevHandle = MR_PdDevHandleGet(pd, map); } } *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = pRAID_Context->spanArm; return retval; } /* * MR_BuildRaidContext: Set up Fast path RAID context * * This function will initiate command processing. The start/end row and strip * information is calculated then the lock is acquired. This function will * return 0 if region lock was acquired OR return num strips. 
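 *
 * [Editor's illustration follows -- not part of this commit; the helper
 * name is hypothetical.]
 */

/*
 * Editor's stand-alone sketch of the strip accounting performed at the
 * top of MR_BuildRaidContext(). With a 64 KB stripe on 512-byte blocks,
 * stripeShift is 7 (128 blocks per strip), so ldStartBlock = 1000 and
 * numBlocks = 300 give start_strip = 7, endStrip = 10, num_strips = 4.
 */
static __inline u_int8_t
example_num_strips(u_int64_t ldStartBlock, u_int32_t numBlocks,
    u_int8_t stripeShift)
{
	u_int64_t start_strip = ldStartBlock >> stripeShift;
	u_int64_t endStrip = (ldStartBlock + numBlocks - 1) >> stripeShift;

	/* Number of strips the transfer touches, inclusive of both ends. */
	return ((u_int8_t)(endStrip - start_strip + 1));
}

/*
 * (End of editor's sketch; MR_BuildRaidContext() follows.)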
*/ u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map) { MR_LD_RAID *raid; u_int32_t ld, stripSize, stripe_mask; u_int64_t endLba, endStrip, endRow, start_row, start_strip; REGION_KEY regStart; REGION_LEN regSize; u_int8_t num_strips, numRows; u_int16_t ref_in_start_stripe, ref_in_end_stripe; u_int64_t ldStartBlock; u_int32_t numBlocks, ldTgtId; u_int8_t isRead, stripIdx; u_int8_t retval = 0; u_int8_t startlba_span = SPAN_INVALID; u_int64_t *pdBlock = &io_info->pdBlock; int error_code = 0; ldStartBlock = io_info->ldStartBlock; numBlocks = io_info->numBlocks; ldTgtId = io_info->ldTgtId; isRead = io_info->isRead; io_info->IoforUnevenSpan = 0; io_info->start_span = SPAN_INVALID; ld = MR_TargetIdToLdGet(ldTgtId, map); raid = MR_LdRaidGet(ld, map); if (raid->rowDataSize == 0) { if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) return FALSE; else if (sc->UnevenSpanSupport) { io_info->IoforUnevenSpan = 1; } else { mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x," " but there is _NO_ UnevenSpanSupport\n", MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); return FALSE; } } stripSize = 1 << raid->stripeShift; stripe_mask = stripSize - 1; /* * calculate starting row and stripe, and number of strips and rows */ start_strip = ldStartBlock >> raid->stripeShift; ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask); endLba = ldStartBlock + numBlocks - 1; ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask); endStrip = endLba >> raid->stripeShift; num_strips = (u_int8_t)(endStrip - start_strip + 1); /* End strip */ if (io_info->IoforUnevenSpan) { start_row = get_row_from_strip(sc, ld, start_strip, map); endRow = get_row_from_strip(sc, ld, endStrip, map); if (raid->spanDepth == 1) { startlba_span = 0; *pdBlock = start_row << raid->stripeShift; } else { startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row, pdBlock, map, &error_code); if (error_code == 1) { mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d. Send IO w/o region lock.\n", __func__, __LINE__); return FALSE; } } if (startlba_span == SPAN_INVALID) { mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: return from %s %d for row 0x%llx," "start strip %llx endStrip %llx\n", __func__, __LINE__, (unsigned long long)start_row, (unsigned long long)start_strip, (unsigned long long)endStrip); return FALSE; } io_info->start_span = startlba_span; io_info->start_row = start_row; mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug: Check Span number from %s %d for row 0x%llx, " " start strip 0x%llx endStrip 0x%llx span 0x%x\n", __func__, __LINE__, (unsigned long long)start_row, (unsigned long long)start_strip, (unsigned long long)endStrip, startlba_span); mrsas_dprint(sc, MRSAS_PRL11, "AVAGO Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n", (unsigned long long)start_row, (unsigned long long)endRow, startlba_span); } else { start_row = mega_div64_32(start_strip, raid->rowDataSize); endRow = mega_div64_32(endStrip, raid->rowDataSize); } numRows = (u_int8_t)(endRow - start_row + 1); /* get the row count */ /* * Calculate region info. (Assume region at start of first row, and * assume this IO needs the full row - will adjust if not true.)
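 *
 * [Editor's note -- not part of this commit.] The region defaults to one
 * full stripe starting at the row boundary; the cases below then shrink
 * or grow it. A transfer contained in a single strip, for instance, ends
 * up locking exactly its own blocks:
 *
 *	regStart = (start_row << raid->stripeShift) + ref_in_start_stripe;
 *	regSize  = numBlocks;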
*/ regStart = start_row << raid->stripeShift; regSize = stripSize; /* Check if we can send this I/O via FastPath */ if (raid->capability.fpCapable) { if (isRead) io_info->fpOkForIo = (raid->capability.fpReadCapable && ((num_strips == 1) || raid->capability.fpReadAcrossStripe)); else io_info->fpOkForIo = (raid->capability.fpWriteCapable && ((num_strips == 1) || raid->capability.fpWriteAcrossStripe)); } else io_info->fpOkForIo = FALSE; if (numRows == 1) { if (num_strips == 1) { regStart += ref_in_start_stripe; regSize = numBlocks; } } else if (io_info->IoforUnevenSpan == 0) { /* * For Even span region lock optimization. If the start strip * is the last in the start row */ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) { regStart += ref_in_start_stripe; /* * initialize count to sectors from startRef to end * of strip */ regSize = stripSize - ref_in_start_stripe; } /* add complete rows in the middle of the transfer */ if (numRows > 2) regSize += (numRows - 2) << raid->stripeShift; /* if IO ends within first strip of last row */ if (endStrip == endRow * raid->rowDataSize) regSize += ref_in_end_stripe + 1; else regSize += stripSize; } else { if (start_strip == (get_strip_from_row(sc, ld, start_row, map) + SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) { regStart += ref_in_start_stripe; /* * initialize count to sectors from startRef to end * of strip */ regSize = stripSize - ref_in_start_stripe; } /* add complete rows in the middle of the transfer */ if (numRows > 2) regSize += (numRows - 2) << raid->stripeShift; /* if IO ends within first strip of last row */ if (endStrip == get_strip_from_row(sc, ld, endRow, map)) regSize += ref_in_end_stripe + 1; else regSize += stripSize; } pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; - if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) + if ((sc->device_id == MRSAS_INVADER) || + (sc->device_id == MRSAS_FURY) || + (sc->device_id == MRSAS_INTRUDER) || + (sc->device_id == MRSAS_INTRUDER_24)) pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; else pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->VirtualDiskTgtId = raid->targetId; pRAID_Context->regLockRowLBA = regStart; pRAID_Context->regLockLength = regSize; pRAID_Context->configSeqNum = raid->seqNum; /* * Get Phy Params only if FP capable, or else leave it to MR firmware * to do the calculation. */ if (io_info->fpOkForIo) { retval = io_info->IoforUnevenSpan ? mr_spanset_get_phy_params(sc, ld, start_strip, ref_in_start_stripe, io_info, pRAID_Context, map) : MR_GetPhyParams(sc, ld, start_strip, ref_in_start_stripe, io_info, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible */ if (io_info->devHandle == MR_PD_INVALID) io_info->fpOkForIo = FALSE; return retval; } else if (isRead) { for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { retval = io_info->IoforUnevenSpan ? mr_spanset_get_phy_params(sc, ld, start_strip + stripIdx, ref_in_start_stripe, io_info, pRAID_Context, map) : MR_GetPhyParams(sc, ld, start_strip + stripIdx, ref_in_start_stripe, io_info, pRAID_Context, map); if (!retval) return TRUE; } } #if SPAN_DEBUG /* Just for testing what arm we get for strip. */ get_arm_from_strip(sc, ld, start_strip, map); #endif return TRUE; } /* * * This routine prepares the span set info from the valid RAID map and stores * it into the local copy of ldSpanInfo in the per-instance data structure.
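 *
 * [Editor's note -- not part of this commit.] Each quad describes a run
 * of logical rows visited with a fixed stride, so the number of span
 * rows it contributes is computed below as
 *
 *	span_row = ((quad->logEnd - quad->logStart) + quad->diff) / quad->diff;
 *
 * e.g. logStart = 0, logEnd = 99, diff = 2 yields 50 span rows.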
* * Inputs : LD map * ldSpanInfo per HBA instance * */ void mr_update_span_set(MR_DRV_RAID_MAP_ALL * map, PLD_SPAN_INFO ldSpanInfo) { u_int8_t span, count; u_int32_t element, span_row_width; u_int64_t span_row; MR_LD_RAID *raid; LD_SPAN_SET *span_set, *span_set_prev; MR_QUAD_ELEMENT *quad; int ldCount; u_int16_t ld; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, map); if (ld >= MAX_LOGICAL_DRIVES) continue; raid = MR_LdRaidGet(ld, map); for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (span = 0; span < raid->spanDepth; span++) { if (map->raidMap.ldSpanMap[ld].spanBlock[span]. block_span_info.noElements < element + 1) continue; /* TO-DO */ span_set = &(ldSpanInfo[ld].span_set[element]); quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[element]; span_set->diff = quad->diff; for (count = 0, span_row_width = 0; count < raid->spanDepth; count++) { if (map->raidMap.ldSpanMap[ld].spanBlock[count]. block_span_info.noElements >= element + 1) { span_set->strip_offset[count] = span_row_width; span_row_width += MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize; #if SPAN_DEBUG printf("AVAGO Debug span %x rowDataSize %x\n", count, MR_LdSpanPtrGet(ld, count, map)->spanRowDataSize); #endif } } span_set->span_row_data_width = span_row_width; span_row = mega_div64_32(((quad->logEnd - quad->logStart) + quad->diff), quad->diff); if (element == 0) { span_set->log_start_lba = 0; span_set->log_end_lba = ((span_row << raid->stripeShift) * span_row_width) - 1; span_set->span_row_start = 0; span_set->span_row_end = span_row - 1; span_set->data_strip_start = 0; span_set->data_strip_end = (span_row * span_row_width) - 1; span_set->data_row_start = 0; span_set->data_row_end = (span_row * quad->diff) - 1; } else { span_set_prev = &(ldSpanInfo[ld].span_set[element - 1]); span_set->log_start_lba = span_set_prev->log_end_lba + 1; span_set->log_end_lba = span_set->log_start_lba + ((span_row << raid->stripeShift) * span_row_width) - 1; span_set->span_row_start = span_set_prev->span_row_end + 1; span_set->span_row_end = span_set->span_row_start + span_row - 1; span_set->data_strip_start = span_set_prev->data_strip_end + 1; span_set->data_strip_end = span_set->data_strip_start + (span_row * span_row_width) - 1; span_set->data_row_start = span_set_prev->data_row_end + 1; span_set->data_row_end = span_set->data_row_start + (span_row * quad->diff) - 1; } break; } if (span == raid->spanDepth) break; /* no quads remain */ } } #if SPAN_DEBUG getSpanInfo(map, ldSpanInfo); /* to get span set info */ #endif } /* * mrsas_update_load_balance_params: Update load balance params * Inputs: * sc - driver softc instance * drv_map - driver RAID map * lbInfo - Load balance info * * This function updates the load balance parameters for the LD config of a two * drive optimal RAID-1.
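 *
 * [Editor's note -- not part of this commit.] The flag computed here is
 * what the fast-path code in mrsas_setup_io() keys on before rerouting a
 * request:
 *
 *	if (sc->load_balance_info[device_id].loadBalanceFlag &&
 *	    io_info.isRead)
 *		io_info.devHandle = mrsas_get_updated_dev_handle(...);
 *
 * so only reads to a two-drive optimal RAID-1 are ever load balanced.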
*/ void mrsas_update_load_balance_params(struct mrsas_softc *sc, MR_DRV_RAID_MAP_ALL * drv_map, PLD_LOAD_BALANCE_INFO lbInfo) { int ldCount; u_int16_t ld; MR_LD_RAID *raid; if (sc->lb_pending_cmds > 128 || sc->lb_pending_cmds < 1) sc->lb_pending_cmds = LB_PENDING_CMDS_DEFAULT; for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { ld = MR_TargetIdToLdGet(ldCount, drv_map); if (ld >= MAX_LOGICAL_DRIVES_EXT) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } raid = MR_LdRaidGet(ld, drv_map); if ((raid->level != 1) || (raid->ldState != MR_LD_STATE_OPTIMAL)) { lbInfo[ldCount].loadBalanceFlag = 0; continue; } lbInfo[ldCount].loadBalanceFlag = 1; } } /* * mrsas_set_pd_lba: Sets PD LBA * input: io_request pointer * CDB length * io_info pointer * Pointer to CCB * Local RAID map pointer * Start block of IO Block Size * * Used to set the PD logical block address in CDB for FP IOs. */ void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, u_int32_t ld_block_size) { MR_LD_RAID *raid; u_int32_t ld; u_int64_t start_blk = io_info->pdBlock; u_int8_t *cdb = io_request->CDB.CDB32; u_int32_t num_blocks = io_info->numBlocks; u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0; struct ccb_hdr *ccb_h = &(ccb->ccb_h); /* Check if T10 PI (DIF) is enabled for this LD */ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { memset(cdb, 0, sizeof(io_request->CDB.CDB32)); cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD; cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN; if (ccb_h->flags == CAM_DIR_OUT) cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32; else cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32; cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL; /* LBA */ cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff); cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff); cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff); cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff); cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff); cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff); cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff); cdb[19] = (u_int8_t)(start_blk & 0xff); /* Logical block reference tag */ io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag); io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; io_request->IoFlags = 32; /* Specify 32-byte cdb */ /* Transfer length */ cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff); cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff); cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff); cdb[31] = (u_int8_t)(num_blocks & 0xff); /* set SCSI IO EEDP Flags */ if (ccb_h->flags == CAM_DIR_OUT) { io_request->EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; } else { io_request->EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; } io_request->Control |= (0x4 << 26); io_request->EEDPBlockSize = ld_block_size; } else { /* Some drives don't support 16/12 byte CDB's, convert to 10 */ if (((cdb_len == 12) || (cdb_len == 16)) && (start_blk <= 0xffffffff)) { if (cdb_len == 16) { opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; flagvals = cdb[1]; groupnum = cdb[14]; control = cdb[15]; } else { opcode = cdb[0] == READ_12 ? 
READ_10 : WRITE_10; flagvals = cdb[1]; groupnum = cdb[10]; control = cdb[11]; } memset(cdb, 0, sizeof(io_request->CDB.CDB32)); cdb[0] = opcode; cdb[1] = flagvals; cdb[6] = groupnum; cdb[9] = control; /* Transfer length */ cdb[8] = (u_int8_t)(num_blocks & 0xff); cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff); io_request->IoFlags = 10; /* Specify 10-byte cdb */ cdb_len = 10; } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { /* Convert to 16 byte CDB for large LBA's */ switch (cdb_len) { case 6: opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; control = cdb[5]; break; case 10: opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16; flagvals = cdb[1]; groupnum = cdb[6]; control = cdb[9]; break; case 12: opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16; flagvals = cdb[1]; groupnum = cdb[10]; control = cdb[11]; break; } memset(cdb, 0, sizeof(io_request->CDB.CDB32)); cdb[0] = opcode; cdb[1] = flagvals; cdb[14] = groupnum; cdb[15] = control; /* Transfer length */ cdb[13] = (u_int8_t)(num_blocks & 0xff); cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff); cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff); cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff); io_request->IoFlags = 16; /* Specify 16-byte cdb */ cdb_len = 16; } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) { /* convert to 10 byte CDB */ opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10; control = cdb[5]; memset(cdb, 0, sizeof(io_request->CDB.CDB32)); cdb[0] = opcode; cdb[9] = control; /* Set transfer length */ cdb[8] = (u_int8_t)(num_blocks & 0xff); cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff); /* Specify 10-byte cdb */ cdb_len = 10; } /* Fall through normal case, just load LBA here */ u_int8_t val = cdb[1] & 0xE0; switch (cdb_len) { case 6: cdb[3] = (u_int8_t)(start_blk & 0xff); cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff); cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f); break; case 10: cdb[5] = (u_int8_t)(start_blk & 0xff); cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff); cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff); cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff); break; - case 12: - cdb[5] = (u_int8_t)(start_blk & 0xff); - cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff); - cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff); - cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff); - break; case 16: cdb[9] = (u_int8_t)(start_blk & 0xff); cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff); cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff); cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff); cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff); cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff); cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff); cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff); break; } } } /* * mrsas_get_best_arm_pd: Determine the best spindle arm * Inputs: * sc - HBA instance * lbInfo - Load balance info * io_info - IO request info * * This function determines and returns the best arm by looking at the * parameters of the last PD access. 
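 *
 * [Editor's illustration follows -- not part of this commit; the helper
 * name is hypothetical.]
 */

/*
 * Editor's stand-alone sketch of the arm-selection heuristic below:
 * prefer the arm whose head is closest to the requested block, but flip
 * to the other arm when the preferred one is queued more than
 * lb_pending_cmds commands deeper. E.g. block 10000 with pd0 last at
 * 9000 (diff0 = 1000) and pd1 last at 50000 (diff1 = 40000) picks pd0,
 * unless pd0's queue is the deeper one by more than the threshold.
 */
static __inline u_int8_t
example_pick_arm(u_int64_t block, u_int64_t last0, u_int64_t last1,
    u_int16_t pend0, u_int16_t pend1, u_int16_t lb_pending_cmds)
{
	u_int64_t diff0 = (block > last0) ? (block - last0) : (last0 - block);
	u_int64_t diff1 = (block > last1) ? (block - last1) : (last1 - block);
	u_int8_t best = (diff0 <= diff1) ? 0 : 1;	/* nearest head wins */

	/* ...unless that arm's queue is much deeper than the other's. */
	if ((best == 0 && pend0 > pend1 + lb_pending_cmds) ||
	    (best == 1 && pend1 > pend0 + lb_pending_cmds))
		best ^= 1;
	return (best);
}

/*
 * (End of editor's sketch; mrsas_get_best_arm_pd() follows.)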
*/ u_int8_t mrsas_get_best_arm_pd(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info) { MR_LD_RAID *raid; MR_DRV_RAID_MAP_ALL *drv_map; u_int16_t pend0, pend1, ld; u_int64_t diff0, diff1; u_int8_t bestArm, pd0, pd1, span, arm; u_int32_t arRef, span_row_size; u_int64_t block = io_info->ldStartBlock; u_int32_t count = io_info->numBlocks; span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK) >> RAID_CTX_SPANARM_SPAN_SHIFT); arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); drv_map = sc->ld_drv_map[(sc->map_id & 1)]; ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); raid = MR_LdRaidGet(ld, drv_map); span_row_size = sc->UnevenSpanSupport ? SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize; arRef = MR_LdSpanArrayGet(ld, span, drv_map); pd0 = MR_ArPdGet(arRef, arm, drv_map); pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? (arm + 1 - span_row_size) : arm + 1, drv_map); /* get the pending cmds for the data and mirror arms */ pend0 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd0]); pend1 = mrsas_atomic_read(&lbInfo->scsi_pending_cmds[pd1]); /* Determine the disk whose head is nearer to the req. block */ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); bestArm = (diff0 <= diff1 ? arm : arm ^ 1); if ((bestArm == arm && pend0 > pend1 + sc->lb_pending_cmds) || (bestArm != arm && pend1 > pend0 + sc->lb_pending_cmds)) bestArm ^= 1; /* Update the last accessed block on the correct pd */ lbInfo->last_accessed_block[bestArm == arm ? pd0 : pd1] = block + count - 1; io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; #if SPAN_DEBUG if (arm != bestArm) printf("AVAGO Debug R1 Load balance occur - span 0x%x arm 0x%x bestArm 0x%x " "io_info->span_arm 0x%x\n", span, arm, bestArm, io_info->span_arm); #endif return io_info->pd_after_lb; } /* * mrsas_get_updated_dev_handle: Get the update dev handle * Inputs: * sc - Adapter instance soft state * lbInfo - Load balance info * io_info - io_info pointer * * This function determines and returns the updated dev handle. */ u_int16_t mrsas_get_updated_dev_handle(struct mrsas_softc *sc, PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info) { u_int8_t arm_pd; u_int16_t devHandle; MR_DRV_RAID_MAP_ALL *drv_map; drv_map = sc->ld_drv_map[(sc->map_id & 1)]; /* get best new arm */ arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info); devHandle = MR_PdDevHandleGet(arm_pd, drv_map); mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); return devHandle; } /* * MR_GetPhyParams: Calculates arm, span, and block * Inputs: Adapter soft state * Logical drive number (LD) * Stripe number(stripRow) * Reference in stripe (stripRef) * * Outputs: Absolute Block number in the physical disk * * This routine calculates the arm, span and block for the specified stripe and * reference in stripe. 
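 *
 * [Editor's worked example -- not part of this commit.] For RAID-6 the
 * parity (Q) arm rotates from row to row; with rowSize = 4 and row = 5
 * the body below computes
 *
 *	rowMod = 5 % 4 = 1
 *	armQ   = 4 - 1 - 1 = 2           (index of the Q drive)
 *	arm    = armQ + 1 + logArm       (data logically follows Q)
 *
 * so logArm 0 lands on physical arm 3, and logArm 2 wraps around to
 * arm 1.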
/*
 * MR_GetPhyParams:	Calculates arm, span, and block
 * Inputs:		Adapter soft state
 *			Logical drive number (LD)
 *			Stripe number (stripRow)
 *			Reference in stripe (stripRef)
 *
 * Outputs:		Absolute Block number in the physical disk
 *
 * This routine calculates the arm, span and block for the specified stripe
 * and reference in stripe.
 */
u_int8_t
MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
    u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT * pRAID_Context, MR_DRV_RAID_MAP_ALL * map)
{
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t pd, arRef;
	u_int8_t physArm, span;
	u_int64_t row;
	u_int8_t retval = TRUE;
	int error_code = 0;
	u_int64_t *pdBlock = &io_info->pdBlock;
	u_int16_t *pDevHandle = &io_info->devHandle;
	u_int32_t rowMod, armQ, arm, logArm;
	u_int8_t do_invader = 0;

-	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+	if ((sc->device_id == MRSAS_INVADER) ||
+	    (sc->device_id == MRSAS_FURY) ||
+	    (sc->device_id == MRSAS_INTRUDER) ||
+	    (sc->device_id == MRSAS_INTRUDER_24))
		do_invader = 1;

	row = mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		logArm = mega_mod64(stripRow, raid->rowDataSize);
		if (raid->rowSize == 0)
			return FALSE;
		rowMod = mega_mod64(row, raid->rowSize);	/* get logical row mod */
		armQ = raid->rowSize - 1 - rowMod;	/* index of Q drive */
		arm = armQ + 1 + logArm;	/* data always logically follows Q */
		if (arm >= raid->rowSize)	/* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u_int8_t)arm;
	} else {
		if (raid->modFactor == 0)
			return FALSE;
		physArm = MR_LdDataArmGet(ld,
		    mega_mod64(stripRow, raid->modFactor), map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
		if (error_code == 1)
			return FALSE;
	}

	/* Get the array on which this span is present */
	arRef = MR_LdSpanArrayGet(ld, span, map);

	pd = MR_ArPdGet(arRef, physArm, map);	/* Get the Pd. */

	if (pd != MR_PD_INVALID)
		/* Get dev handle from Pd */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
	else {
		*pDevHandle = MR_PD_INVALID;	/* set dev handle as invalid. */
		if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
		    raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get Alternate Pd. */
			pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (pd != MR_PD_INVALID)
				/* Get dev handle from Pd. */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
		}
	}

	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
	pRAID_Context->spanArm =
	    (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	return retval;
}
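/*
 * A minimal sketch of the RAID-6 arm rotation computed in MR_GetPhyParams()
 * above: the Q drive rotates one arm per row, and data arms logically
 * follow Q with wrap-around.  rowSize counts all arms in the row while
 * rowDataSize counts only the data arms.  Names are local to the sketch.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned
raid6_phys_arm(uint64_t stripRow, unsigned rowDataSize, unsigned rowSize)
{
	uint64_t row = stripRow / rowDataSize;
	unsigned logArm = (unsigned)(stripRow % rowDataSize);
	unsigned rowMod = (unsigned)(row % rowSize);
	unsigned armQ = rowSize - 1 - rowMod;	/* index of Q drive */
	unsigned arm = armQ + 1 + logArm;	/* data logically follows Q */

	if (arm >= rowSize)			/* handle wrap condition */
		arm -= rowSize;
	return (arm);
}

int
main(void)
{
	uint64_t s;

	/* 5-drive RAID-6: 3 data arms plus P and Q per row. */
	for (s = 0; s < 9; s++)
		printf("stripe %llu -> physical arm %u\n",
		    (unsigned long long)s, raid6_phys_arm(s, 3, 5));
	return (0);
}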
/*
 * MR_GetSpanBlock:	Calculates span block
 * Inputs:		LD
 *			row PD
 *			span block
 *			RAID map pointer
 *
 * Outputs:		Span number
 *			Error code
 *
 * This routine calculates the span from the span block info.
 */
u_int32_t
MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
    MR_DRV_RAID_MAP_ALL * map, int *div_error)
{
	MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	MR_QUAD_ELEMENT *quad;
	MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
	u_int32_t span, j;
	u_int64_t blk, debugBlk;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
			quad = &pSpanBlock->block_span_info.quad[j];
			if (quad->diff == 0) {
				*div_error = 1;
				return span;
			}
			if (quad->logStart <= row && row <= quad->logEnd &&
			    (mega_mod64(row - quad->logStart, quad->diff)) == 0) {
				if (span_blk != NULL) {
					blk = mega_div64_32((row - quad->logStart),
					    quad->diff);
					debugBlk = blk;
					blk = (blk + quad->offsetInSpan) <<
					    raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return span;
}
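/*
 * A minimal sketch of the quad-element test used in MR_GetSpanBlock()
 * above: a row belongs to a quad when it falls inside [logStart, logEnd]
 * and is reachable from logStart in whole steps of 'diff'; the span block
 * is then (steps + offsetInSpan) shifted by the stripe size.  The struct
 * and values below are local to the sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct quad {
	uint64_t logStart;
	uint64_t logEnd;
	uint32_t diff;
	uint64_t offsetInSpan;
};

static int
quad_to_block(const struct quad *q, uint64_t row, unsigned stripeShift,
    uint64_t *span_blk)
{
	if (q->diff == 0)
		return (-1);		/* malformed map entry: division error */
	if (row < q->logStart || row > q->logEnd ||
	    (row - q->logStart) % q->diff != 0)
		return (1);		/* row is not described by this quad */
	*span_blk = ((row - q->logStart) / q->diff + q->offsetInSpan)
	    << stripeShift;
	return (0);
}

int
main(void)
{
	struct quad q = { 100, 200, 2, 16 };
	uint64_t blk;

	if (quad_to_block(&q, 150, 3, &blk) == 0)
		printf("row 150 -> span block %llu\n", (unsigned long long)blk);
	return (0);
}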
Index: stable/10/sys/dev/mrsas/mrsas_ioctl.c
===================================================================
--- stable/10/sys/dev/mrsas/mrsas_ioctl.c	(revision 300735)
+++ stable/10/sys/dev/mrsas/mrsas_ioctl.c	(revision 300736)
@@ -1,507 +1,530 @@
/*
 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
 * Support: freebsdraid@avagotech.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer. 2. Redistributions
 * in binary form must reproduce the above copyright notice, this list of
 * conditions and the following disclaimer in the documentation and/or other
 * materials provided with the distribution. 3. Neither the name of the
 * nor the names of its contributors may be used to endorse or
 * promote products derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 *
 * Send feedback to:  Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mrsas/mrsas.h>
#include <dev/mrsas/mrsas_ioctl.h>

/*
 * Function prototypes
 */
int	mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
int	mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void	*mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
static int mrsas_create_frame_pool(struct mrsas_softc *sc);
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error);

extern struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);

/*
 * mrsas_passthru:	Handle pass-through commands
 * input:		Adapter instance soft state
 *			argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and
 * ioctl commands to Firmware.
 */
int
mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;

#ifdef COMPAT_FREEBSD32
	struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;
#endif
	union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
	struct mrsas_mfi_cmd *cmd = NULL;
	bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
	bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
	void *ioctl_data_mem[MAX_IOCTL_SGE];
	bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
	bus_dma_tag_t ioctl_sense_tag = 0;
	bus_dmamap_t ioctl_sense_dmamap = 0;
	void *ioctl_sense_mem = 0;
	bus_addr_t ioctl_sense_phys_addr = 0;
	int i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
	struct mrsas_sge32 *kern_sge32;
	unsigned long *sense_ptr;
	uint8_t *iov_base_ptrin = NULL;
	size_t iov_len = 0;

	/*
	 * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In
	 * this case do nothing and return 0 to it as status.
	 */
	if (in_cmd->dcmd.opcode == 0) {
		device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
		user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
		return (0);
	}
	/* Validate SGL length */
	if (user_ioc->sge_count > MAX_IOCTL_SGE) {
		device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > %d).\n",
		    __func__, user_ioc->sge_count, MAX_IOCTL_SGE);
		return (ENOENT);
	}
	/* Get a command */
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
		return (ENOMEM);
	}
	/*
	 * User's IOCTL packet has 2 frames (maximum).  Copy those two frames
	 * into our cmd's frames.  cmd->frame's context will get overwritten
	 * when we copy from user's frames, so set that value separately.
	 */
	memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;
	cmd->frame->hdr.pad_0 = 0;
	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
	    MFI_FRAME_SENSE64);

	/*
	 * The management interface between applications and the fw uses MFI
	 * frames.  E.g., RAID configuration changes, LD property changes, etc.
	 * are accomplished through different kinds of MFI frames.  The driver
	 * needs to care only about substituting user buffers with kernel
	 * buffers in SGLs.  The location of the SGL is embedded in the struct
	 * iocpacket itself.
	 */
	kern_sge32 = (struct mrsas_sge32 *)
	    ((unsigned long)cmd->frame + user_ioc->sgl_off);

+	memset(ioctl_data_tag, 0, (sizeof(bus_dma_tag_t) * MAX_IOCTL_SGE));
+	memset(ioctl_data_dmamap, 0, (sizeof(bus_dmamap_t) * MAX_IOCTL_SGE));
+	memset(ioctl_data_mem, 0, (sizeof(void *) * MAX_IOCTL_SGE));
+	memset(ioctl_data_phys_addr, 0, (sizeof(bus_addr_t) * MAX_IOCTL_SGE));
+
	/*
	 * For each user buffer, create a mirror buffer and copy in
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc32->sgl[i].iov_len;
#endif
		}
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_data_size,
		    1,
		    ioctl_data_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_data_tag[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
		    ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
		    &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		/* Save the physical address and length */
		kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];

		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			kern_sge32[i].length = user_ioc->sgl[i].iov_len;

			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			kern_sge32[i].length = user_ioc32->sgl[i].iov_len;

			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		/* Copy in data from user space */
		ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
			goto out;
		}
	}

	ioctl_sense_size = user_ioc->sense_len;

	if (user_ioc->sense_len) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_sense_size,
		    1,
		    ioctl_sense_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_sense_tag)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
		    ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
		    &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		sense_ptr = (unsigned long *)
		    ((unsigned long)cmd->frame + user_ioc->sense_off);
		*sense_ptr = ioctl_sense_phys_addr;
	}
	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
-	mrsas_issue_blocked_cmd(sc, cmd);
+	ret = mrsas_issue_blocked_cmd(sc, cmd);
+	if (ret == ETIMEDOUT) {
+		mrsas_dprint(sc, MRSAS_OCR,
+		    "IOCTL command is timed out, initiating OCR\n");
+		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+		ret = EAGAIN;
+		goto out;
+	}
	cmd->sync_cmd = 0;
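	/*
	 * Note on the timeout path above: when mrsas_issue_blocked_cmd()
	 * returns ETIMEDOUT, the request is not retried here.  Setting
	 * do_timedout_reset hands recovery off to the OCR (online controller
	 * reset) path, and EAGAIN tells the caller it may retry the IOCTL
	 * once the controller has been reset.
	 */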
	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}
		ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (user_ioc->sense_len) {
		/*
		 * sense_ptr points to the location that has the user sense
		 * buffer address
		 */
		sense_ptr = (unsigned long *)((unsigned long)user_ioc->frame.raw +
		    user_ioc->sense_off);
		ret = copyout(ioctl_sense_mem, (unsigned long *)*sense_ptr,
		    user_ioc->sense_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
			goto out;
		}
	}
	/*
	 * Return command status to user space
	 */
	memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
	    sizeof(u_int8_t));

out:
	/*
	 * Release sense buffer
	 */
	if (user_ioc->sense_len) {
		if (ioctl_sense_phys_addr)
			bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
		if (ioctl_sense_mem != NULL)
			bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem,
			    ioctl_sense_dmamap);
		if (ioctl_sense_tag != NULL)
			bus_dma_tag_destroy(ioctl_sense_tag);
	}
	/*
	 * Release data buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
#endif
		}
		if (ioctl_data_phys_addr[i])
			bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
		if (ioctl_data_mem[i] != NULL)
			bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_tag[i] != NULL)
			bus_dma_tag_destroy(ioctl_data_tag[i]);
	}
	/* Free command */
	mrsas_release_mfi_cmd(cmd);

	return (ret);
}
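/*
 * A minimal userland sketch of the bounce-buffer pattern mrsas_passthru()
 * uses above: every user segment gets a driver-owned mirror buffer that is
 * filled on the way in and drained on the way out, so the firmware only
 * ever sees driver memory.  memcpy() stands in for copyin()/copyout() and
 * malloc() for the busdma allocation; all names are local to the sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sge {
	void	*iov_base;	/* user buffer */
	size_t	 iov_len;
};

static int
mirror_io(struct sge *sgl, int sge_count)
{
	void *mirror[8] = { 0 };
	int i, ret = 0;

	if (sge_count > 8)	/* validate SGL length */
		return (-1);
	for (i = 0; i < sge_count; i++) {
		if (sgl[i].iov_len == 0)
			continue;
		if ((mirror[i] = malloc(sgl[i].iov_len)) == NULL) {
			ret = -1;
			goto out;
		}
		memcpy(mirror[i], sgl[i].iov_base, sgl[i].iov_len); /* "copyin" */
	}

	/* ... the firmware would DMA into/out of mirror[] here ... */

	for (i = 0; i < sge_count; i++)
		if (mirror[i] != NULL)
			memcpy(sgl[i].iov_base, mirror[i], sgl[i].iov_len); /* "copyout" */
out:
	for (i = 0; i < sge_count; i++)
		free(mirror[i]);
	return (ret);
}

int
main(void)
{
	char buf[16] = "hello";
	struct sge s = { buf, sizeof(buf) };

	return (mirror_io(&s, 1) == 0 ? 0 : 1);
}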
/*
 * mrsas_alloc_mfi_cmds:	Allocates the command packets
 * input:			Adapter instance soft state
 *
 * Each IOCTL or passthru command that is issued to the FW is wrapped in a
 * local data structure called mrsas_mfi_cmd.  The frame embedded in this
 * mrsas_mfi_cmd is issued to the FW.  The array is used only to look up the
 * mrsas_mfi_cmd given the context.  The free commands are maintained in a
 * linked list.
 */
int
mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *cmd;

	max_cmd = MRSAS_MAX_MFI_CMDS;

	/*
	 * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd,
	    M_MRSAS, M_NOWAIT);
	if (!sc->mfi_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mfi_cmd_list[i]) {
			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_list[j], M_MRSAS);
			free(sc->mfi_cmd_list, M_MRSAS);
			sc->mfi_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mfi_cmd_list[i];
		memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
		cmd->index = i;
		cmd->ccb_ptr = NULL;
		cmd->sc = sc;
		TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	}

	/* create a frame pool and assign one frame to each command */
	if (mrsas_create_frame_pool(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
		/* Free the frames */
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, cmd);
		}
		if (sc->mficmd_frame_tag != NULL)
			bus_dma_tag_destroy(sc->mficmd_frame_tag);

		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_create_frame_pool:	Creates DMA pool for cmd frames
 * input:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet by
 * using the PCI pool facility.  pad_0 is initialized to 0 so that the
 * context value is not corrupted, which could cause a FW crash.
 */
static int
mrsas_create_frame_pool(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mfi_cmd *cmd;

	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MFI_FRAME_SIZE,
	    1,
	    MRSAS_MFI_FRAME_SIZE,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->mficmd_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
		return (ENOMEM);
	}
	for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
		cmd = sc->mfi_cmd_list[i];
		cmd->frame = mrsas_alloc_frame(sc, cmd);
		if (cmd->frame == NULL) {
			device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
			return (ENOMEM);
		}
+		/*
+		 * For MFI controllers:
+		 * max_num_sge = 60
+		 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
+		 * Total 960 bytes (15 MFI frames of 64 bytes each)
+		 *
+		 * Fusion adapters require only 3 extra frames:
+		 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+		 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
+		 * Total 192 bytes (3 MFI frames of 64 bytes each)
+		 */
		memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
		cmd->frame->io.context = cmd->index;
		cmd->frame->io.pad_0 = 0;
	}

	return (0);
}
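/*
 * A minimal sketch of the allocate-then-unwind pattern used by
 * mrsas_alloc_mfi_cmds() above: allocate the pointer array, then each
 * element, and on the first failure free exactly the elements already
 * allocated before failing the whole operation.  malloc()/free() stand in
 * for the kernel allocator; all names are local to the sketch.
 */
#include <stdlib.h>

struct cmd {
	int index;
};

static struct cmd **
alloc_cmd_list(int max_cmd)
{
	struct cmd **list;
	int i, j;

	list = calloc(max_cmd, sizeof(*list));
	if (list == NULL)
		return (NULL);
	for (i = 0; i < max_cmd; i++) {
		list[i] = calloc(1, sizeof(**list));
		if (list[i] == NULL) {
			/* Unwind: free only what was already allocated. */
			for (j = 0; j < i; j++)
				free(list[j]);
			free(list);
			return (NULL);
		}
		list[i]->index = i;
	}
	return (list);
}

int
main(void)
{
	struct cmd **list = alloc_cmd_list(16);
	int i;

	if (list == NULL)
		return (1);
	for (i = 0; i < 16; i++)
		free(list[i]);
	free(list);
	return (0);
}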
/*
 * mrsas_alloc_frame:	Allocates MFI Frames
 * input:		Adapter soft state
 *
 * Create a bus DMA memory tag and dmamap, and load memory for MFI frames.
 * Returns virtual memory pointer to allocated region.
 */
void *
mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;

	if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
	    BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
		return (NULL);
	}
	if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
	    cmd->frame_mem, frame_size, mrsas_alloc_cb,
	    &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (NULL);
	}
	return (cmd->frame_mem);
}

/*
 * mrsas_alloc_cb:	Callback function of bus_dmamap_load()
 * input:		callback argument,
 *			machine dependent type that describes DMA segments,
 *			number of segments,
 *			error code.
 *
 * This function is for the driver to receive mapping information resultant
 * of the bus_dmamap_load().  The information is actually not being used,
 * but the address is saved anyway.
 */
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_free_frame:	Frees memory for MFI frames
 * input:		Adapter soft state
 *
 * Deallocates MFI frames memory.  Called from mrsas_free_mem() during detach
 * and in the error case during creation of the frame pool.
 */
void
mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	if (cmd->frame_phys_addr)
		bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
	if (cmd->frame_mem != NULL)
		bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem,
		    cmd->frame_dmamap);
}

Index: stable/10
===================================================================
--- stable/10	(revision 300735)
+++ stable/10	(revision 300736)

Property changes on: stable/10
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r299666-299672