Index: head/sys/dev/mpt/mpt.h =================================================================== --- head/sys/dev/mpt/mpt.h (revision 233402) +++ head/sys/dev/mpt/mpt.h (revision 233403) @@ -1,1331 +1,1333 @@ /* $FreeBSD$ */ /*- * Generic defines for LSI '909 FC adapters. * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /* * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2004, 2005 Justin T. Gibbs * Copyright (c) 2005, WHEEL Sp. z o.o. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _MPT_H_ #define _MPT_H_ /********************************* OS Includes ********************************/ #include #include #include #include #include #if __FreeBSD_version < 500000 #include #include #include #include #else #include #include #include #include #include #include #endif #include #include #include #include #include #if __FreeBSD_version < 500000 #include #include #endif #ifdef __sparc64__ #include #include #endif #include #if __FreeBSD_version < 500000 #include #include #else #include #include #endif #include #include "opt_ddb.h" /**************************** Register Definitions ****************************/ #include /******************************* MPI Definitions ******************************/ #include #include #include #include #include /* XXX For mpt_debug.c */ #include #define MPT_S64_2_SCALAR(y) ((((int64_t)y.High) << 32) | (y.Low)) #define MPT_U64_2_SCALAR(y) ((((uint64_t)y.High) << 32) | (y.Low)) /****************************** Misc Definitions ******************************/ /* #define MPT_TEST_MULTIPATH 1 */ #define MPT_OK (0) #define MPT_FAIL (0x10000) #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array)) #define MPT_ROLE_NONE 0 #define MPT_ROLE_INITIATOR 1 #define MPT_ROLE_TARGET 2 #define MPT_ROLE_BOTH 3 #define MPT_ROLE_DEFAULT MPT_ROLE_INITIATOR #define MPT_INI_ID_NONE -1 /**************************** Forward Declarations ****************************/ struct mpt_softc; struct mpt_personality; typedef struct req_entry request_t; /************************* Personality Module Support *************************/ typedef int mpt_load_handler_t(struct mpt_personality *); typedef int mpt_probe_handler_t(struct mpt_softc *); typedef int mpt_attach_handler_t(struct mpt_softc *); typedef int mpt_enable_handler_t(struct mpt_softc *); typedef void mpt_ready_handler_t(struct mpt_softc *); typedef int mpt_event_handler_t(struct mpt_softc *, request_t *, MSG_EVENT_NOTIFY_REPLY *); typedef void mpt_reset_handler_t(struct mpt_softc *, 
int /*type*/); /* XXX Add return value and use for veto? */ typedef void mpt_shutdown_handler_t(struct mpt_softc *); typedef void mpt_detach_handler_t(struct mpt_softc *); typedef int mpt_unload_handler_t(struct mpt_personality *); struct mpt_personality { const char *name; uint32_t id; /* Assigned identifier. */ u_int use_count; /* Instances using personality*/ mpt_load_handler_t *load; /* configure personailty */ #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load) mpt_probe_handler_t *probe; /* configure personailty */ mpt_attach_handler_t *attach; /* initialize device instance */ mpt_enable_handler_t *enable; /* enable device */ mpt_ready_handler_t *ready; /* final open for business */ mpt_event_handler_t *event; /* Handle MPI event. */ mpt_reset_handler_t *reset; /* Re-init after reset. */ mpt_shutdown_handler_t *shutdown; /* Shutdown instance. */ mpt_detach_handler_t *detach; /* release device instance */ mpt_unload_handler_t *unload; /* Shutdown personality */ #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload) }; int mpt_modevent(module_t, int, void *); /* Maximum supported number of personalities. */ #define MPT_MAX_PERSONALITIES (15) #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \ MODULE_DEPEND(name, dep, vmin, vpref, vmax) #define DECLARE_MPT_PERSONALITY(name, order) \ static moduledata_t name##_mod = { \ #name, mpt_modevent, &name##_personality \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order); \ MODULE_VERSION(name, 1); \ MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1) /******************************* Bus DMA Support ******************************/ /* XXX Need to update bus_dmamap_sync to take a range argument. 
*/ #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op) \ bus_dmamap_sync(dma_tag, dmamap, op) #if __FreeBSD_version < 600000 #define bus_get_dma_tag(x) NULL #endif #if __FreeBSD_version >= 501102 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ busdma_lock_mutex, &(mpt)->mpt_lock, \ dma_tagp) #else #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) \ bus_dma_tag_create(parent_tag, alignment, boundary, \ lowaddr, highaddr, filter, filterarg, \ maxsize, nsegments, maxsegsz, flags, \ dma_tagp) #endif struct mpt_map_info { struct mpt_softc *mpt; int error; uint32_t phys; }; void mpt_map_rquest(void *, bus_dma_segment_t *, int, int); /* **************************** NewBUS interrupt Crock ************************/ #if __FreeBSD_version < 700031 #define mpt_setup_intr(d, i, f, U, if, ifa, hp) \ bus_setup_intr(d, i, f, if, ifa, hp) #else #define mpt_setup_intr bus_setup_intr #endif /* **************************** NewBUS CAM Support ****************************/ #if __FreeBSD_version < 700049 #define mpt_xpt_bus_register(sim, parent, bus) \ xpt_bus_register(sim, bus) #else #define mpt_xpt_bus_register xpt_bus_register #endif /**************************** Kernel Thread Support ***************************/ #if __FreeBSD_version > 800001 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define mpt_kthread_exit(status) \ kproc_exit(status) #elif __FreeBSD_version > 500005 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) #define 
mpt_kthread_exit(status) \ kthread_exit(status) #else #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, fmtstr, arg) #define mpt_kthread_exit(status) \ kthread_exit(status) #endif /********************************** Endianess *********************************/ #define MPT_2_HOST64(ptr, tag) ptr->tag = le64toh(ptr->tag) #define MPT_2_HOST32(ptr, tag) ptr->tag = le32toh(ptr->tag) #define MPT_2_HOST16(ptr, tag) ptr->tag = le16toh(ptr->tag) #define HOST_2_MPT64(ptr, tag) ptr->tag = htole64(ptr->tag) #define HOST_2_MPT32(ptr, tag) ptr->tag = htole32(ptr->tag) #define HOST_2_MPT16(ptr, tag) ptr->tag = htole16(ptr->tag) #if _BYTE_ORDER == _BIG_ENDIAN void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *); void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *); void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *); void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *); void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *); void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *); void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *); void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *); void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *); void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *); void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *); void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *); void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *); void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *); void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *); void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *); void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *); void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *); #else #define mpt2host_sge_simple_union(x) do { ; } while (0) #define mpt2host_iocfacts_reply(x) do { ; } while (0) #define 
mpt2host_portfacts_reply(x) do { ; } while (0) #define mpt2host_config_page_ioc2(x) do { ; } while (0) #define mpt2host_config_page_ioc3(x) do { ; } while (0) #define mpt2host_config_page_scsi_port_0(x) do { ; } while (0) #define mpt2host_config_page_scsi_port_1(x) do { ; } while (0) #define host2mpt_config_page_scsi_port_1(x) do { ; } while (0) #define mpt2host_config_page_scsi_port_2(x) do { ; } while (0) #define mpt2host_config_page_scsi_device_0(x) do { ; } while (0) #define mpt2host_config_page_scsi_device_1(x) do { ; } while (0) #define host2mpt_config_page_scsi_device_1(x) do { ; } while (0) #define mpt2host_config_page_fc_port_0(x) do { ; } while (0) #define mpt2host_config_page_fc_port_1(x) do { ; } while (0) #define host2mpt_config_page_fc_port_1(x) do { ; } while (0) #define mpt2host_config_page_raid_vol_0(x) do { ; } while (0) #define mpt2host_config_page_raid_phys_disk_0(x) \ do { ; } while (0) #define mpt2host_mpi_raid_vol_indicator(x) do { ; } while (0) #endif /**************************** MPI Transaction State ***************************/ typedef enum { REQ_STATE_NIL = 0x00, REQ_STATE_FREE = 0x01, REQ_STATE_ALLOCATED = 0x02, REQ_STATE_QUEUED = 0x04, REQ_STATE_DONE = 0x08, REQ_STATE_TIMEDOUT = 0x10, REQ_STATE_NEED_WAKEUP = 0x20, REQ_STATE_LOCKED = 0x80, /* can't be freed */ REQ_STATE_MASK = 0xFF } mpt_req_state_t; struct req_entry { TAILQ_ENTRY(req_entry) links; /* Pointer to next in list */ mpt_req_state_t state; /* Request State Information */ uint16_t index; /* Index of this entry */ uint16_t IOCStatus; /* Completion status */ uint16_t ResponseCode; /* TMF Response Code */ uint16_t serno; /* serial number */ union ccb *ccb; /* CAM request */ void *req_vbuf; /* Virtual Address of Entry */ void *sense_vbuf; /* Virtual Address of sense data */ bus_addr_t req_pbuf; /* Physical Address of Entry */ bus_addr_t sense_pbuf; /* Physical Address of sense data */ bus_dmamap_t dmap; /* DMA map for data buffers */ struct req_entry *chain; /* for SGE 
overallocations */ struct callout callout; /* Timeout for the request */ }; typedef struct mpt_config_params { u_int Action; u_int PageVersion; u_int PageLength; u_int PageNumber; u_int PageType; u_int PageAddress; u_int ExtPageLength; u_int ExtPageType; } cfgparms_t; /**************************** MPI Target State Info ***************************/ typedef struct { uint32_t reply_desc; /* current reply descriptor */ uint32_t resid; /* current data residual */ uint32_t bytes_xfered; /* current relative offset */ union ccb *ccb; /* pointer to currently active ccb */ request_t *req; /* pointer to currently active assist request */ uint32_t is_local : 1, nxfers : 31; uint32_t tag_id; enum { TGT_STATE_NIL, TGT_STATE_LOADING, TGT_STATE_LOADED, TGT_STATE_IN_CAM, TGT_STATE_SETTING_UP_FOR_DATA, TGT_STATE_MOVING_DATA, TGT_STATE_MOVING_DATA_AND_STATUS, TGT_STATE_SENDING_STATUS } state; } mpt_tgt_state_t; /* * When we get an incoming command it has its own tag which is called the * IoIndex. This is the value we gave that particular command buffer when * we originally assigned it. It's just a number, really. The FC card uses * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which * contains pointers the request_t structures related to that IoIndex. * * What *we* do is construct a tag out of the index for the target command * which owns the incoming ATIO plus a rolling sequence number. 
*/ #define MPT_MAKE_TAGID(mpt, req, ioindex) \ ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff)) #ifdef INVARIANTS #define MPT_TAG_2_REQ(a, b) mpt_tag_2_req(a, (uint32_t) b) #else #define MPT_TAG_2_REQ(mpt, tag) mpt->tgt_cmd_ptrs[tag >> 18] #endif #define MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \ (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)])) STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr); #define MPT_MAX_LUNS 256 typedef struct { struct mpt_hdr_stailq atios; struct mpt_hdr_stailq inots; int enabled; } tgt_resource_t; #define MPT_MAX_ELS 64 /**************************** Handler Registration ****************************/ /* * Global table of registered reply handlers. The * handler is indicated by byte 3 of the request * index submitted to the IOC. This allows the * driver core to perform generic processing without * any knowledge of per-personality behavior. * * MPT_NUM_REPLY_HANDLERS must be a power of 2 * to allow the easy generation of a mask. * * The handler offsets used by the core are hard coded * allowing faster code generation when assigning a handler * to a request. All "personalities" must use the * the handler registration mechanism. * * The IOC handlers that are rarely executed are placed * at the tail of the table to make it more likely that * all commonly executed handlers fit in a single cache * line. 
*/ #define MPT_NUM_REPLY_HANDLERS (32) #define MPT_REPLY_HANDLER_EVENTS MPT_CBI_TO_HID(0) #define MPT_REPLY_HANDLER_CONFIG MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1) #define MPT_REPLY_HANDLER_HANDSHAKE MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2) typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame); typedef union { mpt_reply_handler_t *reply_handler; } mpt_handler_t; typedef enum { MPT_HANDLER_REPLY, MPT_HANDLER_EVENT, MPT_HANDLER_RESET, MPT_HANDLER_SHUTDOWN } mpt_handler_type; struct mpt_handler_record { LIST_ENTRY(mpt_handler_record) links; mpt_handler_t handler; }; LIST_HEAD(mpt_handler_list, mpt_handler_record); /* * The handler_id is currently unused but would contain the * handler ID used in the MsgContext field to allow direction * of replies to the handler. Registrations that don't require * a handler id can pass in NULL for the handler_id. * * Deregistrations for handlers without a handler id should * pass in MPT_HANDLER_ID_NONE. 
*/ #define MPT_HANDLER_ID_NONE (0xFFFFFFFF) int mpt_register_handler(struct mpt_softc *, mpt_handler_type, mpt_handler_t, uint32_t *); int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type, mpt_handler_t, uint32_t); /******************* Per-Controller Instance Data Structures ******************/ TAILQ_HEAD(req_queue, req_entry); /* Structure for saving proper values for modifyable PCI config registers */ struct mpt_pci_cfg { uint16_t Command; uint16_t LatencyTimer_LineSize; uint32_t IO_BAR; uint32_t Mem0_BAR[2]; uint32_t Mem1_BAR[2]; uint32_t ROM_BAR; uint8_t IntLine; uint32_t PMCSR; }; typedef enum { MPT_RVF_NONE = 0x0, MPT_RVF_ACTIVE = 0x1, MPT_RVF_ANNOUNCED = 0x2, MPT_RVF_UP2DATE = 0x4, MPT_RVF_REFERENCED = 0x8, MPT_RVF_WCE_CHANGED = 0x10 } mpt_raid_volume_flags; struct mpt_raid_volume { CONFIG_PAGE_RAID_VOL_0 *config_page; MPI_RAID_VOL_INDICATOR sync_progress; mpt_raid_volume_flags flags; u_int quiesced_disks; }; typedef enum { MPT_RDF_NONE = 0x00, MPT_RDF_ACTIVE = 0x01, MPT_RDF_ANNOUNCED = 0x02, MPT_RDF_UP2DATE = 0x04, MPT_RDF_REFERENCED = 0x08, MPT_RDF_QUIESCING = 0x10, MPT_RDF_QUIESCED = 0x20 } mpt_raid_disk_flags; struct mpt_raid_disk { CONFIG_PAGE_RAID_PHYS_DISK_0 config_page; struct mpt_raid_volume *volume; u_int member_number; u_int pass_thru_active; mpt_raid_disk_flags flags; }; struct mpt_evtf_record { MSG_EVENT_NOTIFY_REPLY reply; uint32_t context; LIST_ENTRY(mpt_evtf_record) links; }; LIST_HEAD(mpt_evtf_list, mpt_evtf_record); struct mptsas_devinfo { uint16_t dev_handle; uint16_t parent_dev_handle; uint16_t enclosure_handle; uint16_t slot; uint8_t phy_num; uint8_t physical_port; uint8_t target_id; uint8_t bus; uint64_t sas_address; uint32_t device_info; }; struct mptsas_phyinfo { uint16_t handle; uint8_t phy_num; uint8_t port_id; uint8_t negotiated_link_rate; uint8_t hw_link_rate; uint8_t programmed_link_rate; uint8_t sas_port_add_phy; struct mptsas_devinfo identify; struct mptsas_devinfo attached; }; struct mptsas_portinfo { uint16_t 
num_phys; struct mptsas_phyinfo *phy_info; }; struct mpt_softc { device_t dev; #if __FreeBSD_version < 500000 uint32_t mpt_islocked; int mpt_splsaved; #else struct mtx mpt_lock; int mpt_locksetup; #endif uint32_t mpt_pers_mask; uint32_t : 7, unit : 8, ready : 1, fw_uploaded : 1, msi_enable : 1, twildcard : 1, tenabled : 1, do_cfg_role : 1, raid_enabled : 1, raid_mwce_set : 1, getreqwaiter : 1, shutdwn_raid : 1, shutdwn_recovery: 1, outofbeer : 1, disabled : 1, is_spi : 1, is_sas : 1, is_fc : 1, is_1078 : 1; u_int cfg_role; u_int role; /* role: none, ini, target, both */ u_int verbose; #ifdef MPT_TEST_MULTIPATH int failure_id; #endif /* * IOC Facts */ MSG_IOC_FACTS_REPLY ioc_facts; /* * Port Facts */ MSG_PORT_FACTS_REPLY * port_facts; #define mpt_max_tgtcmds port_facts[0].MaxPostedCmdBuffers /* * Device Configuration Information */ union { struct mpt_spi_cfg { CONFIG_PAGE_SCSI_PORT_0 _port_page0; CONFIG_PAGE_SCSI_PORT_1 _port_page1; CONFIG_PAGE_SCSI_PORT_2 _port_page2; CONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16]; CONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16]; int _ini_id; uint16_t _tag_enable; uint16_t _disc_enable; } spi; #define mpt_port_page0 cfg.spi._port_page0 #define mpt_port_page1 cfg.spi._port_page1 #define mpt_port_page2 cfg.spi._port_page2 #define mpt_dev_page0 cfg.spi._dev_page0 #define mpt_dev_page1 cfg.spi._dev_page1 #define mpt_ini_id cfg.spi._ini_id #define mpt_tag_enable cfg.spi._tag_enable #define mpt_disc_enable cfg.spi._disc_enable struct mpi_fc_cfg { CONFIG_PAGE_FC_PORT_0 _port_page0; uint32_t _port_speed; #define mpt_fcport_page0 cfg.fc._port_page0 #define mpt_fcport_speed cfg.fc._port_speed } fc; } cfg; #if __FreeBSD_version >= 500000 /* * Device config information stored up for sysctl to access */ union { struct { unsigned int initiator_id; } spi; struct { char wwnn[19]; char wwpn[19]; } fc; } scinfo; #endif /* Controller Info for RAID information */ CONFIG_PAGE_IOC_2 * ioc_page2; CONFIG_PAGE_IOC_3 * ioc_page3; /* Raid Data */ struct mpt_raid_volume* 
raid_volumes; struct mpt_raid_disk* raid_disks; u_int raid_max_volumes; u_int raid_max_disks; u_int raid_page0_len; u_int raid_wakeup; u_int raid_rescan; u_int raid_resync_rate; u_int raid_mwce_setting; u_int raid_queue_depth; u_int raid_nonopt_volumes; struct proc *raid_thread; struct callout raid_timer; /* * PCI Hardware info */ int pci_msi_count; struct resource * pci_irq; /* Interrupt map for chip */ void * ih; /* Interrupt handle */ +#if 0 struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */ +#endif /* * DMA Mapping Stuff */ struct resource * pci_reg; /* Register map for chip */ bus_space_tag_t pci_st; /* Bus tag for registers */ bus_space_handle_t pci_sh; /* Bus handle for registers */ /* PIO versions of above. */ struct resource * pci_pio_reg; bus_space_tag_t pci_pio_st; bus_space_handle_t pci_pio_sh; bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */ bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */ bus_dmamap_t reply_dmap; /* DMA map for reply memory */ uint8_t *reply; /* KVA of reply memory */ bus_addr_t reply_phys; /* BusAddr of reply memory */ bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */ bus_dma_tag_t request_dmat; /* DMA tag for request memroy */ bus_dmamap_t request_dmap; /* DMA map for request memroy */ uint8_t *request; /* KVA of Request memory */ bus_addr_t request_phys; /* BusAddr of request memory */ uint32_t max_seg_cnt; /* calculated after IOC facts */ uint32_t max_cam_seg_cnt;/* calculated from MAXPHYS*/ /* * Hardware management */ u_int reset_cnt; /* * CAM && Software Management */ request_t *request_pool; struct req_queue request_free_list; struct req_queue request_pending_list; struct req_queue request_timeout_list; struct cam_sim *sim; struct cam_path *path; struct cam_sim *phydisk_sim; struct cam_path *phydisk_path; struct proc *recovery_thread; request_t *tmf_req; /* * Deferred frame acks due to resource shortage. 
*/ struct mpt_evtf_list ack_frames; /* * Target Mode Support */ uint32_t scsi_tgt_handler_id; request_t ** tgt_cmd_ptrs; request_t ** els_cmd_ptrs; /* FC only */ /* * *snork*- this is chosen to be here *just in case* somebody * forgets to point to it exactly and we index off of trt with * CAM_LUN_WILDCARD. */ tgt_resource_t trt_wildcard; /* wildcard luns */ tgt_resource_t trt[MPT_MAX_LUNS]; uint16_t tgt_cmds_allocated; uint16_t els_cmds_allocated; /* FC only */ uint16_t timeouts; /* timeout count */ uint16_t success; /* successes afer timeout */ uint16_t sequence; /* Sequence Number */ uint16_t pad3; /* Paired port in some dual adapters configurations */ struct mpt_softc * mpt2; /* FW Image management */ uint32_t fw_image_size; uint8_t *fw_image; bus_dma_tag_t fw_dmat; /* DMA tag for firmware image */ bus_dmamap_t fw_dmap; /* DMA map for firmware image */ bus_addr_t fw_phys; /* BusAddr of firmware image */ /* SAS Topology */ struct mptsas_portinfo *sas_portinfo; /* Shutdown Event Handler. */ eventhandler_tag eh; /* Userland management interface. 
*/ struct cdev *cdev; TAILQ_ENTRY(mpt_softc) links; }; static __inline void mpt_assign_serno(struct mpt_softc *, request_t *); static __inline void mpt_assign_serno(struct mpt_softc *mpt, request_t *req) { if ((req->serno = mpt->sequence++) == 0) { req->serno = mpt->sequence++; } } /***************************** Locking Primitives *****************************/ #if __FreeBSD_version < 500000 #define MPT_IFLAGS INTR_TYPE_CAM #define MPT_LOCK(mpt) mpt_lockspl(mpt) #define MPT_UNLOCK(mpt) mpt_unlockspl(mpt) #define MPT_OWNED(mpt) mpt->mpt_islocked #define MPT_LOCK_ASSERT(mpt) #define MPTLOCK_2_CAMLOCK MPT_UNLOCK #define CAMLOCK_2_MPTLOCK MPT_LOCK #define MPT_LOCK_SETUP(mpt) #define MPT_LOCK_DESTROY(mpt) static __inline void mpt_lockspl(struct mpt_softc *mpt); static __inline void mpt_unlockspl(struct mpt_softc *mpt); static __inline void mpt_lockspl(struct mpt_softc *mpt) { int s; s = splcam(); if (mpt->mpt_islocked++ == 0) { mpt->mpt_splsaved = s; } else { splx(s); panic("Recursed lock with mask: 0x%x", s); } } static __inline void mpt_unlockspl(struct mpt_softc *mpt) { if (mpt->mpt_islocked) { if (--mpt->mpt_islocked == 0) { splx(mpt->mpt_splsaved); } } else panic("Negative lock count"); } static __inline int mpt_sleep(struct mpt_softc *mpt, void *ident, int priority, const char *wmesg, int timo) { int saved_cnt; int saved_spl; int error; KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep")); saved_cnt = mpt->mpt_islocked; saved_spl = mpt->mpt_splsaved; mpt->mpt_islocked = 0; error = tsleep(ident, priority, wmesg, timo); KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup")); mpt->mpt_islocked = saved_cnt; mpt->mpt_splsaved = saved_spl; return (error); } #define mpt_req_timeout(req, ticks, func, arg) \ callout_reset(&(req)->callout, (ticks), (func), (arg)); #define mpt_req_untimeout(req, func, arg) \ callout_stop(&(req)->callout) #define mpt_callout_init(mpt, c) \ callout_init(c) #define mpt_callout_drain(mpt, c) \ callout_stop(c) #else #if 1 
#define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE #define MPT_LOCK_SETUP(mpt) \ mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF); \ mpt->mpt_locksetup = 1 #define MPT_LOCK_DESTROY(mpt) \ if (mpt->mpt_locksetup) { \ mtx_destroy(&mpt->mpt_lock); \ mpt->mpt_locksetup = 0; \ } #define MPT_LOCK(mpt) mtx_lock(&(mpt)->mpt_lock) #define MPT_UNLOCK(mpt) mtx_unlock(&(mpt)->mpt_lock) #define MPT_OWNED(mpt) mtx_owned(&(mpt)->mpt_lock) #define MPT_LOCK_ASSERT(mpt) mtx_assert(&(mpt)->mpt_lock, MA_OWNED) #define MPTLOCK_2_CAMLOCK(mpt) #define CAMLOCK_2_MPTLOCK(mpt) #define mpt_sleep(mpt, ident, priority, wmesg, timo) \ msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo) #define mpt_req_timeout(req, ticks, func, arg) \ callout_reset(&(req)->callout, (ticks), (func), (arg)) #define mpt_req_untimeout(req, func, arg) \ callout_stop(&(req)->callout) #define mpt_callout_init(mpt, c) \ callout_init_mtx(c, &(mpt)->mpt_lock, 0) #define mpt_callout_drain(mpt, c) \ callout_drain(c) #else #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY #define MPT_LOCK_SETUP(mpt) do { } while (0) #define MPT_LOCK_DESTROY(mpt) do { } while (0) #define MPT_LOCK_ASSERT(mpt) mtx_assert(&Giant, MA_OWNED) #define MPT_LOCK(mpt) mtx_lock(&Giant) #define MPT_UNLOCK(mpt) mtx_unlock(&Giant) #define MPTLOCK_2_CAMLOCK(mpt) #define CAMLOCK_2_MPTLOCK(mpt) #define mpt_req_timeout(req, ticks, func, arg) \ callout_reset(&(req)->callout, (ticks), (func), (arg)) #define mpt_req_untimeout(req, func, arg) \ callout_stop(&(req)->callout) #define mpt_callout_init(mpt, c) \ callout_init(c, 0) #define mpt_callout_drain(mpt, c) \ callout_drain(c) static __inline int mpt_sleep(struct mpt_softc *, void *, int, const char *, int); static __inline int mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t) { int r; r = tsleep(i, p, w, t); return (r); } #endif #endif /******************************* Register Access ******************************/ static __inline void mpt_write(struct mpt_softc *, size_t, 
uint32_t); static __inline uint32_t mpt_read(struct mpt_softc *, int); static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t); static __inline uint32_t mpt_pio_read(struct mpt_softc *, int); static __inline void mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val) { bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val); } static __inline uint32_t mpt_read(struct mpt_softc *mpt, int offset) { return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset)); } /* * Some operations (e.g. diagnostic register writes while the ARM proccessor * is disabled), must be performed using "PCI pio" operations. On non-PCI * busses, these operations likely map to normal register accesses. */ static __inline void mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val) { KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource")); bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val); } static __inline uint32_t mpt_pio_read(struct mpt_softc *mpt, int offset) { KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource")); return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset)); } /*********************** Reply Frame/Request Management ***********************/ /* Max MPT Reply we are willing to accept (must be power of 2) */ #define MPT_REPLY_SIZE 256 /* * Must be less than 16384 in order for target mode to work */ #define MPT_MAX_REQUESTS(mpt) 512 #define MPT_REQUEST_AREA 512 #define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */ #define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA) #define MPT_CONTEXT_CB_SHIFT (16) #define MPT_CBI(handle) (handle >> MPT_CONTEXT_CB_SHIFT) #define MPT_CBI_TO_HID(cbi) ((cbi) << MPT_CONTEXT_CB_SHIFT) #define MPT_CONTEXT_TO_CBI(x) \ (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1)) #define MPT_CONTEXT_REQI_MASK 0xFFFF #define MPT_CONTEXT_TO_REQI(x) ((x) & MPT_CONTEXT_REQI_MASK) /* * Convert a 32bit physical address returned from IOC to an * offset into our reply frame 
memory or the kvm address needed * to access the data. The returned address is only the low * 32 bits, so mask our base physical address accordingly. */ #define MPT_REPLY_BADDR(x) \ (x << 1) #define MPT_REPLY_OTOV(m, i) \ ((void *)(&m->reply[i])) #define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \ do { \ if (mpt->verbose > MPT_PRT_DEBUG) \ mpt_dump_reply_frame(mpt, reply_frame); \ } while(0) static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt); static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr); /* * Give the reply buffer back to the IOC after we have * finished processing it. */ static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr) { mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr); } /* Get a reply from the IOC */ static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt) { return mpt_read(mpt, MPT_OFFSET_REPLY_Q); } void mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int); /************************** Scatter Gather Management **************************/ /* MPT_RQSL- size of request frame, in bytes */ #define MPT_RQSL(mpt) (mpt->ioc_facts.RequestFrameSize << 2) /* MPT_NSGL- how many SG entries can fit in a request frame size */ #define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION)) /* MPT_NRFM- how many request frames can fit in each request alloc we make */ #define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt)) /* * MPT_NSGL_FIRST- # of SG elements that can fit after * an I/O request but still within the request frame. * Do this safely based upon SGE_IO_UNION. * * Note that the first element is *within* the SCSI request. 
*/ #define MPT_NSGL_FIRST(mpt) \ ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \ sizeof (SGE_IO_UNION)) /***************************** IOC Initialization *****************************/ int mpt_reset(struct mpt_softc *, int /*reinit*/); /****************************** Debugging ************************************/ void mpt_dump_data(struct mpt_softc *, const char *, void *, int); void mpt_dump_request(struct mpt_softc *, request_t *); enum { MPT_PRT_ALWAYS, MPT_PRT_FATAL, MPT_PRT_ERROR, MPT_PRT_WARN, MPT_PRT_INFO, MPT_PRT_NEGOTIATION, MPT_PRT_DEBUG, MPT_PRT_DEBUG1, MPT_PRT_DEBUG2, MPT_PRT_DEBUG3, MPT_PRT_TRACE, MPT_PRT_NONE=100 }; #if __FreeBSD_version > 500000 #define mpt_lprt(mpt, level, ...) \ do { \ if (level <= (mpt)->verbose) \ mpt_prt(mpt, __VA_ARGS__); \ } while (0) #if 0 #define mpt_lprtc(mpt, level, ...) \ do { \ if (level <= (mpt)->verbose) \ mpt_prtc(mpt, __VA_ARGS__); \ } while (0) #endif #else void mpt_lprt(struct mpt_softc *, int, const char *, ...) __printflike(3, 4); #if 0 void mpt_lprtc(struct mpt_softc *, int, const char *, ...) __printflike(3, 4); #endif #endif void mpt_prt(struct mpt_softc *, const char *, ...) __printflike(2, 3); void mpt_prtc(struct mpt_softc *, const char *, ...) 
__printflike(2, 3); /**************************** Target Mode Related ***************************/ static __inline int mpt_cdblen(uint8_t, int); static __inline int mpt_cdblen(uint8_t cdb0, int maxlen) { int group = cdb0 >> 5; switch (group) { case 0: return (6); case 1: return (10); case 4: case 5: return (12); default: return (16); } } #ifdef INVARIANTS static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t); static __inline request_t * mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag) { uint16_t rtg = (tag >> 18); KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d", tag)); KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array")); KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer")); return (mpt->tgt_cmd_ptrs[rtg]); } #endif static __inline int mpt_req_on_free_list(struct mpt_softc *, request_t *); static __inline int mpt_req_on_pending_list(struct mpt_softc *, request_t *); /* * Is request on freelist? */ static __inline int mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req) { request_t *lrq; TAILQ_FOREACH(lrq, &mpt->request_free_list, links) { if (lrq == req) { return (1); } } return (0); } /* * Is request on pending list? 
*/ static __inline int mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req) { request_t *lrq; TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) { if (lrq == req) { return (1); } } return (0); } #ifdef INVARIANTS static __inline void mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int); static __inline void mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int); /* * Make sure that req *is* part of one of the special lists */ static __inline void mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) { int i; for (i = 0; i < mpt->els_cmds_allocated; i++) { if (req == mpt->els_cmd_ptrs[i]) { return; } } for (i = 0; i < mpt->tgt_cmds_allocated; i++) { if (req == mpt->tgt_cmd_ptrs[i]) { return; } } panic("%s(%d): req %p:%u function %x not in els or tgt ptrs", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function); } /* * Make sure that req is *not* part of one of the special lists. */ static __inline void mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line) { int i; for (i = 0; i < mpt->els_cmds_allocated; i++) { KASSERT(req != mpt->els_cmd_ptrs[i], ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); } for (i = 0; i < mpt->tgt_cmds_allocated; i++) { KASSERT(req != mpt->tgt_cmd_ptrs[i], ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d", s, line, req, req->serno, ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i)); } } #endif /* * Task Management Types, purely for internal consumption */ typedef enum { MPT_ABORT_TASK_SET=1234, MPT_CLEAR_TASK_SET, MPT_TARGET_RESET, MPT_CLEAR_ACA, MPT_TERMINATE_TASK, MPT_NIL_TMT_VALUE=5678 } mpt_task_mgmt_t; /**************************** Unclassified Routines ***************************/ void mpt_send_cmd(struct mpt_softc *mpt, request_t *req); int mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply); int 
mpt_wait_req(struct mpt_softc *mpt, request_t *req, mpt_req_state_t state, mpt_req_state_t mask, int sleep_ok, int time_ms); void mpt_enable_ints(struct mpt_softc *mpt); void mpt_disable_ints(struct mpt_softc *mpt); int mpt_attach(struct mpt_softc *mpt); int mpt_shutdown(struct mpt_softc *mpt); int mpt_detach(struct mpt_softc *mpt); int mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd); request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok); void mpt_free_request(struct mpt_softc *mpt, request_t *req); void mpt_intr(void *arg); void mpt_check_doorbell(struct mpt_softc *mpt); void mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame); int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/, cfgparms_t *params, bus_addr_t /*addr*/, bus_size_t/*len*/, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber, uint32_t PageAddress, int ExtPageType, CONFIG_EXTENDED_PAGE_HEADER *rslt, int sleep_ok, int timeout_ms); int mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len, int sleep_ok, int timeout_ms); int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/, int /*PageNumber*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, size_t /*len*/, int /*sleep_ok*/, int /*timeout_ms*/); int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/, uint32_t /*PageAddress*/, CONFIG_PAGE_HEADER *, size_t /*len*/, int /*sleep_ok*/, int /*timeout_ms*/); static __inline int mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, PageAddress, hdr, len, sleep_ok, timeout_ms)); } static 
__inline int mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress, CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, int timeout_ms) { return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, PageAddress, hdr, len, sleep_ok, timeout_ms)); } /* mpt_debug.c functions */ void mpt_print_reply(void *vmsg); void mpt_print_db(uint32_t mb); void mpt_print_config_reply(void *vmsg); char *mpt_ioc_diag(uint32_t diag); void mpt_req_state(mpt_req_state_t state); void mpt_print_config_request(void *vmsg); void mpt_print_request(void *vmsg); void mpt_dump_sgl(SGE_IO_UNION *se, int offset); #endif /* _MPT_H_ */ Index: head/sys/dev/mpt/mpt_pci.c =================================================================== --- head/sys/dev/mpt/mpt_pci.c (revision 233402) +++ head/sys/dev/mpt/mpt_pci.c (revision 233403) @@ -1,961 +1,890 @@ /*- * PCI specific probe and attach routines for LSI Fusion Adapters * FreeBSD Version. * * Copyright (c) 2000, 2001 by Greg Ansley * Partially derived from Matt Jacob's ISP driver. * Copyright (c) 1997, 1998, 1999, 2000, 2001, 2002 by Matthew Jacob * Feral Software * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /* * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include + #if __FreeBSD_version < 700000 #define pci_msix_count(x) 0 #define pci_msi_count(x) 0 #define pci_alloc_msi(x, y) 1 #define pci_alloc_msix(x, y) 1 #define pci_release_msi(x) do { ; } while (0) #endif -#ifndef PCI_VENDOR_LSI -#define PCI_VENDOR_LSI 0x1000 -#endif +/* + * XXX it seems no other MPT driver knows about the following chips. 
+ */ -#ifndef PCI_PRODUCT_LSI_FC909 -#define PCI_PRODUCT_LSI_FC909 0x0620 +#ifndef MPI_MANUFACTPAGE_DEVICEID_FC909_FB +#define MPI_MANUFACTPAGE_DEVICEID_FC909_FB 0x0620 #endif -#ifndef PCI_PRODUCT_LSI_FC909A -#define PCI_PRODUCT_LSI_FC909A 0x0621 +#ifndef MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB +#define MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB 0x0625 #endif -#ifndef PCI_PRODUCT_LSI_FC919 -#define PCI_PRODUCT_LSI_FC919 0x0624 +#ifndef MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB +#define MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB 0x0623 #endif -#ifndef PCI_PRODUCT_LSI_FC919_LAN -#define PCI_PRODUCT_LSI_FC919_LAN 0x0625 +#ifndef MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB +#define MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB 0x0627 #endif -#ifndef PCI_PRODUCT_LSI_FC929 -#define PCI_PRODUCT_LSI_FC929 0x0622 +#ifndef MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB +#define MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB 0x0629 #endif -#ifndef PCI_PRODUCT_LSI_FC929_LAN -#define PCI_PRODUCT_LSI_FC929_LAN 0x0623 +#ifndef MPI_MANUFACTPAGE_DEVID_SAS1068A_FB +#define MPI_MANUFACTPAGE_DEVID_SAS1068A_FB 0x0055 #endif -#ifndef PCI_PRODUCT_LSI_FC929X -#define PCI_PRODUCT_LSI_FC929X 0x0626 +#ifndef MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB +#define MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB 0x007C #endif -#ifndef PCI_PRODUCT_LSI_FC929X_LAN -#define PCI_PRODUCT_LSI_FC929X_LAN 0x0627 -#endif - -#ifndef PCI_PRODUCT_LSI_FC919X -#define PCI_PRODUCT_LSI_FC919X 0x0628 -#endif - -#ifndef PCI_PRODUCT_LSI_FC919X_LAN -#define PCI_PRODUCT_LSI_FC919X_LAN 0x0629 -#endif - -#ifndef PCI_PRODUCT_LSI_FC7X04X -#define PCI_PRODUCT_LSI_FC7X04X 0x0640 -#endif - -#ifndef PCI_PRODUCT_LSI_FC646 -#define PCI_PRODUCT_LSI_FC646 0x0646 -#endif - -#ifndef PCI_PRODUCT_LSI_1030 -#define PCI_PRODUCT_LSI_1030 0x0030 -#endif - -#ifndef PCI_PRODUCT_LSI_1030ZC -#define PCI_PRODUCT_LSI_1030ZC 0x0031 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1064 -#define PCI_PRODUCT_LSI_SAS1064 0x0050 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1064A -#define PCI_PRODUCT_LSI_SAS1064A 0x005C 
-#endif - -#ifndef PCI_PRODUCT_LSI_SAS1064E -#define PCI_PRODUCT_LSI_SAS1064E 0x0056 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1066 -#define PCI_PRODUCT_LSI_SAS1066 0x005E -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1066E -#define PCI_PRODUCT_LSI_SAS1066E 0x005A -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1068 -#define PCI_PRODUCT_LSI_SAS1068 0x0054 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1068A -#define PCI_PRODUCT_LSI_SAS1068A 0x0055 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1068E -#define PCI_PRODUCT_LSI_SAS1068E 0x0058 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1078 -#define PCI_PRODUCT_LSI_SAS1078 0x0062 -#endif - -#ifndef PCI_PRODUCT_LSI_SAS1078DE -#define PCI_PRODUCT_LSI_SAS1078DE 0x007C -#endif - #ifndef PCIM_CMD_SERRESPEN #define PCIM_CMD_SERRESPEN 0x0100 #endif static int mpt_pci_probe(device_t); static int mpt_pci_attach(device_t); static void mpt_free_bus_resources(struct mpt_softc *mpt); static int mpt_pci_detach(device_t); static int mpt_pci_shutdown(device_t); static int mpt_dma_mem_alloc(struct mpt_softc *mpt); static void mpt_dma_mem_free(struct mpt_softc *mpt); -static void mpt_read_config_regs(struct mpt_softc *mpt); #if 0 +static void mpt_read_config_regs(struct mpt_softc *mpt); static void mpt_set_config_regs(struct mpt_softc *mpt); #endif static void mpt_pci_intr(void *); static device_method_t mpt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mpt_pci_probe), DEVMETHOD(device_attach, mpt_pci_attach), DEVMETHOD(device_detach, mpt_pci_detach), DEVMETHOD(device_shutdown, mpt_pci_shutdown), - { 0, 0 } + DEVMETHOD_END }; static driver_t mpt_driver = { "mpt", mpt_methods, sizeof(struct mpt_softc) }; static devclass_t mpt_devclass; -DRIVER_MODULE(mpt, pci, mpt_driver, mpt_devclass, 0, 0); +DRIVER_MODULE(mpt, pci, mpt_driver, mpt_devclass, NULL, NULL); MODULE_DEPEND(mpt, pci, 1, 1, 1); MODULE_VERSION(mpt, 1); static int mpt_pci_probe(device_t dev) { - char *desc; + const char *desc; - if (pci_get_vendor(dev) != PCI_VENDOR_LSI) { + if (pci_get_vendor(dev) != 
MPI_MANUFACTPAGE_VENDORID_LSILOGIC) return (ENXIO); - } switch (pci_get_device(dev)) { - case PCI_PRODUCT_LSI_FC909: + case MPI_MANUFACTPAGE_DEVICEID_FC909_FB: desc = "LSILogic FC909 FC Adapter"; break; - case PCI_PRODUCT_LSI_FC909A: + case MPI_MANUFACTPAGE_DEVICEID_FC909: desc = "LSILogic FC909A FC Adapter"; break; - case PCI_PRODUCT_LSI_FC919: + case MPI_MANUFACTPAGE_DEVICEID_FC919: desc = "LSILogic FC919 FC Adapter"; break; - case PCI_PRODUCT_LSI_FC919_LAN: + case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB: desc = "LSILogic FC919 LAN Adapter"; break; - case PCI_PRODUCT_LSI_FC929: + case MPI_MANUFACTPAGE_DEVICEID_FC929: desc = "Dual LSILogic FC929 FC Adapter"; break; - case PCI_PRODUCT_LSI_FC929_LAN: + case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB: desc = "Dual LSILogic FC929 LAN Adapter"; break; - case PCI_PRODUCT_LSI_FC919X: + case MPI_MANUFACTPAGE_DEVICEID_FC919X: desc = "LSILogic FC919 FC PCI-X Adapter"; break; - case PCI_PRODUCT_LSI_FC919X_LAN: + case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB: desc = "LSILogic FC919 LAN PCI-X Adapter"; break; - case PCI_PRODUCT_LSI_FC929X: + case MPI_MANUFACTPAGE_DEVICEID_FC929X: desc = "Dual LSILogic FC929X 2Gb/s FC PCI-X Adapter"; break; - case PCI_PRODUCT_LSI_FC929X_LAN: + case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB: desc = "Dual LSILogic FC929X LAN PCI-X Adapter"; break; - case PCI_PRODUCT_LSI_FC646: + case MPI_MANUFACTPAGE_DEVICEID_FC949E: desc = "Dual LSILogic FC7X04X 4Gb/s FC PCI-Express Adapter"; break; - case PCI_PRODUCT_LSI_FC7X04X: + case MPI_MANUFACTPAGE_DEVICEID_FC949X: desc = "Dual LSILogic FC7X04X 4Gb/s FC PCI-X Adapter"; break; - case PCI_PRODUCT_LSI_1030: - case PCI_PRODUCT_LSI_1030ZC: + case MPI_MANUFACTPAGE_DEVID_53C1030: + case MPI_MANUFACTPAGE_DEVID_53C1030ZC: desc = "LSILogic 1030 Ultra4 Adapter"; break; - case PCI_PRODUCT_LSI_SAS1064: - case PCI_PRODUCT_LSI_SAS1064A: - case PCI_PRODUCT_LSI_SAS1064E: - case PCI_PRODUCT_LSI_SAS1066: - case PCI_PRODUCT_LSI_SAS1066E: - case PCI_PRODUCT_LSI_SAS1068: - case 
PCI_PRODUCT_LSI_SAS1068A: - case PCI_PRODUCT_LSI_SAS1068E: - case PCI_PRODUCT_LSI_SAS1078: - case PCI_PRODUCT_LSI_SAS1078DE: + case MPI_MANUFACTPAGE_DEVID_SAS1064: + case MPI_MANUFACTPAGE_DEVID_SAS1064A: + case MPI_MANUFACTPAGE_DEVID_SAS1064E: + case MPI_MANUFACTPAGE_DEVID_SAS1066: + case MPI_MANUFACTPAGE_DEVID_SAS1066E: + case MPI_MANUFACTPAGE_DEVID_SAS1068: + case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB: + case MPI_MANUFACTPAGE_DEVID_SAS1068E: + case MPI_MANUFACTPAGE_DEVID_SAS1078: + case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB: desc = "LSILogic SAS/SATA Adapter"; break; default: return (ENXIO); } device_set_desc(dev, desc); return (0); } #if __FreeBSD_version < 500000 static void mpt_set_options(struct mpt_softc *mpt) { int bitmap; bitmap = 0; if (getenv_int("mpt_disable", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->disabled = 1; } } bitmap = 0; if (getenv_int("mpt_debug", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG; } } bitmap = 0; if (getenv_int("mpt_debug1", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG1; } } bitmap = 0; if (getenv_int("mpt_debug2", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG2; } } bitmap = 0; if (getenv_int("mpt_debug3", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG3; } } mpt->cfg_role = MPT_ROLE_DEFAULT; bitmap = 0; if (getenv_int("mpt_nil_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role = 0; } mpt->do_cfg_role = 1; } bitmap = 0; if (getenv_int("mpt_tgt_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_TARGET; } mpt->do_cfg_role = 1; } bitmap = 0; if (getenv_int("mpt_ini_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_INITIATOR; } mpt->do_cfg_role = 1; } mpt->msi_enable = 0; } #else static void mpt_set_options(struct mpt_softc *mpt) { int tval; tval = 0; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "disable", &tval) == 0 && 
tval != 0) { mpt->disabled = 1; } tval = 0; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "debug", &tval) == 0 && tval != 0) { mpt->verbose = tval; } tval = -1; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "role", &tval) == 0 && tval >= 0 && tval <= 3) { mpt->cfg_role = tval; mpt->do_cfg_role = 1; } tval = 0; mpt->msi_enable = 0; if (mpt->is_sas) mpt->msi_enable = 1; if (resource_int_value(device_get_name(mpt->dev), device_get_unit(mpt->dev), "msi_enable", &tval) == 0) { mpt->msi_enable = tval; } } #endif static void mpt_link_peer(struct mpt_softc *mpt) { struct mpt_softc *mpt2; if (mpt->unit == 0) { return; } /* * XXX: depends on probe order */ mpt2 = (struct mpt_softc *)devclass_get_softc(mpt_devclass,mpt->unit-1); if (mpt2 == NULL) { return; } if (pci_get_vendor(mpt2->dev) != pci_get_vendor(mpt->dev)) { return; } if (pci_get_device(mpt2->dev) != pci_get_device(mpt->dev)) { return; } mpt->mpt2 = mpt2; mpt2->mpt2 = mpt; if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_prt(mpt, "linking with peer (mpt%d)\n", device_get_unit(mpt2->dev)); } } static void mpt_unlink_peer(struct mpt_softc *mpt) { if (mpt->mpt2) { mpt->mpt2->mpt2 = NULL; } } static int mpt_pci_attach(device_t dev) { struct mpt_softc *mpt; int iqd; uint32_t data, cmd; int mpt_io_bar, mpt_mem_bar; - /* Allocate the softc structure */ mpt = (struct mpt_softc*)device_get_softc(dev); - if (mpt == NULL) { - device_printf(dev, "cannot allocate softc\n"); - return (ENOMEM); - } - memset(mpt, 0, sizeof(struct mpt_softc)); + switch (pci_get_device(dev)) { - case PCI_PRODUCT_LSI_FC909: - case PCI_PRODUCT_LSI_FC909A: - case PCI_PRODUCT_LSI_FC919: - case PCI_PRODUCT_LSI_FC919_LAN: - case PCI_PRODUCT_LSI_FC929: - case PCI_PRODUCT_LSI_FC929_LAN: - case PCI_PRODUCT_LSI_FC929X: - case PCI_PRODUCT_LSI_FC929X_LAN: - case PCI_PRODUCT_LSI_FC919X: - case PCI_PRODUCT_LSI_FC919X_LAN: - case PCI_PRODUCT_LSI_FC646: - case PCI_PRODUCT_LSI_FC7X04X: + case 
MPI_MANUFACTPAGE_DEVICEID_FC909_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC909: + case MPI_MANUFACTPAGE_DEVICEID_FC919: + case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC929: + case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC929X: + case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC919X: + case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC949E: + case MPI_MANUFACTPAGE_DEVICEID_FC949X: mpt->is_fc = 1; break; - case PCI_PRODUCT_LSI_SAS1078: - case PCI_PRODUCT_LSI_SAS1078DE: + case MPI_MANUFACTPAGE_DEVID_SAS1078: + case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB: mpt->is_1078 = 1; /* FALLTHROUGH */ - case PCI_PRODUCT_LSI_SAS1064: - case PCI_PRODUCT_LSI_SAS1064A: - case PCI_PRODUCT_LSI_SAS1064E: - case PCI_PRODUCT_LSI_SAS1066: - case PCI_PRODUCT_LSI_SAS1066E: - case PCI_PRODUCT_LSI_SAS1068: - case PCI_PRODUCT_LSI_SAS1068A: - case PCI_PRODUCT_LSI_SAS1068E: + case MPI_MANUFACTPAGE_DEVID_SAS1064: + case MPI_MANUFACTPAGE_DEVID_SAS1064A: + case MPI_MANUFACTPAGE_DEVID_SAS1064E: + case MPI_MANUFACTPAGE_DEVID_SAS1066: + case MPI_MANUFACTPAGE_DEVID_SAS1066E: + case MPI_MANUFACTPAGE_DEVID_SAS1068: + case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB: + case MPI_MANUFACTPAGE_DEVID_SAS1068E: mpt->is_sas = 1; break; default: mpt->is_spi = 1; break; } mpt->dev = dev; mpt->unit = device_get_unit(dev); mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT; mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT; mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT; mpt->verbose = MPT_PRT_NONE; mpt->role = MPT_ROLE_NONE; mpt->mpt_ini_id = MPT_INI_ID_NONE; #ifdef __sparc64__ if (mpt->is_spi) mpt->mpt_ini_id = OF_getscsinitid(dev); #endif mpt_set_options(mpt); if (mpt->verbose == MPT_PRT_NONE) { mpt->verbose = MPT_PRT_WARN; /* Print INFO level (if any) if bootverbose is set */ mpt->verbose += (bootverbose != 0)? 
1 : 0; } /* Make sure memory access decoders are enabled */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_MEMEN) == 0) { device_printf(dev, "Memory accesses disabled"); return (ENXIO); } /* * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set. */ cmd |= PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); /* * Make sure we've disabled the ROM. */ data = pci_read_config(dev, PCIR_BIOS, 4); data &= ~PCIM_BIOS_ENABLE; pci_write_config(dev, PCIR_BIOS, data, 4); /* * Is this part a dual? * If so, link with our partner (around yet) */ switch (pci_get_device(dev)) { - case PCI_PRODUCT_LSI_FC929: - case PCI_PRODUCT_LSI_FC929_LAN: - case PCI_PRODUCT_LSI_FC646: - case PCI_PRODUCT_LSI_FC7X04X: - case PCI_PRODUCT_LSI_1030: - case PCI_PRODUCT_LSI_1030ZC: + case MPI_MANUFACTPAGE_DEVICEID_FC929: + case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB: + case MPI_MANUFACTPAGE_DEVICEID_FC949E: + case MPI_MANUFACTPAGE_DEVICEID_FC949X: + case MPI_MANUFACTPAGE_DEVID_53C1030: + case MPI_MANUFACTPAGE_DEVID_53C1030ZC: mpt_link_peer(mpt); break; default: break; } /* * Figure out which are the I/O and MEM Bars */ data = pci_read_config(dev, PCIR_BAR(0), 4); if (PCI_BAR_IO(data)) { /* BAR0 is IO, BAR1 is memory */ mpt_io_bar = 0; mpt_mem_bar = 1; } else { /* BAR0 is memory, BAR1 is IO */ mpt_mem_bar = 0; mpt_io_bar = 1; } /* * Set up register access. PIO mode is required for * certain reset operations (but must be disabled for * some cards otherwise). 
*/ mpt_io_bar = PCIR_BAR(mpt_io_bar); mpt->pci_pio_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &mpt_io_bar, RF_ACTIVE); if (mpt->pci_pio_reg == NULL) { if (bootverbose) { device_printf(dev, "unable to map registers in PIO mode\n"); } } else { mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg); mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg); } - /* Allocate kernel virtual memory for the 9x9's Mem0 region */ mpt_mem_bar = PCIR_BAR(mpt_mem_bar); mpt->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mpt_mem_bar, RF_ACTIVE); if (mpt->pci_reg == NULL) { if (bootverbose || mpt->is_sas || mpt->pci_pio_reg == NULL) { device_printf(dev, "Unable to memory map registers.\n"); } if (mpt->is_sas || mpt->pci_pio_reg == NULL) { device_printf(dev, "Giving Up.\n"); goto bad; } if (bootverbose) { device_printf(dev, "Falling back to PIO mode.\n"); } mpt->pci_st = mpt->pci_pio_st; mpt->pci_sh = mpt->pci_pio_sh; } else { mpt->pci_st = rman_get_bustag(mpt->pci_reg); mpt->pci_sh = rman_get_bushandle(mpt->pci_reg); } /* Get a handle to the interrupt */ iqd = 0; if (mpt->msi_enable) { /* * First try to alloc an MSI-X message. If that * fails, then try to alloc an MSI message instead. */ if (pci_msix_count(dev) == 1) { mpt->pci_msi_count = 1; if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) { iqd = 1; } else { mpt->pci_msi_count = 0; } } if (iqd == 0 && pci_msi_count(dev) == 1) { mpt->pci_msi_count = 1; if (pci_alloc_msi(dev, &mpt->pci_msi_count) == 0) { iqd = 1; } else { mpt->pci_msi_count = 0; } } } mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, RF_ACTIVE | (mpt->pci_msi_count ? 
/*
 * NOTE(review): continuation of the diff chunk (the "+" tokens below are
 * patch markers).  Tail of the attach routine: allocates the interrupt
 * resource (RF_SHAREABLE only when no MSI/MSI-X message was obtained),
 * sets up the lock, disables chip interrupts, installs mpt_pci_intr,
 * allocates DMA memory, optionally disables PIO for SAS parts, runs
 * mpt_attach(), and registers a shutdown_post_sync event handler.  The
 * "+#if 0"/"+#endif" lines are the patch compiling out the
 * mpt_read_config_regs() call.  Note the "bad:" error path frees
 * everything but deliberately returns 0 "to preserve unit numbering".
 * The head of mpt_free_bus_resources() (teardown of the intr handler,
 * IRQ, MSI, and PIO BAR) follows on the same collapsed line.
 */
0 : RF_SHAREABLE)); if (mpt->pci_irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto bad; } MPT_LOCK_SETUP(mpt); /* Disable interrupts at the part */ mpt_disable_ints(mpt); /* Register the interrupt handler */ if (mpt_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, NULL, mpt_pci_intr, mpt, &mpt->ih)) { device_printf(dev, "could not setup interrupt\n"); goto bad; } /* Allocate dma memory */ if (mpt_dma_mem_alloc(mpt)) { mpt_prt(mpt, "Could not allocate DMA memory\n"); goto bad; } +#if 0 /* * Save the PCI config register values * * Hard resets are known to screw up the BAR for diagnostic * memory accesses (Mem1). * * Using Mem1 is known to make the chip stop responding to * configuration space transfers, so we need to save it now */ mpt_read_config_regs(mpt); +#endif /* * Disable PIO until we need it */ if (mpt->is_sas) { pci_disable_io(dev, SYS_RES_IOPORT); } /* Initialize the hardware */ if (mpt->disabled == 0) { if (mpt_attach(mpt) != 0) { goto bad; } } else { mpt_prt(mpt, "device disabled at user request\n"); goto bad; } mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown, dev, SHUTDOWN_PRI_DEFAULT); if (mpt->eh == NULL) { mpt_prt(mpt, "shutdown event registration failed\n"); (void) mpt_detach(mpt); goto bad; } return (0); bad: mpt_dma_mem_free(mpt); mpt_free_bus_resources(mpt); mpt_unlink_peer(mpt); MPT_LOCK_DESTROY(mpt); /* * but return zero to preserve unit numbering */ return (0); } /* * Free bus resources */ static void mpt_free_bus_resources(struct mpt_softc *mpt) { if (mpt->ih) { bus_teardown_intr(mpt->dev, mpt->pci_irq, mpt->ih); mpt->ih = NULL; } if (mpt->pci_irq) { bus_release_resource(mpt->dev, SYS_RES_IRQ, rman_get_rid(mpt->pci_irq), mpt->pci_irq); mpt->pci_irq = NULL; } if (mpt->pci_msi_count) { pci_release_msi(mpt->dev); mpt->pci_msi_count = 0; } if (mpt->pci_pio_reg) { bus_release_resource(mpt->dev, SYS_RES_IOPORT, rman_get_rid(mpt->pci_pio_reg), mpt->pci_pio_reg); mpt->pci_pio_reg = NULL; } if (mpt->pci_reg) { 
/*
 * NOTE(review): continuation of the diff chunk.  This span holds: the
 * tail of mpt_free_bus_resources() (releases the memory BAR and destroys
 * the lock); mpt_pci_detach(), which quiesces interrupts, detaches,
 * resets without reinit, frees DMA/bus/RAID resources and deregisters
 * the shutdown hook; mpt_pci_shutdown(), which forwards to
 * mpt_shutdown(); mpt_dma_mem_alloc(), which is idempotent (early return
 * 0 once reply_phys is set), allocates the zeroed request pool (separate
 * RELENG_4 path because that branch lacks M_ZERO), creates the parent
 * and reply DMA tags (32-bit addressing, two pages of reply memory), and
 * loads the reply map via mpt_map_rquest ("rquest" sic -- historical
 * name); mpt_dma_mem_free(), the matching teardown guarded against
 * double destruction; and, behind the patch-added "+#if 0", the head of
 * mpt_read_config_regs().
 * NOTE(review): the NULL checks after malloc(..., M_WAITOK) look like
 * dead code -- presumably M_WAITOK allocations cannot fail; confirm
 * against malloc(9) before removing.
 */
bus_release_resource(mpt->dev, SYS_RES_MEMORY, rman_get_rid(mpt->pci_reg), mpt->pci_reg); mpt->pci_reg = NULL; } MPT_LOCK_DESTROY(mpt); } /* * Disconnect ourselves from the system. */ static int mpt_pci_detach(device_t dev) { struct mpt_softc *mpt; mpt = (struct mpt_softc*)device_get_softc(dev); if (mpt) { mpt_disable_ints(mpt); mpt_detach(mpt); mpt_reset(mpt, /*reinit*/FALSE); mpt_dma_mem_free(mpt); mpt_free_bus_resources(mpt); mpt_raid_free_mem(mpt); if (mpt->eh != NULL) { EVENTHANDLER_DEREGISTER(shutdown_post_sync, mpt->eh); } } return(0); } /* * Disable the hardware */ static int mpt_pci_shutdown(device_t dev) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)device_get_softc(dev); if (mpt) { int r; r = mpt_shutdown(mpt); return (r); } return(0); } static int mpt_dma_mem_alloc(struct mpt_softc *mpt) { size_t len; struct mpt_map_info mi; /* Check if we already have allocated the reply memory */ if (mpt->reply_phys != 0) { return 0; } len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt); #ifdef RELENG_4 mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); } memset(mpt->request_pool, 0, len); #else mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); } #endif /* * Create a parent dma tag for this device. * * Align at byte boundaries, * Limit to 32-bit addressing for request/reply queues. 
*/ if (mpt_dma_tag_create(mpt, /*parent*/bus_get_dma_tag(mpt->dev), /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/BUS_SPACE_UNRESTRICTED, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &mpt->parent_dmat) != 0) { mpt_prt(mpt, "cannot create parent dma tag\n"); return (1); } /* Create a child tag for reply buffers */ if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, &mpt->reply_dmat) != 0) { mpt_prt(mpt, "cannot create a dma tag for replies\n"); return (1); } /* Allocate some DMA accessible memory for replies */ if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply, BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) { mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n", (u_long) (2 * PAGE_SIZE)); return (1); } mi.mpt = mpt; mi.error = 0; /* Load and lock it into "bus space" */ bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply, 2 * PAGE_SIZE, mpt_map_rquest, &mi, 0); if (mi.error) { mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n", mi.error); return (1); } mpt->reply_phys = mi.phys; return (0); } /* Deallocate memory that was allocated by mpt_dma_mem_alloc */ static void mpt_dma_mem_free(struct mpt_softc *mpt) { /* Make sure we aren't double destroying */ if (mpt->reply_dmat == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n"); return; } bus_dmamap_unload(mpt->reply_dmat, mpt->reply_dmap); bus_dmamem_free(mpt->reply_dmat, mpt->reply, mpt->reply_dmap); bus_dma_tag_destroy(mpt->reply_dmat); bus_dma_tag_destroy(mpt->parent_dmat); mpt->reply_dmat = NULL; free(mpt->request_pool, M_DEVBUF); mpt->request_pool = NULL; } +#if 0 /* Reads modifiable (via PCI transactions) config registers */ static void mpt_read_config_regs(struct mpt_softc *mpt) { mpt->pci_cfg.Command = pci_read_config(mpt->dev, 
/*
 * NOTE(review): continuation of the diff chunk.  Tail of
 * mpt_read_config_regs(): saves the PCI command register, latency
 * timer/cache-line size, the I/O BAR, both Mem0 and Mem1 BAR pairs, the
 * ROM BAR, the interrupt line, and the PMCSR at config offset 0x44 into
 * mpt->pci_cfg.  The "-#if 0" marker is the patch removing the "#if 0"
 * that previously started here (in front of mpt_set_config_regs only);
 * combined with the "+#if 0" added earlier, both routines are now
 * compiled out.  mpt_set_config_regs() writes the saved values back,
 * first logging any register that changed (MPT_CHECK macro) when
 * mpt->verbose >= MPT_PRT_DEBUG.
 */
PCIR_COMMAND, 2); mpt->pci_cfg.LatencyTimer_LineSize = pci_read_config(mpt->dev, PCIR_CACHELNSZ, 2); mpt->pci_cfg.IO_BAR = pci_read_config(mpt->dev, PCIR_BAR(0), 4); mpt->pci_cfg.Mem0_BAR[0] = pci_read_config(mpt->dev, PCIR_BAR(1), 4); mpt->pci_cfg.Mem0_BAR[1] = pci_read_config(mpt->dev, PCIR_BAR(2), 4); mpt->pci_cfg.Mem1_BAR[0] = pci_read_config(mpt->dev, PCIR_BAR(3), 4); mpt->pci_cfg.Mem1_BAR[1] = pci_read_config(mpt->dev, PCIR_BAR(4), 4); mpt->pci_cfg.ROM_BAR = pci_read_config(mpt->dev, PCIR_BIOS, 4); mpt->pci_cfg.IntLine = pci_read_config(mpt->dev, PCIR_INTLINE, 1); mpt->pci_cfg.PMCSR = pci_read_config(mpt->dev, 0x44, 4); } -#if 0 /* Sets modifiable config registers */ static void mpt_set_config_regs(struct mpt_softc *mpt) { uint32_t val; #define MPT_CHECK(reg, offset, size) \ val = pci_read_config(mpt->dev, offset, size); \ if (mpt->pci_cfg.reg != val) { \ mpt_prt(mpt, \ "Restoring " #reg " to 0x%X from 0x%X\n", \ mpt->pci_cfg.reg, val); \ } if (mpt->verbose >= MPT_PRT_DEBUG) { MPT_CHECK(Command, PCIR_COMMAND, 2); MPT_CHECK(LatencyTimer_LineSize, PCIR_CACHELNSZ, 2); MPT_CHECK(IO_BAR, PCIR_BAR(0), 4); MPT_CHECK(Mem0_BAR[0], PCIR_BAR(1), 4); MPT_CHECK(Mem0_BAR[1], PCIR_BAR(2), 4); MPT_CHECK(Mem1_BAR[0], PCIR_BAR(3), 4); MPT_CHECK(Mem1_BAR[1], PCIR_BAR(4), 4); MPT_CHECK(ROM_BAR, PCIR_BIOS, 4); MPT_CHECK(IntLine, PCIR_INTLINE, 1); MPT_CHECK(PMCSR, 0x44, 4); } #undef MPT_CHECK pci_write_config(mpt->dev, PCIR_COMMAND, mpt->pci_cfg.Command, 2); pci_write_config(mpt->dev, PCIR_CACHELNSZ, mpt->pci_cfg.LatencyTimer_LineSize, 2); pci_write_config(mpt->dev, PCIR_BAR(0), mpt->pci_cfg.IO_BAR, 4); pci_write_config(mpt->dev, PCIR_BAR(1), mpt->pci_cfg.Mem0_BAR[0], 4); pci_write_config(mpt->dev, PCIR_BAR(2), mpt->pci_cfg.Mem0_BAR[1], 4); pci_write_config(mpt->dev, PCIR_BAR(3), mpt->pci_cfg.Mem1_BAR[0], 4); pci_write_config(mpt->dev, PCIR_BAR(4), mpt->pci_cfg.Mem1_BAR[1], 4); pci_write_config(mpt->dev, PCIR_BIOS, mpt->pci_cfg.ROM_BAR, 4); pci_write_config(mpt->dev, PCIR_INTLINE, 
/*
 * NOTE(review): end of the diff chunk.  Tail of the compiled-out
 * mpt_set_config_regs() (restores IntLine and the PMCSR at 0x44), the
 * closing #endif, and mpt_pci_intr() -- the interrupt handler installed
 * at attach time, which simply takes the softc lock around mpt_intr().
 */
mpt->pci_cfg.IntLine, 1); pci_write_config(mpt->dev, 0x44, mpt->pci_cfg.PMCSR, 4); } #endif static void mpt_pci_intr(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); mpt_intr(mpt); MPT_UNLOCK(mpt); }